Dataset schema (column name, type, and value range):

    _id               string (2 to 7 chars)
    title             string (1 to 88 chars)
    partition         string (3 classes)
    text              string (75 to 19.8k chars)
    language          string (1 class)
    meta_information  dict
q30300
generate_scope_string
train
def generate_scope_string(date, region):
    """
    Generate scope string.

    :param date: Date is input from :meth:`datetime.datetime`
    :param region: Region should be set to bucket region.
    """
    formatted_date = date.strftime("%Y%m%d")
    scope = '/'.join([formatted_date, region, 's3', 'aws4_request'])
    return scope
python
{ "resource": "" }
q30301
generate_authorization_header
train
def generate_authorization_header(access_key, date, region,
                                  signed_headers, signature):
    """
    Generate authorization header.

    :param access_key: Server access key.
    :param date: Date is input from :meth:`datetime.datetime`
    :param region: Region should be set to bucket region.
    :param signed_headers: Signed headers.
    :param signature: Calculated signature.
    """
    signed_headers_string = ';'.join(signed_headers)
    credential = generate_credential_string(access_key, date, region)
    auth_header = [_SIGN_V4_ALGORITHM,
                   'Credential=' + credential + ',',
                   'SignedHeaders=' + signed_headers_string + ',',
                   'Signature=' + signature]
    return ' '.join(auth_header)
python
{ "resource": "" }
q30302
parse_multipart_upload_result
train
def parse_multipart_upload_result(data):
    """
    Parser for complete multipart upload response.

    :param data: Response data for complete multipart upload.
    :return: :class:`MultipartUploadResult <MultipartUploadResult>`.
    """
    root = S3Element.fromstring('CompleteMultipartUploadResult', data)
    return MultipartUploadResult(
        root.get_child_text('Bucket'),
        root.get_child_text('Key'),
        root.get_child_text('Location'),
        root.get_etag_elem()
    )
python
{ "resource": "" }
q30303
parse_copy_object
train
def parse_copy_object(bucket_name, object_name, data):
    """
    Parser for copy object response.

    :param bucket_name: Bucket name of the copied object.
    :param object_name: Object name of the copied object.
    :param data: Response data for copy object.
    :return: :class:`CopyObjectResult <CopyObjectResult>`
    """
    root = S3Element.fromstring('CopyObjectResult', data)
    return CopyObjectResult(
        bucket_name, object_name,
        root.get_etag_elem(),
        root.get_localized_time_elem('LastModified')
    )
python
{ "resource": "" }
q30304
parse_list_buckets
train
def parse_list_buckets(data):
    """
    Parser for list buckets response.

    :param data: Response data for list buckets.
    :return: List of :class:`Bucket <Bucket>`.
    """
    root = S3Element.fromstring('ListBucketsResult', data)
    return [
        Bucket(bucket.get_child_text('Name'),
               bucket.get_localized_time_elem('CreationDate'))
        for buckets in root.findall('Buckets')
        for bucket in buckets.findall('Bucket')
    ]
python
{ "resource": "" }
q30305
_parse_objects_from_xml_elts
train
def _parse_objects_from_xml_elts(bucket_name, contents, common_prefixes):
    """Internal function that extracts objects and common prefixes from
    list_objects responses.
    """
    objects = [
        Object(bucket_name,
               content.get_child_text('Key'),
               content.get_localized_time_elem('LastModified'),
               content.get_etag_elem(strict=False),
               content.get_int_elem('Size'),
               is_dir=content.is_dir())
        for content in contents
    ]

    object_dirs = [
        Object(bucket_name, dir_elt.text(), None, '', 0, is_dir=True)
        for dirs_elt in common_prefixes
        for dir_elt in dirs_elt.findall('Prefix')
    ]

    return objects, object_dirs
python
{ "resource": "" }
q30306
parse_list_objects
train
def parse_list_objects(data, bucket_name):
    """
    Parser for list objects response.

    :param data: Response data for list objects.
    :param bucket_name: Bucket name for which the response was received.
    :return: Returns three distinct components:
        - List of :class:`Object <Object>`
        - True if the list is truncated, False otherwise.
        - Object name marker for the next request.
    """
    root = S3Element.fromstring('ListObjectResult', data)

    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    # NextMarker element need not be present.
    marker = root.get_urldecoded_elem_text('NextMarker', strict=False)
    objects, object_dirs = _parse_objects_from_xml_elts(
        bucket_name,
        root.findall('Contents'),
        root.findall('CommonPrefixes')
    )

    if is_truncated and marker is None:
        marker = objects[-1].object_name

    return objects + object_dirs, is_truncated, marker
python
{ "resource": "" }
q30307
parse_list_objects_v2
train
def parse_list_objects_v2(data, bucket_name):
    """
    Parser for list objects version 2 response.

    :param data: Response data for list objects.
    :param bucket_name: Bucket name for which the response was received.
    :return: Returns three distinct components:
        - List of :class:`Object <Object>`
        - True if the list is truncated, False otherwise.
        - Continuation token for the next request.
    """
    root = S3Element.fromstring('ListObjectV2Result', data)

    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    # NextContinuationToken may not be present.
    continuation_token = root.get_child_text('NextContinuationToken',
                                             strict=False)
    objects, object_dirs = _parse_objects_from_xml_elts(
        bucket_name,
        root.findall('Contents'),
        root.findall('CommonPrefixes')
    )

    return objects + object_dirs, is_truncated, continuation_token
python
{ "resource": "" }
q30308
parse_list_multipart_uploads
train
def parse_list_multipart_uploads(data, bucket_name):
    """
    Parser for list multipart uploads response.

    :param data: Response data for list multipart uploads.
    :param bucket_name: Bucket name for which the response was received.
    :return: Returns four distinct components:
        - List of :class:`IncompleteUpload <IncompleteUpload>`
        - True if the list is truncated, False otherwise.
        - Object name marker for the next request.
        - Upload id marker for the next request.
    """
    root = S3Element.fromstring('ListMultipartUploadsResult', data)

    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    key_marker = root.get_urldecoded_elem_text('NextKeyMarker',
                                               strict=False)
    upload_id_marker = root.get_child_text('NextUploadIdMarker',
                                           strict=False)
    uploads = [
        IncompleteUpload(bucket_name,
                         upload.get_urldecoded_elem_text('Key'),
                         upload.get_child_text('UploadId'),
                         upload.get_localized_time_elem('Initiated'))
        for upload in root.findall('Upload')
    ]

    return uploads, is_truncated, key_marker, upload_id_marker
python
{ "resource": "" }
q30309
parse_list_parts
train
def parse_list_parts(data, bucket_name, object_name, upload_id):
    """
    Parser for list parts response.

    :param data: Response data for list parts.
    :param bucket_name: Bucket name for which the response was received.
    :param object_name: Object name for which the response was received.
    :param upload_id: Upload id of object name for the active multipart
        session.
    :return: Returns three distinct components:
        - List of :class:`UploadPart <UploadPart>`.
        - True if the list is truncated, False otherwise.
        - Next part marker for the next request if the list was
          truncated.
    """
    root = S3Element.fromstring('ListPartsResult', data)

    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    part_marker = root.get_child_text('NextPartNumberMarker',
                                      strict=False)
    parts = [
        UploadPart(bucket_name, object_name, upload_id,
                   part.get_int_elem('PartNumber'),
                   part.get_etag_elem(),
                   part.get_localized_time_elem('LastModified'),
                   part.get_int_elem('Size'))
        for part in root.findall('Part')
    ]

    return parts, is_truncated, part_marker
python
{ "resource": "" }
q30310
_iso8601_to_localized_time
train
def _iso8601_to_localized_time(date_string):
    """
    Convert iso8601 date string into UTC time.

    :param date_string: iso8601 formatted date string.
    :return: :class:`datetime.datetime`
    """
    parsed_date = datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%fZ')
    localized_time = pytz.utc.localize(parsed_date)
    return localized_time
python
{ "resource": "" }
q30311
parse_get_bucket_notification
train
def parse_get_bucket_notification(data):
    """
    Parser for a get_bucket_notification response from S3.

    :param data: Body of response from get_bucket_notification.
    :return: Returns bucket notification configuration
    """
    root = S3Element.fromstring('GetBucketNotificationResult', data)

    notifications = _parse_add_notifying_service_config(
        root, {},
        'TopicConfigurations', 'TopicConfiguration'
    )
    notifications = _parse_add_notifying_service_config(
        root, notifications,
        'QueueConfigurations', 'QueueConfiguration'
    )
    notifications = _parse_add_notifying_service_config(
        root, notifications,
        'CloudFunctionConfigurations', 'CloudFunctionConfiguration'
    )

    return notifications
python
{ "resource": "" }
q30312
parse_multi_object_delete_response
train
def parse_multi_object_delete_response(data):
    """Parser for Multi-Object Delete API response.

    :param data: XML response body content from service.

    :return: Returns list of error objects for each delete object that
        had an error.
    """
    root = S3Element.fromstring('MultiObjectDeleteResult', data)
    return [
        MultiDeleteError(errtag.get_child_text('Key'),
                         errtag.get_child_text('Code'),
                         errtag.get_child_text('Message'))
        for errtag in root.findall('Error')
    ]
python
{ "resource": "" }
q30313
S3Element.fromstring
train
def fromstring(cls, root_name, data):
    """Initialize S3Element from name and XML string data.

    :param root_name: Name for XML data. Used in XML errors.
    :param data: string data to be parsed.
    :return: Returns an S3Element.
    """
    try:
        return cls(root_name, cElementTree.fromstring(data))
    except _ETREE_EXCEPTIONS as error:
        # Format the exception itself; `error.message` does not exist
        # on Python 3 exceptions.
        raise InvalidXMLError(
            '"{}" XML is not parsable. Message: {}'.format(
                root_name, error
            )
        )
python
{ "resource": "" }
q30314
S3Element.get_child_text
train
def get_child_text(self, name, strict=True):
    """Extract text of a child element. If strict, and child element is
    not present, raises InvalidXMLError and otherwise returns None.
    """
    if strict:
        try:
            return self.element.find('s3:{}'.format(name), _S3_NS).text
        except _ETREE_EXCEPTIONS as error:
            # Format the exception itself; `error.message` does not
            # exist on Python 3 exceptions.
            raise InvalidXMLError(
                ('Invalid XML provided for "{}" - erroring tag <{}>. '
                 'Message: {}').format(self.root_name, name, error)
            )
    else:
        return self.element.findtext('s3:{}'.format(name), None, _S3_NS)
python
{ "resource": "" }
q30315
dump_http
train
def dump_http(method, url, request_headers, response, output_stream):
    """
    Dump request and response headers into output_stream.

    :param method: HTTP request method.
    :param url: Request URL.
    :param request_headers: Dictionary of HTTP request headers.
    :param response: HTTP response whose headers (and, for errors,
        body) are dumped.
    :param output_stream: Stream where the request is being dumped at.
    """
    # Start header.
    output_stream.write('---------START-HTTP---------\n')

    # Get parsed url.
    parsed_url = urlsplit(url)

    # Dump all request headers recursively.
    http_path = parsed_url.path
    if parsed_url.query:
        http_path = http_path + '?' + parsed_url.query

    output_stream.write('{0} {1} HTTP/1.1\n'.format(method, http_path))

    for k, v in list(request_headers.items()):
        # Compare by value, not identity.
        if k == 'authorization':
            # Redact signature header value from trace logs.
            v = re.sub(r'Signature=([0-9a-f]+)', 'Signature=*REDACTED*', v)
        output_stream.write('{0}: {1}\n'.format(k.title(), v))

    # Write a new line.
    output_stream.write('\n')

    # Write response status code.
    output_stream.write('HTTP/1.1 {0}\n'.format(response.status))

    # Dump all response headers recursively.
    for k, v in list(response.getheaders().items()):
        output_stream.write('{0}: {1}\n'.format(k.title(), v))

    # For all errors write all the available response body.
    if response.status != 200 and \
            response.status != 204 and response.status != 206:
        output_stream.write('{0}'.format(response.read()))

    # End header.
    output_stream.write('---------END-HTTP---------\n')
python
{ "resource": "" }
q30316
read_full
train
def read_full(data, size):
    """
    read_full reads exactly `size` bytes from reader. Returns the bytes
    read; fewer than `size` bytes are returned only if the stream is
    exhausted first.

    :param data: Input stream to read from.
    :param size: Number of bytes to read from `data`.
    :return: Returns :bytes:`part_data`
    """
    default_read_size = 32768  # 32KiB per read operation.
    chunk = io.BytesIO()
    chunk_size = 0
    while chunk_size < size:
        read_size = default_read_size
        if (size - chunk_size) < default_read_size:
            read_size = size - chunk_size
        current_data = data.read(read_size)
        if not current_data:
            break
        chunk.write(current_data)
        chunk_size += len(current_data)
    return chunk.getvalue()
python
{ "resource": "" }
q30317
get_target_url
train
def get_target_url(endpoint_url, bucket_name=None, object_name=None,
                   bucket_region='us-east-1', query=None):
    """
    Construct final target url.

    :param endpoint_url: Target endpoint url where request is served to.
    :param bucket_name: Bucket component for the target url.
    :param object_name: Object component for the target url.
    :param bucket_region: Bucket region for the target url.
    :param query: Query parameters as a *dict* for the target url.
    :return: Returns final target url as *str*.
    """
    # New url
    url = None

    # Parse url
    parsed_url = urlsplit(endpoint_url)

    # Get new host, scheme.
    scheme = parsed_url.scheme
    host = parsed_url.netloc
    # Strip ports 80/443 since curl & browsers do not
    # send them in the Host header.
    if (scheme == 'http' and parsed_url.port == 80) or \
            (scheme == 'https' and parsed_url.port == 443):
        host = parsed_url.hostname

    if 's3.amazonaws.com' in host:
        host = get_s3_endpoint(bucket_region)
    url = scheme + '://' + host

    if bucket_name:
        # Check if the target url will use a bucket that supports
        # virtual host style addressing.
        is_virtual_host_style = is_virtual_host(endpoint_url, bucket_name)
        if is_virtual_host_style:
            url = scheme + '://' + bucket_name + '.' + host
        else:
            url = scheme + '://' + host + '/' + bucket_name

    url_components = [url]
    url_components.append('/')

    if object_name:
        object_name = encode_object_name(object_name)
        url_components.append(object_name)

    if query:
        ordered_query = collections.OrderedDict(sorted(query.items()))
        query_components = []
        for component_key in ordered_query:
            if isinstance(ordered_query[component_key], list):
                for value in ordered_query[component_key]:
                    query_components.append(component_key + '=' +
                                            queryencode(value))
            else:
                query_components.append(
                    component_key + '=' +
                    queryencode(ordered_query.get(component_key, '')))

        query_string = '&'.join(query_components)
        if query_string:
            url_components.append('?')
            url_components.append(query_string)

    return ''.join(url_components)
python
{ "resource": "" }
q30318
is_valid_endpoint
train
def is_valid_endpoint(endpoint):
    """
    Verify if endpoint is valid.

    :type endpoint: string
    :param endpoint: An endpoint. Must have at least a scheme and a
        hostname.
    :return: True if the endpoint is valid. Raise
        :exc:`InvalidEndpointError` otherwise.
    """
    try:
        if urlsplit(endpoint).scheme:
            raise InvalidEndpointError('Hostname cannot have a scheme.')

        hostname = endpoint.split(':')[0]
        if not hostname:
            raise InvalidEndpointError('Hostname cannot be empty.')

        if len(hostname) > 255:
            raise InvalidEndpointError('Hostname cannot be longer than'
                                       ' 255 characters.')

        if hostname[-1] == '.':
            hostname = hostname[:-1]

        if not _ALLOWED_HOSTNAME_REGEX.match(hostname):
            raise InvalidEndpointError('Hostname does not meet URL'
                                       ' standards.')
    except AttributeError as error:
        raise TypeError(error)

    return True
python
{ "resource": "" }
q30319
is_virtual_host
train
def is_virtual_host(endpoint_url, bucket_name):
    """
    Check to see if the ``bucket_name`` can be part of virtual host
    style.

    :param endpoint_url: Endpoint url which will be used for virtual
        host.
    :param bucket_name: Bucket name to be validated against.
    """
    is_valid_bucket_name(bucket_name)

    parsed_url = urlsplit(endpoint_url)
    # bucket_name can be valid but '.' in the hostname will fail
    # SSL certificate validation. So do not use host-style for
    # such buckets.
    if 'https' in parsed_url.scheme and '.' in bucket_name:
        return False

    for host in ['s3.amazonaws.com', 'aliyuncs.com']:
        if host in parsed_url.netloc:
            return True

    return False
python
{ "resource": "" }
q30320
is_valid_bucket_name
train
def is_valid_bucket_name(bucket_name):
    """
    Check to see if the ``bucket_name`` complies with the restricted
    DNS naming conventions necessary to allow access via
    virtual-hosting style.

    :param bucket_name: Bucket name in *str*.
    :return: True if the bucket is valid. Raise
        :exc:`InvalidBucketError` otherwise.
    """
    # Verify bucket name length.
    if len(bucket_name) < 3:
        raise InvalidBucketError('Bucket name cannot be less than'
                                 ' 3 characters.')
    if len(bucket_name) > 63:
        raise InvalidBucketError('Bucket name cannot be more than'
                                 ' 63 characters.')
    if '..' in bucket_name:
        raise InvalidBucketError('Bucket name cannot have successive'
                                 ' periods.')

    match = _VALID_BUCKETNAME_REGEX.match(bucket_name)
    if match is None or match.end() != len(bucket_name):
        raise InvalidBucketError('Bucket name does not follow S3'
                                 ' standards. Bucket: {0}'.format(
                                     bucket_name))

    return True
python
{ "resource": "" }
q30321
is_non_empty_string
train
def is_non_empty_string(input_string):
    """
    Validate that the input is a non-empty string.

    :param input_string: Input is a *str*.
    :return: True if input is string and non empty.
        Raise :exc:`Exception` otherwise.
    """
    try:
        if not input_string.strip():
            raise ValueError()
    except AttributeError as error:
        raise TypeError(error)

    return True
python
{ "resource": "" }
q30322
is_valid_policy_type
train
def is_valid_policy_type(policy):
    """
    Validate if policy is type str

    :param policy: S3 style Bucket policy.
    :return: True if policy parameter is of a valid type, 'string'.
        Raise :exc:`TypeError` otherwise.
    """
    if _is_py3:
        string_type = str,
    elif _is_py2:
        string_type = basestring

    if not isinstance(policy, string_type):
        raise TypeError('policy can only be of type str')

    is_non_empty_string(policy)

    return True
python
{ "resource": "" }
q30323
is_valid_sse_object
train
def is_valid_sse_object(sse):
    """
    Validate the SSE object and type.

    :param sse: SSE object defined.
    """
    if sse and sse.type() not in ("SSE-C", "SSE-KMS", "SSE-S3"):
        raise InvalidArgumentError(
            "unsupported type of sse argument in put_object")
python
{ "resource": "" }
q30324
optimal_part_info
train
def optimal_part_info(length, part_size):
    """
    Calculate optimal part size for multipart uploads.

    :param length: Input length to calculate part size of.
    :param part_size: Caller-configured part size; honored when it
        differs from the minimum part size.
    :return: Total parts count, part size, and last part size.
    """
    # If object size is '-1', set it to the 5TiB maximum.
    if length == -1:
        length = MAX_MULTIPART_OBJECT_SIZE
    if length > MAX_MULTIPART_OBJECT_SIZE:
        raise InvalidArgumentError('Input content size is bigger '
                                   ' than allowed maximum of 5TiB.')

    # Honor the user-configured part size.
    if part_size != MIN_PART_SIZE:
        part_size_float = float(part_size)
    else:
        # Use floats for part size for all calculations to avoid
        # overflows during float64 to int64 conversions.
        part_size_float = math.ceil(length / MAX_MULTIPART_COUNT)
        part_size_float = (math.ceil(part_size_float / part_size) *
                           part_size)
    # Total parts count.
    total_parts_count = int(math.ceil(length / part_size_float))
    # Part size.
    part_size = int(part_size_float)
    # Last part size.
    last_part_size = length - (total_parts_count - 1) * part_size
    return total_parts_count, part_size, last_part_size
python
{ "resource": "" }
q30325
urlencode
train
def urlencode(resource):
    """
    This implementation of urlencode supports all unicode characters.

    :param resource: Resource value to be url encoded.
    """
    if isinstance(resource, str):
        return _urlencode(resource.encode('utf-8'))

    return _urlencode(resource)
python
{ "resource": "" }
q30326
ResponseError.get_exception
train
def get_exception(self):
    """
    Gets the error exception derived from the initialization of an
    ErrorResponse object

    :return: The derived exception or ResponseError exception
    """
    exception = known_errors.get(self.code)
    if exception:
        return exception(self)
    else:
        return self
python
{ "resource": "" }
q30327
ResponseError._handle_error_response
train
def _handle_error_response(self, bucket_name=None):
    """
    Sets the error response; uses the XML body if available, otherwise
    relies on HTTP headers.
    """
    if not self._response.data:
        self._set_error_response_without_body(bucket_name)
    else:
        self._set_error_response_with_body(bucket_name)
python
{ "resource": "" }
q30328
ResponseError._set_error_response_without_body
train
def _set_error_response_without_body(self, bucket_name=None):
    """
    Sets all the error response fields from response headers.
    """
    if self._response.status == 404:
        if bucket_name:
            if self.object_name:
                self.code = 'NoSuchKey'
                self.message = self._response.reason
            else:
                self.code = 'NoSuchBucket'
                self.message = self._response.reason
    elif self._response.status == 409:
        self.code = 'Conflict'
        self.message = 'The bucket you tried to delete is not empty.'
    elif self._response.status == 403:
        self.code = 'AccessDenied'
        self.message = self._response.reason
    elif self._response.status == 400:
        self.code = 'BadRequest'
        self.message = self._response.reason
    elif self._response.status == 301:
        self.code = 'PermanentRedirect'
        self.message = self._response.reason
    elif self._response.status == 307:
        self.code = 'Redirect'
        self.message = self._response.reason
    elif self._response.status in [405, 501]:
        self.code = 'MethodNotAllowed'
        self.message = self._response.reason
    elif self._response.status == 500:
        self.code = 'InternalError'
        self.message = 'Internal Server Error.'
    else:
        self.code = 'UnknownException'
        self.message = self._response.reason

    # Set amz headers.
    self._set_amz_headers()
python
{ "resource": "" }
q30329
Minio.set_app_info
train
def set_app_info(self, app_name, app_version):
    """
    Sets your application name and version to default user agent in
    the following format.

        MinIO (OS; ARCH) LIB/VER APP/VER

    Example:
        client.set_app_info('my_app', '1.0.2')

    :param app_name: application name.
    :param app_version: application version.
    """
    if not (app_name and app_version):
        raise ValueError('app_name and app_version cannot be empty.')

    app_info = _APP_INFO.format(app_name, app_version)
    self._user_agent = ' '.join([_DEFAULT_USER_AGENT, app_info])
python
{ "resource": "" }
q30330
Minio.make_bucket
train
def make_bucket(self, bucket_name, location='us-east-1'):
    """
    Make a new bucket on the server.

    Optionally include Location.
       ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2',
        'eu-west-1', 'eu-west-2', 'ca-central-1', 'eu-central-1',
        'sa-east-1', 'cn-north-1', 'ap-southeast-1', 'ap-southeast-2',
        'ap-northeast-1', 'ap-northeast-2']

    Examples:
        minio.make_bucket('foo')
        minio.make_bucket('foo', 'us-west-1')

    :param bucket_name: Bucket to create on server
    :param location: Location to create bucket on
    """
    is_valid_bucket_name(bucket_name)

    # Default region for all requests.
    region = 'us-east-1'
    if self._region:
        region = self._region
        # Validate that the caller-requested bucket location matches
        # the configured region; skip the check when no region is
        # configured, so non-default locations remain usable.
        if self._region != location:
            raise InvalidArgumentError(
                "Configured region {0}, requested"
                " {1}".format(self._region, location))

    method = 'PUT'

    # Set user agent once before the request.
    headers = {'User-Agent': self._user_agent}

    content = None
    if location and location != 'us-east-1':
        content = xml_marshal_bucket_constraint(location)
        headers['Content-Length'] = str(len(content))

    content_sha256_hex = get_sha256_hexdigest(content)
    if content:
        headers['Content-Md5'] = get_md5_base64digest(content)

    # In case of Amazon S3, a make bucket issued on an already
    # existing bucket would fail with an 'AuthorizationMalformed'
    # error if virtual style is used. So we default to 'path style'
    # as that is the preferred method here. The final location of
    # the 'bucket' is provided through XML LocationConstraint data
    # with the request.
    # Construct target url.
    url = self._endpoint_url + '/' + bucket_name + '/'

    # Get signature headers if any.
    headers = sign_v4(method, url, region,
                      headers, self._access_key,
                      self._secret_key,
                      self._session_token,
                      content_sha256_hex)

    response = self._http.urlopen(method, url,
                                  body=content,
                                  headers=headers)

    if response.status != 200:
        raise ResponseError(response, method, bucket_name).get_exception()

    self._set_bucket_region(bucket_name, region=location)
python
{ "resource": "" }
q30331
Minio.list_buckets
train
def list_buckets(self):
    """
    List all buckets owned by the user.

    Example:
        bucket_list = minio.list_buckets()
        for bucket in bucket_list:
            print(bucket.name, bucket.created_date)

    :return: An iterator of buckets owned by the current user.
    """
    method = 'GET'
    url = get_target_url(self._endpoint_url)
    # Set user agent once before the request.
    headers = {'User-Agent': self._user_agent}

    # default for all requests.
    region = 'us-east-1'
    # region is set then use the region.
    if self._region:
        region = self._region

    # Get signature headers if any.
    headers = sign_v4(method, url, region,
                      headers, self._access_key,
                      self._secret_key,
                      self._session_token, None)

    response = self._http.urlopen(method, url,
                                  body=None,
                                  headers=headers)

    if self._trace_output_stream:
        dump_http(method, url, headers, response,
                  self._trace_output_stream)

    if response.status != 200:
        raise ResponseError(response, method).get_exception()
    try:
        return parse_list_buckets(response.data)
    except InvalidXMLError:
        if self._endpoint_url.endswith("s3.amazonaws.com") and \
                (not self._access_key or not self._secret_key):
            raise AccessDenied(response)
        # Re-raise instead of silently returning None.
        raise
python
{ "resource": "" }
q30332
Minio.bucket_exists
train
def bucket_exists(self, bucket_name):
    """
    Check if the bucket exists and if the user has access to it.

    :param bucket_name: To test the existence and user access.
    :return: True on success.
    """
    is_valid_bucket_name(bucket_name)

    try:
        self._url_open('HEAD', bucket_name=bucket_name)
    # If the bucket has not been created yet, MinIO will return a
    # "NoSuchBucket" error.
    except NoSuchBucket:
        return False
    except ResponseError:
        raise
    return True
python
{ "resource": "" }
q30333
Minio.remove_bucket
train
def remove_bucket(self, bucket_name):
    """
    Remove a bucket.

    :param bucket_name: Bucket to remove
    """
    is_valid_bucket_name(bucket_name)
    self._url_open('DELETE', bucket_name=bucket_name)

    # Make sure to purge bucket_name from region cache.
    self._delete_bucket_region(bucket_name)
python
{ "resource": "" }
q30334
Minio.get_bucket_policy
train
def get_bucket_policy(self, bucket_name):
    """
    Get bucket policy of given bucket name.

    :param bucket_name: Bucket name.
    """
    is_valid_bucket_name(bucket_name)

    response = self._url_open("GET",
                              bucket_name=bucket_name,
                              query={"policy": ""})
    return response.data
python
{ "resource": "" }
q30335
Minio.set_bucket_policy
train
def set_bucket_policy(self, bucket_name, policy):
    """
    Set bucket policy of given bucket name.

    :param bucket_name: Bucket name.
    :param policy: Access policy/ies in string format.
    """
    is_valid_policy_type(policy)
    is_valid_bucket_name(bucket_name)

    headers = {
        'Content-Length': str(len(policy)),
        'Content-Md5': get_md5_base64digest(policy)
    }
    content_sha256_hex = get_sha256_hexdigest(policy)

    self._url_open("PUT",
                   bucket_name=bucket_name,
                   query={"policy": ""},
                   headers=headers,
                   body=policy,
                   content_sha256=content_sha256_hex)
python
{ "resource": "" }
q30336
Minio.get_bucket_notification
train
def get_bucket_notification(self, bucket_name):
    """
    Get notifications configured for the given bucket.

    :param bucket_name: Bucket name.
    """
    is_valid_bucket_name(bucket_name)

    response = self._url_open(
        "GET",
        bucket_name=bucket_name,
        query={"notification": ""},
    )
    data = response.data.decode('utf-8')
    return parse_get_bucket_notification(data)
python
{ "resource": "" }
q30337
Minio.set_bucket_notification
train
def set_bucket_notification(self, bucket_name, notifications):
    """
    Set the given notifications on the bucket.

    :param bucket_name: Bucket name.
    :param notifications: Notifications structure
    """
    is_valid_bucket_name(bucket_name)
    is_valid_bucket_notification_config(notifications)

    content = xml_marshal_bucket_notifications(notifications)
    headers = {
        'Content-Length': str(len(content)),
        'Content-Md5': get_md5_base64digest(content)
    }
    content_sha256_hex = get_sha256_hexdigest(content)
    self._url_open(
        'PUT',
        bucket_name=bucket_name,
        query={"notification": ""},
        headers=headers,
        body=content,
        content_sha256=content_sha256_hex
    )
python
{ "resource": "" }
q30338
Minio.remove_all_bucket_notification
train
def remove_all_bucket_notification(self, bucket_name):
    """
    Removes all previously configured bucket notification configs; this
    call disables event notifications on a bucket. This operation
    cannot be undone; to set notifications again you should use
    ``set_bucket_notification``.

    :param bucket_name: Bucket name.
    """
    is_valid_bucket_name(bucket_name)

    content_bytes = xml_marshal_bucket_notifications({})
    headers = {
        'Content-Length': str(len(content_bytes)),
        'Content-Md5': get_md5_base64digest(content_bytes)
    }
    content_sha256_hex = get_sha256_hexdigest(content_bytes)
    self._url_open(
        'PUT',
        bucket_name=bucket_name,
        query={"notification": ""},
        headers=headers,
        body=content_bytes,
        content_sha256=content_sha256_hex
    )
python
{ "resource": "" }
q30339
Minio.listen_bucket_notification
train
def listen_bucket_notification(self, bucket_name, prefix='', suffix='',
                               events=['s3:ObjectCreated:*',
                                       's3:ObjectRemoved:*',
                                       's3:ObjectAccessed:*']):
    """
    Yields new event notifications on a bucket; the caller should
    iterate to read new notifications.

    NOTE: Notification is retried in case of `JSONDecodeError`,
    otherwise the function raises an exception.

    :param bucket_name: Bucket name to listen event notifications from.
    :param prefix: Object key prefix to filter notifications for.
    :param suffix: Object key suffix to filter notifications for.
    :param events: Enables notifications for specific event types.
    """
    is_valid_bucket_name(bucket_name)

    # If someone explicitly set prefix to None convert it to empty string.
    if prefix is None:
        prefix = ''

    # If someone explicitly set suffix to None convert it to empty string.
    if suffix is None:
        suffix = ''

    url_components = urlsplit(self._endpoint_url)
    if url_components.hostname == 's3.amazonaws.com':
        raise InvalidArgumentError(
            'Listening for event notifications on a bucket is a MinIO '
            'specific extension to bucket notification API. It is not '
            'supported by Amazon S3')

    query = {
        'prefix': prefix,
        'suffix': suffix,
        'events': events,
    }
    while True:
        response = self._url_open('GET', bucket_name=bucket_name,
                                  query=query, preload_content=False)
        try:
            for line in response.stream():
                if line.strip():
                    if hasattr(line, 'decode'):
                        line = line.decode('utf-8')
                    event = json.loads(line)
                    if event['Records'] is not None:
                        yield event
        except JSONDecodeError:
            response.close()
            continue
python
{ "resource": "" }
q30340
Minio.fget_object
train
def fget_object(self, bucket_name, object_name, file_path,
                request_headers=None, sse=None):
    """
    Retrieves an object from a bucket and writes it at file_path.

    Examples:
        minio.fget_object('foo', 'bar', 'localfile')

    :param bucket_name: Bucket to read object from.
    :param object_name: Name of the object to read.
    :param file_path: Local file path to save the object.
    :param request_headers: Any additional headers to be added with GET
        request.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    stat = self.stat_object(bucket_name, object_name, sse)

    if os.path.isdir(file_path):
        raise OSError("file is a directory.")

    # Create top level directory if needed.
    top_level_dir = os.path.dirname(file_path)
    if top_level_dir:
        mkdir_p(top_level_dir)

    # Write to a temporary file "file_path.part.minio" before saving.
    file_part_path = file_path + stat.etag + '.part.minio'

    # Open file in 'write+append' mode.
    with open(file_part_path, 'ab') as file_part_data:
        # Save current file_part statinfo.
        file_statinfo = os.stat(file_part_path)

        # Get partial object.
        response = self._get_partial_object(bucket_name, object_name,
                                            offset=file_statinfo.st_size,
                                            length=0,
                                            request_headers=request_headers,
                                            sse=sse)

        # Save content_size to verify if we wrote more data.
        content_size = int(response.headers['content-length'])

        # Save total_written.
        total_written = 0
        for data in response.stream(amt=1024 * 1024):
            file_part_data.write(data)
            total_written += len(data)

        # Release the connection from the response at this point.
        response.release_conn()

        # Verify if we wrote data properly.
        if total_written < content_size:
            msg = 'Data written {0} bytes is smaller than the ' \
                  'specified size {1} bytes'.format(total_written,
                                                    content_size)
            raise InvalidSizeError(msg)
        if total_written > content_size:
            msg = 'Data written {0} bytes is in excess of the ' \
                  'specified size {1} bytes'.format(total_written,
                                                    content_size)
            raise InvalidSizeError(msg)

    # Delete any existing file first, to be compatible with Windows.
    if os.path.exists(file_path):
        os.remove(file_path)
    # Rename to the destination file path.
    os.rename(file_part_path, file_path)

    # Return the stat
    return stat
python
{ "resource": "" }
q30341
Minio.copy_object
train
def copy_object(self, bucket_name, object_name, object_source,
                conditions=None, source_sse=None, sse=None,
                metadata=None):
    """
    Copy a source object on object storage server to a new object.

    NOTE: Maximum object size supported by this API is 5GB.

    :param bucket_name: Bucket of new object.
    :param object_name: Name of new object.
    :param object_source: Source object to be copied.
    :param conditions: :class:`CopyConditions` object. Collection of
        supported CopyObject conditions.
    :param source_sse: Server-side encryption customer key for the
        source object.
    :param sse: Server-side encryption for the destination object.
    :param metadata: Any user-defined metadata to be copied along with
        destination object.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)
    is_non_empty_string(object_source)

    headers = {}

    # Preserving the user-defined metadata in headers
    if metadata is not None:
        headers = amzprefix_user_metadata(metadata)
        headers["x-amz-metadata-directive"] = "REPLACE"

    if conditions:
        for k, v in conditions.items():
            headers[k] = v

    # Source argument to copy_object can only be of type copy_SSE_C
    if source_sse:
        is_valid_source_sse_object(source_sse)
        headers.update(source_sse.marshal())

    # Destination argument to copy_object cannot be of type copy_SSE_C
    if sse:
        is_valid_sse_object(sse)
        headers.update(sse.marshal())

    headers['X-Amz-Copy-Source'] = queryencode(object_source)
    response = self._url_open('PUT',
                              bucket_name=bucket_name,
                              object_name=object_name,
                              headers=headers)

    return parse_copy_object(bucket_name, object_name, response.data)
python
{ "resource": "" }
q30342
Minio.list_objects
train
def list_objects(self, bucket_name, prefix='', recursive=False):
    """
    List objects in the given bucket.

    Examples:
        objects = minio.list_objects('foo')
        for current_object in objects:
            print(current_object)
        # hello
        # hello/
        # hello/
        # world/

        objects = minio.list_objects('foo', prefix='hello/')
        for current_object in objects:
            print(current_object)
        # hello/world/

        objects = minio.list_objects('foo', recursive=True)
        for current_object in objects:
            print(current_object)
        # hello/world/1
        # world/world/2
        # ...

        objects = minio.list_objects('foo', prefix='hello/',
                                     recursive=True)
        for current_object in objects:
            print(current_object)
        # hello/world/1
        # hello/world/2

    :param bucket_name: Bucket to list objects from
    :param prefix: String specifying objects returned must begin with
    :param recursive: If yes, returns all objects for a specified prefix
    :return: An iterator of objects in alphabetical order.
    """
    is_valid_bucket_name(bucket_name)

    # If someone explicitly set prefix to None convert it to empty string.
    if prefix is None:
        prefix = ''

    method = 'GET'

    # Initialize query parameters.
    query = {
        'max-keys': '1000',
        'prefix': prefix
    }
    # Delimited by default.
    if not recursive:
        query['delimiter'] = '/'

    marker = ''
    is_truncated = True
    while is_truncated:
        if marker:
            query['marker'] = marker
        headers = {}
        response = self._url_open(method,
                                  bucket_name=bucket_name,
                                  query=query,
                                  headers=headers)
        objects, is_truncated, marker = parse_list_objects(
            response.data, bucket_name=bucket_name)
        for obj in objects:
            yield obj
python
{ "resource": "" }
q30343
Minio.list_objects_v2
train
def list_objects_v2(self, bucket_name, prefix='', recursive=False):
    """
    List objects in the given bucket using the List objects V2 API.

    Examples:
        objects = minio.list_objects_v2('foo')
        for current_object in objects:
            print(current_object)
        # hello
        # hello/
        # hello/
        # world/

        objects = minio.list_objects_v2('foo', prefix='hello/')
        for current_object in objects:
            print(current_object)
        # hello/world/

        objects = minio.list_objects_v2('foo', recursive=True)
        for current_object in objects:
            print(current_object)
        # hello/world/1
        # world/world/2
        # ...

        objects = minio.list_objects_v2('foo', prefix='hello/',
                                        recursive=True)
        for current_object in objects:
            print(current_object)
        # hello/world/1
        # hello/world/2

    :param bucket_name: Bucket to list objects from
    :param prefix: String specifying objects returned must begin with
    :param recursive: If yes, returns all objects for a specified prefix
    :return: An iterator of objects in alphabetical order.
    """
    is_valid_bucket_name(bucket_name)

    # If someone explicitly set prefix to None convert it to empty string.
    if prefix is None:
        prefix = ''

    # Initialize query parameters.
    query = {
        'list-type': '2',
        'prefix': prefix
    }
    # Delimited by default.
    if not recursive:
        query['delimiter'] = '/'

    continuation_token = None
    is_truncated = True
    while is_truncated:
        if continuation_token is not None:
            query['continuation-token'] = continuation_token
        response = self._url_open(method='GET',
                                  bucket_name=bucket_name,
                                  query=query)
        objects, is_truncated, continuation_token = parse_list_objects_v2(
            response.data, bucket_name=bucket_name
        )
        for obj in objects:
            yield obj
python
{ "resource": "" }
q30344
Minio.stat_object
train
def stat_object(self, bucket_name, object_name, sse=None):
    """
    Check if an object exists and fetch its metadata.

    :param bucket_name: Bucket of object.
    :param object_name: Name of object
    :return: Object metadata if object exists
    """
    headers = {}
    if sse:
        is_valid_sse_c_object(sse=sse)
        headers.update(sse.marshal())

    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    response = self._url_open('HEAD', bucket_name=bucket_name,
                              object_name=object_name, headers=headers)

    etag = response.headers.get('etag', '').replace('"', '')
    size = int(response.headers.get('content-length', '0'))
    content_type = response.headers.get('content-type', '')
    last_modified = response.headers.get('last-modified')

    # Capture only custom metadata.
    custom_metadata = dict()
    for k in response.headers:
        if is_supported_header(k) or is_amz_header(k):
            custom_metadata[k] = response.headers.get(k)

    if last_modified:
        last_modified = dateutil.parser.parse(last_modified).timetuple()

    return Object(bucket_name, object_name, last_modified, etag, size,
                  content_type=content_type, metadata=custom_metadata)
python
{ "resource": "" }
q30345
Minio.remove_object
train
def remove_object(self, bucket_name, object_name):
    """
    Remove an object from the bucket.

    :param bucket_name: Bucket of object to remove
    :param object_name: Name of object to remove
    :return: None
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    # No reason to store successful response, for errors
    # relevant exceptions are thrown.
    self._url_open('DELETE', bucket_name=bucket_name,
                   object_name=object_name)
python
{ "resource": "" }
q30346
Minio._process_remove_objects_batch
train
def _process_remove_objects_batch(self, bucket_name, objects_batch):
    """
    Requester and response parser for remove_objects
    """
    # assemble request content for objects_batch
    content = xml_marshal_delete_objects(objects_batch)

    # compute headers
    headers = {
        'Content-Md5': get_md5_base64digest(content),
        'Content-Length': len(content)
    }
    query = {'delete': ''}
    content_sha256_hex = get_sha256_hexdigest(content)

    # send multi-object delete request
    response = self._url_open(
        'POST', bucket_name=bucket_name,
        headers=headers, body=content,
        query=query, content_sha256=content_sha256_hex,
    )

    # parse response to find delete errors
    return parse_multi_object_delete_response(response.data)
python
{ "resource": "" }
q30347
Minio.remove_objects
train
def remove_objects(self, bucket_name, objects_iter):
    """
    Removes multiple objects from a bucket.

    :param bucket_name: Bucket from which to remove objects
    :param objects_iter: A list, tuple or iterator that provides
        objects names to delete.
    :return: An iterator of MultiDeleteError instances for each object
        that had a delete error.
    """
    is_valid_bucket_name(bucket_name)
    if isinstance(objects_iter, basestring):
        raise TypeError(
            'objects_iter cannot be `str` or `bytes` instance. It must be '
            'a list, tuple or iterator of object names'
        )

    # turn list like objects into an iterator.
    objects_iter = itertools.chain(objects_iter)

    obj_batch = []
    exit_loop = False

    while not exit_loop:
        try:
            object_name = next(objects_iter)
            is_non_empty_string(object_name)
        except StopIteration:
            exit_loop = True

        if not exit_loop:
            obj_batch.append(object_name)

        # if we have 1000 items in the batch, or we have to exit
        # the loop, we have to make a request to delete objects.
        if len(obj_batch) == 1000 or (exit_loop and len(obj_batch) > 0):
            # send request and parse response
            errs_result = self._process_remove_objects_batch(
                bucket_name, obj_batch
            )

            # return the delete errors.
            for err_result in errs_result:
                yield err_result

            # clear batch for next set of items
            obj_batch = []
python
{ "resource": "" }
q30348
Minio.list_incomplete_uploads
train
def list_incomplete_uploads(self, bucket_name, prefix='',
                            recursive=False):
    """
    List all incomplete uploads for a given bucket.

    Examples:
        incomplete_uploads = minio.list_incomplete_uploads('foo')
        for current_upload in incomplete_uploads:
            print(current_upload)
        # hello
        # hello/
        # hello/
        # world/

        incomplete_uploads = minio.list_incomplete_uploads('foo',
                                                           prefix='hello/')
        for current_upload in incomplete_uploads:
            print(current_upload)
        # hello/world/

        incomplete_uploads = minio.list_incomplete_uploads('foo',
                                                           recursive=True)
        for current_upload in incomplete_uploads:
            print(current_upload)
        # hello/world/1
        # world/world/2
        # ...

        incomplete_uploads = minio.list_incomplete_uploads('foo',
                                                           prefix='hello/',
                                                           recursive=True)
        for current_upload in incomplete_uploads:
            print(current_upload)
        # hello/world/1
        # hello/world/2

    :param bucket_name: Bucket to list incomplete uploads
    :param prefix: String specifying objects returned must begin with.
    :param recursive: If yes, returns all incomplete uploads for a
        specified prefix.
    :return: A generator of incomplete uploads in alphabetical order.
    """
    is_valid_bucket_name(bucket_name)

    return self._list_incomplete_uploads(bucket_name, prefix, recursive)
python
{ "resource": "" }
q30349
Minio._list_incomplete_uploads
train
def _list_incomplete_uploads(self, bucket_name, prefix='',
                             recursive=False, is_aggregate_size=True):
    """
    List all previously uploaded incomplete multipart objects.

    :param bucket_name: Bucket name to list uploaded objects.
    :param prefix: String specifying objects returned must begin with.
    :param recursive: If yes, returns all incomplete objects for a
        specified prefix.
    :return: A generator of incomplete uploads in alphabetical order.
    """
    is_valid_bucket_name(bucket_name)

    # If someone explicitly set prefix to None convert it to empty string.
    if prefix is None:
        prefix = ''

    # Initialize query parameters.
    query = {
        'uploads': '',
        'max-uploads': '1000',
        'prefix': prefix
    }

    if not recursive:
        query['delimiter'] = '/'

    key_marker, upload_id_marker = '', ''
    is_truncated = True
    while is_truncated:
        if key_marker:
            query['key-marker'] = key_marker
        if upload_id_marker:
            query['upload-id-marker'] = upload_id_marker

        response = self._url_open('GET',
                                  bucket_name=bucket_name,
                                  query=query)
        (uploads, is_truncated, key_marker,
         upload_id_marker) = parse_list_multipart_uploads(response.data,
                                                          bucket_name)
        for upload in uploads:
            if is_aggregate_size:
                upload.size = self._get_total_multipart_upload_size(
                    upload.bucket_name,
                    upload.object_name,
                    upload.upload_id)
            yield upload
python
{ "resource": "" }
q30350
Minio._get_total_multipart_upload_size
train
def _get_total_multipart_upload_size(self, bucket_name, object_name,
                                     upload_id):
    """
    Get total multipart upload size.

    :param bucket_name: Bucket name to list parts for.
    :param object_name: Object name to list parts for.
    :param upload_id: Upload id of the previously uploaded object name.
    """
    return sum(
        [part.size for part in
         self._list_object_parts(bucket_name, object_name, upload_id)]
    )
python
{ "resource": "" }
q30351
Minio._list_object_parts
train
def _list_object_parts(self, bucket_name, object_name, upload_id):
    """
    List all parts.

    :param bucket_name: Bucket name to list parts for.
    :param object_name: Object name to list parts for.
    :param upload_id: Upload id of the previously uploaded object name.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)
    is_non_empty_string(upload_id)

    query = {
        'uploadId': upload_id,
        'max-parts': '1000'
    }

    is_truncated = True
    part_number_marker = ''
    while is_truncated:
        if part_number_marker:
            query['part-number-marker'] = str(part_number_marker)

        response = self._url_open('GET',
                                  bucket_name=bucket_name,
                                  object_name=object_name,
                                  query=query)

        parts, is_truncated, part_number_marker = parse_list_parts(
            response.data,
            bucket_name=bucket_name,
            object_name=object_name,
            upload_id=upload_id
        )
        for part in parts:
            yield part
python
{ "resource": "" }
q30352
Minio.remove_incomplete_upload
train
def remove_incomplete_upload(self, bucket_name, object_name):
    """
    Remove all incomplete uploads for a given bucket_name and
    object_name.

    :param bucket_name: Bucket to drop incomplete uploads
    :param object_name: Name of object to remove incomplete uploads
    :return: None
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    recursive = True
    uploads = self._list_incomplete_uploads(bucket_name, object_name,
                                            recursive,
                                            is_aggregate_size=False)
    for upload in uploads:
        if object_name == upload.object_name:
            self._remove_incomplete_upload(bucket_name, object_name,
                                           upload.upload_id)
python
{ "resource": "" }
q30353
Minio.presigned_url
train
def presigned_url(self, method, bucket_name, object_name,
                  expires=timedelta(days=7), response_headers=None,
                  request_date=None):
    """
    Presigns a method on an object and provides a url.

    Example:
        from datetime import timedelta

        presignedURL = presigned_url('GET',
                                     'bucket_name',
                                     'object_name',
                                     expires=timedelta(days=7))
        print(presignedURL)

    :param method: HTTP method to presign.
    :param bucket_name: Bucket for the presigned url.
    :param object_name: Object for which presigned url is generated.
    :param expires: Optional expires argument to specify timedelta.
        Defaults to 7 days.
    :param response_headers: Optional response_headers argument to
        specify response fields like date, size, type of file, data
        about server, etc.
    :param request_date: Optional request_date argument to specify a
        different request date. Default is current date.
    :return: Presigned url.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    if expires.total_seconds() < 1 or \
            expires.total_seconds() > _MAX_EXPIRY_TIME:
        raise InvalidArgumentError('Expires param valid values'
                                   ' are between 1 sec to'
                                   ' {0} secs'.format(_MAX_EXPIRY_TIME))

    region = self._get_bucket_region(bucket_name)
    url = get_target_url(self._endpoint_url,
                         bucket_name=bucket_name,
                         object_name=object_name,
                         bucket_region=region)

    return presign_v4(method, url,
                      self._access_key,
                      self._secret_key,
                      session_token=self._session_token,
                      region=region,
                      expires=int(expires.total_seconds()),
                      response_headers=response_headers,
                      request_date=request_date)
python
{ "resource": "" }
q30354
Minio.presigned_get_object
train
def presigned_get_object(self, bucket_name, object_name,
                         expires=timedelta(days=7),
                         response_headers=None,
                         request_date=None):
    """
    Presigns a get object request and provides a url.

    Example:
        from datetime import timedelta

        presignedURL = presigned_get_object('bucket_name',
                                            'object_name',
                                            timedelta(days=7))
        print(presignedURL)

    :param bucket_name: Bucket for the presigned url.
    :param object_name: Object for which presigned url is generated.
    :param expires: Optional expires argument to specify timedelta.
        Defaults to 7 days.
    :param response_headers: Optional response_headers argument to
        specify response fields like date, size, type of file, data
        about server, etc.
    :param request_date: Optional request_date argument to specify a
        different request date. Default is current date.
    :return: Presigned url.
    """
    return self.presigned_url('GET',
                              bucket_name,
                              object_name,
                              expires,
                              response_headers=response_headers,
                              request_date=request_date)
python
{ "resource": "" }
q30355
Minio.presigned_put_object
train
def presigned_put_object(self, bucket_name, object_name,
                         expires=timedelta(days=7)):
    """
    Presigns a put object request and provides a url.

    Example:
        from datetime import timedelta

        presignedURL = presigned_put_object('bucket_name',
                                            'object_name',
                                            timedelta(days=7))
        print(presignedURL)

    :param bucket_name: Bucket for the presigned url.
    :param object_name: Object for which presigned url is generated.
    :param expires: Optional expires argument to specify timedelta.
        Defaults to 7 days.
    :return: Presigned put object url.
    """
    return self.presigned_url('PUT',
                              bucket_name,
                              object_name,
                              expires)
python
{ "resource": "" }
q30356
Minio.presigned_post_policy
train
def presigned_post_policy(self, post_policy):
    """
    Provides a POST form data that can be used for object uploads.

    Example:
        post_policy = PostPolicy()
        post_policy.set_bucket_name('bucket_name')
        post_policy.set_key_startswith('objectPrefix/')

        expires_date = datetime.utcnow() + timedelta(days=10)
        post_policy.set_expires(expires_date)

        print(presigned_post_policy(post_policy))

    :param post_policy: Post_Policy object.
    :return: PostPolicy form dictionary to be used in curl or HTML
        forms.
    """
    post_policy.is_valid()

    date = datetime.utcnow()
    iso8601_date = date.strftime("%Y%m%dT%H%M%SZ")
    region = self._get_bucket_region(post_policy.form_data['bucket'])
    credential_string = generate_credential_string(self._access_key,
                                                   date, region)

    policy = [
        ('eq', '$x-amz-date', iso8601_date),
        ('eq', '$x-amz-algorithm', _SIGN_V4_ALGORITHM),
        ('eq', '$x-amz-credential', credential_string),
    ]
    if self._session_token:
        # `policy` is a list; use append (lists have no `add` method).
        policy.append(('eq', '$x-amz-security-token',
                       self._session_token))

    post_policy_base64 = post_policy.base64(extras=policy)
    signature = post_presign_signature(date, region,
                                       self._secret_key,
                                       post_policy_base64)
    form_data = {
        'policy': post_policy_base64,
        'x-amz-algorithm': _SIGN_V4_ALGORITHM,
        'x-amz-credential': credential_string,
        'x-amz-date': iso8601_date,
        'x-amz-signature': signature,
    }
    if self._session_token:
        form_data['x-amz-security-token'] = self._session_token

    post_policy.form_data.update(form_data)
    url_str = get_target_url(self._endpoint_url,
                             bucket_name=post_policy.form_data['bucket'],
                             bucket_region=region)
    return (url_str, post_policy.form_data)
python
{ "resource": "" }
q30357
Minio._do_put_object
train
def _do_put_object(self, bucket_name, object_name, part_data,
                   part_size, upload_id='', part_number=0,
                   metadata=None, sse=None, progress=None):
    """
    Initiate a multipart PUT operation for a part number or a single
    PUT object.

    :param bucket_name: Bucket name for the multipart request.
    :param object_name: Object name for the multipart request.
    :param part_data: Part data for the multipart request.
    :param part_size: Size of the part data.
    :param upload_id: Upload id of the multipart request [OPTIONAL].
    :param part_number: Part number of the data to be uploaded
        [OPTIONAL].
    :param metadata: Any additional metadata to be uploaded along
        with your object.
    :param progress: A progress object
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    # Accept only bytes - otherwise we need to know how to encode
    # the data to bytes before storing in the object.
    if not isinstance(part_data, bytes):
        raise ValueError('Input data must be bytes type')

    headers = {
        'Content-Length': part_size,
    }

    md5_base64 = ''
    sha256_hex = ''
    if self._is_ssl:
        md5_base64 = get_md5_base64digest(part_data)
        sha256_hex = _UNSIGNED_PAYLOAD
    else:
        sha256_hex = get_sha256_hexdigest(part_data)

    if md5_base64:
        headers['Content-Md5'] = md5_base64

    if metadata:
        headers.update(metadata)

    query = {}
    if part_number > 0 and upload_id:
        query = {
            'uploadId': upload_id,
            'partNumber': str(part_number),
        }
        # Encryption headers for multipart uploads should
        # be set only in the case of SSE-C.
        if sse and sse.type() == "SSE-C":
            headers.update(sse.marshal())
    elif sse:
        headers.update(sse.marshal())

    response = self._url_open(
        'PUT',
        bucket_name=bucket_name,
        object_name=object_name,
        query=query,
        headers=headers,
        body=io.BytesIO(part_data),
        content_sha256=sha256_hex
    )

    if progress:
        # Update the 'progress' object with uploaded 'part_size'.
        progress.update(part_size)

    return response.headers['etag'].replace('"', '')
python
{ "resource": "" }
q30358
Minio._stream_put_object
train
def _stream_put_object(self, bucket_name, object_name,
                       data, content_size,
                       metadata=None, sse=None,
                       progress=None, part_size=MIN_PART_SIZE):
    """
    Streaming multipart upload operation.

    :param bucket_name: Bucket name of the multipart upload.
    :param object_name: Object name of the multipart upload.
    :param content_size: Total size of the content to be uploaded.
    :param metadata: Any additional metadata to be uploaded along
        with your object.
    :param progress: A progress object
    :param part_size: Multipart part size
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)
    if not callable(getattr(data, 'read')):
        raise ValueError(
            'Invalid input data does not implement a callable read()'
            ' method')

    # get upload id.
    upload_id = self._new_multipart_upload(bucket_name, object_name,
                                           metadata, sse)

    # Initialize variables
    total_uploaded = 0
    uploaded_parts = {}

    # Calculate optimal part info.
    total_parts_count, part_size, last_part_size = optimal_part_info(
        content_size, part_size)

    # Instantiate a thread pool with _PARALLEL_UPLOADERS worker threads.
    pool = ThreadPool(_PARALLEL_UPLOADERS)
    pool.start_parallel()

    # Generate new parts and upload <= current_part_size until
    # part_number reaches total_parts_count calculated for the
    # given size.
    for part_number in range(1, total_parts_count + 1):
        current_part_size = (part_size if part_number < total_parts_count
                             else last_part_size)

        part_data = read_full(data, current_part_size)
        pool.add_task(self._upload_part_routine,
                      (bucket_name, object_name, upload_id, part_number,
                       part_data, sse, progress))

    try:
        upload_result = pool.result()
    except:
        # Any exception that occurs sends an abort on the
        # on-going multipart operation.
        self._remove_incomplete_upload(bucket_name, object_name,
                                       upload_id)
        raise

    # Update uploaded_parts with the part uploads result
    # and check total uploaded data.
    while not upload_result.empty():
        part_number, etag, total_read = upload_result.get()
        uploaded_parts[part_number] = UploadPart(bucket_name,
                                                 object_name,
                                                 upload_id,
                                                 part_number,
                                                 etag, None,
                                                 total_read)
        total_uploaded += total_read

    if total_uploaded != content_size:
        msg = 'Data uploaded {0} is not equal input size ' \
              '{1}'.format(total_uploaded, content_size)
        # cleanup incomplete upload upon incorrect upload
        # automatically
        self._remove_incomplete_upload(bucket_name, object_name,
                                       upload_id)
        raise InvalidSizeError(msg)

    # Complete all multipart transactions if possible.
    try:
        mpart_result = self._complete_multipart_upload(bucket_name,
                                                       object_name,
                                                       upload_id,
                                                       uploaded_parts)
    except:
        # Any exception that occurs sends an abort on the
        # on-going multipart operation.
        self._remove_incomplete_upload(bucket_name, object_name,
                                       upload_id)
        raise

    # Return etag here.
    return mpart_result.etag
python
{ "resource": "" }
q30359
Minio._remove_incomplete_upload
train
def _remove_incomplete_upload(self, bucket_name, object_name, upload_id):
    """
    Remove incomplete multipart request.

    :param bucket_name: Bucket name of the incomplete upload.
    :param object_name: Object name of incomplete upload.
    :param upload_id: Upload id of the incomplete upload.
    """
    # No reason to store successful response, for errors
    # relevant exceptions are thrown.
    self._url_open('DELETE', bucket_name=bucket_name,
                   object_name=object_name,
                   query={'uploadId': upload_id},
                   headers={})
python
{ "resource": "" }
q30360
Minio._new_multipart_upload
train
def _new_multipart_upload(self, bucket_name, object_name,
                          metadata=None, sse=None):
    """
    Initialize new multipart upload request.

    :param bucket_name: Bucket name of the new multipart request.
    :param object_name: Object name of the new multipart request.
    :param metadata: Additional new metadata for the new object.
    :return: Returns an upload id.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    headers = {}
    if metadata:
        headers.update(metadata)
    if sse:
        headers.update(sse.marshal())

    response = self._url_open('POST', bucket_name=bucket_name,
                              object_name=object_name,
                              query={'uploads': ''},
                              headers=headers)

    return parse_new_multipart_upload(response.data)
python
{ "resource": "" }
q30361
Minio._complete_multipart_upload
train
def _complete_multipart_upload(self, bucket_name, object_name,
                               upload_id, uploaded_parts):
    """
    Complete an active multipart upload request.

    :param bucket_name: Bucket name of the multipart request.
    :param object_name: Object name of the multipart request.
    :param upload_id: Upload id of the active multipart request.
    :param uploaded_parts: Key, Value dictionary of uploaded parts.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)
    is_non_empty_string(upload_id)

    # Order uploaded parts as required by S3 specification
    ordered_parts = []
    for part in sorted(uploaded_parts.keys()):
        ordered_parts.append(uploaded_parts[part])

    data = xml_marshal_complete_multipart_upload(ordered_parts)
    sha256_hex = get_sha256_hexdigest(data)
    md5_base64 = get_md5_base64digest(data)

    headers = {
        'Content-Length': len(data),
        'Content-Type': 'application/xml',
        'Content-Md5': md5_base64,
    }

    response = self._url_open('POST', bucket_name=bucket_name,
                              object_name=object_name,
                              query={'uploadId': upload_id},
                              headers=headers, body=data,
                              content_sha256=sha256_hex)

    return parse_multipart_upload_result(response.data)
python
{ "resource": "" }
q30362
Minio._get_bucket_region
train
def _get_bucket_region(self, bucket_name):
        """
        Get region based on the bucket name.

        :param bucket_name: Bucket name for which region will be fetched.
        :return: Region of bucket name.
        """

        # Region set in constructor, return right here.
        if self._region:
            return self._region

        # Fetch and cache the bucket location for Amazon S3.
        if bucket_name in self._region_map:
            return self._region_map[bucket_name]

        region = self._get_bucket_location(bucket_name)
        self._region_map[bucket_name] = region

        # Success.
        return region
python
{ "resource": "" }
q30363
Minio._get_bucket_location
train
def _get_bucket_location(self, bucket_name):
        """
        Get bucket location.

        :param bucket_name: Bucket name whose location is fetched.
        :return: Location of the bucket.
        """
        method = 'GET'
        url = self._endpoint_url + '/' + bucket_name + '?location='
        headers = {}
        # default region used for signing the location request.
        region = 'us-east-1'

        # Region was set in the constructor, use it directly.
        if self._region:
            return self._region

        # For anonymous requests no need to get bucket location.
        if self._access_key is None or self._secret_key is None:
            return 'us-east-1'

        # Get signature headers if any.
        headers = sign_v4(method, url, region,
                          headers, self._access_key,
                          self._secret_key,
                          self._session_token,
                          None)

        response = self._http.urlopen(method, url,
                                      body=None,
                                      headers=headers)

        if self._trace_output_stream:
            dump_http(method, url, headers, response,
                      self._trace_output_stream)

        if response.status != 200:
            raise ResponseError(response, method, bucket_name).get_exception()

        location = parse_location_constraint(response.data)
        # location is empty for 'US standard region'
        if not location:
            return 'us-east-1'
        # location can be 'EU' convert it to meaningful 'eu-west-1'
        if location == 'EU':
            return 'eu-west-1'
        return location
python
{ "resource": "" }
q30364
open
train
def open(data_dir=nlpir.PACKAGE_DIR, encoding=ENCODING, encoding_errors=ENCODING_ERRORS, license_code=None): """Initializes the NLPIR API. This calls the function :func:`~pynlpir.nlpir.Init`. :param str data_dir: The absolute path to the directory that has NLPIR's `Data` directory (defaults to :data:`pynlpir.nlpir.PACKAGE_DIR`). :param str encoding: The encoding that the Chinese source text will be in (defaults to ``'utf_8'``). Possible values include ``'gbk'``, ``'utf_8'``, or ``'big5'``. :param str encoding_errors: The desired encoding error handling scheme. Possible values include ``'strict'``, ``'ignore'``, and ``'replace'``. The default error handler is 'strict' meaning that encoding errors raise :class:`ValueError` (or a more codec specific subclass, such as :class:`UnicodeEncodeError`). :param str license_code: The license code that should be used when initializing NLPIR. This is generally only used by commercial users. :raises RuntimeError: The NLPIR API failed to initialize. Sometimes, NLPIR leaves an error log in the current working directory or NLPIR's ``Data`` directory that provides more detailed messages (but this isn't always the case). :raises LicenseError: The NLPIR license appears to be missing or expired. """ if license_code is None: license_code = '' global ENCODING if encoding.lower() in ('utf_8', 'utf-8', 'u8', 'utf', 'utf8'): ENCODING = 'utf_8' encoding_constant = nlpir.UTF8_CODE elif encoding.lower() in ('gbk', '936', 'cp936', 'ms936'): ENCODING = 'gbk' encoding_constant = nlpir.GBK_CODE elif encoding.lower() in ('big5', 'big5-tw', 'csbig5'): ENCODING = 'big5' encoding_constant = nlpir.BIG5_CODE else: raise ValueError("encoding must be one of 'utf_8', 'big5', or 'gbk'.") logger.debug("Initializing the NLPIR API: 'data_dir': '{}', 'encoding': " "'{}', 'license_code': '{}'".format( data_dir, encoding, license_code)) global ENCODING_ERRORS if encoding_errors not in ('strict', 'ignore', 'replace'): raise ValueError("encoding_errors must be one of 'strict', 'ignore', " "or 'replace'.") else: ENCODING_ERRORS = encoding_errors # Init in Python 3 expects bytes, not strings. if is_python3 and isinstance(data_dir, str): data_dir = _encode(data_dir) if is_python3 and isinstance(license_code, str): license_code = _encode(license_code) if not nlpir.Init(data_dir, encoding_constant, license_code): _attempt_to_raise_license_error(data_dir) raise RuntimeError("NLPIR function 'NLPIR_Init' failed.") else: logger.debug("NLPIR API initialized.")
python
{ "resource": "" }
q30365
_attempt_to_raise_license_error
train
def _attempt_to_raise_license_error(data_dir): """Raise an error if NLPIR has detected a missing or expired license. :param str data_dir: The directory containing NLPIR's `Data` directory. :raises LicenseError: The NLPIR license appears to be missing or expired. """ if isinstance(data_dir, bytes): data_dir = _decode(data_dir) data_dir = os.path.join(data_dir, 'Data') current_date = dt.date.today().strftime('%Y%m%d') timestamp = dt.datetime.today().strftime('[%Y-%m-%d %H:%M:%S]') data_files = os.listdir(data_dir) for f in data_files: if f == (current_date + '.err'): file_name = os.path.join(data_dir, f) with fopen(file_name) as error_file: for line in error_file: if not line.startswith(timestamp): continue if 'Not valid license' in line: raise LicenseError('Your license appears to have ' 'expired. Try running "pynlpir ' 'update".') elif 'Can not open License file' in line: raise LicenseError('Your license appears to be ' 'missing. Try running "pynlpir ' 'update".')
python
{ "resource": "" }
q30366
update_license_file
train
def update_license_file(data_dir):
    """Update NLPIR license file if it is out-of-date or missing.

    :param str data_dir: The NLPIR data directory that houses the license.
    :returns bool: Whether or not an update occurred.

    """
    license_file = os.path.join(data_dir, LICENSE_FILENAME)
    temp_dir = tempfile.mkdtemp()
    try:
        gh_license_filename = os.path.join(temp_dir, LICENSE_FILENAME)
        try:
            _, headers = urlretrieve(LICENSE_URL, gh_license_filename)
        except IOError as e:
            # Python 2 uses the unhelpful IOError for this. Re-raise as the
            # more appropriate URLError.
            raise URLError(e.strerror)

        with open(gh_license_filename, 'rb') as f:
            github_license = f.read()

        try:
            with open(license_file, 'rb') as f:
                current_license = f.read()
        except (IOError, OSError):
            current_license = b''

        github_digest = hashlib.sha256(github_license).hexdigest()
        current_digest = hashlib.sha256(current_license).hexdigest()

        if github_digest == current_digest:
            return False

        shutil.copyfile(gh_license_filename, license_file)
        return True
    finally:
        # Clean up the temporary directory on every exit path, not just
        # after a successful update.
        shutil.rmtree(temp_dir, ignore_errors=True)
python
{ "resource": "" }
q30367
update
train
def update(data_dir):
    """Update NLPIR license."""
    try:
        license_updated = update_license_file(data_dir)
    except URLError:
        click.secho('Error: unable to fetch newest license.', fg='red')
        sys.exit(1)
    except (IOError, OSError):
        click.secho('Error: unable to move license to data directory.',
                    fg='red')
        sys.exit(1)
    if license_updated:
        click.echo('License updated.')
    else:
        click.echo('Your license is already up-to-date.')
python
{ "resource": "" }
q30368
load_library
train
def load_library(platform, is_64bit, lib_dir=LIB_DIR): """Loads the NLPIR library appropriate for the user's system. This function is called automatically when this module is loaded. :param str platform: The platform identifier for the user's system. :param bool is_64bit: Whether or not the user's system is 64-bit. :param str lib_dir: The directory that contains the library files (defaults to :data:`LIB_DIR`). :raises RuntimeError: The user's platform is not supported by NLPIR. """ logger.debug("Loading NLPIR library file from '{}'".format(lib_dir)) if platform.startswith('win') and is_64bit: lib = os.path.join(lib_dir, 'NLPIR64') logger.debug("Using library file for 64-bit Windows.") elif platform.startswith('win'): lib = os.path.join(lib_dir, 'NLPIR32') logger.debug("Using library file for 32-bit Windows.") elif platform.startswith('linux') and is_64bit: lib = os.path.join(lib_dir, 'libNLPIR64.so') logger.debug("Using library file for 64-bit GNU/Linux.") elif platform.startswith('linux'): lib = os.path.join(lib_dir, 'libNLPIR32.so') logger.debug("Using library file for 32-bit GNU/Linux.") elif platform == 'darwin': lib = os.path.join(lib_dir, 'libNLPIRios.so') logger.debug("Using library file for OSX/iOS.") else: raise RuntimeError("Platform '{}' is not supported by NLPIR.".format( platform)) lib_nlpir = cdll.LoadLibrary(lib if is_python3 else lib.encode('utf-8')) logger.debug("NLPIR library file '{}' loaded.".format(lib)) return lib_nlpir
python
{ "resource": "" }
q30369
get_func
train
def get_func(name, argtypes=None, restype=c_int, lib=libNLPIR): """Retrieves the corresponding NLPIR function. :param str name: The name of the NLPIR function to get. :param list argtypes: A list of :mod:`ctypes` data types that correspond to the function's argument types. :param restype: A :mod:`ctypes` data type that corresponds to the function's return type (only needed if the return type isn't :class:`ctypes.c_int`). :param lib: A :class:`ctypes.CDLL` instance for the NLPIR API library where the function will be retrieved from (defaults to :data:`libNLPIR`). :returns: The exported function. It can be called like any other Python callable. """ logger.debug("Getting NLPIR API function: 'name': '{}', 'argtypes': '{}'," " 'restype': '{}'.".format(name, argtypes, restype)) func = getattr(lib, name) if argtypes is not None: func.argtypes = argtypes if restype is not c_int: func.restype = restype logger.debug("NLPIR API function '{}' retrieved.".format(name)) return func
python
{ "resource": "" }
q30370
Parser.parse
train
def parse(self, text):
        """Parses and renders a text as HTML regarding current format.
        """
        if self.format == 'markdown':
            try:
                import markdown
            except ImportError:
                raise RuntimeError(u"Looks like markdown is not installed")

            if text.startswith(u'\ufeff'):  # check for unicode BOM
                text = text[1:]

            return markdown.markdown(text, extensions=self.md_extensions)
        elif self.format == 'restructuredtext':
            try:
                from landslide.rst import html_body
            except ImportError:
                raise RuntimeError(u"Looks like docutils is not installed")

            html = html_body(text, input_encoding=self.encoding)

            # RST generates quite a lot of markup that must be removed in
            # our case
            for (pattern, replacement, mode) in self.RST_REPLACEMENTS:
                html = re.sub(re.compile(pattern, mode), replacement, html, 0)

            return html.strip()
        elif self.format == 'textile':
            try:
                import textile
            except ImportError:
                raise RuntimeError(u"Looks like textile is not installed")

            text = text.replace('\n---\n', '\n<hr />\n')

            return textile.textile(text, encoding=self.encoding)
        else:
            raise NotImplementedError(u"Unsupported format %s, cannot parse"
                                      % self.format)
python
{ "resource": "" }
q30371
CodeHighlightingMacro.descape
train
def descape(self, string, defs=None): """Decodes html entities from a given string""" if defs is None: defs = html_entities.entitydefs f = lambda m: defs[m.group(1)] if len(m.groups()) > 0 else m.group(0) return self.html_entity_re.sub(f, string)
python
{ "resource": "" }
q30372
get_path_url
train
def get_path_url(path, relative=False): """ Returns an absolute or relative path url given a path """ if relative: return os.path.relpath(path) else: return 'file://%s' % os.path.abspath(path)
python
{ "resource": "" }
q30373
html_parts
train
def html_parts(input_string, source_path=None, destination_path=None, input_encoding='unicode', doctitle=1, initial_header_level=1): """ Given an input string, returns a dictionary of HTML document parts. Dictionary keys are the names of parts, and values are Unicode strings; encoding is up to the client. Parameters: - `input_string`: A multi-line text string; required. - `source_path`: Path to the source file or object. Optional, but useful for diagnostic output (system messages). - `destination_path`: Path to the file or object which will receive the output; optional. Used for determining relative paths (stylesheets, source links, etc.). - `input_encoding`: The encoding of `input_string`. If it is an encoded 8-bit string, provide the correct encoding. If it is a Unicode string, use "unicode", the default. - `doctitle`: Disable the promotion of a lone top-level section title to document title (and subsequent section title to document subtitle promotion); enabled by default. - `initial_header_level`: The initial level for header elements (e.g. 1 for "<h1>"). """ overrides = { 'input_encoding': input_encoding, 'doctitle_xform': doctitle, 'initial_header_level': initial_header_level, 'report_level': 5 } parts = core.publish_parts( source=input_string, source_path=source_path, destination_path=destination_path, writer_name='html', settings_overrides=overrides) return parts
python
{ "resource": "" }
q30374
html_body
train
def html_body(input_string, source_path=None, destination_path=None,
              input_encoding='unicode', doctitle=1, initial_header_level=1):
    """
    Given an input string, returns an HTML fragment as a string.

    The return value is the contents of the <body> element.

    Parameters are the same as for `html_parts()`.
    """
    parts = html_parts(
        input_string=input_string, source_path=source_path,
        destination_path=destination_path,
        input_encoding=input_encoding, doctitle=doctitle,
        initial_header_level=initial_header_level)
    fragment = parts['html_body']
    return fragment
python
{ "resource": "" }
q30375
Generator.add_user_css
train
def add_user_css(self, css_list): """ Adds supplementary user css files to the presentation. The ``css_list`` arg can be either a ``list`` or a string. """ if isinstance(css_list, string_types): css_list = [css_list] for css_path in css_list: if css_path and css_path not in self.user_css: if not os.path.exists(css_path): raise IOError('%s user css file not found' % (css_path,)) with codecs.open(css_path, encoding=self.encoding) as css_file: self.user_css.append({ 'path_url': utils.get_path_url(css_path, self.relative), 'contents': css_file.read(), })
python
{ "resource": "" }
q30376
Generator.add_user_js
train
def add_user_js(self, js_list):
        """ Adds supplementary user javascript files to the presentation.
            The ``js_list`` arg can be either a ``list`` or a string.
        """
        if isinstance(js_list, string_types):
            js_list = [js_list]

        for js_path in js_list:
            if js_path and js_path not in self.user_js:
                if js_path.startswith(('http:', 'https:')):
                    self.user_js.append({
                        'path_url': js_path,
                        'contents': '',
                    })
                elif not os.path.exists(js_path):
                    raise IOError('%s user js file not found' % (js_path,))
                else:
                    with codecs.open(js_path,
                                     encoding=self.encoding) as js_file:
                        self.user_js.append({
                            'path_url': utils.get_path_url(js_path,
                                                           self.relative),
                            'contents': js_file.read(),
                        })
python
{ "resource": "" }
q30377
Generator.add_toc_entry
train
def add_toc_entry(self, title, level, slide_number): """ Adds a new entry to current presentation Table of Contents. """ self.__toc.append({'title': title, 'number': slide_number, 'level': level})
python
{ "resource": "" }
q30378
Generator.toc
train
def toc(self):
        """ Smart getter for the Table of Contents list. Rebuilds the nested
            structure from the flat entry list using a stack of the
            currently open levels.
        """
        toc = []
        stack = [toc]
        for entry in self.__toc:
            entry['sub'] = []
            # Close deeper levels until the stack matches this entry's level.
            while entry['level'] < len(stack):
                stack.pop()
            # Open intermediate levels under the last entry seen.
            while entry['level'] > len(stack):
                stack.append(stack[-1][-1]['sub'])
            stack[-1].append(entry)
        return toc
python
{ "resource": "" }
q30379
Generator.execute
train
def execute(self): """ Execute this generator regarding its current configuration. """ if self.direct: if self.file_type == 'pdf': raise IOError(u"Direct output mode is not available for PDF " "export") else: print(self.render().encode(self.encoding)) else: self.write_and_log() if self.watch: from landslide.watcher import watch self.log(u"Watching %s\n" % self.watch_dir) watch(self.watch_dir, self.write_and_log)
python
{ "resource": "" }
q30380
Generator.get_template_file
train
def get_template_file(self): """ Retrieves Jinja2 template file path. """ if os.path.exists(os.path.join(self.theme_dir, 'base.html')): return os.path.join(self.theme_dir, 'base.html') default_dir = os.path.join(THEMES_DIR, 'default') if not os.path.exists(os.path.join(default_dir, 'base.html')): raise IOError(u"Cannot find base.html in default theme") return os.path.join(default_dir, 'base.html')
python
{ "resource": "" }
q30381
Generator.fetch_contents
train
def fetch_contents(self, source):
        """ Recursively fetches contents from a single source file or from a
            directory that itself contains source files.
        """
        slides = []

        if isinstance(source, list):
            for entry in source:
                slides.extend(self.fetch_contents(entry))
        elif os.path.isdir(source):
            self.log(u"Entering %s" % source)
            entries = os.listdir(source)
            entries.sort()
            for entry in entries:
                slides.extend(self.fetch_contents(os.path.join(source,
                                                               entry)))
        else:
            try:
                parser = Parser(os.path.splitext(source)[1], self.encoding,
                                self.extensions)
            except NotImplementedError:
                return slides

            self.log(u"Adding %s (%s)" % (source, parser.format))

            try:
                with codecs.open(source, encoding=self.encoding) as file:
                    file_contents = file.read()
            except UnicodeDecodeError:
                self.log(u"Unable to decode source %s: skipping" % source,
                         'warning')
            else:
                inner_slides = re.split(r'<hr.+>',
                                        parser.parse(file_contents))
                for inner_slide in inner_slides:
                    slides.append(self.get_slide_vars(inner_slide, source))

        if not slides:
            self.log(u"Exiting %s: no contents found" % source, 'notice')

        return slides
python
{ "resource": "" }
q30382
Generator.find_theme_dir
train
def find_theme_dir(self, theme, copy_theme=False):
        """ Finds the theme directory path from its name.
        """
        if os.path.exists(theme):
            self.theme_dir = theme
        elif os.path.exists(os.path.join(THEMES_DIR, theme)):
            self.theme_dir = os.path.join(THEMES_DIR, theme)
        else:
            raise IOError(u"Theme %s not found or invalid" % theme)
        target_theme_dir = os.path.join(os.getcwd(), 'theme')
        if copy_theme or os.path.exists(target_theme_dir):
            self.log(u'Copying %s theme directory to %s'
                     % (theme, target_theme_dir))
            if not os.path.exists(target_theme_dir):
                try:
                    shutil.copytree(self.theme_dir, target_theme_dir)
                except Exception as e:
                    self.log(u"Skipped copy of theme folder: %s" % e)
            self.theme_dir = target_theme_dir
        return self.theme_dir
python
{ "resource": "" }
q30383
Generator.get_css
train
def get_css(self):
        """ Fetches and returns stylesheet file path or contents, for both
            print and screen contexts, depending if we want a standalone
            presentation or not.
        """
        css = {}

        print_css = os.path.join(self.theme_dir, 'css', 'print.css')
        if not os.path.exists(print_css):
            # Fall back to default theme
            print_css = os.path.join(THEMES_DIR, 'default', 'css',
                                     'print.css')

            if not os.path.exists(print_css):
                raise IOError(u"Cannot find css/print.css in default theme")

        with codecs.open(print_css, encoding=self.encoding) as css_file:
            css['print'] = {
                'path_url': utils.get_path_url(print_css, self.relative),
                'contents': css_file.read(),
            }

        screen_css = os.path.join(self.theme_dir, 'css', 'screen.css')
        if os.path.exists(screen_css):
            with codecs.open(screen_css, encoding=self.encoding) as css_file:
                css['screen'] = {
                    'path_url': utils.get_path_url(screen_css, self.relative),
                    'contents': css_file.read(),
                }
        else:
            self.log(u"No screen stylesheet provided in current theme",
                     'warning')

        return css
python
{ "resource": "" }
q30384
Generator.get_js
train
def get_js(self): """ Fetches and returns javascript file path or contents, depending if we want a standalone presentation or not. """ js_file = os.path.join(self.theme_dir, 'js', 'slides.js') if not os.path.exists(js_file): js_file = os.path.join(THEMES_DIR, 'default', 'js', 'slides.js') if not os.path.exists(js_file): raise IOError(u"Cannot find slides.js in default theme") with codecs.open(js_file, encoding=self.encoding) as js_file_obj: return { 'path_url': utils.get_path_url(js_file, self.relative), 'contents': js_file_obj.read(), }
python
{ "resource": "" }
q30385
Generator.get_slide_vars
train
def get_slide_vars(self, slide_src, source=None):
        """ Computes a single slide's template vars from its html source
            code. Also extracts slide information for the table of contents.
        """
        presenter_notes = None

        find = re.search(r'<h\d[^>]*>presenter notes</h\d>', slide_src,
                         re.DOTALL | re.UNICODE | re.IGNORECASE)

        if find:
            if self.presenter_notes:
                presenter_notes = slide_src[find.end():].strip()

            slide_src = slide_src[:find.start()]

        find = re.search(r'(<h(\d+?).*?>(.+?)</h\d>)\s?(.+)?', slide_src,
                         re.DOTALL | re.UNICODE)
        if not find:
            header = level = title = None
            content = slide_src.strip()
        else:
            header = find.group(1)
            level = int(find.group(2))
            title = find.group(3)
            content = find.group(4).strip() if find.group(4) \
                else find.group(4)

        slide_classes = []

        if header:
            header, _ = self.process_macros(header, source)

        if content:
            content, slide_classes = self.process_macros(content, source)

        source_dict = {}

        if source:
            source_dict = {'rel_path': source,
                           'abs_path': os.path.abspath(source)}

        if header or content:
            return {'header': header, 'title': title, 'level': level,
                    'content': content, 'classes': slide_classes,
                    'source': source_dict,
                    'presenter_notes': presenter_notes,
                    'math_output': self.math_output}
python
{ "resource": "" }
q30386
Generator.get_template_vars
train
def get_template_vars(self, slides): """ Computes template vars from slides html source code. """ try: head_title = slides[0]['title'] except (IndexError, TypeError): head_title = "Untitled Presentation" for slide_index, slide_vars in enumerate(slides): if not slide_vars: continue self.num_slides += 1 slide_number = slide_vars['number'] = self.num_slides if slide_vars['level'] and slide_vars['level'] <= TOC_MAX_LEVEL: self.add_toc_entry(slide_vars['title'], slide_vars['level'], slide_number) else: # Put something in the TOC even if it doesn't have a title or level self.add_toc_entry(u"-", 1, slide_number) return {'head_title': head_title, 'num_slides': str(self.num_slides), 'slides': slides, 'toc': self.toc, 'embed': self.embed, 'css': self.get_css(), 'js': self.get_js(), 'user_css': self.user_css, 'user_js': self.user_js, 'math_output': self.math_output}
python
{ "resource": "" }
q30387
Generator.parse_config
train
def parse_config(self, config_source): """ Parses a landslide configuration file and returns a normalized python dict. """ self.log(u"Config %s" % config_source) try: raw_config = configparser.RawConfigParser() raw_config.read(config_source) except Exception as e: raise RuntimeError(u"Invalid configuration file: %s" % e) config = {} config['source'] = raw_config.get('landslide', 'source')\ .replace('\r', '').split('\n') if raw_config.has_option('landslide', 'theme'): config['theme'] = raw_config.get('landslide', 'theme') self.log(u"Using configured theme %s" % config['theme']) if raw_config.has_option('landslide', 'destination'): config['destination'] = raw_config.get('landslide', 'destination') if raw_config.has_option('landslide', 'linenos'): config['linenos'] = raw_config.get('landslide', 'linenos') for boolopt in ('embed', 'relative', 'copy_theme'): if raw_config.has_option('landslide', boolopt): config[boolopt] = raw_config.getboolean('landslide', boolopt) if raw_config.has_option('landslide', 'extensions'): config['extensions'] = ",".join(raw_config.get('landslide', 'extensions')\ .replace('\r', '').split('\n')) if raw_config.has_option('landslide', 'css'): config['css'] = raw_config.get('landslide', 'css')\ .replace('\r', '').split('\n') if raw_config.has_option('landslide', 'js'): config['js'] = raw_config.get('landslide', 'js')\ .replace('\r', '').split('\n') return config
python
{ "resource": "" }
q30388
Generator.process_macros
train
def process_macros(self, content, source=None):
        """ Processes all registered macros and returns the transformed
            content along with any CSS classes they add.
        """
        macro_options = {'relative': self.relative, 'linenos': self.linenos}
        classes = []
        for macro_class in self.macros:
            try:
                macro = macro_class(logger=self.logger, embed=self.embed,
                                    options=macro_options)
                content, add_classes = macro.process(content, source)
                if add_classes:
                    classes += add_classes
            except Exception as e:
                self.log(u"%s processing failed in %s: %s"
                         % (macro, source, e))
        return content, classes
python
{ "resource": "" }
q30389
Generator.register_macro
train
def register_macro(self, *macros):
        """ Registers macro classes passed as method arguments.
        """
        for m in macros:
            if inspect.isclass(m) and issubclass(m, macro_module.Macro):
                self.macros.append(m)
            else:
                raise TypeError("Couldn't register macro; a macro must "
                                "inherit from macro.Macro")
python
{ "resource": "" }
q30390
Generator.render
train
def render(self): """ Returns generated html code. """ with codecs.open(self.template_file, encoding=self.encoding) as template_src: template = jinja2.Template(template_src.read()) slides = self.fetch_contents(self.source) context = self.get_template_vars(slides) html = template.render(context) if self.embed: images = re.findall(r'url\(["\']?(.*?\.(?:jpe?g|gif|png|svg)[\'"]?)\)', html, re.DOTALL | re.UNICODE) for img_url in images: img_url = img_url.replace('"', '').replace("'", '') if self.theme_dir: source = os.path.join(self.theme_dir, 'css') else: source = os.path.join(THEMES_DIR, self.theme, 'css') encoded_url = utils.encode_image_from_url(img_url, source) if encoded_url: html = html.replace(img_url, encoded_url, 1) self.log("Embedded theme image %s from theme directory %s" % (img_url, source)) else: # Missing file in theme directory. Try user_css folders found = False for css_entry in context['user_css']: directory = os.path.dirname(css_entry['path_url']) if not directory: directory = "." encoded_url = utils.encode_image_from_url(img_url, directory) if encoded_url: found = True html = html.replace(img_url, encoded_url, 1) self.log("Embedded theme image %s from directory %s" % (img_url, directory)) if not found: # Missing image file, etc... self.log(u"Failed to embed theme image %s" % img_url) return html
python
{ "resource": "" }
q30391
Generator.write
train
def write(self): """ Writes generated presentation code into the destination file. """ html = self.render() if self.file_type == 'pdf': self.write_pdf(html) else: with codecs.open(self.destination_file, 'w', encoding='utf_8') as outfile: outfile.write(html)
python
{ "resource": "" }
q30392
Generator.write_pdf
train
def write_pdf(self, html): """ Tries to write a PDF export from the command line using Prince if available. """ try: f = tempfile.NamedTemporaryFile(delete=False, suffix='.html') f.write(html.encode('utf_8', 'xmlcharrefreplace')) f.close() except Exception: raise IOError(u"Unable to create temporary file, aborting") dummy_fh = open(os.path.devnull, 'w') try: command = ["prince", f.name, "-o", self.destination_file] Popen(command, stderr=dummy_fh).communicate() except Exception: raise EnvironmentError(u"Unable to generate PDF file using " "prince. Is it installed and available?") finally: dummy_fh.close()
python
{ "resource": "" }
q30393
log
train
def log(message, type): """Log notices to stdout and errors to stderr""" (sys.stdout if type == 'notice' else sys.stderr).write(message + "\n")
python
{ "resource": "" }
q30394
run
train
def run(input_file, options): """Runs the Generator using parsed options.""" options.logger = log generator.Generator(input_file, **options.__dict__).execute()
python
{ "resource": "" }
q30395
get_subhash
train
def get_subhash(hash):
    """Get a second hash based on napiprojekt's hash.

    :param str hash: napiprojekt's hash.
    :return: the subhash.
    :rtype: str

    """
    idx = [0xe, 0x3, 0x6, 0x8, 0x2]
    mul = [2, 2, 5, 4, 3]
    add = [0, 0xd, 0x10, 0xb, 0x5]

    b = []
    for i in range(len(idx)):
        a = add[i]
        m = mul[i]
        # Pick a position in the hash, derive an offset from the digit
        # found there, then read the two hex digits at that offset.
        t = a + int(hash[idx[i]], 16)
        v = int(hash[t:t + 2], 16)
        # Keep only the last hex digit of the product.
        b.append(('%x' % (v * m))[-1])

    return ''.join(b)
python
{ "resource": "" }
q30396
get_subtitle_path
train
def get_subtitle_path(video_path, language=None, extension='.srt'): """Get the subtitle path using the `video_path` and `language`. :param str video_path: path to the video. :param language: language of the subtitle to put in the path. :type language: :class:`~babelfish.language.Language` :param str extension: extension of the subtitle. :return: path of the subtitle. :rtype: str """ subtitle_root = os.path.splitext(video_path)[0] if language: subtitle_root += '.' + str(language) return subtitle_root + extension
python
{ "resource": "" }
q30397
guess_matches
train
def guess_matches(video, guess, partial=False): """Get matches between a `video` and a `guess`. If a guess is `partial`, the absence information won't be counted as a match. :param video: the video. :type video: :class:`~subliminal.video.Video` :param guess: the guess. :type guess: dict :param bool partial: whether or not the guess is partial. :return: matches between the `video` and the `guess`. :rtype: set """ matches = set() if isinstance(video, Episode): # series if video.series and 'title' in guess and sanitize(guess['title']) == sanitize(video.series): matches.add('series') # title if video.title and 'episode_title' in guess and sanitize(guess['episode_title']) == sanitize(video.title): matches.add('title') # season if video.season and 'season' in guess and guess['season'] == video.season: matches.add('season') # episode if video.episode and 'episode' in guess and guess['episode'] == video.episode: matches.add('episode') # year if video.year and 'year' in guess and guess['year'] == video.year: matches.add('year') # count "no year" as an information if not partial and video.original_series and 'year' not in guess: matches.add('year') elif isinstance(video, Movie): # year if video.year and 'year' in guess and guess['year'] == video.year: matches.add('year') # title if video.title and 'title' in guess and sanitize(guess['title']) == sanitize(video.title): matches.add('title') # release_group if (video.release_group and 'release_group' in guess and sanitize_release_group(guess['release_group']) in get_equivalent_release_groups(sanitize_release_group(video.release_group))): matches.add('release_group') # resolution if video.resolution and 'screen_size' in guess and guess['screen_size'] == video.resolution: matches.add('resolution') # format if video.format and 'format' in guess and guess['format'].lower() == video.format.lower(): matches.add('format') # video_codec if video.video_codec and 'video_codec' in guess and guess['video_codec'] == video.video_codec: matches.add('video_codec') # audio_codec if video.audio_codec and 'audio_codec' in guess and guess['audio_codec'] == video.audio_codec: matches.add('audio_codec') return matches
python
{ "resource": "" }
q30398
Subtitle.text
train
def text(self): """Content as string If :attr:`encoding` is None, the encoding is guessed with :meth:`guess_encoding` """ if not self.content: return if self.encoding: return self.content.decode(self.encoding, errors='replace') return self.content.decode(self.guess_encoding(), errors='replace')
python
{ "resource": "" }
q30399
Subtitle.guess_encoding
train
def guess_encoding(self): """Guess encoding using the language, falling back on chardet. :return: the guessed encoding. :rtype: str """ logger.info('Guessing encoding for language %s', self.language) # always try utf-8 first encodings = ['utf-8'] # add language-specific encodings if self.language.alpha3 == 'zho': encodings.extend(['gb18030', 'big5']) elif self.language.alpha3 == 'jpn': encodings.append('shift-jis') elif self.language.alpha3 == 'ara': encodings.append('windows-1256') elif self.language.alpha3 == 'heb': encodings.append('windows-1255') elif self.language.alpha3 == 'tur': encodings.extend(['iso-8859-9', 'windows-1254']) elif self.language.alpha3 == 'pol': # Eastern European Group 1 encodings.extend(['windows-1250']) elif self.language.alpha3 == 'bul': # Eastern European Group 2 encodings.extend(['windows-1251']) else: # Western European (windows-1252) encodings.append('latin-1') # try to decode logger.debug('Trying encodings %r', encodings) for encoding in encodings: try: self.content.decode(encoding) except UnicodeDecodeError: pass else: logger.info('Guessed encoding %s', encoding) return encoding logger.warning('Could not guess encoding from language') # fallback on chardet encoding = chardet.detect(self.content)['encoding'] logger.info('Chardet found encoding %s', encoding) return encoding
python
{ "resource": "" }