code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
root = XML(xml_bytes) buckets = [] for bucket_data in root.find("Buckets"): name = bucket_data.findtext("Name") date_text = bucket_data.findtext("CreationDate") date_time = parseTime(date_text) bucket = Bucket(name, date_time) buckets.append(bucket) return buckets
def _parse_list_buckets(self, (response, xml_bytes))
Parse XML bucket list response.
2.829873
2.643715
1.070415
details = self._details( method=b"PUT", url_context=self._url_context(bucket=bucket), ) query = self._query_factory(details) return self._submit(query)
def create_bucket(self, bucket)
Create a new bucket.
9.104305
8.079567
1.126831
details = self._details( method=b"DELETE", url_context=self._url_context(bucket=bucket), ) query = self._query_factory(details) return self._submit(query)
def delete_bucket(self, bucket)
Delete a bucket. The bucket must be empty before it can be deleted.
8.576404
8.417294
1.018903
args = [] if marker is not None: args.append(("marker", marker)) if max_keys is not None: args.append(("max-keys", "%d" % (max_keys,))) if prefix is not None: args.append(("prefix", prefix)) if args: object_name = "?" + urlencode(args) else: object_name = None details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name=object_name), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_get_bucket) return d
def get_bucket(self, bucket, marker=None, max_keys=None, prefix=None)
Get a list of all the objects in a bucket. @param bucket: The name of the bucket from which to retrieve objects. @type bucket: L{unicode} @param marker: If given, indicate a position in the overall results where the results of this call should begin. The first result is the first object that sorts greater than this marker. @type marker: L{bytes} or L{NoneType} @param max_keys: If given, the maximum number of objects to return. @type max_keys: L{int} or L{NoneType} @param prefix: If given, indicate that only objects with keys beginning with this value should be returned. @type prefix: L{bytes} or L{NoneType} @return: A L{Deferred} that fires with a L{BucketListing} describing the result. @see: U{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html}
2.933688
3.007221
0.975548
details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name="?location"), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_bucket_location) return d
def get_bucket_location(self, bucket)
Get the location (region) of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will fire with the bucket's region.
7.279707
6.923078
1.051513
details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name="?lifecycle"), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_lifecycle_config) return d
def get_bucket_lifecycle(self, bucket)
Get the lifecycle configuration of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will fire with the bucket's lifecycle configuration.
7.375325
6.995645
1.054274
root = XML(xml_bytes) rules = [] for content_data in root.findall("Rule"): id = content_data.findtext("ID") prefix = content_data.findtext("Prefix") status = content_data.findtext("Status") expiration = int(content_data.findtext("Expiration/Days")) rules.append( LifecycleConfigurationRule(id, prefix, status, expiration)) return LifecycleConfiguration(rules)
def _parse_lifecycle_config(self, (response, xml_bytes))
Parse a C{LifecycleConfiguration} XML document.
3.301786
2.731583
1.208745
details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name='?website'), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_website_config) return d
def get_bucket_website_config(self, bucket)
Get the website configuration of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will fire with the bucket's website configuration.
7.257336
7.122781
1.018891
root = XML(xml_bytes) index_suffix = root.findtext("IndexDocument/Suffix") error_key = root.findtext("ErrorDocument/Key") return WebsiteConfiguration(index_suffix, error_key)
def _parse_website_config(self, (response, xml_bytes))
Parse a C{WebsiteConfiguration} XML document.
4.958977
3.858629
1.285166
details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name="?notification"), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_notification_config) return d
def get_bucket_notification_config(self, bucket)
Get the notification configuration of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will request the bucket's notification configuration.
6.886223
6.69373
1.028757
root = XML(xml_bytes) topic = root.findtext("TopicConfiguration/Topic") event = root.findtext("TopicConfiguration/Event") return NotificationConfiguration(topic, event)
def _parse_notification_config(self, (response, xml_bytes))
Parse a C{NotificationConfiguration} XML document.
5.258881
4.134511
1.271948
details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name="?versioning"), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_versioning_config) return d
def get_bucket_versioning_config(self, bucket)
Get the versioning configuration of a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will request the bucket's versioning configuration.
6.806168
6.330946
1.075063
root = XML(xml_bytes) mfa_delete = root.findtext("MfaDelete") status = root.findtext("Status") return VersioningConfiguration(mfa_delete=mfa_delete, status=status)
def _parse_versioning_config(self, (response, xml_bytes))
Parse a C{VersioningConfiguration} XML document.
4.639362
3.766793
1.231648
details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name="?acl"), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_acl) return d
def get_bucket_acl(self, bucket)
Get the access control policy for a bucket.
7.527158
6.814229
1.104624
details = self._details( method=b"PUT", url_context=self._url_context(bucket=bucket, object_name=object_name), headers=self._headers(content_type), metadata=metadata, amz_headers=amz_headers, body=data, body_producer=body_producer, ) d = self._submit(self._query_factory(details)) d.addCallback(itemgetter(1)) return d
def put_object(self, bucket, object_name, data=None, content_type=None, metadata={}, amz_headers={}, body_producer=None)
Put an object in a bucket. An existing object with the same name will be replaced. @param bucket: The name of the bucket. @param object_name: The name of the object. @type object_name: L{unicode} @param data: The data to write. @param content_type: The type of data being written. @param metadata: A C{dict} used to build C{x-amz-meta-*} headers. @param amz_headers: A C{dict} used to build C{x-amz-*} headers. @return: A C{Deferred} that will fire with the result of the request.
3.72719
3.844809
0.969408
dest_bucket = dest_bucket or source_bucket dest_object_name = dest_object_name or source_object_name amz_headers["copy-source"] = "/%s/%s" % (source_bucket, source_object_name) details = self._details( method=b"PUT", url_context=self._url_context( bucket=dest_bucket, object_name=dest_object_name, ), metadata=metadata, amz_headers=amz_headers, ) d = self._submit(self._query_factory(details)) return d
def copy_object(self, source_bucket, source_object_name, dest_bucket=None, dest_object_name=None, metadata={}, amz_headers={})
Copy an object stored in S3 from a source bucket to a destination bucket. @param source_bucket: The S3 bucket to copy the object from. @param source_object_name: The name of the object to copy. @param dest_bucket: Optionally, the S3 bucket to copy the object to. Defaults to C{source_bucket}. @param dest_object_name: Optionally, the name of the new object. Defaults to C{source_object_name}. @param metadata: A C{dict} used to build C{x-amz-meta-*} headers. @param amz_headers: A C{dict} used to build C{x-amz-*} headers. @return: A C{Deferred} that will fire with the result of the request.
2.775847
2.857028
0.971586
details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name=object_name), ) d = self._submit(self._query_factory(details)) d.addCallback(itemgetter(1)) return d
def get_object(self, bucket, object_name)
Get an object from a bucket.
6.905067
6.58244
1.049013
details = self._details( method=b"HEAD", url_context=self._url_context(bucket=bucket, object_name=object_name), ) d = self._submit(self._query_factory(details)) d.addCallback(lambda (response, body): _to_dict(response.responseHeaders)) return d
def head_object(self, bucket, object_name)
Retrieve object metadata only.
6.302231
5.931137
1.062567
details = self._details( method=b"DELETE", url_context=self._url_context(bucket=bucket, object_name=object_name), ) d = self._submit(self._query_factory(details)) return d
def delete_object(self, bucket, object_name)
Delete an object from a bucket. Once deleted, there is no method to restore or undelete an object.
7.272288
7.120425
1.021328
data = access_control_policy.to_xml() details = self._details( method=b"PUT", url_context=self._url_context( bucket=bucket, object_name='%s?acl' % (object_name,), ), body=data, ) query = self._query_factory(details) d = self._submit(query) d.addCallback(self._parse_acl) return d
def put_object_acl(self, bucket, object_name, access_control_policy)
Set access control policy on an object.
4.85667
4.786894
1.014576
data = RequestPayment(payer).to_xml() details = self._details( method=b"PUT", url_context=self._url_context(bucket=bucket, object_name="?requestPayment"), body=data, ) d = self._submit(self._query_factory(details)) return d
def put_request_payment(self, bucket, payer)
Set request payment configuration on bucket to payer. @param bucket: The name of the bucket. @param payer: The name of the payer. @return: A C{Deferred} that will fire with the result of the request.
8.11741
8.089254
1.003481
details = self._details( method=b"GET", url_context=self._url_context(bucket=bucket, object_name="?requestPayment"), ) d = self._submit(self._query_factory(details)) d.addCallback(self._parse_get_request_payment) return d
def get_request_payment(self, bucket)
Get the request payment configuration on a bucket. @param bucket: The name of the bucket. @return: A C{Deferred} that will fire with the name of the payer.
6.503654
6.31549
1.029794
objectname_plus = '%s?uploads' % object_name details = self._details( method=b"POST", url_context=self._url_context(bucket=bucket, object_name=objectname_plus), headers=self._headers(content_type), metadata=metadata, amz_headers=amz_headers, ) d = self._submit(self._query_factory(details)) d.addCallback( lambda (response, body): MultipartInitiationResponse.from_xml(body) ) return d
def init_multipart_upload(self, bucket, object_name, content_type=None, amz_headers={}, metadata={})
Initiate a multipart upload to a bucket. @param bucket: The name of the bucket @param object_name: The object name @param content_type: The Content-Type for the object @param metadata: C{dict} containing additional metadata @param amz_headers: A C{dict} used to build C{x-amz-*} headers. @return: C{str} upload_id
4.749833
5.140125
0.924069
parms = 'partNumber=%s&uploadId=%s' % (str(part_number), upload_id) objectname_plus = '%s?%s' % (object_name, parms) details = self._details( method=b"PUT", url_context=self._url_context(bucket=bucket, object_name=objectname_plus), headers=self._headers(content_type), metadata=metadata, body=data, ) d = self._submit(self._query_factory(details)) d.addCallback(lambda (response, data): _to_dict(response.responseHeaders)) return d
def upload_part(self, bucket, object_name, upload_id, part_number, data=None, content_type=None, metadata={}, body_producer=None)
Upload a part of data corresponding to a multipart upload. @param bucket: The bucket name @param object_name: The object name @param upload_id: The multipart upload id @param part_number: The part number @param data: Data (optional, requires body_producer if not specified) @param content_type: The Content-Type @param metadata: Additional metadata @param body_producer: an C{IBodyProducer} (optional, requires data if not specified) @return: the C{Deferred} from underlying query.submit() call
4.434878
4.585979
0.967051
data = self._build_complete_multipart_upload_xml(parts_list) objectname_plus = '%s?uploadId=%s' % (object_name, upload_id) details = self._details( method=b"POST", url_context=self._url_context(bucket=bucket, object_name=objectname_plus), headers=self._headers(content_type), metadata=metadata, body=data, ) d = self._submit(self._query_factory(details)) # TODO - handle error responses d.addCallback( lambda (response, body): MultipartCompletionResponse.from_xml(body) ) return d
def complete_multipart_upload(self, bucket, object_name, upload_id, parts_list, content_type=None, metadata={})
Complete a multipart upload. N.B. This can possibly be a slow operation. @param bucket: The bucket name @param object_name: The object name @param upload_id: The multipart upload id @param parts_list: A list of all the parts (2-tuples of part sequence number and etag) @param content_type: The Content-Type of the object @param metadata: C{dict} containing additional metadata @return: a C{Deferred} that fires after request is complete
4.836924
4.798168
1.008077
if self.object_name and not self.content_type: # XXX nothing is currently done with the encoding... we may # need to in the future self.content_type, encoding = mimetypes.guess_type( self.object_name, strict=False)
def set_content_type(self)
Set the content type based on the file extension used in the object name.
5.566953
5.038372
1.104911
headers = {'x-amz-date': _auth_v4.makeAMZDate(instant)} if self.body_producer is None: data = self.data if data is None: data = b"" headers["x-amz-content-sha256"] = hashlib.sha256(data).hexdigest() else: data = None headers["x-amz-content-sha256"] = b"UNSIGNED-PAYLOAD" for key, value in self.metadata.iteritems(): headers["x-amz-meta-" + key] = value for key, value in self.amz_headers.iteritems(): headers["x-amz-" + key] = value # Before we check if the content type is set, let's see if we can set # it by guessing the the mimetype. self.set_content_type() if self.content_type is not None: headers["Content-Type"] = self.content_type if self.creds is not None: headers["Authorization"] = self.sign( headers, data, s3_url_context(self.endpoint, self.bucket, self.object_name), instant, method=self.action) return headers
def get_headers(self, instant)
Build the list of headers needed in order to perform S3 operations.
3.351208
3.251463
1.030677
headers["host"] = url_context.get_encoded_host() if data is None: request = _auth_v4._CanonicalRequest.from_request_components( method=method, url=url_context.get_encoded_path(), headers=headers, headers_to_sign=('host', 'x-amz-date'), payload_hash=None, ) else: request = _auth_v4._CanonicalRequest.from_request_components_and_payload( method=method, url=url_context.get_encoded_path(), headers=headers, headers_to_sign=('host', 'x-amz-date'), payload=data, ) return _auth_v4._make_authorization_header( region=region, service="s3", canonical_request=request, credentials=self.creds, instant=instant)
def sign(self, headers, data, url_context, instant, method, region=REGION_US_EAST_1)
Sign this query using its built in credentials.
2.392406
2.421921
0.987813
if not url_context: url_context = s3_url_context( self.endpoint, self.bucket, self.object_name) d = self.get_page( url_context.get_encoded_url(), method=self.action, postdata=self.data or b"", headers=self.get_headers(utcnow()), ) return d.addErrback(s3_error_wrapper)
def submit(self, url_context=None, utcnow=datetime.datetime.utcnow)
Submit this query. @return: A deferred from get_page
4.93939
5.203354
0.94927
if 'id' in self.node.attrib: yield PlaceholderAttribute('id', self.node.attrib['id']) if 'tei-tag' in self.node.attrib: yield PlaceholderAttribute('tei-tag', self.node.attrib['tei-tag']) for attributes in self.node.iterchildren('attributes'): for attribute in self.__iter_attributes__(attributes): yield attribute
def attributes(self)
Contain attributes applicable to this element
4.159986
3.594683
1.157261
from .placeholder_division import PlaceholderDivision placeholder = None for item in self.__parts_and_divisions: if item.tag == 'part': if not placeholder: placeholder = PlaceholderDivision() placeholder.parts.append(item) else: if placeholder: yield placeholder placeholder = None yield item if placeholder: yield placeholder
def divisions(self)
Recursively get all the text divisions directly part of this element. If an element contains parts or text without a tag, those will be returned in order and wrapped with a TextDivision.
4.831961
4.097807
1.179158
for item in self.__parts_and_divisions: if item.tag == 'part' and item.is_placeholder: # A real part will always return a placeholder containing # its content. Placeholder parts cannot have children. yield item else: for part in item.all_parts: yield part
def all_parts(self)
Recursively get the parts flattened and in document order constituting the entire text e.g. if something has emphasis, a footnote or is marked as foreign. Text without a container element will be returned in order and wrapped with a TextPart.
9.525735
8.940837
1.065419
for item in self.__parts_and_divisions: if item.tag == 'part': yield item else: # Divisions shouldn't be beneath a part, but here's a fallback # for if this does happen for part in item.parts: yield part
def parts(self)
Get the parts directly below this element.
9.136602
7.722764
1.183074
from .division import Division from .part import Part from .placeholder_part import PlaceholderPart text = self.node.text if text: stripped_text = text.replace('\n', '') if stripped_text.strip(): yield PlaceholderPart(stripped_text) for item in self.node: if item.tag == 'part': yield Part(item) elif item.tag == 'div': yield Division(item) if item.tail: stripped_tail = item.tail.replace('\n', '') if stripped_tail.strip(): yield PlaceholderPart(stripped_tail)
def __parts_and_divisions(self)
The parts and divisions directly part of this element.
2.742535
2.643563
1.037439
return inject(self, '\n'.join(f'{division.tostring(inject)}' for division in self.divisions))
def tostring(self, inject)
Convert an element to a single string and allow the passed inject method to place content before any element.
11.051674
10.428257
1.059782
inputs = Input(shape=input_shape) for i, c in enumerate(rec_conv_layers): conv = Conv1D(c[0][0][0], c[0][0][1], padding=padding)(inputs) batch = BatchNormalization()(conv) act = LeakyReLU(alpha=c[0][2])(batch) pool = MaxPooling1D(pool_size=c[0][1][0], strides=c[0][1][1], padding=padding)(act) d1 = Dropout(c[0][3])(pool) inner = time_steps( input=d1, conv_layer=c[1], time_conv_layer=c[2], padding=padding) drop = Flatten()(inner) for i, d in enumerate(dense_layers): dense = Dense(d[0], activation='relu')(drop) bn = BatchNormalization()(dense) act = LeakyReLU(alpha=d[1])(bn) drop = Dropout(d[2])(act) output = Dense(output_layer[0], activation=output_layer[1])(drop) model = Model(inputs=inputs, outputs=output) model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy']) return model
def RCL(input_shape, rec_conv_layers, dense_layers, output_layer=[1, 'sigmoid'], padding='same', optimizer='adam', loss='binary_crossentropy')
Summary Args: input_shape (tuple): The shape of the input layer. output_nodes (int): Number of nodes in the output layer. It depends on the loss function used. rec_conv_layers (list): RCL descriptor [ [ [(filter, kernel), (pool_size, stride), leak, drop], [(filter, kernel), (pool_size, stride), leak, drop], [(filter, kernel), (pool_size, stride), leak, drop, timesteps], ], ... [ [],[],[] ] ] dense_layers (TYPE): Dense layer descriptor [[fully_connected, leak, drop], ... []] padding (str, optional): Type of padding for conv and pooling layers optimizer (str or object optional): Keras optimizer as string or keras optimizer Returns: model: The compiled Keras model, ready for training.
2.026395
2.014938
1.005686
inputs = Input(shape=input_shape) for i, c in enumerate(conv_layers): if i == 0: conv = Conv1D(c[0][0], c[0][1], padding=padding)(inputs) else: conv = Conv1D(c[0][0], c[0][1], padding=padding)(drop) bn = BatchNormalization()(conv) act = LeakyReLU(alpha=c[2])(bn) pool = MaxPooling1D(pool_size=c[1][0], strides=c[1][1], padding=padding)(act) drop = Dropout(c[3])(pool) drop = Flatten()(drop) for i, d in enumerate(dense_layers): dense = Dense(d[0], activation='relu')(drop) bn = BatchNormalization()(dense) act = LeakyReLU(alpha=d[1])(bn) drop = Dropout(d[2])(act) output = Dense(output_layer[0], activation=output_layer[1])(drop) model = Model(inputs=inputs, outputs=output) model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy']) return model
def VOICE(input_shape, conv_layers, dense_layers, output_layer=[1, 'sigmoid'], padding='same', optimizer='adam', loss='binary_crossentropy')
Conv1D CNN used primarily for voice data. Args: input_shape (tuple): The shape of the input layer targets (int): Number of targets conv_layers (list): Conv layer descriptor [[(filter, kernel), (pool_size, stride), leak, drop], ... []] dense_layers (TYPE): Dense layer descriptor [[fully_connected, leak, drop]] padding (str, optional): Type of padding for conv and pooling layers optimizer (str or object optional): Keras optimizer as string or keras optimizer Returns: TYPE: model, build_arguments
1.535193
1.568798
0.978579
inputs = Input(shape=input_shape) dense = inputs for i, d in enumerate(dense_layers): dense = Dense(d, activation='relu')(dense) dense = BatchNormalization()(dense) dense = Dropout(0.3)(dense) output = Dense(output_layer[0], activation=output_layer[1])(dense) model = Model(inputs=inputs, outputs=output) model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy']) return model
def DNN(input_shape, dense_layers, output_layer=[1, 'sigmoid'], optimizer='adam', loss='binary_crossentropy')
Summary Args: input_shape (list): The shape of the input layer targets (int): Number of targets dense_layers (list): Dense layer descriptor [fully_connected] optimizer (str or object optional): Keras optimizer as string or keras optimizer Returns: TYPE: model, build_arguments
1.705817
1.999313
0.853202
# ErrorMeasure was already initialized. if 0 < len(self._errorValues): raise StandardError("An ErrorMeasure can only be initialized once.") # sort the TimeSeries to reduce the required comparison operations originalTimeSeries.sort_timeseries() calculatedTimeSeries.sort_timeseries() # Performance optimization append = self._errorValues.append appendDate = self._errorDates.append local_error = self.local_error minCalcIdx = 0 # calculate all valid local errors for orgPair in originalTimeSeries: for calcIdx in xrange(minCalcIdx, len(calculatedTimeSeries)): calcPair = calculatedTimeSeries[calcIdx] # Skip values that can not be compared if calcPair[0] != orgPair[0]: continue append(local_error(orgPair[1:], calcPair[1:])) appendDate(orgPair[0]) # return False, if the error cannot be calculated calculatedErrors = len(filter(lambda item: item is not None, self._errorValues)) minCalculatedErrors = self._minimalErrorCalculationPercentage * len(originalTimeSeries) if calculatedErrors < minCalculatedErrors: self._errorValues = [] self._errorDates = [] return False return True
def initialize(self, originalTimeSeries, calculatedTimeSeries)
Initializes the ErrorMeasure. During initialization, all :py:meth:`BaseErrorMeasure.local_errors` are calculated. :param TimeSeries originalTimeSeries: TimeSeries containing the original data. :param TimeSeries calculatedTimeSeries: TimeSeries containing calculated data. Calculated data is smoothed or forecasted data. :return: Return :py:const:`True` if the error could be calculated, :py:const:`False` otherwise based on the minimalErrorCalculationPercentage. :rtype: boolean :raise: Raises a :py:exc:`StandardError` if the error measure is initialized multiple times.
5.292751
4.345407
1.21801
if startDate is not None: possibleDates = filter(lambda date: date >= startDate, self._errorDates) if 0 == len(possibleDates): raise ValueError("%s does not represent a valid startDate." % startDate) startIdx = self._errorDates.index(min(possibleDates)) else: startIdx = int((startingPercentage * len(self._errorValues)) / 100.0) if endDate is not None: possibleDates = filter(lambda date: date <= endDate, self._errorDates) if 0 == len(possibleDates): raise ValueError("%s does not represent a valid endDate." % endDate) endIdx = self._errorDates.index(max(possibleDates)) + 1 else: endIdx = int((endPercentage * len(self._errorValues)) / 100.0) return self._errorValues[startIdx:endIdx]
def _get_error_values(self, startingPercentage, endPercentage, startDate, endDate)
Gets the defined subset of self._errorValues. Both parameters will be correct at this time. :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0]. It represents the value, where the error calculation should be started. 25.0 for example means that the first 25% of all calculated errors will be ignored. :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0]. It represents the value, after which all error values will be ignored. 90.0 for example means that the last 10% of all local errors will be ignored. :param float startDate: Epoch representing the start date used for error calculation. :param float endDate: Epoch representing the end date used in the error calculation. :return: Returns a list with the defined error values. :rtype: list :raise: Raises a ValueError if startDate or endDate do not represent correct boundaries for error calculation.
1.909752
1.882575
1.014436
# not initialized: if len(self._errorValues) == 0: raise StandardError("The last call of initialize(...) was not successfull.") # check for wrong parameters if not (0.0 <= startingPercentage <= 100.0): raise ValueError("startingPercentage has to be in [0.0, 100.0].") if not (0.0 <= endPercentage <= 100.0): raise ValueError("endPercentage has to be in [0.0, 100.0].") if endPercentage < startingPercentage: raise ValueError("endPercentage has to be greater or equal than startingPercentage.") return self._calculate(startingPercentage, endPercentage, startDate, endDate)
def get_error(self, startingPercentage=0.0, endPercentage=100.0, startDate=None, endDate=None)
Calculates the error for the given interval (startingPercentage, endPercentage) between the TimeSeries given during :py:meth:`BaseErrorMeasure.initialize`. :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0]. It represents the value, where the error calculation should be started. 25.0 for example means that the first 25% of all calculated errors will be ignored. :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0]. It represents the value, after which all error values will be ignored. 90.0 for example means that the last 10% of all local errors will be ignored. :param float startDate: Epoch representing the start date used for error calculation. :param float endDate: Epoch representing the end date used in the error calculation. :return: Returns a float representing the error. :rtype: float :raise: Raises a :py:exc:`ValueError` in one of the following cases: - startingPercentage not in [0.0, 100.0] - endPercentage not in [0.0, 100.0] - endPercentage < startingPercentage :raise: Raises a :py:exc:`StandardError` if :py:meth:`BaseErrorMeasure.initialize` was not successful before.
2.63271
2.267433
1.161097
if not (confidenceLevel >= 0 and confidenceLevel <= 1): raise ValueError("Parameter percentage has to be in [0,1]") underestimations = [] overestimations = [] for error in self._errorValues: if error is None: # None was in the lists causing some confidenceLevels not be calculated, not sure if that was intended, I suggested ignoring None values continue #Want 0 errors in both lists! if error >= 0: overestimations.append(error) if error <= 0: underestimations.append(error) #sort and cut off at confidence level. overestimations.sort() underestimations.sort(reverse=True) overIdx = int(len(overestimations) * confidenceLevel) - 1 underIdx = int(len(underestimations) * confidenceLevel) - 1 overestimation = 0.0 underestimation = 0.0 if overIdx >= 0: overestimation = overestimations[overIdx] else: print len(overestimations), confidenceLevel if underIdx >= 0: underestimation = underestimations[underIdx] return underestimation, overestimation
def confidence_interval(self, confidenceLevel)
Calculates for which value confidenceLevel% of the errors are closer to 0. :param float confidenceLevel: percentage of the errors that should be smaller than the returned value for overestimations and larger than the returned value for underestimations. confidenceLevel has to be in [0.0, 1.0] :return: return a tuple containing the underestimation and overestimation for the given confidenceLevel :rtype: tuple :warning: Index is still not calculated correctly
3.887437
3.477455
1.117897
# data_m = pd.read_table(filename, sep=',', header=None) try: data_m = np.genfromtxt(filename, delimiter=',', invalid_raise=False) date_times = pd.to_datetime((data_m[:, 0] - data_m[0, 0])) time_difference = (data_m[:, 0] - data_m[0, 0]) / convert_times magnitude_sum_acceleration = \ np.sqrt(data_m[:, 1] ** 2 + data_m[:, 2] ** 2 + data_m[:, 3] ** 2) data = {'td': time_difference, 'x': data_m[:, 1], 'y': data_m[:, 2], 'z': data_m[:, 3], 'mag_sum_acc': magnitude_sum_acceleration} data_frame = pd.DataFrame(data, index=date_times, columns=['td', 'x', 'y', 'z', 'mag_sum_acc']) return data_frame except IOError as e: ierr = "({}): {}".format(e.errno, e.strerror) logging.error("load data, file not found, I/O error %s", ierr) except ValueError as verr: logging.error("load data ValueError ->%s", verr.message) except: logging.error("Unexpected error on load data method: %s", sys.exc_info()[0])
def load_cloudupdrs_data(filename, convert_times=1000000000.0)
This method loads data in the cloudupdrs format Usually the data will be saved in a csv file and it should look like this: .. code-block:: json timestamp_0, x_0, y_0, z_0 timestamp_1, x_1, y_1, z_1 timestamp_2, x_2, y_2, z_2 . . . timestamp_n, x_n, y_n, z_n where x, y, z are the components of the acceleration :param filename: The path to load data from :type filename: string :param convert_times: Convert times. The default is from nanoseconds to seconds. :type convert_times: float
2.580675
2.558174
1.008796
data = pd.read_csv(filename, index_col=0) data.index = data.index.astype(np.datetime64) return data
def load_segmented_data(filename)
Helper function to load segmented gait time series data. :param filename: The full path of the file that contains our data. This should be a comma separated value (csv file). :type filename: str :return: The gait time series segmented data, with a x, y, z, mag_acc_sum and segmented columns. :rtype: pandas.DataFrame
2.545603
3.834004
0.663954
raw_data = pd.read_json(filename) date_times = pd.to_datetime(raw_data.timestamp * convert_times - raw_data.timestamp[0] * convert_times) time_difference = (raw_data.timestamp - raw_data.timestamp[0]) time_difference = time_difference.values magnitude_sum_acceleration = \ np.sqrt(raw_data.x.values ** 2 + raw_data.y.values ** 2 + raw_data.z.values ** 2) data = {'td': time_difference, 'x': raw_data.x.values, 'y': raw_data.y.values, 'z': raw_data.z.values, 'mag_sum_acc': magnitude_sum_acceleration} data_frame = pd.DataFrame(data, index=date_times, columns=['td', 'x', 'y', 'z', 'mag_sum_acc']) return data_frame
def load_mpower_data(filename, convert_times=1000000000.0)
This method loads data in the `mpower <https://www.synapse.org/#!Synapse:syn4993293/wiki/247859>`_ format The format is like: .. code-block:: json [ { "timestamp":19298.67999479167, "x": ... , "y": ..., "z": ..., }, {...}, {...} ] :param filename: The path to load data from :type filename: string :param convert_times: Convert times. The default is from from nanoseconds to seconds. :type convert_times: float
2.264197
2.378597
0.951904
# skip_footer drops the trailing (often partial) last row;
# invalid_raise=False skips malformed rows instead of aborting.
data_m = np.genfromtxt(filename, delimiter=',', invalid_raise=False, skip_footer=1)
# Datetime index re-based so the first sample is t=0.
date_times = pd.to_datetime((data_m[:, 0] - data_m[0, 0]))
# Elapsed time in seconds (default conversion is ms -> s).
time_difference = (data_m[:, 0] - data_m[0, 0]) / convert_times
# Columns 2-4 are action type and tap position; 7-8 are the target position.
data = {'td': time_difference, 'action_type': data_m[:, 2],'x': data_m[:, 3], 'y': data_m[:, 4],
        'x_target': data_m[:, 7], 'y_target': data_m[:, 8]}
data_frame = pd.DataFrame(data, index=date_times, columns=['td', 'action_type','x', 'y', 'x_target', 'y_target'])
return data_frame
def load_finger_tapping_cloudupdrs_data(filename, convert_times=1000.0)
This method loads data in the cloudupdrs format for the finger tapping processor Usually the data will be saved in a csv file and it should look like this: .. code-block:: json timestamp_0, . , action_type_0, x_0, y_0, . , . , x_target_0, y_target_0 timestamp_1, . , action_type_1, x_1, y_1, . , . , x_target_1, y_target_1 timestamp_2, . , action_type_2, x_2, y_2, . , . , x_target_2, y_target_2 . . . timestamp_n, . , action_type_n, x_n, y_n, . , . , x_target_n, y_target_n where data_frame.x, data_frame.y: components of tapping position. data_frame.x_target, data_frame.y_target their target. :param filename: The path to load data from :type filename: string :param convert_times: Convert times. The default is from from milliseconds to seconds. :type convert_times: float
2.386889
2.276412
1.048531
# Load the raw tapping records from the mpower JSON export.
raw_data = pd.read_json(filename)
# Datetime index scaled by convert_times and re-based on the first tap.
date_times = pd.to_datetime(raw_data.TapTimeStamp * convert_times - raw_data.TapTimeStamp[0] * convert_times)
time_difference = (raw_data.TapTimeStamp - raw_data.TapTimeStamp[0])
time_difference = time_difference.values

x = []
y = []
x_target = []
y_target = []

# Button rects arrive as strings; extract the four numbers
# (x, y, width, height) for each button.
x_left, y_left, width_left, height_left = re.findall(r'-?\d+\.?\d*', button_left_rect)
x_right, y_right, width_right, height_right = re.findall(r'-?\d+\.?\d*', button_right_rect)

# Target point = centre of each button rectangle.
x_left_target = float(x_left) + ( float(width_left) / 2.0 )
y_left_target = float(y_left) + ( float(height_left) / 2.0 )
x_right_target = float(x_right) + ( float(width_right) / 2.0 )
y_right_target = float(y_right) + ( float(height_right) / 2.0 )

for row_index, row in raw_data.iterrows():
    # The tap coordinate is also a numeric-pair string.
    x_coord, y_coord = re.findall(r'-?\d+\.?\d*', row.TapCoordinate)
    x.append(float(x_coord))
    y.append(float(y_coord))

    # Anything other than the left button is treated as the right one.
    if row.TappedButtonId == 'TappedButtonLeft':
        x_target.append(x_left_target)
        y_target.append(y_left_target)
    else:
        x_target.append(x_right_target)
        y_target.append(y_right_target)

# action_type is fixed at 1.0 to match the cloudupdrs tapping schema.
data = {'td': time_difference, 'action_type': 1.0, 'x': x, 'y': y, 'x_target': x_target, 'y_target': y_target}
data_frame = pd.DataFrame(data, index=date_times, columns=['td', 'action_type', 'x', 'y', 'x_target', 'y_target'])
data_frame.index.name = 'timestamp'
return data_frame
def load_finger_tapping_mpower_data(filename, button_left_rect, button_right_rect, convert_times=1000.0)
This method loads data in the `mpower <https://www.synapse.org/#!Synapse:syn4993293/wiki/247859>`_ format
1.831862
1.85021
0.990083
if format_file == 'mpower': return load_mpower_data(filename) elif format_file == 'segmented': return load_segmented_data(filename) elif format_file == 'accapp': return load_accapp_data(filename) elif format_file == 'physics': return load_physics_data(filename) elif format_file == 'freeze': return load_freeze_data(filename) elif format_file == 'huga': return load_huga_data(filename) else: if format_file == 'ft_cloudupdrs': return load_finger_tapping_cloudupdrs_data(filename) else: if format_file == 'ft_mpower': if button_left_rect is not None and button_right_rect is not None: return load_finger_tapping_mpower_data(filename, button_left_rect, button_right_rect) else: return load_cloudupdrs_data(filename)
def load_data(filename, format_file='cloudupdrs', button_left_rect=None, button_right_rect=None)
This is a general load data method where the format of data to load can be passed as a parameter, :param filename: The path to load data from :type filename: str :param format_file: format of the file. Default is CloudUPDRS. Set to mpower for mpower data. :type format_file: str :param button_left_rect: mpower param :type button_left_rect: str :param button_right_rect: mpower param :type button_right_rect: str
2.32948
2.28997
1.017254
integrate = sum(signal[1:]) / sampling_frequency + sum(signal[:-1]) integrate /= sampling_frequency * 2 return np.array(integrate)
def numerical_integration(signal, sampling_frequency)
Numerically integrate a signal with it's sampling frequency. :param signal: A 1-dimensional array or list (the signal). :type signal: array :param sampling_frequency: The sampling frequency for the signal. :type sampling_frequency: float :return: The integrated signal. :rtype: numpy.ndarray
6.756484
11.608173
0.582045
signal = np.array(signal) n = len(signal) variance = signal.var() signal -= signal.mean() r = np.correlate(signal, signal, mode = 'full')[-n:] result = r / (variance * (np.arange(n, 0, -1))) return np.array(result)
def autocorrelation(signal)
The `correlation <https://en.wikipedia.org/wiki/Autocorrelation#Estimation>`_ of a signal with a delayed copy of itself. :param signal: A 1-dimensional array or list (the signal). :type signal: array :return: The autocorrelated signal. :rtype: numpy.ndarray
3.01528
3.600658
0.837425
# Accumulators of (position, value) pairs for maxima and minima.
maxtab = []
mintab = []

if x is None:
    x = np.arange(len(signal))

v = np.asarray(signal)

# Input validation. NOTE(review): sys.exit (SystemExit) rather than
# raising ValueError mirrors the original MATLAB script's error style.
if len(v) != len(x):
    sys.exit('Input vectors v and x must have same length')

if not np.isscalar(delta):
    sys.exit('Input argument delta must be a scalar')

if delta <= 0:
    sys.exit('Input argument delta must be positive')

# Running extrema and their positions; the scan alternates between
# hunting for a maximum and hunting for a minimum.
mn, mx = np.inf, -np.inf
mnpos, mxpos = np.nan, np.nan

lookformax = True

for i in np.arange(len(v)):
    this = v[i]
    if this > mx:
        mx = this
        mxpos = x[i]
    if this < mn:
        mn = this
        mnpos = x[i]

    if lookformax:
        # A maximum is confirmed once the signal drops delta below it.
        if this < mx - delta:
            maxtab.append((mxpos, mx))
            mn = this
            mnpos = x[i]
            lookformax = False
    else:
        # A minimum is confirmed once the signal rises delta above it.
        if this > mn + delta:
            mintab.append((mnpos, mn))
            mx = this
            mxpos = x[i]
            lookformax = True

return np.array(maxtab), np.array(mintab)
def peakdet(signal, delta, x=None)
Find the local maxima and minima (peaks) in a 1-dimensional signal. Converted from MATLAB script <http://billauer.co.il/peakdet.html> :param array signal: A 1-dimensional array or list (the signal). :type signal: array :param delta: The peak threashold. A point is considered a maximum peak if it has the maximal value, and was preceded (to the left) by a value lower by delta. :type delta: float :param x: Indices in local maxima and minima are replaced with the corresponding values in x (None default). :type x: array :return maxtab: The highest peaks. :rtype maxtab: numpy.ndarray :return mintab: The lowest peaks. :rtype mintab: numpy.ndarray
1.217715
1.281883
0.949942
# Real part of FFT: freqs = fftfreq(data.size, d=1.0/sample_rate) f_signal = rfft(data) # Maximum non-zero frequency: imax_freq = np.argsort(f_signal)[-2] freq = np.abs(freqs[imax_freq]) # Inter-peak samples: interpeak = np.int(np.round(sample_rate / freq)) return interpeak
def compute_interpeak(data, sample_rate)
Compute number of samples between signal peaks using the real part of FFT. :param data: 1-dimensional time series data. :type data: array :param sample_rate: Sample rate of accelerometer reading (Hz) :type sample_rate: float :return interpeak: Number of samples between peaks :rtype interpeak: int :Examples: >>> import numpy as np >>> from mhealthx.signals import compute_interpeak >>> data = np.random.random(10000) >>> sample_rate = 100 >>> interpeak = compute_interpeak(data, sample_rate)
4.07512
4.018949
1.013977
# butter() expects the cutoff normalised to the Nyquist frequency.
nyquist = 0.5 * sample_rate
normal_cutoff = cutoff / nyquist
b, a = butter(order, normal_cutoff, btype='low', analog=False)
if plot:
    # Optional visual check of the filter's frequency response.
    w, h = freqz(b, a, worN=8000)
    plt.subplot(2, 1, 1)
    plt.plot(0.5*sample_rate*w/np.pi, np.abs(h), 'b')
    plt.plot(cutoff, 0.5*np.sqrt(2), 'ko')
    plt.axvline(cutoff, color='k')
    plt.xlim(0, 0.5*sample_rate)
    plt.title("Lowpass Filter Frequency Response")
    plt.xlabel('Frequency [Hz]')
    plt.grid()
    plt.show()

# NOTE(review): lfilter is a causal filter and introduces phase lag,
# although the docstring describes a "zero lag" filter (that would be
# filtfilt) -- confirm which behaviour is intended.
y = lfilter(b, a, data)
return y
def butter_lowpass_filter(data, sample_rate, cutoff=10, order=4, plot=False)
`Low-pass filter <http://stackoverflow.com/questions/25191620/ creating-lowpass-filter-in-scipy-understanding-methods-and-units>`_ data by the [order]th order zero lag Butterworth filter whose cut frequency is set to [cutoff] Hz. :param data: time-series data, :type data: numpy array of floats :param: sample_rate: data sample rate :type sample_rate: integer :param cutoff: filter cutoff :type cutoff: float :param order: order :type order: integer :return y: low-pass-filtered data :rtype y: numpy array of floats :Examples: >>> from mhealthx.signals import butter_lowpass_filter >>> data = np.random.random(100) >>> sample_rate = 10 >>> cutoff = 5 >>> order = 4 >>> y = butter_lowpass_filter(data, sample_rate, cutoff, order)
1.617168
1.863644
0.867745
import numpy as np if isinstance(data, np.ndarray): pass elif isinstance(data, list): data = np.asarray(data) else: raise IOError('data should be a numpy array') pos = data > 0 crossings = (pos[:-1] & ~pos[1:]).nonzero()[0] return crossings
def crossings_nonzero_pos2neg(data)
Find `indices of zero crossings from positive to negative values <http://stackoverflow.com/questions/3843017/efficiently-detect-sign-changes-in-python>`_. :param data: numpy array of floats :type data: numpy array of floats :return crossings: crossing indices to data :rtype crossings: numpy array of integers :Examples: >>> import numpy as np >>> from mhealthx.signals import crossings_nonzero_pos2neg >>> data = np.random.random(100) >>> crossings = crossings_nonzero_pos2neg(data)
2.804954
3.368599
0.832677
# Autocorrelation: coefficients = correlate(data, data, 'full') size = np.int(coefficients.size/2) coefficients = coefficients[size:] N = coefficients.size # Unbiased: if unbias: if unbias == 1: coefficients /= (N - np.arange(N)) elif unbias == 2: coefficient_ratio = coefficients[0]/coefficients[-1] coefficients /= np.linspace(coefficient_ratio, 1, N) else: raise IOError("unbias should be set to 1, 2, or None") # Normalize: if normalize: if normalize == 1: coefficients /= np.abs(coefficients[0]) elif normalize == 2: coefficients /= np.max(np.abs(coefficients)) else: raise IOError("normalize should be set to 1, 2, or None") return coefficients, N
def autocorrelate(data, unbias=2, normalize=2)
Compute the autocorrelation coefficients for time series data. Here we use scipy.signal.correlate, but the results are the same as in Yang, et al., 2012 for unbias=1: "The autocorrelation coefficient refers to the correlation of a time series with its own past or future values. iGAIT uses unbiased autocorrelation coefficients of acceleration data to scale the regularity and symmetry of gait. The autocorrelation coefficients are divided by :math:`fc(0)`, so that the autocorrelation coefficient is equal to :math:`1` when :math:`t=0`: .. math:: NFC(t) = \\frac{fc(t)}{fc(0)} Here :math:`NFC(t)` is the normalised autocorrelation coefficient, and :math:`fc(t)` are autocorrelation coefficients." :param data: time series data :type data: numpy array :param unbias: autocorrelation, divide by range (1) or by weighted range (2) :type unbias: integer or None :param normalize: divide by 1st coefficient (1) or by maximum abs. value (2) :type normalize: integer or None :return coefficients: autocorrelation coefficients [normalized, unbiased] :rtype coefficients: numpy array :return N: number of coefficients :rtype N: integer :Examples: >>> import numpy as np >>> from mhealthx.signals import autocorrelate >>> data = np.random.random(100) >>> unbias = 2 >>> normalize = 2 >>> plot_test = True >>> coefficients, N = autocorrelate(data, unbias, normalize, plot_test)
2.486693
2.656185
0.93619
peaks, _ = sig.find_peaks(data) prominences = sig.peak_prominences(data, peaks)[0] return peaks, prominences
def get_signal_peaks_and_prominences(data)
Get the signal peaks and peak prominences. :param data array: One-dimensional array. :return peaks array: The peaks of our signal. :return prominences array: The prominences of the peaks.
2.27422
3.609223
0.630114
# Slide a [border, smoothing, border] window across the signal.
for i in range(len(data) - sum(window)):
    start_window_from = i
    start_window_to = i+window[0]
    end_window_from = start_window_to + window[1]
    end_window_to = end_window_from + window[2]

    # If the two border regions hold identical values, overwrite the
    # whole span (borders + middle) with the leading border value.
    # NOTE(review): the elementwise comparison assumes
    # window[0] == window[2]; unequal border sizes compare
    # differently-sized slices -- confirm callers.
    # NOTE(review): assigning a scalar to a slice requires `data` to be
    # a numpy array rather than a plain list -- confirm.
    if np.all(data[start_window_from: start_window_to] == data[end_window_from: end_window_to]):
        data[start_window_from: end_window_to] = data[start_window_from]

return data
def smoothing_window(data, window=[1, 1, 1])
This is a smoothing functionality so we can fix misclassifications. It will run a sliding window of form [border, smoothing, border] on the signal and if the border elements are the same it will change the smooth elements to match the border. An example would be for a window of [2, 1, 2] we have the following elements [1, 1, 0, 1, 1], this will transform it into [1, 1, 1, 1, 1]. So if the border elements match it will transform the middle (smoothing) into the same as the border. :param data array: One-dimensional array. :param window array: Used to define the [border, smoothing, border] regions. :return data array: The smoothed version of the original data.
2.107103
2.130033
0.989235
# Plot the raw signal, then overlay the peaks of each segment class
# with a distinct colour (one scatter series per unique label).
fig, ax = plt.subplots(figsize=figsize)
plt.plot(data);
for segment in np.unique(segment_indexes):
    # Peaks whose class label equals this segment value.
    plt.plot(peaks[np.where(segment_indexes == segment)[0]], data[peaks][np.where(segment_indexes == segment)[0]], 'o')
plt.show()
def plot_segmentation(data, peaks, segment_indexes, figsize=(10, 5))
Will plot the data and segmentation based on the peaks and segment indexes. :param 1d-array data: The orginal axis of the data that was segmented into sections. :param 1d-array peaks: Peaks of the data. :param 1d-array segment_indexes: These are the different classes, corresponding to each peak. Will not return anything, instead it will plot the data and peaks with different colors for each class.
2.641391
2.763082
0.955958
# Cluster the peaks into two classes, then split the continuous label
# array into contiguous runs of identical labels.
clusters, peaks, promi = cluster_walk_turn(data, window=window)
group_one = []
group_two = []

start = 0
for i in range(1, len(clusters)):
    if clusters[i-1] != clusters[i]:
        # A run just ended; sanity-check it is homogeneous.
        assert np.all(clusters[start: i] == clusters[start]), 'Some values are mixed up, please check!'
        add = group_one if clusters[start] == 0 else group_two
        add.append(peaks[start: i])
        start = i

    # hacky fix for the last part of the signal ...
    # I need to change this ...
    if i == len(clusters)-1:
        if not peaks[start] in add[-1]:
            add = group_one if clusters[start] == 0 else group_two
            add.append(peaks[start: ])

# The group whose peaks reach the larger amplitude is taken to be the
# walks; the other group is the turns.
maxes_one = [np.max(data[c]) for c in group_one]
maxes_two = [np.max(data[c]) for c in group_two]

walks, turns = group_two, group_one
if np.max(maxes_one) > np.max(maxes_two):
    walks, turns = group_one, group_two

# let's drop any turns at the end of the signal
# if len(turns[-1]) > len(walks[-1]):
#     turns.pop()

return walks, turns
def separate_walks_turns(data, window=[1, 1, 1])
Will separate peaks into the clusters by following the trend in the clusters array. This is usedful because scipy's k-mean clustering will give us a continous clusters array. :param clusters array: A continous array representing different classes. :param peaks array: The peaks that we want to separate into the classes from the custers. :return walks arrays: An array of arrays that will have all the peaks corresponding to every individual walk. :return turns arraays: Array of array which has all the indices of the peaks that correspond to turning.
3.528565
3.708212
0.951554
dimensions = len(centroids[0]) negative_base_point = array(dimensions*[-100]) decorated = [ (euclidean(centroid, negative_base_point), centroid) for centroid in centroids ] decorated.sort() return array([centroid for dist, centroid in decorated])
def centroid_sort(centroids)
Sort centroids. This is required so that the same cluster centroid is always the 0th one. It should also be the \ most negative. Order defined by the Euclidean distance between the centroid and an arbitrary "small" point \ [-100, -100] (in each dimension) to account for possible negatives. Cluster 0 is the closest to that point, etc. 0. Set up >>> from numpy.testing import assert_array_equal 1. Single centroids just return themselves. >>> centroid_sort(array([[1.1, 2.2]])) array([[ 1.1, 2.2]]) >>> centroid_sort(array([[1.1, 2.2, 3.3]])) array([[ 1.1, 2.2, 3.3]]) 2. Positive 2d centroids are ordered. >>> centroids = array([ ... [5.34443858, 0.63266844], # 3 ... [2.69156877, 0.76448578], # 1 ... [4.74784197, 1.0815235 ], # 2 ... [1.02330015, 0.16788118], # 0 ... ]) >>> expected_sorted_centroids = array([ ... [1.02330015, 0.16788118], # 0 ... [2.69156877, 0.76448578], # 1 ... [4.74784197, 1.0815235 ], # 2 ... [5.34443858, 0.63266844], # 3 ... ]) >>> result = centroid_sort(centroids) >>> assert_array_equal(result, expected_sorted_centroids) 3. 3d centroids spanning the origin are ordered. >>> centroids = array([ ... [ 3, 3, 4 ], # 3 ... [ 1.5, 2, 3 ], # 2 ... [-1, -1, -1 ], # 0 ... [ 0, 1, 0.5], # 1 ... ]) >>> expected_sorted_centroids = array([ ... [-1, -1, -1 ], # 0 ... [ 0, 1, 0.5], # 1 ... [ 1.5, 2, 3 ], # 2 ... [ 3, 3, 4 ], # 3 ... ]) >>> result = centroid_sort(centroids) >>> assert_array_equal(result, expected_sorted_centroids) :param centroids: array centroids :type centroids: numpy array :return centroids: array centroids :rtype centroids: numpy array
5.261602
5.174355
1.016861
for index, row in enumerate(arr): if non_zero_row(row): return index raise ValueError('No non-zero values')
def non_zero_index(arr)
Raises: ValueError: If no-non-zero rows can be found. 0. Empty array raises. >>> arr = array([]) >>> non_zero_index(arr) 1. Array with zero values raises. >>> arr = array([ ... [0, 0], ... [0, 0], ... [0, 0, 0], ... ]) >>> non_zero_index(arr) 2. Array with a non-zero value will have that index returned. >>> arr = array([ ... [0, 0], ... [0, 0, 0], ... [1, 0, 0], # Still has zeros ... [1, 1, 0], ... [0, 1, 1], ... [-1, 0, 0], ... [-1, 2, 3], # First non-zero array ... [1, 2, 3], ... ]) >>> non_zero_index(arr) 6 :param arr: array :type arr: numpy array :return index: Index of first non-zero entry in an array. :rtype index: int
3.831163
4.461995
0.858621
if len(arr) == 0: return False for item in arr: if item == 0: return False return True
def non_zero_row(arr)
0. Empty row returns False. >>> arr = array([]) >>> non_zero_row(arr) False 1. Row with a zero returns False. >>> arr = array([1, 4, 3, 0, 5, -1, -2]) >>> non_zero_row(arr) False 2. Row with no zeros returns True. >>> arr = array([-1, -0.1, 0.001, 2]) >>> non_zero_row(arr) True :param arr: array :type arr: numpy array :return empty: If row is completely free of zeros :rtype empty: bool
3.089794
3.465142
0.891679
# Reinterpret `overlap` as the step between consecutive window starts.
overlap = window_size - overlap
# Shape/strides of every length-window_size slice of idx.
sh = (idx.size - window_size + 1, window_size)
st = idx.strides * 2
# Strided view of all slices, subsampled every `overlap`-th one.
# No data is copied; the result is a view into idx.
view = np.lib.stride_tricks.as_strided(idx, strides=st, shape=sh)[0::overlap]
return view
def window_features(idx, window_size=100, overlap=10)
Generate indexes for a sliding window with overlap :param array idx: The indexes that need to be windowed. :param int window_size: The size of the window. :param int overlap: How much should each window overlap. :return array view: The indexes for the windows with overlap.
3.168501
3.16959
0.999656
try:
    # Short timeout keeps the version check from blocking the app.
    response = requests.get(PYPI_URL, timeout=HALF_SECOND_TIMEOUT)
    response.raise_for_status()
    data = response.json()
    # PyPI JSON API: the current release lives at info.version.
    version_str = data["info"]["version"]
    return _parse_version_str(version_str)
except requests.exceptions.ConnectionError:
    raise VersionException(UNABLE_TO_ACCESS_PYPI + " Failed to connect.")
except requests.exceptions.Timeout:
    raise VersionException(UNABLE_TO_ACCESS_PYPI + " Timeout")
def get_pypi_version()
Returns the version info from pypi for this app.
2.883511
2.802714
1.028828
# Fetch the project payload and wrap it in a Project model object.
return self._create_item_response(
    self.data_service.get_project_by_id(project_id),
    Project)
def get_project_by_id(self, project_id)
Get details about project with the specified uuid :param project_id: str: uuid of the project to fetch :return: Project
5.612062
7.052692
0.795733
# Create the project remotely and wrap the payload in a Project.
return self._create_item_response(
    self.data_service.create_project(name, description),
    Project)
def create_project(self, name, description)
Create a new project with the specified name and description :param name: str: name of the project to create :param description: str: description of the project to create :return: Project
7.082603
9.800038
0.722712
# Create the folder remotely and wrap the payload in a Folder.
return self._create_item_response(
    self.data_service.create_folder(folder_name, parent_kind_str, parent_uuid),
    Folder
)
def create_folder(self, folder_name, parent_kind_str, parent_uuid)
Create a folder under a particular parent :param folder_name: str: name of the folder to create :param parent_kind_str: str: kind of the parent of this folder :param parent_uuid: str: uuid of the parent of this folder (project or another folder) :return: Folder: folder metadata
4.640178
5.590753
0.829974
# List the project's direct children (optionally filtered by name);
# each payload item becomes a File or Folder via the constructor helper.
return self._create_array_response(
    self.data_service.get_project_children(
        project_id,
        name_contains
    ),
    DDSConnection._folder_or_file_constructor
)
def get_project_children(self, project_id, name_contains=None)
Get direct files and folders of a project. :param project_id: str: uuid of the project to list contents :param name_contains: str: filter children based on a pattern :return: [File|Folder]: list of Files/Folders contained by the project
9.712928
10.716325
0.906367
# List the folder's direct children (optionally filtered by name);
# each payload item becomes a File or Folder via the constructor helper.
return self._create_array_response(
    self.data_service.get_folder_children(
        folder_id,
        name_contains
    ),
    DDSConnection._folder_or_file_constructor
)
def get_folder_children(self, folder_id, name_contains=None)
Get direct files and folders of a folder. :param folder_id: str: uuid of the folder :param name_contains: str: filter children based on a pattern :return: File|Folder
8.382789
10.295452
0.814222
# Ask the service for a (temporary) download URL and wrap it.
return self._create_item_response(
    self.data_service.get_file_url(file_id),
    FileDownload
)
def get_file_download(self, file_id)
Get a file download object that contains temporary url settings needed to download the contents of a file. :param file_id: str: uuid of the file :return: FileDownload
7.43793
8.848359
0.8406
# Hash the local file, register the upload, push the chunks in
# parallel, then finalize to obtain the remote file metadata.
path_data = PathData(local_path)
hash_data = path_data.get_hash()
file_upload_operations = FileUploadOperations(self.data_service, None)
upload_id = file_upload_operations.create_upload(project_id, path_data, hash_data,
                                                 remote_filename=remote_filename,
                                                 storage_provider=self.config.storage_provider_id)
context = UploadContext(self.config, self.data_service, upload_id, path_data)
ParallelChunkProcessor(context).run()
# finish_upload verifies the hash and links the file under its parent,
# replacing existing_file_id when given.
remote_file_data = file_upload_operations.finish_upload(upload_id, hash_data, parent_data, existing_file_id)
return File(self, remote_file_data)
def upload_file(self, local_path, project_id, parent_data, existing_file_id=None, remote_filename=None)
Upload a file under a specific location in DDSConnection possibly replacing an existing file. :param local_path: str: path to a local file to upload :param project_id: str: uuid of the project to add this file to :param parent_data: ParentData: info about the parent of this file :param existing_file_id: str: uuid of file to create a new version of (or None to create a new file) :param remote_filename: str: name to use for our remote file (defaults to local_path basename otherwise) :return: File
3.529864
3.801152
0.92863
kind = data_dict['kind'] if kind == KindType.folder_str: return Folder(dds_connection, data_dict) elif data_dict['kind'] == KindType.file_str: return File(dds_connection, data_dict)
def _folder_or_file_constructor(dds_connection, data_dict)
Create a File or Folder based on the kind value in data_dict :param dds_connection: DDSConnection :param data_dict: dict: payload received from DDSConnection API :return: File|Folder
3.052965
3.035907
1.005619
# Fetch the folder payload and wrap it in a Folder model object.
return self._create_item_response(
    self.data_service.get_folder(folder_id),
    Folder
)
def get_folder_by_id(self, folder_id)
Get folder details for a folder id. :param folder_id: str: uuid of the folder :return: Folder
6.80095
9.505779
0.715454
# Fetch the file payload and wrap it in a File model object.
return self._create_item_response(
    self.data_service.get_file(file_id),
    File
)
def get_file_by_id(self, file_id)
Get folder details for a file id. :param file_id: str: uuid of the file :return: File
6.919812
8.970568
0.771391
# Stream the download to disk in chunks to bound memory use.
response = self._get_download_response()
with open(file_path, 'wb') as f:
    for chunk in response.iter_content(chunk_size=chunk_size):
        if chunk:  # filter out keep-alive new chunks
            f.write(chunk)
def save_to_path(self, file_path, chunk_size=DOWNLOAD_FILE_CHUNK_SIZE)
Save the contents of the remote file to a local path. :param file_path: str: file path :param chunk_size: chunk size used to write local file
1.911512
2.061412
0.927283
# Split the remote path into components and walk the node tree.
# NOTE(review): splitting on os.sep ties remote paths to the *local*
# platform separator -- confirm remote paths always use it.
path_parts = self.remote_path.split(os.sep)
return self._get_child_recurse(path_parts, self.node)
def get_child(self)
Find file or folder at the remote_path :return: File|Folder
6.733872
5.088938
1.323237
try:
    result = task_func(context)
    return task_id, result
except:
    # Put all exception text into an exception and raise that so main process will print this out
    # (deliberate bare except: the traceback is flattened to a plain
    # string so it survives pickling back across the process boundary).
    raise Exception("".join(traceback.format_exception(*sys.exc_info())))
def execute_task_async(task_func, task_id, context)
Global function run for Task. multiprocessing requires a top level function. :param task_func: function: function to run (must be pickle-able) :param task_id: int: unique id of this task :param context: object: single argument to task_func (must be pickle-able) :return: (task_id, object): return passed in task id and result object
6.479991
5.915905
1.095351
# Group the task under the id it is waiting on, creating the bucket
# on first use.
wait_id = task.wait_for_task_id
self.wait_id_to_task.setdefault(wait_id, []).append(task)
def add(self, task)
Add this task to the lookup based on it's wait_for_task_id property. :param task: Task: task to add to the list
2.73152
2.166457
1.260823
# Reserve a unique id, then queue a Task that waits on parent_task_id
# (None means the task may start immediately).
task_id = self._claim_next_id()
self.waiting_task_list.add(Task(task_id, parent_task_id, command))
return task_id
def add(self, parent_task_id, command)
Create a task for the command that will wait for parent_task_id before starting. :param parent_task_id: int: id of task to wait for or None if it can start immediately :param command: TaskCommand: contains data function to run :return: int: task id we created for this command
4.319171
4.550402
0.949185
# Seed the executor with tasks that have no parent (can start now).
for task in self.get_next_tasks(None):
    self.executor.add_task(task, None)
# Drain the executor; each finished task may unlock child tasks.
while not self.executor.is_done():
    done_task_and_result = self.executor.wait_for_tasks()
    for task, task_result in done_task_and_result:
        self._add_sub_tasks_to_executor(task, task_result)
def run(self)
Runs all tasks in this runner on the executor. Blocks until all tasks have been completed. :return:
4.089229
3.886744
1.052096
# Every task waiting on parent_task can now run, receiving the
# parent's result as its setup value.
for sub_task in self.waiting_task_list.get_next_tasks(parent_task.id):
    self.executor.add_task(sub_task, parent_task_result)
def _add_sub_tasks_to_executor(self, parent_task, parent_task_result)
Add all subtasks for parent_task to the executor. :param parent_task: Task: task that has just finished :param parent_task_result: object: result of task that is finished
3.83082
3.865302
0.991079
# Queue the (task, parent result) pair and index the task by id so
# queue messages can later be routed back to it.
self.tasks.append((task, parent_task_result))
self.task_id_to_task[task.id] = task
def add_task(self, task, parent_task_result)
Add a task to run with the specified result from this tasks parent(can be None) :param task: Task: task that should be run :param parent_task_result: object: value to be passed to task for setup
2.772632
3.439016
0.806228
finished_tasks_and_results = [] while len(finished_tasks_and_results) == 0: if self.is_done(): break self.start_tasks() self.process_all_messages_in_queue() finished_tasks_and_results = self.get_finished_results() return finished_tasks_and_results
def wait_for_tasks(self)
Wait for one or more tasks to finish or return empty list if we are done. Starts new tasks if we have less than task_at_once currently running. :return: [(Task,object)]: list of (task,result) for finished tasks
3.475466
3.11873
1.114385
# Launch queued tasks until we hit the concurrency limit or run out.
while self.tasks_at_once > len(self.pending_results) and self._has_more_tasks():
    task, parent_result = self.tasks.popleft()
    self.execute_task(task, parent_result)
def start_tasks(self)
Start however many tasks we can based on our limits and what we have left to finish.
6.276598
5.225015
1.201259
# Give the task its parent's result, build a picklable context, and
# hand the work to the pool; the AsyncResult is tracked until done.
task.before_run(parent_result)
context = task.create_context(self.message_queue)
pending_result = self.pool.apply_async(execute_task_async, (task.func, task.id, context))
self.pending_results.append(pending_result)
def execute_task(self, task, parent_result)
Run a single task in another process saving the result to our list of pending results. :param task: Task: function and data we can run in another process :param parent_result: object: result from our parent task
4.775263
4.960319
0.962693
try:
    # Non-blocking read; messages are (task_id, payload) pairs.
    message = self.message_queue.get_nowait()
    task_id, data = message
    # Route the payload to the task that produced it.
    task = self.task_id_to_task[task_id]
    task.on_message(data)
    return True
except queue.Empty:
    # Nothing queued right now.
    return False
def process_single_message_from_queue(self)
Tries to read a single message from the queue and let the associated task process it. :return: bool: True if we processed a message, otherwise False
2.672872
2.391397
1.117703
task_and_results = [] for pending_result in self.pending_results: if pending_result.ready(): ret = pending_result.get() task_id, result = ret task = self.task_id_to_task[task_id] # process any pending messages for this task (will also process other tasks messages) self.process_all_messages_in_queue() task.after_run(result) task_and_results.append((task, result)) self.pending_results.remove(pending_result) return task_and_results
def get_finished_results(self)
Go through pending results and retrieve the results if they are done. Then start child tasks for the task that finished.
3.724828
3.332269
1.117805
# NOTE(review): `doctype = b` relies on a module-level name `b` --
# presumably the XML doctype header bytes (this line may be a
# truncated byte literal); confirm against the module globals.
doctype = b
# With no body there is nothing to flatten; fire with empty bytes.
if body_element is None:
    return succeed(b"")
# Flatten the template tag, then prepend the doctype header.
d = flattenString(None, body_element)
d.addCallback(lambda flattened: doctype + flattened)
return d
def to_xml(body_element)
Serialize a L{twisted.web.template.Tag} to a UTF-8 encoded XML document with an XML doctype header.
7.085278
6.436985
1.100714
# NOTE(review): `task` here is presumably the twisted.internet.task
# module used as the default cooperator -- confirm the module import.
if cooperator is None:
    cooperator = task
return region.get_client(
    _Route53Client,
    agent=agent,
    creds=region.creds,
    region=REGION_US_EAST_1,
    endpoint=AWSServiceEndpoint(_OTHER_ENDPOINT),
    cooperator=cooperator,
)
def get_route53_client(agent, region, cooperator=None)
Get a non-registration Route53 client.
5.428886
5.085878
1.067443
op = _Operation(service=b"route53", **kw)
# No body: the operation is ready immediately.
if body is None:
    return succeed(op)
# Serialize the body element to XML, then attach it to a copy of the
# operation (attr.assoc returns a new instance).
d = to_xml(body)
d.addCallback(lambda body: attr.assoc(op, body=body))
return d
def _route53_op(body=None, **kw)
Construct an L{_Operation} representing a I{Route53} service API call.
6.185495
4.832296
1.280032
# Zone ids arrive as "/hostedzone/<id>"; strip the prefix.
# Zone names are IDNA-decoded to a unicode hostname.
return HostedZone(
    name=maybe_bytes_to_unicode(zone.find("Name").text).encode("ascii").decode("idna"),
    identifier=maybe_bytes_to_unicode(zone.find("Id").text).replace(u"/hostedzone/", u""),
    rrset_count=int(zone.find("ResourceRecordSetCount").text),
    reference=maybe_bytes_to_unicode(zone.find("CallerReference").text),
)
def hostedzone_from_element(zone)
Construct a L{HostedZone} instance from a I{HostedZone} XML element.
3.253748
3.09414
1.051584
# Build the <Change> element for a Route53 ChangeResourceRecordSets
# request. Records are emitted in sorted order for a stable document.
return tags.Change(
    tags.Action(
        change.action,
    ),
    tags.ResourceRecordSet(
        tags.Name(
            unicode(change.rrset.label),
        ),
        tags.Type(
            change.rrset.type,
        ),
        tags.TTL(
            u"{}".format(change.rrset.ttl),
        ),
        tags.ResourceRecords(list(
            tags.ResourceRecord(tags.Value(rr.to_text()))
            for rr in sorted(change.rrset.records)
        ))
    ),
)
def to_element(change)
@param change: An L{txaws.route53.interface.IRRSetChange} provider. @return: The L{twisted.web.template} element which describes this change.
3.208348
3.352736
0.956934
# One registry bucket per action, keyed by version (None = catch-all).
by_version = self._by_action.setdefault(action, {})
# Duplicate registrations are a programming error, not an API error.
if version in by_version:
    raise RuntimeError("A method was already registered for action"
                       " %s in version %s" % (action, version))
by_version[version] = method_class
def add(self, method_class, action, version=None)
Add a method class to the regitry. @param method_class: The method class to add @param action: The action that the method class can handle @param version: The version that the method class can handle
3.318168
3.879138
0.855388
def check(self, action, version=None):
    """
    Check if the given action is supported in the given version.

    @raises APIError: If there's no method class registered for handling
        the given action or version.
    """
    if action not in self._by_action:
        raise APIError(400, "InvalidAction", "The action %s is not valid "
                       "for this web service." % action)
    versions = self._by_action[action]
    if None in versions:
        # A catch-all handler accepts every version.
        return
    if version not in versions:
        raise APIError(400, "InvalidVersion", "Invalid API version.")
4.15211
3.608025
1.150798
def get(self, action, version=None):
    """
    Get the method class handling the given action and version.

    Falls back to the catch-all handler registered under C{None} when no
    version-specific handler exists.
    """
    handlers = self._by_action[action]
    if version not in handlers:
        return handlers[None]
    return handlers[version]
3.243943
2.870968
1.129913
def scan(self, module, onerror=None, ignore=None):
    """
    Scan the given module object for L{Method}s and register them.
    """
    from venusian import Scanner
    scan_kwargs = dict(onerror=onerror, categories=["method"])
    if ignore is not None:
        # "ignore" is only forwarded when given, for backward compatibility
        # with venusian versions that lack the parameter.
        scan_kwargs["ignore"] = ignore
    Scanner(registry=self).scan(module, **scan_kwargs)
5.627964
5.034511
1.117877
def calculate_parameters(self, independentTs, dependentTs):
    """
    Calculate and return the parameters for the regression line

    Return the parameter for the line describing the relationship between
    the input variables.

    :param Timeseries independentTs: The Timeseries used for the
        independent variable (x-axis). The Timeseries must have at least
        2 datapoints with different dates and values
    :param Timeseries dependentTs: The Timeseries used as the dependent
        variable (y-axis). The Timeseries must have at least 2 datapoints,
        which dates match with independentTs

    :return: A tuple containing the y-axis intercept and the slope used to
        execute the regression
    :rtype: tuple

    :raise: Raises an :py:exc:`ValueError` if

        - independentTs and dependentTs have not at least two matching dates
        - independentTs has only one distinct value
        - The dates in one or both Timeseries are not distinct.
    """
    listX, listY = self.match_time_series(independentTs, dependentTs)
    # list() guards against iterator results so len() below stays valid
    # even when the matcher returns a lazy iterable (Python 3 map/filter).
    listX = list(listX)
    listY = list(listY)

    if len(listX) == 0 or len(listY) == 0:
        raise ValueError("Lists need to have some equal dates or cannot be empty")
    if len(listX) != len(listY):
        raise ValueError("Each Timeseries need to have distinct dates")

    # Materialize as real lists: each sequence is iterated more than once
    # below, which a one-shot map() iterator (Python 3) would not allow.
    xValues = [item[1] for item in listX]
    yValues = [item[1] for item in listY]

    xMean = FusionMethods["mean"](xValues)
    yMean = FusionMethods["mean"](yValues)

    xDeviation = [value - xMean for value in xValues]
    yDeviation = [value - yMean for value in yValues]

    try:
        # Slope: covariance of x and y divided by the variance of x.
        parameter1 = sum(x * y for x, y in zip(xDeviation, yDeviation)) / sum(x * x for x in xDeviation)
    except ZeroDivisionError:
        # error occurs if xDeviation is always 0, which means that all
        # x values are the same
        raise ValueError("Not enough distinct x values")

    # Intercept follows from the slope and the two means.
    parameter0 = yMean - (parameter1 * xMean)

    return (parameter0, parameter1)
3.27689
3.133293
1.04583
def calculate_parameters_with_confidence(self, independentTs, dependentTs, confidenceLevel, samplePercentage=.1):
    """
    Same functionality as calculate_parameters, just that additionally the
    confidence interval for a given confidenceLevel is calculated.

    This is done based on a sample of the dependentTs training data that is
    validated against the prediction. The signed error of the predictions
    and the sample is then used to calculate the bounds of the interval.

    further reading: http://en.wikipedia.org/wiki/Confidence_interval

    :param Timeseries independentTs: The Timeseries used for the
        independent variable (x-axis). The Timeseries must have at least
        2 datapoints with different dates and values
    :param Timeseries dependentTs: The Timeseries used as the dependent
        variable (y-axis). The Timeseries must have at least 2 datapoints,
        which dates match with independentTs
    :param float confidenceLevel: The percentage of entries in the sample
        that should have an prediction error closer or equal to 0 than the
        bounds of the confidence interval.
    :param float samplePercentage: How much of the dependentTs should be
        used for sampling

    :return: A tuple containing the y-axis intercept and the slope used to
        execute the regression and the (underestimation, overestimation)
        for the given confidenceLevel
    :rtype: tuple

    :raise: Raises an :py:exc:`ValueError` if

        - independentTs and dependentTs have not at least two matching dates
        - independentTs has only one distinct value
        - The dates in one or both Timeseries are not distinct.
    """
    # Split the dependent series into a hold-out sample and training data.
    sampleY, trainingY = dependentTs.sample(samplePercentage)

    # Align the independent series with each split by matching dates.
    sampleX = TimeSeries.from_twodim_list(
        self.match_time_series(sampleY, independentTs)[1])
    trainingX = TimeSeries.from_twodim_list(
        self.match_time_series(trainingY, independentTs)[1])

    # Fit the regression on the training portion only.
    n, m = self.calculate_parameters(trainingX, trainingY)

    # Validate the fit against the hold-out sample using the signed error;
    # note that MSD(x, y) != MSD(y, x).
    msd = MSD()
    msd.initialize(self.predict(sampleX, n, m), sampleY)

    return (n, m, msd.confidence_interval(confidenceLevel))
4.197212
4.178429
1.004495
def predict(self, timeseriesX, n, m):
    """
    Calculates the dependent timeseries Y for the given parameters and
    independent timeseries. (y = m * x + n)

    :param TimeSeries timeseriesX: the independent Timeseries.
    :param float n: The interception with the x access that has been
        calculated during regression
    :param float m: The slope of the function that has been calculated
        during regression

    :return TimeSeries timeseries_y: the predicted values for the
        dependent TimeSeries. Its length and first dimension will equal to
        timeseriesX.
    """
    # Apply the fitted line y = m * x + n to each (date, value) entry.
    predicted = [[entry[0], m * entry[1] + n] for entry in timeseriesX]
    return TimeSeries.from_twodim_list(predicted)
3.674743
3.738291
0.983001
def match_time_series(self, timeseries1, timeseries2):
    """
    Return two lists of the two input time series with matching dates

    :param TimeSeries timeseries1: The first timeseries
    :param TimeSeries timeseries2: The second timeseries

    :return: Two two dimensional lists containing the matched values,
    :rtype: two List
    """
    entries1 = timeseries1.to_twodim_list()
    entries2 = timeseries2.to_twodim_list()

    # Intersect the date columns. Sets give O(1) membership tests instead
    # of the original O(n) list scans per entry, and returning real lists
    # (rather than the one-shot iterators map()/filter() produce on
    # Python 3) keeps len() working in the callers.
    common_dates = set(entry[0] for entry in entries1) & set(entry[0] for entry in entries2)

    listX = [entry for entry in entries1 if entry[0] in common_dates]
    listY = [entry for entry in entries2 if entry[0] in common_dates]
    return listX, listY
2.619368
2.480544
1.055965