code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
d = self.authorize_security_group(
group_name,
source_group_name=source_group_name,
source_group_owner_id=source_group_owner_id)
return d | def authorize_group_permission(
self, group_name, source_group_name, source_group_owner_id) | This is a convenience function that wraps the "authorize group"
functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}. | 2.670598 | 2.069216 | 1.290633 |
d = self.authorize_security_group(
group_name,
ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
cidr_ip=cidr_ip)
return d | def authorize_ip_permission(
self, group_name, ip_protocol, from_port, to_port, cidr_ip) | This is a convenience function that wraps the "authorize ip
permission" functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}. | 2.299076 | 2.151895 | 1.068396 |
d = self.revoke_security_group(
group_name,
source_group_name=source_group_name,
source_group_owner_id=source_group_owner_id)
return d | def revoke_group_permission(
self, group_name, source_group_name, source_group_owner_id) | This is a convenience function that wraps the "revoke group"
functionality of the C{revoke_security_group} method.
For an explanation of the parameters, see C{revoke_security_group}. | 2.732391 | 2.21158 | 1.235493 |
d = self.revoke_security_group(
group_name,
ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
cidr_ip=cidr_ip)
return d | def revoke_ip_permission(
self, group_name, ip_protocol, from_port, to_port, cidr_ip) | This is a convenience function that wraps the "revoke ip
permission" functionality of the C{revoke_security_group} method.
For an explanation of the parameters, see C{revoke_security_group}. | 2.380751 | 2.248435 | 1.058848 |
volumeset = {}
for pos, volume_id in enumerate(volume_ids):
volumeset["VolumeId.%d" % (pos + 1)] = volume_id
query = self.query_factory(
action="DescribeVolumes", creds=self.creds, endpoint=self.endpoint,
other_params=volumeset)
d = query.submit()
return d.addCallback(self.parser.describe_volumes) | def describe_volumes(self, *volume_ids) | Describe available volumes. | 4.3948 | 4.304664 | 1.020939 |
params = {"AvailabilityZone": availability_zone}
if ((snapshot_id is None and size is None) or
(snapshot_id is not None and size is not None)):
raise ValueError("Please provide either size or snapshot_id")
if size is not None:
params["Size"] = str(size)
if snapshot_id is not None:
params["SnapshotId"] = snapshot_id
query = self.query_factory(
action="CreateVolume", creds=self.creds, endpoint=self.endpoint,
other_params=params)
d = query.submit()
return d.addCallback(self.parser.create_volume) | def create_volume(self, availability_zone, size=None, snapshot_id=None) | Create a new volume. | 2.762113 | 2.69627 | 1.02442 |
snapshot_set = {}
for pos, snapshot_id in enumerate(snapshot_ids):
snapshot_set["SnapshotId.%d" % (pos + 1)] = snapshot_id
query = self.query_factory(
action="DescribeSnapshots", creds=self.creds,
endpoint=self.endpoint, other_params=snapshot_set)
d = query.submit()
return d.addCallback(self.parser.snapshots) | def describe_snapshots(self, *snapshot_ids) | Describe available snapshots.
TODO: ownerSet, restorableBySet | 4.13298 | 4.008595 | 1.03103 |
query = self.query_factory(
action="DeleteSnapshot", creds=self.creds, endpoint=self.endpoint,
other_params={"SnapshotId": snapshot_id})
d = query.submit()
return d.addCallback(self.parser.truth_return) | def delete_snapshot(self, snapshot_id) | Remove a previously created snapshot. | 7.695334 | 7.131026 | 1.079134 |
query = self.query_factory(
action="AttachVolume", creds=self.creds, endpoint=self.endpoint,
other_params={"VolumeId": volume_id, "InstanceId": instance_id,
"Device": device})
d = query.submit()
return d.addCallback(self.parser.attach_volume) | def attach_volume(self, volume_id, instance_id, device) | Attach the given volume to the specified instance at C{device}. | 4.470484 | 4.566604 | 0.978952 |
keypairs = {}
for index, keypair_name in enumerate(keypair_names):
keypairs["KeyName.%d" % (index + 1)] = keypair_name
query = self.query_factory(
action="DescribeKeyPairs", creds=self.creds,
endpoint=self.endpoint, other_params=keypairs)
d = query.submit()
return d.addCallback(self.parser.describe_keypairs) | def describe_keypairs(self, *keypair_names) | Returns information about key pairs available. | 3.770622 | 3.62689 | 1.039629 |
query = self.query_factory(
action="CreateKeyPair", creds=self.creds, endpoint=self.endpoint,
other_params={"KeyName": keypair_name})
d = query.submit()
return d.addCallback(self.parser.create_keypair) | def create_keypair(self, keypair_name) | Create a new 2048 bit RSA key pair and return a unique ID that can be
used to reference the created key pair when launching new instances. | 6.056419 | 5.862619 | 1.033057 |
query = self.query_factory(
action="ImportKeyPair", creds=self.creds, endpoint=self.endpoint,
other_params={"KeyName": keypair_name,
"PublicKeyMaterial": b64encode(key_material)})
d = query.submit()
return d.addCallback(self.parser.import_keypair, key_material) | def import_keypair(self, keypair_name, key_material) | Import an existing SSH key into EC2. It supports:
* OpenSSH public key format (e.g., the format in
~/.ssh/authorized_keys)
* Base64 encoded DER format
* SSH public key file format as specified in RFC4716
@param keypair_name: The name of the key to create.
@param key_material: The material in one of the supported format.
@return: A L{Deferred} firing with a L{model.Keypair} instance if
successful.
TODO: there is no corresponding method in the 2009-11-30 version
of the ec2 wsdl. Delete this? | 4.689326 | 4.460768 | 1.051237 |
# XXX remove empty other_params
query = self.query_factory(
action="AllocateAddress", creds=self.creds, endpoint=self.endpoint,
other_params={})
d = query.submit()
return d.addCallback(self.parser.allocate_address) | def allocate_address(self) | Acquire an elastic IP address to be attached subsequently to EC2
instances.
@return: the IP address allocated. | 9.470846 | 9.180939 | 1.031577 |
query = self.query_factory(
action="ReleaseAddress", creds=self.creds, endpoint=self.endpoint,
other_params={"PublicIp": address})
d = query.submit()
return d.addCallback(self.parser.truth_return) | def release_address(self, address) | Release a previously allocated address returned by C{allocate_address}.
@return: C{True} if the operation succeeded. | 10.581806 | 12.964411 | 0.81622 |
address_set = {}
for pos, address in enumerate(addresses):
address_set["PublicIp.%d" % (pos + 1)] = address
query = self.query_factory(
action="DescribeAddresses", creds=self.creds,
endpoint=self.endpoint, other_params=address_set)
d = query.submit()
return d.addCallback(self.parser.describe_addresses) | def describe_addresses(self, *addresses) | List the elastic IPs allocated in this account.
@param addresses: if specified, the addresses to get information about.
@return: a C{list} of (address, instance_id). If the elastic IP is not
associated currently, C{instance_id} will be C{None}. | 5.342552 | 5.911082 | 0.90382 |
instances = []
for instance_data in root.find("instancesSet"):
instances.append(self.instance(instance_data, reservation))
return instances | def instances_set(self, root, reservation) | Parse instance data out of an XML payload.
@param root: The root node of the XML payload.
@param reservation: The L{Reservation} associated with the instances
from the response.
@return: A C{list} of L{Instance}s. | 3.436747 | 4.329862 | 0.793731 |
for group_data in instance_data.find("groupSet"):
group_id = group_data.findtext("groupId")
group_name = group_data.findtext("groupName")
reservation.groups.append((group_id, group_name))
instance_id = instance_data.findtext("instanceId")
instance_state = instance_data.find(
"instanceState").findtext("name")
private_dns_name = instance_data.findtext("privateDnsName")
dns_name = instance_data.findtext("dnsName")
private_ip_address = instance_data.findtext("privateIpAddress")
ip_address = instance_data.findtext("ipAddress")
key_name = instance_data.findtext("keyName")
ami_launch_index = instance_data.findtext("amiLaunchIndex")
products = []
product_codes = instance_data.find("productCodes")
if product_codes is not None:
for product_data in instance_data.find("productCodes"):
products.append(product_data.text)
instance_type = instance_data.findtext("instanceType")
launch_time = instance_data.findtext("launchTime")
placement = instance_data.find("placement").findtext(
"availabilityZone")
kernel_id = instance_data.findtext("kernelId")
ramdisk_id = instance_data.findtext("ramdiskId")
image_id = instance_data.findtext("imageId")
instance = model.Instance(
instance_id, instance_state, instance_type, image_id,
private_dns_name, dns_name, private_ip_address, ip_address,
key_name, ami_launch_index, launch_time, placement, products,
kernel_id, ramdisk_id, reservation=reservation)
return instance | def instance(self, instance_data, reservation) | Parse instance data out of an XML payload.
@param instance_data: An XML node containing instance data.
@param reservation: The L{Reservation} associated with the instance.
@return: An L{Instance}.
TODO: reason, platform, monitoring, subnetId, vpcId, privateIpAddress,
ipAddress, stateReason, architecture, rootDeviceName,
blockDeviceMapping, instanceLifecycle, spotInstanceRequestId. | 1.694077 | 1.633599 | 1.037021 |
root = XML(xml_bytes)
results = []
# May be a more elegant way to do this:
for reservation_data in root.find("reservationSet"):
# Create a reservation object with the parsed data.
reservation = model.Reservation(
reservation_id=reservation_data.findtext("reservationId"),
owner_id=reservation_data.findtext("ownerId"))
# Get the list of instances.
instances = self.instances_set(
reservation_data, reservation)
results.extend(instances)
return results | def describe_instances(self, xml_bytes) | Parse the reservations XML payload that is returned from an AWS
describeInstances API call.
Instead of returning the reservations as the "top-most" object, we
return the object that most developers and their code will be
interested in: the instances. In instances reservation is available on
the instance object.
The following instance attributes are optional:
* ami_launch_index
* key_name
* kernel_id
* product_codes
* ramdisk_id
* reason
@param xml_bytes: raw XML payload from AWS. | 4.264593 | 4.34169 | 0.982243 |
root = XML(xml_bytes)
# Get the security group information.
groups = []
for group_data in root.find("groupSet"):
group_id = group_data.findtext("groupId")
groups.append(group_id)
# Create a reservation object with the parsed data.
reservation = model.Reservation(
reservation_id=root.findtext("reservationId"),
owner_id=root.findtext("ownerId"),
groups=groups)
# Get the list of instances.
instances = self.instances_set(root, reservation)
return instances | def run_instances(self, xml_bytes) | Parse the reservations XML payload that is returned from an AWS
RunInstances API call.
@param xml_bytes: raw XML bytes with a C{RunInstancesResponse} root
element. | 3.581682 | 3.432292 | 1.043525 |
root = XML(xml_bytes)
result = []
# May be a more elegant way to do this:
instances = root.find("instancesSet")
if instances is not None:
for instance in instances:
instanceId = instance.findtext("instanceId")
previousState = instance.find("previousState").findtext(
"name")
currentState = instance.find("currentState").findtext(
"name")
result.append((instanceId, previousState, currentState))
return result | def terminate_instances(self, xml_bytes) | Parse the XML returned by the C{TerminateInstances} function.
@param xml_bytes: XML bytes with a C{TerminateInstancesResponse} root
element.
@return: An iterable of C{tuple} of (instanceId, previousState,
currentState) for the ec2 instances that where terminated. | 3.044578 | 2.606808 | 1.167933 |
root = XML(xml_bytes)
result = []
for group_info in root.findall("securityGroupInfo/item"):
id = group_info.findtext("groupId")
name = group_info.findtext("groupName")
description = group_info.findtext("groupDescription")
owner_id = group_info.findtext("ownerId")
allowed_groups = []
allowed_ips = []
ip_permissions = group_info.find("ipPermissions")
if ip_permissions is None:
ip_permissions = ()
for ip_permission in ip_permissions:
# openstack doesn't handle self authorized groups properly
# XXX this is an upstream problem and should be addressed there
# lp bug #829609
ip_protocol = ip_permission.findtext("ipProtocol")
from_port = ip_permission.findtext("fromPort")
to_port = ip_permission.findtext("toPort")
if from_port:
from_port = int(from_port)
if to_port:
to_port = int(to_port)
for groups in ip_permission.findall("groups/item") or ():
user_id = groups.findtext("userId")
group_name = groups.findtext("groupName")
if user_id and group_name:
if (user_id, group_name) not in allowed_groups:
allowed_groups.append((user_id, group_name))
for ip_ranges in ip_permission.findall("ipRanges/item") or ():
cidr_ip = ip_ranges.findtext("cidrIp")
allowed_ips.append(
model.IPPermission(
ip_protocol, from_port, to_port, cidr_ip))
allowed_groups = [model.UserIDGroupPair(user_id, group_name)
for user_id, group_name in allowed_groups]
security_group = model.SecurityGroup(
id, name, description, owner_id=owner_id,
groups=allowed_groups, ips=allowed_ips)
result.append(security_group)
return result | def describe_security_groups(self, xml_bytes) | Parse the XML returned by the C{DescribeSecurityGroups} function.
@param xml_bytes: XML bytes with a C{DescribeSecurityGroupsResponse}
root element.
@return: A list of L{SecurityGroup} instances. | 2.216396 | 2.20072 | 1.007123 |
root = XML(xml_bytes)
result = []
for volume_data in root.find("volumeSet"):
volume_id = volume_data.findtext("volumeId")
size = int(volume_data.findtext("size"))
snapshot_id = volume_data.findtext("snapshotId")
availability_zone = volume_data.findtext("availabilityZone")
status = volume_data.findtext("status")
create_time = volume_data.findtext("createTime")
create_time = datetime.strptime(
create_time[:19], "%Y-%m-%dT%H:%M:%S")
volume = model.Volume(
volume_id, size, status, create_time, availability_zone,
snapshot_id)
result.append(volume)
for attachment_data in volume_data.find("attachmentSet"):
instance_id = attachment_data.findtext("instanceId")
device = attachment_data.findtext("device")
status = attachment_data.findtext("status")
attach_time = attachment_data.findtext("attachTime")
attach_time = datetime.strptime(
attach_time[:19], "%Y-%m-%dT%H:%M:%S")
attachment = model.Attachment(
instance_id, device, status, attach_time)
volume.attachments.append(attachment)
return result | def describe_volumes(self, xml_bytes) | Parse the XML returned by the C{DescribeVolumes} function.
@param xml_bytes: XML bytes with a C{DescribeVolumesResponse} root
element.
@return: A list of L{Volume} instances.
TODO: attachementSetItemResponseType#deleteOnTermination | 1.580902 | 1.56174 | 1.01227 |
root = XML(xml_bytes)
volume_id = root.findtext("volumeId")
size = int(root.findtext("size"))
snapshot_id = root.findtext("snapshotId")
availability_zone = root.findtext("availabilityZone")
status = root.findtext("status")
create_time = root.findtext("createTime")
create_time = datetime.strptime(
create_time[:19], "%Y-%m-%dT%H:%M:%S")
volume = model.Volume(
volume_id, size, status, create_time, availability_zone,
snapshot_id)
return volume | def create_volume(self, xml_bytes) | Parse the XML returned by the C{CreateVolume} function.
@param xml_bytes: XML bytes with a C{CreateVolumeResponse} root
element.
@return: The L{Volume} instance created. | 2.067368 | 2.134619 | 0.968495 |
root = XML(xml_bytes)
result = []
for snapshot_data in root.find("snapshotSet"):
snapshot_id = snapshot_data.findtext("snapshotId")
volume_id = snapshot_data.findtext("volumeId")
status = snapshot_data.findtext("status")
start_time = snapshot_data.findtext("startTime")
start_time = datetime.strptime(
start_time[:19], "%Y-%m-%dT%H:%M:%S")
progress = snapshot_data.findtext("progress")[:-1]
progress = float(progress or "0") / 100.
snapshot = model.Snapshot(
snapshot_id, volume_id, status, start_time, progress)
result.append(snapshot)
return result | def snapshots(self, xml_bytes) | Parse the XML returned by the C{DescribeSnapshots} function.
@param xml_bytes: XML bytes with a C{DescribeSnapshotsResponse} root
element.
@return: A list of L{Snapshot} instances.
TODO: ownersSet, restorableBySet, ownerId, volumeSize, description,
ownerAlias. | 2.20054 | 2.174262 | 1.012086 |
root = XML(xml_bytes)
snapshot_id = root.findtext("snapshotId")
volume_id = root.findtext("volumeId")
status = root.findtext("status")
start_time = root.findtext("startTime")
start_time = datetime.strptime(
start_time[:19], "%Y-%m-%dT%H:%M:%S")
progress = root.findtext("progress")[:-1]
progress = float(progress or "0") / 100.
return model.Snapshot(
snapshot_id, volume_id, status, start_time, progress) | def create_snapshot(self, xml_bytes) | Parse the XML returned by the C{CreateSnapshot} function.
@param xml_bytes: XML bytes with a C{CreateSnapshotResponse} root
element.
@return: The L{Snapshot} instance created.
TODO: ownerId, volumeSize, description. | 2.418299 | 2.489261 | 0.971493 |
root = XML(xml_bytes)
status = root.findtext("status")
attach_time = root.findtext("attachTime")
attach_time = datetime.strptime(
attach_time[:19], "%Y-%m-%dT%H:%M:%S")
return {"status": status, "attach_time": attach_time} | def attach_volume(self, xml_bytes) | Parse the XML returned by the C{AttachVolume} function.
@param xml_bytes: XML bytes with a C{AttachVolumeResponse} root
element.
@return: a C{dict} with status and attach_time keys.
TODO: volumeId, instanceId, device | 2.690947 | 2.310207 | 1.164808 |
results = []
root = XML(xml_bytes)
keypairs = root.find("keySet")
if keypairs is None:
return results
for keypair_data in keypairs:
key_name = keypair_data.findtext("keyName")
key_fingerprint = keypair_data.findtext("keyFingerprint")
results.append(model.Keypair(key_name, key_fingerprint))
return results | def describe_keypairs(self, xml_bytes) | Parse the XML returned by the C{DescribeKeyPairs} function.
@param xml_bytes: XML bytes with a C{DescribeKeyPairsResponse} root
element.
@return: a C{list} of L{Keypair}. | 2.502072 | 2.5855 | 0.967733 |
keypair_data = XML(xml_bytes)
key_name = keypair_data.findtext("keyName")
key_fingerprint = keypair_data.findtext("keyFingerprint")
key_material = keypair_data.findtext("keyMaterial")
return model.Keypair(key_name, key_fingerprint, key_material) | def create_keypair(self, xml_bytes) | Parse the XML returned by the C{CreateKeyPair} function.
@param xml_bytes: XML bytes with a C{CreateKeyPairResponse} root
element.
@return: The L{Keypair} instance created. | 2.368421 | 2.467526 | 0.959836 |
results = []
root = XML(xml_bytes)
for address_data in root.find("addressesSet"):
address = address_data.findtext("publicIp")
instance_id = address_data.findtext("instanceId")
results.append((address, instance_id))
return results | def describe_addresses(self, xml_bytes) | Parse the XML returned by the C{DescribeAddresses} function.
@param xml_bytes: XML bytes with a C{DescribeAddressesResponse} root
element.
@return: a C{list} of L{tuple} of (publicIp, instanceId). | 3.617459 | 2.793267 | 1.295064 |
results = []
root = XML(xml_bytes)
for zone_data in root.find("availabilityZoneInfo"):
zone_name = zone_data.findtext("zoneName")
zone_state = zone_data.findtext("zoneState")
results.append(model.AvailabilityZone(zone_name, zone_state))
return results | def describe_availability_zones(self, xml_bytes) | Parse the XML returned by the C{DescribeAvailibilityZones} function.
@param xml_bytes: XML bytes with a C{DescribeAvailibilityZonesResponse}
root element.
@return: a C{list} of L{AvailabilityZone}.
TODO: regionName, messageSet | 2.616801 | 2.879767 | 0.908685 |
version = self.params["SignatureVersion"]
if version == "2":
self.params["SignatureMethod"] = "Hmac%s" % hash_type.upper()
self.params["Signature"] = self.signature.compute() | def sign(self, hash_type="sha256") | Sign this query using its built in credentials.
@param hash_type: if the SignatureVersion is 2, specify the type of
hash to use, either "sha1" or "sha256". It defaults to the latter.
This prepares it to be sent, and should be done as the last step before
submitting the query. Signing is done automatically - this is a public
method to facilitate testing. | 4.65197 | 3.772404 | 1.233158 |
self.sign()
url = self.endpoint.get_uri()
method = self.endpoint.method
params = self.signature.get_canonical_query_params()
headers = {}
kwargs = {"method": method}
if method == "POST":
headers["Content-Type"] = "application/x-www-form-urlencoded"
kwargs["postdata"] = params
else:
url += "?%s" % params
if self.endpoint.get_host() != self.endpoint.get_canonical_host():
headers["Host"] = self.endpoint.get_canonical_host()
if headers:
kwargs["headers"] = headers
if self.timeout:
kwargs["timeout"] = self.timeout
d = self.get_page(url, **kwargs)
return d.addErrback(ec2_error_wrapper) | def submit(self) | Submit this query.
@return: A deferred from get_page | 3.050384 | 2.972222 | 1.026297 |
if "Signature" in self.params:
raise RuntimeError("Existing signature in parameters")
if self.signature_version is not None:
version = self.signature_version
else:
version = self.params["SignatureVersion"]
if str(version) == "1":
bytes = self.old_signing_text()
hash_type = "sha1"
elif str(version) == "2":
bytes = self.signing_text()
if self.signature_method is not None:
signature_method = self.signature_method
else:
signature_method = self.params["SignatureMethod"]
hash_type = signature_method[len("Hmac"):].lower()
else:
raise RuntimeError("Unsupported SignatureVersion: '%s'" % version)
return self.creds.sign(bytes, hash_type) | def compute(self) | Compute and return the signature according to the given data. | 3.376316 | 3.219349 | 1.048757 |
result = []
lower_cmp = lambda x, y: cmp(x[0].lower(), y[0].lower())
for key, value in sorted(self.params.items(), cmp=lower_cmp):
result.append("%s%s" % (key, value))
return "".join(result) | def old_signing_text(self) | Return the text needed for signing using SignatureVersion 1. | 3.170612 | 3.035447 | 1.044529 |
result = "%s\n%s\n%s\n%s" % (self.endpoint.method,
self.endpoint.get_canonical_host(),
self.endpoint.path,
self.get_canonical_query_params())
return result | def signing_text(self) | Return the text to be signed when signing the query. | 4.437809 | 4.005446 | 1.107944 |
result = []
for key, value in self.sorted_params():
result.append("%s=%s" % (self.encode(key), self.encode(value)))
return "&".join(result) | def get_canonical_query_params(self) | Return the canonical query params (used in signing). | 2.949399 | 2.527966 | 1.166708 |
if isinstance(string, unicode):
string = string.encode("utf-8")
return quote(string, safe="~") | def encode(self, string) | Encode the given string as per the canonicalisation encoding rules.
See the AWS dev reference page 186 (2009-11-30 version).
@return: the encoded string. | 3.967019 | 5.184496 | 0.76517 |
root = XML(xml_bytes)
return cls(root.findtext('Bucket'),
root.findtext('Key'),
root.findtext('UploadId')) | def from_xml(cls, xml_bytes) | Create an instance of this from XML bytes.
@param xml_bytes: C{str} bytes of XML to parse
@return: an instance of L{MultipartInitiationResponse} | 5.083329 | 4.683074 | 1.085469 |
absA = abs(a)
absB = abs(b)
if absA > absB:
return absA * sqrt(1.0 + (absB / float(absA)) ** 2)
elif absB == 0.0:
return 0.0
else:
return absB * sqrt(1.0 + (absA / float(absB)) ** 2) | def pythag(a, b) | Compute c = (a^2 + b^2)^0.5 without destructive underflow or overflow
It solves the Pythagorean theorem a^2 + b^2 = c^2 | 1.824577 | 2.183177 | 0.835744 |
if rowBased:
self.matrix = []
if len(data) != self._rows:
raise ValueError("Size of Matrix does not match")
for col in xrange(self._columns):
self.matrix.append([])
for row in xrange(self._rows):
if len(data[row]) != self._columns:
raise ValueError("Size of Matrix does not match")
self.matrix[col].append(data[row][col])
else:
if len(data) != self._columns:
raise ValueError("Size of Matrix does not match")
for col in data:
if len(col) != self._rows:
raise ValueError("Size of Matrix does not match")
self.matrix = copy.deepcopy(data) | def _initialize_with_array(self, data, rowBased=True) | Set the matrix values from a two dimensional list. | 1.830759 | 1.733364 | 1.056188 |
width = 1
if isinstance(timeSeries, MultiDimensionalTimeSeries):
width = timeSeries.dimension_count()
matrixData = [[] for dummy in xrange(width)]
for entry in timeSeries:
for col in xrange(1, len(entry)):
matrixData[col - 1].append(entry[col])
if not matrixData[0]:
raise ValueError("Cannot create Matrix from empty Timeseries")
mtrx = Matrix.from_two_dim_array(len(matrixData), len(matrixData[0]), matrixData)
# mtrx.initialize(matrixData, rowBased=False)
return mtrx | def from_timeseries(cls, timeSeries) | Create a new Matrix instance from a TimeSeries or MultiDimensionalTimeSeries
:param TimeSeries timeSeries: The TimeSeries, which should be used to
create a new Matrix.
:return: A Matrix with the values of the timeSeries. Each row of
the Matrix represents one entry of the timeSeries.
The time of an entry is ignored in the matrix.
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError`, if the timeSeries is empty. | 4.571927 | 4.041686 | 1.131193 |
return Matrix(cols, rows, twoDimArray, rowBased=False, isOneDimArray=False) | def from_two_dim_array(cls, cols, rows, twoDimArray) | Create a new Matrix instance from a two dimensional array.
:param integer columns: The number of columns for the Matrix.
:param integer rows: The number of rows for the Matrix.
:param list twoDimArray: A two dimensional column based array
with the values of the matrix.
:raise: Raises an :py:exc:`ValueError` if:
- columns < 1 or
- rows < 1
- the size of the parameter does not match with the size of
the Matrix. | 10.441065 | 11.335977 | 0.921056 |
ts = MultiDimensionalTimeSeries(dimensions=self.get_width())
for row in xrange(self.get_height()):
newEntry = []
for col in xrange(self.get_width()):
newEntry.append(self.get_value(col, row))
ts.add_entry(row, newEntry)
return ts | def to_multi_dim_timeseries(self) | Return a TimeSeries with the values of :py:obj:`self`
The index of the row is used for the timestamp
:return: Return a new MultiDimensionalTimeSeries with the values
of the Matrix
:rtype: MultiDimensionalTimeSeries | 3.382086 | 3.349182 | 1.009824 |
if rowBased:
array = []
for row in xrange(self._rows):
newRow = []
for col in xrange(self._columns):
newRow.append(self.get_value(col, row))
array.append(newRow)
return array
return copy.deepcopy(self.matrix) | def get_array(self, rowBased=True) | Return a two dimensional list with the values of the :py:obj:`self`.
:param boolean rowBased: Indicates whether the returned list should be
row or column based. Has to be True if list[i] should be the i'th
row, False if list[i] should be the i'th column.
:return: Returns a list representing the matrix rows
containing lists representing the columns for each row.
:rtype: list | 2.621772 | 2.97393 | 0.881585 |
resultMatrix = Matrix(columns, rows, matrix_list, rowBased)
return resultMatrix | def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True) | Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
:param matrix_list: A one dimensional list containing the
values for Matrix. Depending on the
rowBased parameter, either the rows are
combined or the columns.
:param rowBased Boolean: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False). | 5.852719 | 8.084032 | 0.723985 |
self.matrix[column][row] = value | def set_value(self, column, row, value) | Set the value of the Matrix at the specified column and row.
:param integer column: The index for the column (starting at 0)
:param integer row: The index for the row (starting at 0)
:param numeric value: The new value at the given column/row
:raise: Raises an :py:exc:`IndexError` if the index is out of xrange. | 6.271886 | 9.513781 | 0.659242 |
if self._columns != self._rows:
raise ValueError("A square matrix is needed")
mArray = self.get_array(False)
appList = [0] * self._columns
# add identity matrix to array in order to use gauss jordan algorithm
for col in xrange(self._columns):
mArray.append(appList[:])
mArray[self._columns + col][col] = 1
# create new Matrix and execute gauss jordan algorithm
exMatrix = Matrix.from_two_dim_array(2 * self._columns, self._rows, mArray)
gjResult = exMatrix.gauss_jordan()
# remove identity matrix from left side
# TODO Implement slicing directly for Matrix
gjResult.matrix = gjResult.matrix[self._columns:]
gjResult._columns = len(gjResult.matrix)
return gjResult | def invers(self) | Return the inverse matrix, if it can be calculated
:return: Returns a new Matrix containing the inverse
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError` if the matrix is not invertible
:note: Only a squared matrix with a determinant != 0 can be inverted.
:todo: Reduce amount of create and copy operations | 6.19702 | 5.8477 | 1.059736 |
resultMatrix = Matrix(matrix.get_width(), self.get_height())
for r_row in xrange(self._rows):
for r_col in xrange(matrix.get_width()):
#blockwise matrix multiplication hack
if isinstance(self.get_array()[0][0], Matrix):
blocksize = self.get_array()[0][0].get_width()
valueT = Matrix(blocksize, blocksize)
else:
valueT = 0
for column in xrange(matrix.get_height()):
valueT += self.get_value(column, r_row) * matrix.get_value(r_col, column)
resultMatrix.set_value(r_col, r_row, valueT)
return resultMatrix | def matrix_multiplication(self, matrix) | Multiply :py:obj:`self` with the given matrix and return result matrix.
:param Matrix matrix: The matrix, which should be multiplied.
:return: Returns a new Matrix with the result of the multiplication
:rtype: Matrix
:note: Make sure, that the matrices can be multiplied.
The number of columns of the Matrix instance must match with
the number of rows of the Matrix given as parameter.
Use is_matrix_mult_possible(matrix) to test. | 3.121581 | 3.166001 | 0.98597 |
#Create the blockwise version of self and matrix
selfBlockwise = self.matrix_to_blockmatrix(blocksize)
matrixBlockwise = matrix.matrix_to_blockmatrix(blocksize)
return (selfBlockwise * matrixBlockwise).flatten() | def matrix_multiplication_blockwise(self, matrix, blocksize) | http://en.wikipedia.org/wiki/Block_matrix#Block_matrix_multiplication | 4.181628 | 3.844066 | 1.087814 |
# size of the square sub-matrices stored as elements
blocksize = self.get_array()[0][0].get_width()
# total width of the flattened matrix in scalar columns
width = self.get_width() * blocksize
columnsNew = [[] for dummy in xrange(width)]
for row in self.get_array():
    index = 0
    # stitch the columns of each sub-matrix into the flat column list
    for submatrix in row:
        for column in submatrix.get_array(False):
            columnsNew[index] += column
            index += 1
# concatenate all columns into one column-based value list
columnsFlat = sum(columnsNew, [])
return Matrix(width, len(columnsNew[0]), columnsFlat, rowBased=False)
flattens the Matrix into one Matrix only consisting of the 2nd level
elements
[[[1 2] [[3 4] to [[1 2 3 4]
[5 6]] [7 8]]] [5 6 7 8]] | 4.974246 | 4.784669 | 1.039622 |
# both dimensions must be evenly divisible, otherwise blocks would be ragged
if self.get_width() % blocksize or self.get_height() % blocksize:
    raise ValueError("Number of rows and columns have to be evenly dividable by blocksize")
selfBlocks = []
# walk the matrix in blocksize steps, cutting out one square block at a time
for columnIndex in range(0, self.get_width() - 1, blocksize):
    for rowIndex in range(0, self.get_height() - 1, blocksize):
        currentBlock = []
        for blockRows in self.get_array(False)[columnIndex:columnIndex + blocksize]:
            currentBlock += blockRows[rowIndex:rowIndex + blocksize]
        selfBlocks.append(Matrix(blocksize, blocksize, currentBlock, rowBased=False))
# outer matrix holds the blocks as its elements
return Matrix(self.get_width() / blocksize, self.get_height() / blocksize, selfBlocks, rowBased=False)
Each element is another blocksize*blocksize matrix.
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) * multiplicator)
return result | def multiply(self, multiplicator) | Return a new Matrix with a multiple.
:param Number multiplicator: The number to calculate the multiple
:return: The Matrix with the the multiple.
:rtype: Matrix | 2.117358 | 2.242924 | 0.944017 |
# transposed matrix swaps width and height
t_matrix = Matrix(self._rows, self._columns)
for col_i, col in enumerate(self.matrix):
    for row_i, entry in enumerate(col):
        # (col, row) of the original becomes (row, col) of the result
        t_matrix.set_value(row_i, col_i, entry)
return t_matrix
:return: Returns a new transformed Matrix
:rtype: Matrix | 3.316393 | 3.145779 | 1.054236 |
mArray = self.get_array(rowBased=False)
width = self.get_width()
height = self.get_height()
if not height < width:
raise ValueError()
# Start with complete matrix and remove in each iteration
# the first row and the first column
for offset in xrange(height):
# Switch lines, if current first value is 0
if mArray[offset][offset] == 0:
for i in xrange(offset + 1, height):
if mArray[offset][i] != 0:
tmp = []
for j in xrange(offset, width):
tmp.append(mArray[j][offset])
# tmp = mArray[offset][offset:]
for j in xrange(offset, width):
mArray[j][offset] = mArray[j][i]
mArray[j][i] = tmp[j]
# mArray[offset][offset:] = mArray[i][offset:]
# mArray[i] = tmp
break
currentRow = [mArray[j][offset] for j in xrange(offset, width)]
devider = float(currentRow[0])
# If no line is found with an value != 0
# the matrix is not invertible
if devider == 0:
raise ValueError("Matrix is not invertible")
transformedRow = []
# Devide current row by first element of current row
for value in currentRow:
transformedRow.append(value / devider)
# put transformed row back into matrix
for j in xrange(offset, width):
mArray[j][offset] = transformedRow[j - offset]
# subtract multiples of the current row, from all remaining rows
# in order to become a 0 at the current first column
for i in xrange(offset + 1, height):
multi = mArray[offset][i]
for j in xrange(offset, width):
mArray[j][i] = mArray[j][i] - mArray[j][offset] * multi
for i in xrange(1, height):
# subtract multiples of the i-the row from all above rows
for j in xrange(0, i):
multi = mArray[i][j]
for col in xrange(i, width):
mArray[col][j] = mArray[col][j] - mArray[col][i] * multi
self.matrix = mArray
return self | def gauss_jordan(self) | Reduce :py:obj:`self` to row echelon form.
:return: Returns :py:obj:`self` in row echelon form for convenience.
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError` if:
- the matrix rows < columns
- the matrix is not invertible
In this case :py:obj:`self` is not changed. | 3.002121 | 2.909224 | 1.031932 |
# copy instance to transform it to bidiagonal form.
bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix)
# build identity matrix, which is used to calculate householder transformations
identityMatrixRow = Matrix(self.get_height(), self.get_height())
for i in xrange(self.get_height()):
    identityMatrixRow.set_value(i, i, 1.0)
identityMatrixCol = Matrix(self.get_width(), self.get_width())
for i in xrange(self.get_width()):
    identityMatrixCol.set_value(i, i, 1.0)
# zero out the k'th column and row
for k in xrange(self.get_width() - 1):
    # vector with the values of the k'th column (first k-1 rows are 0)
    x = Vector(self.get_height())
    y = Vector(self.get_height())
    if k > 0:
        x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
        y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
    # s accumulates the squared column entries from row k downwards
    s = 0.0
    for i in xrange(k, self.get_height()):
        val = bidiagMatrix.get_value(k, i)
        x.set_value(0, i, val)
        s += (val ** 2)
    s = sqrt(s)
    # y must have same length as x
    y.set_value(0, k, s)
    tmp = x - y
    norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
    # calculate w = (x-y)/(|x-y|)
    w = tmp / norm
    # uk is the k'th householder matrix for the column
    uk = identityMatrixRow - 2 * (w * w.transform())
    bidiagMatrix = uk * bidiagMatrix
    if k == 0:
        # set u in first iteration.
        u = uk
    else:
        # accumulate the column transformations
        u = u * uk
    # zero out the the row
    if k < self.get_width() - 2:
        x = Vector(self.get_width())
        y = Vector(self.get_width())
        x.set_value(0, k, bidiagMatrix.get_value(k, k))
        y.set_value(0, k, bidiagMatrix.get_value(k, k))
        s = 0.0
        for i in xrange(k + 1, bidiagMatrix.get_width()):
            val = bidiagMatrix.get_value(i, k)
            x.set_value(0, i, val)
            s += (val ** 2)
        # length of vector x ignoring the k'th value
        s = sqrt(s)
        # y must have same length as x, since k'th value is equal
        # set k+1 value to s
        y.set_value(0, k + 1, s)
        tmp = x - y
        norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
        w = tmp / norm
        # vk is the k'th householder matrix for the row
        vk = identityMatrixCol - (2 * (w * w.transform()))
        bidiagMatrix = bidiagMatrix * vk
        if k == 0:
            # set v in first iteration
            v = vk
        else:
            # row transformations accumulate on the left of v
            v = vk * v
return (u, bidiagMatrix, v)
The algorithm uses householder transformations.
:return tuple (u,b,v): A tuple with the Matrix u, b and v.
and self = ubv (except some rounding errors)
u is a unitary matrix
b is a bidiagonal matrix.
v is a unitary matrix.
:note: Currently the algorithm only works for squared matrices
:todo: Make sure, that the bidiagonal matrix is 0.0 except for the bidiagonal.
Due to rounding errors, this is currently not ensured | 2.524058 | 2.487663 | 1.01463 |
# SVD below assumes height >= width; work on the transpose otherwise and
# transpose the result back at the end
transform = False
if self.get_width() > self.get_height():
    transform = True
    u, sigma, v = self.transform().svd()
else:
    u, sigma, v = self.svd()
# calculate inverse of sigma: invert each non-negligible singular value
for i in xrange(min(sigma.get_height(), sigma.get_width())):
    val = sigma.get_value(i, i)
    # divide only if the value is not 0 or close to zero (rounding errors)
    eps = 1.e-15
    if eps < val or val < -eps:
        sigma.set_value(i, i, 1 / val)
# pseudoinverse = V * Sigma^+ * U^T
if transform:
    return (v * sigma * u.transform()).transform()
else:
    return v * sigma * u.transform()
The singular value decomposition is used to calculate the pseudoinverse. | 4.072349 | 3.826799 | 1.064166 |
vec = Vector(matrix.get_height())
for row in xrange(matrix.get_height()):
vec.set_value(0, row, matrix.get_value(column, row))
return vec | def initialize_from_matrix(cls, matrix, column) | Create vector from matrix
:param Matrix matrix: The Matrix, which should be used to create the vector.
:param integer column: The column of the matrix, which should be used
to create the new vector.
:raise: Raises an :py:exc:`IndexError` if the matrix does not have the specified column. | 3.565204 | 4.467401 | 0.798049 |
length = float(self.norm())
for row in xrange(self.get_height()):
self.set_value(0, row, self.get_value(0, row) / length)
return self | def unify(self) | Unifies the vector. The length of the vector will be 1.
:return: Return the instance itself
:rtype: Vector | 5.31798 | 5.349344 | 0.994137 |
divisions = list(self.divisions)
if len(divisions) == 0:
    # no divisions at all -> empty text
    return ''
elif len(divisions) == 1:
    # single division: avoid the parent's join logic, just strip it
    return divisions[0].text.strip()
else:
    # multiple divisions: defer to the base class implementation
    return super().text
# get the defined subset of error values
errorValues = self._get_error_values(startingPercentage, endPercentage, startDate, endDate)
errorValues = filter(lambda item: item is not None, errorValues)
return sorted(errorValues)[len(errorValues)//2] | def _calculate(self, startingPercentage, endPercentage, startDate, endDate) | This is the error calculation function that gets called by :py:meth:`BaseErrorMeasure.get_error`.
Both parameters will be correct at this time.
:param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
It represents the value, where the error calculation should be started.
25.0 for example means that the first 25% of all calculated errors will be ignored.
:param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
It represents the value, after which all error values will be ignored. 90.0 for example means that
the last 10% of all local errors will be ignored.
:param float startDate: Epoch representing the start date used for error calculation.
:param float endDate: Epoch representing the end date used in the error calculation.
:return: Returns a float representing the error.
:rtype: float | 4.905993 | 4.698373 | 1.04419 |
# action_type == 1 marks a tap event; taps per second over the whole test
# (td[-1] is the last timestamp, presumably seconds -- TODO confirm)
freq = sum(data_frame.action_type == 1) / data_frame.td[-1]
duration = math.ceil(data_frame.td[-1])
return freq, duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return frequency: frequency
:rtype frequency: float | 8.030599 | 7.874779 | 1.019787 |
f = []
# tap frequency inside a sliding window of self.window seconds
for i in range(0, (data_frame.td[-1].astype('int') - self.window)):
    f.append(sum(data_frame.action_type[(data_frame.td >= i) & (data_frame.td < (i + self.window))] == 1) /
             float(self.window))
# relative change of the windowed frequency between consecutive windows
diff_mov_freq = (np.array(f[1:-1]) - np.array(f[0:-2])) / np.array(f[0:-2])
duration = math.ceil(data_frame.td[-1])
return diff_mov_freq, duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return diff_mov_freq: frequency
:rtype diff_mov_freq: float | 3.568529 | 3.294005 | 1.08334 |
tap_timestamps = data_frame.td[data_frame.action_type==1]
# instantaneous frequency: reciprocal of the interval between taps
cont_freq = 1.0/(np.array(tap_timestamps[1:-1])-np.array(tap_timestamps[0:-2]))
duration = math.ceil(data_frame.td[-1])
return cont_freq, duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return cont_freq: frequency
:rtype cont_freq: float | 5.43573 | 5.410257 | 1.004708 |
# successive timestamp differences; every second difference is the
# move-between-targets interval (the others are dwell times)
diff = data_frame.td[1:-1].values-data_frame.td[0:-2].values
mmt = np.mean(diff[np.arange(1,len(diff),2)]) * 1000.0  # seconds -> ms
duration = math.ceil(data_frame.td[-1])
return mmt, duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return mmt: the mean moving time in ms
:rtype mmt: float | 4.377317 | 4.241458 | 1.032031 |
# variance (not mean) of every second timestamp difference, in ms
diff = data_frame.td[1:-1].values - data_frame.td[0:-2].values
inc_s = np.var(diff[np.arange(1, len(diff), 2)], dtype=np.float64) * 1000.0
duration = math.ceil(data_frame.td[-1])
return inc_s, duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return is: incoordination score
:rtype is: float | 4.599307 | 4.206432 | 1.093399 |
# euclidean distances between successive touch points; every second one
# is the jump between the alternating targets
dist = np.sqrt((data_frame.x[1:-1].values-data_frame.x[0:-2].values)**2+
               (data_frame.y[1:-1].values-data_frame.y[0:-2].values)**2)
matd = np.mean(dist[np.arange(1,len(dist),2)])
duration = math.ceil(data_frame.td[-1])
return matd, duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return matd: the mean alternate target distance in pixels
:rtype matd: float | 3.350661 | 2.933748 | 1.142109 |
# Earlier windowed variant, kept for reference:
# tap_timestamps = data_frame.td[data_frame.action_type == 1]
# grouped = tap_timestamps.groupby(pd.TimeGrouper('30u'))
# return np.mean(grouped.size().values)
ks = sum(data_frame.action_type == 1)  # total number of taps
duration = math.ceil(data_frame.td[-1])
return ks, duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ks: key taps
:rtype ks: float
:return duration: test duration (seconds)
:rtype duration: float | 6.311905 | 4.944491 | 1.276553 |
raise_timestamps = data_frame.td[data_frame.action_type == 1]
down_timestamps = data_frame.td[data_frame.action_type == 0]
if len(raise_timestamps) == len(down_timestamps):
    # paired events: mean dwell is the mean pairwise difference
    at = np.mean(down_timestamps.values - raise_timestamps.values)
else:
    # unequal counts (test cut off mid-tap): drop the surplus trailing
    # events from the longer series before pairing
    if len(raise_timestamps) > len(down_timestamps):
        at = np.mean(down_timestamps.values - raise_timestamps.values[:-(len(raise_timestamps)
                                                                         - len(down_timestamps))])
    else:
        at = np.mean(down_timestamps.values[:-(len(down_timestamps)-len(raise_timestamps))]
                     - raise_timestamps.values)
duration = math.ceil(data_frame.td[-1])
return np.abs(at), duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return at: akinesia times
:rtype at: float
:return duration: test duration (seconds)
:rtype duration: float | 2.754261 | 2.484287 | 1.108673 |
# only key-down events carry the touch position to compare with the target
tap_data = data_frame[data_frame.action_type == 0]
# mean euclidean distance between touch point and target centre (pixels)
ds = np.mean(np.sqrt((tap_data.x - tap_data.x_target) ** 2 + (tap_data.y - tap_data.y_target) ** 2))
duration = math.ceil(data_frame.td[-1])
return ds, duration
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ds: dysmetria score in pixels
:rtype ds: float | 4.354365 | 3.654633 | 1.191464 |
try:
return {pre+'frequency': self.frequency(data_frame)[0],
pre+'mean_moving_time': self.mean_moving_time(data_frame)[0],
pre+'incoordination_score': self.incoordination_score(data_frame)[0],
pre+'mean_alnt_target_distance': self.mean_alnt_target_distance(data_frame)[0],
pre+'kinesia_scores': self.kinesia_scores(data_frame)[0],
pre+'akinesia_times': self.akinesia_times(data_frame)[0],
pre+'dysmetria_score': self.dysmetria_score(data_frame)[0]}
except:
logging.error("Error on FingerTappingProcessor process, extract features: %s", sys.exc_info()[0]) | def extract_features(self, data_frame, pre='') | This method extracts all the features available to the Finger Tapping Processor class.
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: 'frequency', 'moving_frequency','continuous_frequency','mean_moving_time','incoordination_score', \
'mean_alnt_target_distance','kinesia_scores', 'akinesia_times','dysmetria_score'
:rtype: list | 3.855223 | 1.71386 | 2.249438 |
# m = self.labels.drop(['id','MDS_UPDRSIII'], axis=1).values
# print(itemfreq(m))
#
# for i, row in enumerate(self.labels.drop(['id','MDS_UPDRSIII'], axis=1).values):
# print(np.bincount(row))
try:
    # fit one k-NN classifier per observation column
    for obs in self.observations:
        features, ids = self.__get_features_for_observation(observation=obs, skip_id=3497,
                                                            last_column_is_id=True)
        # normalise each feature dimension by its standard deviation
        normalised_data = whiten(features)
        x = pd.DataFrame(normalised_data)
        y = self.labels[obs].values
        # distance-weighted vote so closer neighbours count more
        knn = KNeighborsClassifier(n_neighbors=n_neighbors, weights='distance')
        # trained on the full data set (no train/test split here)
        knn.fit(x, y)
        # keep (observation, classifier) pairs for later prediction
        if not self.knns:
            self.knns = [[obs, knn]]
        else:
            self.knns.append([obs, knn])
except IOError as e:
    ierr = "({}): {}".format(e.errno, e.strerror)
    logging.error("Error training Clinical UPDRS, file not found, I/O error %s", ierr)
except ValueError as verr:
    # NOTE(review): verr.message is Python 2 only -- verify target runtime
    logging.error("Error training Clinical UPDRS ValueError ->%s", verr.message)
except:
    # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt
    logging.error("Unexpected error on training Clinical UPDRS init: %s", sys.exc_info()[0])
generated/sklearn.neighbors.KNeighborsClassifier.html>`_
:param n_clusters: the number of clusters
:type n_clusters: int | 3.290255 | 3.327722 | 0.988741 |
try:
    features = np.array([])
    if data_frame is None:
        data_frame = self.data_frame
    for index, row in data_frame.iterrows():
        if not skip_id == row['id']:
            # take every column whose name contains the observation,
            # replacing NaNs with zeros, then append the row id
            features_row = np.nan_to_num(row[row.keys().str.contains(observation)].values)
            features_row = np.append(features_row, row['id'])
            # first row initialises the array, later rows are stacked
            features = np.vstack([features, features_row]) if features.size else features_row
    # not the same when getting a single point
    if last_column_is_id:
        if np.ndim(features) > 1:
            to_return = features[:,:-1]
        else:
            to_return = features[:-1]
    else:
        to_return = features
    return to_return, data_frame['id'].values
except:
    # NOTE(review): bare except hides the real error and returns None
    logging.error(" observation not found in data frame")
skip_id=None, last_column_is_id=False) | Extract the features for a given observation from a data frame
:param data_frame: data frame to get features from
:type data_frame: pandas.DataFrame
:param observation: observation name
:type observation: string
:param skip_id: skip any test with a given id (optional)
:type skip_id: int
:param last_column_is_id: skip the last column of the data frame (useful when id is last column - optional)
:type last_column_is_id: bool
:return features: the features
:rtype features: np.array | 2.995337 | 2.994321 | 1.000339 |
scores = np.array([])
# predict one score per observation with its dedicated k-NN model
for obs in self.observations:
    knn = self.__get_knn_by_observation(obs)
    p, ids = self.__get_features_for_observation(data_frame=measurement, observation=obs,
                                                 skip_id=3497, last_column_is_id=True)
    # transpose so the single feature vector becomes one sample row
    score = knn.predict(pd.DataFrame(p).T)
    scores = np.append(scores, score, axis=0)
if output_format == 'array':
    return scores.astype(int)
else:
    # any other format yields the string representation of the array
    return np.array_str(scores.astype(int))
:param measurement: the point to classify
:type measurement: pandas.DataFrame
:param output_format: the format to return the scores ('array' or 'str')
:type output_format: string
:return prediction: the prediction for a given test/point
:rtype prediction: np.array | 4.76976 | 4.924162 | 0.968644 |
result = []
for name, parameter in mapping.iteritems():
parameter.name = name
result.append(parameter)
return result | def _namify_arguments(mapping) | Ensure that a mapping of names to parameters has the parameters set to the
correct name. | 3.887783 | 2.827278 | 1.375097 |
# walk (or create) the nested alists for every path segment but the last
for key in path[:-1]:
    for item in alist:
        if item[0] == key:
            # descend into the existing sub-alist for this key
            alist = item[1]
            break
    else:
        # key not present yet: create a new sub-alist, preserving order
        subalist = []
        alist.append((key, subalist))
        alist = subalist
# attach the value under the final path segment
alist.append((path[-1], value))
insertion order. Examples will explain it::
>>> alist = []
>>> _merge_associative_list(alist, ["foo", "bar"], "barvalue")
>>> _merge_associative_list(alist, ["foo", "baz"], "bazvalue")
>>> alist == [("foo", [("bar", "barvalue"), ("baz", "bazvalue")])]
@param alist: An associative list of names to values.
@param path: A path through sub-alists which we ultimately want to point to
C{value}.
@param value: The value to set.
@return: None. This operation mutates the associative list in place. | 2.250296 | 2.36752 | 0.950487 |
if value is None:
    # no value supplied at all (not even ""): default if optional,
    # otherwise fall through as empty string to trigger the error below
    if self.optional:
        return self.default
    else:
        value = ""
if value == "":
    if not self.allow_none:
        raise MissingParameterError(self.name, kind=self.kind)
    return self.default
try:
    self._check_range(value)
    parsed = self.parse(value)
    # optional user-supplied validator may veto the parsed value
    if self.validator and not self.validator(parsed):
        raise ValueError(value)
    return parsed
except ValueError:
    # build a readable error message; fall back when the raw bytes
    # cannot be decoded as UTF-8
    try:
        value = value.decode("utf-8")
        message = "Invalid %s value %s" % (self.kind, value)
    except UnicodeDecodeError:
        message = "Invalid %s value" % self.kind
    raise InvalidParameterValueError(message)
@param value: A L{str}, or L{None}. If L{None} is passed - meaning no
value is avalable at all, not even the empty string - and this
parameter is optional, L{self.default} will be returned. | 2.845644 | 2.855184 | 0.996659 |
# nothing to check when no bounds are configured
if self.min is None and self.max is None:
    return
measure = self.measure(value)
prefix = "Value (%s) for parameter %s is invalid. %s"
if self.min is not None and measure < self.min:
    message = prefix % (value, self.name,
                        self.lower_than_min_template % self.min)
    raise InvalidParameterValueError(message)
if self.max is not None and measure > self.max:
    message = prefix % (value, self.name,
                        self.greater_than_max_template % self.max)
    raise InvalidParameterValueError(message)
indices = []
if not isinstance(value, dict):
    # We interpret non-list inputs as a list of one element, for
    # compatibility with certain EC2 APIs.
    return [self.item.coerce(value)]
# keys are string indices ("1", "2", ...); anything non-numeric is unknown
for index in value.keys():
    try:
        indices.append(int(index))
    except ValueError:
        raise UnknownParameterError(index)
result = [None] * len(value)
# fill the result in ascending index order, coercing each element
for index_index, index in enumerate(sorted(indices)):
    v = value[str(index)]
    if index < 0:
        raise UnknownParameterError(index)
    result[index_index] = self.item.coerce(v)
return result
C{value}s. | 3.749925 | 3.738056 | 1.003175 |
if isinstance(value, Arguments):
    # Arguments already maps indices to values; keep those indices
    return dict((str(i), self.item.format(v)) for i, v in value)
# plain lists are numbered starting at 1, EC2-style
return dict((str(i + 1), self.item.format(v))
            for i, v in enumerate(value))
["a", "b", "c"]
to:
{"1": "a", "2": "b", "3": "c"}
C{value} may also be an L{Arguments} instance, mapping indices to
values. Who knows why. | 3.596615 | 2.788533 | 1.289788 |
result = {}
rest = {}
for k, v in value.iteritems():
    if k in self.fields:
        if (isinstance(v, dict)
            and not self.fields[k].supports_multiple):
            if len(v) == 1:
                # We support "foo.1" as "foo" as long as there is only
                # one "foo.#" parameter provided.... -_-
                v = v.values()[0]
            else:
                raise InvalidParameterCombinationError(k)
        result[k] = self.fields[k].coerce(v)
    else:
        # not part of the schema: collect for the UnknownParametersError
        rest[k] = v
# coerce missing fields with None so optional defaults kick in
for k, v in self.fields.iteritems():
    if k not in result:
        result[k] = v.coerce(None)
if rest:
    raise UnknownParametersError(result, rest)
return result
if not isinstance(value, Arguments):
value = value.iteritems()
return dict((k, self.fields[k].format(v)) for k, v in value) | def format(self, value) | Convert a dictionary of processed values to a dictionary of raw values. | 5.312234 | 5.122385 | 1.037063 |
if isinstance(value, dict):
    if any(isinstance(name, int) for name in value.keys()):
        # integer-keyed dicts represent lists; mixing key types is a bug
        if not all(isinstance(name, int) for name in value.keys()):
            raise RuntimeError("Integer and non-integer keys: %r"
                               % value.keys())
        # sort by index and recurse into each element
        items = sorted(value.iteritems(), key=itemgetter(0))
        return [self._wrap(val) for _, val in items]
    else:
        # string-keyed dicts become nested Arguments objects
        return Arguments(value)
elif isinstance(value, list):
    return [self._wrap(x) for x in value]
else:
    # leaf value: returned unchanged
    return value
@param tree: A {dict}, containing L{dict}s and/or leaf values, nested
arbitrarily deep. | 2.867297 | 2.688917 | 1.066339 |
# wrap all schema parameters into a single Structure for coercion
structure = Structure(fields=dict([(p.name, p)
                                   for p in self._parameters]))
try:
    tree = structure.coerce(self._convert_flat_to_nest(params))
    rest = {}
except UnknownParametersError, error:
    # partial success: keep what was parsed, flatten the unknown rest
    tree = error.result
    rest = self._convert_nest_to_flat(error.unknown)
return Arguments(tree), rest
@param params: The raw parameters to parse.
@return: A tuple of an L{Arguments} object holding the extracted
arguments and any unparsed arguments. | 10.053199 | 8.362821 | 1.20213 |
params = {}
for argument in arguments:
params.update(argument)
params.update(extra)
result = {}
for name, value in params.iteritems():
if value is None:
continue
segments = name.split('.')
first = segments[0]
parameter = self.get_parameter(first)
if parameter is None:
raise RuntimeError("Parameter '%s' not in schema" % name)
else:
if value is None:
result[name] = ""
else:
result[name] = parameter.format(value)
return self._convert_nest_to_flat(result) | def bundle(self, *arguments, **extra) | Bundle the given arguments in a C{dict} with EC2-style format.
@param arguments: L{Arguments} instances to bundle. Keys in
later objects will override those in earlier objects.
@param extra: Any number of additional parameters. These will override
similarly named arguments in L{arguments}. | 3.43353 | 3.766321 | 0.91164 |
for parameter in self._parameters:
if parameter.name == name:
return parameter | def get_parameter(self, name) | Get the parameter on this schema with the given C{name}. | 3.609406 | 3.520903 | 1.025136 |
result = {}
for k, v in params.iteritems():
    # `last` tracks the dict we are currently descending into
    last = result
    segments = k.split('.')
    for index, item in enumerate(segments):
        # the final segment gets the value itself, earlier ones a sub-dict
        if index == len(segments) - 1:
            newd = v
        else:
            newd = {}
        # a leaf was already stored where a sub-dict is now needed
        if not isinstance(last, dict):
            raise InconsistentParameterError(k)
        # a sub-dict was already stored where a leaf is now needed
        if type(last.get(item)) is dict and type(newd) is not dict:
            raise InconsistentParameterError(k)
        last = last.setdefault(item, newd)
return result
{'foo.1.bar': 'value',
'foo.2.baz': 'value'}
to::
{'foo': {'1': {'bar': 'value'},
'2': {'baz': 'value'}}}
This is intended for use both during parsing of HTTP arguments like
'foo.1.bar=value' and when dealing with schema declarations that look
like 'foo.n.bar'.
This is the inverse of L{_convert_nest_to_flat}. | 2.752795 | 2.655943 | 1.036466 |
# _result / _prefix are recursion accumulators, not part of the API
if _result is None:
    _result = {}
for k, v in params.iteritems():
    if _prefix is None:
        path = k
    else:
        path = _prefix + '.' + k
    if isinstance(v, dict):
        # recurse, extending the dotted prefix with this key
        self._convert_nest_to_flat(v, _result=_result, _prefix=path)
    else:
        _result[path] = v
return _result
{"foo": {"bar": "baz", "shimmy": "sham"}}
to::
{"foo.bar": "baz",
"foo.shimmy": "sham"}
This is the inverse of L{_convert_flat_to_nest}. | 1.63176 | 1.718712 | 0.949409 |
# start from copies of this schema's attributes so the original is untouched
new_kwargs = {
    'name': self.name,
    'doc': self.doc,
    'parameters': self._parameters[:],
    'result': self.result.copy() if self.result else {},
    'errors': self.errors.copy() if self.errors else set()}
# merge rather than replace parameters/result/errors passed by the caller
if 'parameters' in kwargs:
    new_params = kwargs.pop('parameters')
    new_kwargs['parameters'].extend(new_params)
new_kwargs['result'].update(kwargs.pop('result', {}))
new_kwargs['errors'].update(kwargs.pop('errors', set()))
new_kwargs.update(kwargs)
# positional items use the legacy dotted-name syntax and are converted
if schema_items:
    parameters = self._convert_old_schema(schema_items)
    new_kwargs['parameters'].extend(parameters)
return Schema(**new_kwargs)
Takes the same arguments as the constructor, and returns a new
L{Schema} instance.
If parameters, result, or errors is specified, they will be merged with
the existing parameters, result, or errors. | 2.521621 | 2.345124 | 1.075262 |
# 'merged' here is an associative list that maps parameter names to
# Parameter instances, OR sub-associative lists which represent nested
# lists and structures.
# e.g.,
# [Integer("foo")]
# becomes
# [("foo", Integer("foo"))]
# and
# [Integer("foo.bar")]
# (which represents a list of integers called "foo" with a meaningless
# index name of "bar") becomes
# [("foo", [("bar", Integer("foo.bar"))])].
merged = []
for parameter in parameters:
    segments = parameter.name.split('.')
    _merge_associative_list(merged, segments, parameter)
# convert each top-level entry of the tree into a new-style parameter
result = [self._inner_convert_old_schema(node, 1) for node in merged]
return result
using List and Structure.
The old schema assumes that every other dot implies an array. So a list
of two parameters,
[Integer("foo.bar.baz.quux"), Integer("foo.bar.shimmy")]
becomes::
[List(
"foo",
item=Structure(
fields={"baz": List(item=Integer()),
"shimmy": Integer()}))]
By design, the old schema syntax ignored the names "bar" and "quux". | 6.851778 | 6.139489 | 1.116018 |
name, parameter_description = node
if not isinstance(parameter_description, list):
    # This is a leaf, i.e., an actual L{Parameter} instance.
    return parameter_description
if depth % 2 == 0:
    # we're processing a structure: each child becomes a named field
    fields = {}
    for node in parameter_description:
        fields[node[0]] = self._inner_convert_old_schema(
            node, depth + 1)
    return Structure(name, fields=fields)
else:
    # we're processing a list: exactly one (ignored) index name allowed
    if not isinstance(parameter_description, list):
        raise TypeError("node %r must be an associative list"
                        % (parameter_description,))
    if not len(parameter_description) == 1:
        raise ValueError(
            "Multiple different index names specified: %r"
            % ([item[0] for item in parameter_description],))
    subnode = parameter_description[0]
    item = self._inner_convert_old_schema(subnode, depth + 1)
    return List(name=name, item=item, optional=item.optional)
@param node: A node in the associative list tree as described in
_convert_old_schema. A two tuple of (name, parameter).
@param depth: The depth that the node is at. This is important to know
if we're currently processing a list or a structure. ("foo.N" is a
list called "foo", "foo.N.fieldname" describes a field in a list of
structs). | 3.468987 | 2.975146 | 1.165989 |
# Drain the progress queue until all `size` units have been reported.
while size > 0:
    progress_type, value = progress_queue.get()
    if progress_type == ProgressQueue.PROCESSED:
        # `value` is the amount just transferred; forward it to the
        # watcher and count it against the remaining total.
        chunk_size = value
        watcher.transferring_item(item, increment_amt=chunk_size)
        size -= chunk_size
    elif progress_type == ProgressQueue.START_WAITING:
        watcher.start_waiting()
    elif progress_type == ProgressQueue.DONE_WAITING:
        watcher.done_waiting()
    else:
        # Any other message type carries an error string: terminate
        # the worker processes and surface the error to the caller.
        error_message = value
        for process in processes:
            process.terminate()
        raise ValueError(error_message)
# All units accounted for; wait for workers to exit cleanly.
for process in processes:
process.join() | def wait_for_processes(processes, size, progress_queue, watcher, item) | Watch progress queue for errors or progress.
Cleanup processes on error or success.
:param processes: [Process]: processes we are waiting to finish downloading a file
:param size: int: how many values we expect to be processed by processes
:param progress_queue: ProgressQueue: queue which will receive tuples of progress or error
:param watcher: ProgressPrinter: we notify of our progress:
:param item: object: RemoteFile/LocalFile we are transferring. | 3.535271 | 3.362405 | 1.051412 |
# NOTE(review): indentation reconstructed from the docstring ("on
# windows this never raises") -- assuming the whole permission check is
# skipped on Windows; confirm against the upstream source.
if platform.system().upper() != 'WINDOWS':
    filename = os.path.expanduser(filename)
    if os.path.exists(filename):
        # Missing files are fine; only existing files are checked.
        file_stat = os.stat(filename)
        # Reject files whose mode grants group/other access.
        if mode_allows_group_or_other(file_stat.st_mode):
raise ValueError(CONFIG_FILE_PERMISSIONS_ERROR) | def verify_file_private(filename) | Raises ValueError the file permissions allow group/other
On windows this never raises due to the implementation of stat. | 4.115083 | 3.171458 | 1.297537 |
self.cnt += increment_amt
# Integer percentage of total work completed so far.
percent_done = int(float(self.cnt) / float(self.total) * 100.0)
if KindType.is_project(item):
    # Projects have no path; label them generically.
    details = 'project'
else:
    details = os.path.basename(item.path)
# Update the label ("Uploading foo.txt" style) and repaint the bar.
self.progress_bar.update(percent_done, '{} {}'.format(self.msg_verb, details))
self.progress_bar.show() | def transferring_item(self, item, increment_amt=1) | Update progress that item is about to be transferred.
:param item: LocalFile, LocalFolder, or LocalContent(project) that is about to be sent.
:param increment_amt: int amount to increase our count(how much progress have we made) | 4.471487 | 3.979715 | 1.12357 |
# Switch the bar into its terminal state; the show() that follows
# renders the final progress label.
self.progress_bar.set_state(ProgressBar.STATE_DONE)
self.progress_bar.show() | def finished(self) | Must be called to print final progress label. | 6.667727 | 4.901665 | 1.360298 |
# Idempotent: only transition into the waiting display once.
if not self.waiting:
    self.waiting = True
    wait_msg = "Waiting for project to become ready for {}".format(self.msg_verb)
self.progress_bar.show_waiting(wait_msg) | def start_waiting(self) | Show waiting progress bar until done_waiting is called.
Only has an effect if we are in waiting state. | 6.85093 | 5.559413 | 1.232312 |
# Only leave the waiting display if we actually entered it.
if self.waiting:
    self.waiting = False
self.progress_bar.show_running() | def done_waiting(self) | Show running progress bar (only has an effect if we are in waiting state). | 8.129671 | 4.711215 | 1.725599 |
# Remember the message so later repaints can re-display it.
self.wait_msg = wait_msg
self.set_state(ProgressBar.STATE_WAITING)
self.show() | def show_waiting(self, wait_msg) | Show waiting progress bar until done_waiting is called.
Only has an effect if we are in waiting state.
:param wait_msg: str: message describing what we are waiting for | 5.031125 | 5.209771 | 0.965709 |
# Dispatch on the node kind: project (root), folder, or file.
if KindType.is_project(item):
    visitor.visit_project(item)
elif KindType.is_folder(item):
    visitor.visit_folder(item, parent)
else:
    visitor.visit_file(item, parent)
# Files are leaves; projects and folders recurse into their children.
if not KindType.is_file(item):
    for child in item.children:
ProjectWalker._visit_content(child, item, visitor) | def _visit_content(item, parent, visitor) | Recursively visit nodes in the project tree.
:param item: LocalContent/LocalFolder/LocalFile we are traversing down from
:param parent: LocalContent/LocalFolder parent or None
:param visitor: object visiting the tree | 2.820332 | 2.859185 | 0.986411 |
# Define our own query parser which can handle the consequences of
# `?acl` and such (subresources). At its best, parse_qsl doesn't
# let us differentiate between these and empty values (such as
# `?acl=`).
def p(s):
    # Parse "a=b&c" into [("a", "b"), ("c",)]: a bare key becomes a
    # 1-tuple so subresources stay distinguishable from empty values.
    results = []
    args = s.split(u"&")
    for a in args:
        pieces = a.split(u"=")
        if len(pieces) == 1:
            results.append((unquote(pieces[0]),))
        elif len(pieces) == 2:
            results.append(tuple(map(unquote, pieces)))
        else:
            # NOTE(review): more than one "=" per argument is rejected
            # with an uninformative message; worth improving upstream.
            raise Exception("oh no")
    return results
query = []
path = []
if bucket is None:
    # No bucket: the path is just the service root ("/").
    path.append(u"")
else:
    if isinstance(bucket, bytes):
        bucket = bucket.decode("utf-8")
    path.append(bucket)
if object_name is None:
    # Trailing empty segment yields a "/bucket/" style path.
    path.append(u"")
else:
    if isinstance(object_name, bytes):
        object_name = object_name.decode("utf-8")
    if u"?" in object_name:
        # Split off a subresource/query suffix (e.g. "key?acl") and
        # parse it with the tolerant parser above.
        object_name, query = object_name.split(u"?", 1)
        query = p(query)
    object_name_components = object_name.split(u"/")
    if object_name_components[0] == u"":
        # Drop the empty segment produced by a leading "/".
        object_name_components.pop(0)
    if object_name_components:
        path.extend(object_name_components)
    else:
        path.append(u"")
# Assemble the context from the endpoint's scheme/host/port plus the
# computed path segments and parsed query pairs.
return _S3URLContext(
    scheme=service_endpoint.scheme.decode("utf-8"),
    host=service_endpoint.get_host().decode("utf-8"),
    port=service_endpoint.port,
    path=path,
    query=query,
) | def s3_url_context(service_endpoint, bucket=None, object_name=None) | Create a URL based on the given service endpoint and suitable for
the given bucket or object.
@param service_endpoint: The service endpoint on which to base the
resulting URL.
@type service_endpoint: L{AWSServiceEndpoint}
@param bucket: If given, the name of a bucket to reference.
@type bucket: L{unicode}
@param object_name: If given, the name of an object or object
subresource to reference.
@type object_name: L{unicode} | 2.644268 | 2.67189 | 0.989662 |
# A GET on the service root lists all buckets owned by the caller.
details = self._details(
    method=b"GET",
    url_context=self._url_context(),
)
query = self._query_factory(details)
# Asynchronous: returns a Deferred that fires with the parsed result.
d = self._submit(query)
d.addCallback(self._parse_list_buckets)
return d | def list_buckets(self) | List all buckets.
Returns a list of all the buckets owned by the authenticated sender of
the request. | 6.823057 | 7.137472 | 0.955949 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.