sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def create(self, root=None, namespace=None):
    """Create a sequence element with the given root.

    @param root: The C{etree.Element} to root the sequence at; if C{None}
        a new one will be created.
    @param namespace: Optional XML namespace URI; overridden by the
        default namespace of C{root} when one is present.
    @result: A L{SequenceItem} with the given root.
    @raises L{WSDLParseError}: If the given C{root} has a bad tag.
    """
    if root is not None:
        tag = root.tag
        if root.nsmap:
            # Strip the "{namespace}" prefix lxml embeds in qualified
            # tags: namespace length plus the two surrounding braces.
            namespace = root.nsmap[None]
            tag = tag[len(namespace) + 2:]
        if tag != self.tag:
            raise WSDLParseError("Expected response with tag '%s', but "
                                 "got '%s' instead" % (self.tag, tag))
    return SequenceItem(self, root, namespace)
@param root: The C{etree.Element} to root the sequence at; if C{None} a
new one will be created.
@result: A L{SequenceItem} with the given root.
@raises L{WSDLParseError}: If the given C{root} has a bad tag. | entailment |
def set(self, child, min_occurs=1, max_occurs=1):
    """Set the schema for the sequence children.

    @param child: The schema that children must match; must be a
        non-leaf schema whose tag is "item".
    @param min_occurs: The minimum number of children the sequence
        must have.
    @param max_occurs: The maximum number of children the sequence
        can have.
    @return: The C{child} schema, to allow chained construction.
    @raises RuntimeError: On leaf children, an already-set child,
        missing occurrence bounds, or a child tag other than "item".
    """
    if isinstance(child, LeafSchema):
        raise RuntimeError("Sequence can't have leaf children")
    if self.child is not None:
        raise RuntimeError("Sequence has already a child")
    if min_occurs is None or max_occurs is None:
        raise RuntimeError("Sequence node without min or max")
    # NOTE: the original repeated the LeafSchema isinstance check here
    # with a different message; it was unreachable and has been removed.
    if not child.tag == "item":
        raise RuntimeError("Sequence node with bad child tag")
    self.child = child
    self.min_occurs = min_occurs
    self.max_occurs = max_occurs
    return child
@param child: The schema that children must match.
@param min_occurs: The minimum number of children the sequence
must have.
@param max_occurs: The maximum number of children the sequence
can have. | entailment |
def append(self):
    """Append a new item at the end of the sequence.

    @return: The newly created item.
    @raises L{WSDLParseError}: If adding another child would exceed the
        schema's max_occurs limit.
    """
    existing = self._root.getchildren()
    if len(existing) >= self._schema.max_occurs:
        raise WSDLParseError("Too many items in tag '%s'" %
                             self._schema.tag)
    child_tag = "item"
    if self._namespace is not None:
        # Qualify the tag with the namespace in lxml's "{ns}tag" form.
        child_tag = "{%s}%s" % (self._namespace, child_tag)
    element = etree.SubElement(self._root, child_tag)
    return self._schema.child.create(element)
@return: The newly created item.
@raises L{WSDLParseError}: If the operation would result in having
more child elements than the allowed max. | entailment |
def remove(self, item):
    """Remove the given C{item} from the sequence.

    @raises L{WSDLParseError}: If C{item} is not a child of this
        sequence (min_occurs enforcement happens in item deletion).
    """
    for index, element in enumerate(self._root.getchildren()):
        if element is not item._root:
            continue
        del self[index]
        return item
    raise WSDLParseError("Non existing item in tag '%s'" %
                         self._schema.tag)
@raises L{WSDLParseError}: If the operation would result in having
less child elements than the required min_occurs, or if no such
index is found. | entailment |
def _get_child(self, children, index):
"""Return the child with the given index."""
try:
return children[index]
except IndexError:
raise WSDLParseError("Non existing item in tag '%s'" %
self._schema.tag) | Return the child with the given index. | entailment |
def parse(self, wsdl):
    """Parse the given C{wsdl} data and build the associated schemas.

    @param wsdl: A string containing the raw xml of the WSDL definition
        to parse.
    @return: A C{dict} mapping response type names to their schemas.
    @raises RuntimeError: On duplicate response schemas or unexpected
        top-level elements.
    """
    parser = etree.XMLParser(remove_blank_text=True, remove_comments=True)
    root = etree.fromstring(wsdl, parser=parser)
    types = {}
    responses = {}
    schemas = {}
    namespace = root.attrib["targetNamespace"]
    for element in root[0][0]:
        self._remove_namespace_from_tag(element)
        if element.tag in ["annotation", "group"]:
            continue
        name = element.attrib["name"]
        if element.tag == "element":
            if name.endswith("Response"):
                if name in responses:
                    raise RuntimeError("Schema already defined")
                responses[name] = element
        elif element.tag == "complexType":
            # Track each complexType with a "used" flag (set during
            # _parse_type) so unused definitions can be detected.
            types[name] = [element, False]
        else:
            raise RuntimeError("Top-level element with unexpected tag")
    # items() instead of the Python 2-only iteritems() so this method
    # works on both Python 2 and 3.
    for name, element in responses.items():
        schemas[name] = self._parse_type(element, types)
        schemas[name].namespace = namespace
    return schemas
@param wsdl: A string containing the raw xml of the WSDL definition
to parse.
@return: A C{dict} mapping response type names to their schemas. | entailment |
def _parse_type(self, element, types):
        """Parse a 'complexType' element into a schema object.

        @param element: The top-level element referencing the complexType.
        @param types: A map of name -> [element, used_flag] for all
            available complexType definitions.
        @return: The schema for the complexType: a L{SequenceSchema} when
            the children are "item" repetitions, a L{NodeSchema} otherwise.
        @raises RuntimeError: On any structure this parser does not model.
        """
        name = element.attrib["name"]
        type = element.attrib["type"]
        # Only types in the WSDL's own target namespace are supported.
        if not type.startswith("tns:"):
            raise RuntimeError("Unexpected element type %s" % type)
        type = type[4:]
        # A complexType must contain exactly one container element;
        # the list-unpacking enforces that.
        [children] = types[type][0]
        # Mark this type as used so unused definitions can be detected.
        types[type][1] = True
        self._remove_namespace_from_tag(children)
        if children.tag not in ("sequence", "choice"):
            raise RuntimeError("Unexpected children type %s" % children.tag)
        # A container whose first child is named "item" models a sequence.
        if children[0].attrib["name"] == "item":
            schema = SequenceSchema(name)
        else:
            schema = NodeSchema(name)
        for child in children:
            self._remove_namespace_from_tag(child)
            if child.tag == "element":
                name, type, min_occurs, max_occurs = self._parse_child(child)
                if type in self.leaf_types:
                    # Leaf children may not repeat and only hang off nodes.
                    if max_occurs != 1:
                        raise RuntimeError("Unexpected max value for leaf")
                    if not isinstance(schema, NodeSchema):
                        raise RuntimeError("Attempt to add leaf to a non-node")
                    schema.add(LeafSchema(name), min_occurs=min_occurs)
                else:
                    if name == "item":  # sequence
                        if not isinstance(schema, SequenceSchema):
                            raise RuntimeError("Attempt to set child for "
                                               "non-sequence")
                        # Recurse to build the repeated item's schema.
                        schema.set(self._parse_type(child, types),
                                   min_occurs=min_occurs,
                                   max_occurs=max_occurs)
                    else:
                        if max_occurs != 1:
                            raise RuntimeError("Unexpected max for node")
                        if not isinstance(schema, NodeSchema):
                            raise RuntimeError("Unexpected schema type")
                        # Recurse to build the nested node's schema.
                        schema.add(self._parse_type(child, types),
                                   min_occurs=min_occurs)
            elif child.tag == "choice":
                # Choice containers are accepted but not modelled.
                pass
            else:
                raise RuntimeError("Unexpected child type")
        return schema
@param element: The top-level complexType element
@param types: A map of the elements of all available complexType's.
@return: The schema for the complexType. | entailment |
def _parse_child(self, child):
"""Parse a single child element.
@param child: The child C{etree.Element} to parse.
@return: A tuple C{(name, type, min_occurs, max_occurs)} with the
details about the given child.
"""
if set(child.attrib) - set(["name", "type", "minOccurs", "maxOccurs"]):
raise RuntimeError("Unexpected attribute in child")
name = child.attrib["name"]
type = child.attrib["type"].split(":")[1]
min_occurs = child.attrib.get("minOccurs")
max_occurs = child.attrib.get("maxOccurs")
if min_occurs is None:
min_occurs = "1"
min_occurs = int(min_occurs)
if max_occurs is None:
max_occurs = "1"
if max_occurs != "unbounded":
max_occurs = int(max_occurs)
return name, type, min_occurs, max_occurs | Parse a single child element.
@param child: The child C{etree.Element} to parse.
@return: A tuple C{(name, type, min_occurs, max_occurs)} with the
details about the given child. | entailment |
def get_existing_item(self, item):
    """Lookup item in remote service based on its key fields.

    :param item: D4S2Item whose project/from/to ids are used for lookup.
    :return: requests.Response containing the successful result
    """
    lookup_params = {
        'project_id': item.project_id,
        'from_user_id': item.from_user_id,
        'to_user_id': item.to_user_id,
    }
    response = requests.get(self.make_url(item.destination),
                            headers=self.json_headers,
                            params=lookup_params)
    self.check_response(response)
    return response
:param item: D4S2Item data contains keys we will use for lookup.
:return: requests.Response containing the successful result | entailment |
def create_item(self, item):
    """Create a new item in the D4S2 service at the item's destination.

    :param item: D4S2Item data used to create the remote item.
    :return: requests.Response containing the successful result
    """
    payload = {
        'project_id': item.project_id,
        'from_user_id': item.from_user_id,
        'to_user_id': item.to_user_id,
        'role': item.auth_role,
        'user_message': item.user_message,
    }
    # share_user_ids is only meaningful for deliveries; omit when unset.
    if item.share_user_ids:
        payload['share_user_ids'] = item.share_user_ids
    response = requests.post(self.make_url(item.destination),
                             headers=self.json_headers,
                             data=json.dumps(payload))
    self.check_response(response)
    return response
:param item: D4S2Item data to use for creating a D4S2 item
:return: requests.Response containing the successful result | entailment |
def send_item(self, destination, item_id, force_send):
    """Run the send action for item_id at destination.

    :param destination: str operation type (SHARE_DESTINATION or
        DELIVER_DESTINATION)
    :param item_id: str D4S2 service id of the item to send
    :param force_send: bool True allows emailing the item again
    :return: requests.Response containing the successful result
    """
    payload = json.dumps({'force': force_send})
    url = self.make_url(destination, "{}/send/".format(item_id))
    response = requests.post(url, headers=self.json_headers, data=payload)
    self.check_response(response)
    return response
:param destination: str which type of operation are we doing (SHARE_DESTINATION or DELIVER_DESTINATION)
:param item_id: str D4S2 service id representing the item we want to send
:param force_send: bool it's ok to email the item again
:return: requests.Response containing the successful result | entailment |
def check_response(self, response):
    """Raise D4S2Error when the response indicates failure.

    :param response: requests.Response response to be checked
    :raises D4S2Error: on 401 (unauthorized) or any non-2xx status
    """
    status = response.status_code
    if status == 401:
        raise D4S2Error(UNAUTHORIZED_MESSAGE)
    if status < 200 or status >= 300:
        raise D4S2Error("Request to {} failed with {}:\n{}.".format(
            response.url, status, response.text))
:param response: requests.Response response to be checked | entailment |
def send(self, api, force_send):
    """Send this item using api.

    Creates the item remotely if it does not exist yet; an existing item
    is only re-sent when force_send is True.

    :param api: D4S2Api sends messages to D4S2
    :param force_send: bool should we send even if the item already exists
    :raises D4S2Error: (warning) when already sent and force_send is False
    """
    item_id = self.get_existing_item_id(api)
    if item_id and not force_send:
        item_type = D4S2Api.DEST_TO_NAME.get(self.destination, "Item")
        msg = "{} already sent. Run with --resend argument to resend."
        raise D4S2Error(msg.format(item_type), warning=True)
    if not item_id:
        item_id = self.create_item_returning_id(api)
    api.send_item(self.destination, item_id, force_send)
:param api: D4S2Api sends messages to D4S2
:param force_send: bool should we send even if the item already exists | entailment |
def get_existing_item_id(self, api):
    """Lookup the id for this item via the D4S2 service.

    :param api: D4S2Api object who communicates with D4S2 server.
    :return: str id of this item, or None when no match exists
    """
    items = api.get_existing_item(self).json()
    if items:
        # Multiple matches are possible; the first one wins.
        return items[0]['id']
    return None
:param api: D4S2Api object who communicates with D4S2 server.
:return str id of this item or None if not found | entailment |
def create_item_returning_id(self, api):
    """Create this item in the D4S2 service.

    :param api: D4S2Api object who communicates with D4S2 server.
    :return: str id the service assigned to the newly created item
    """
    created = api.create_item(self).json()
    return created['id']
:param api: D4S2Api object who communicates with D4S2 server.
:return str newly created id for this item | entailment |
def share(self, project, to_user, force_send, auth_role, user_message):
    """Give to_user the specified access to project and email them.

    :param project: RemoteProject project to share
    :param to_user: RemoteUser user to receive email/access
    :param force_send: bool resend the email even if already shared
    :param auth_role: str project role eg 'project_admin' to grant
    :param user_message: str message to be sent with the share
    :return: str email we share the project with
    :raises ShareWithSelfError: when to_user is the current user
    """
    if self._is_current_user(to_user):
        raise ShareWithSelfError(SHARE_WITH_SELF_MESSAGE.format("share"))
    if not to_user.email:
        self._raise_user_missing_email_exception("share")
    self.set_user_project_permission(project, to_user, auth_role)
    return self._share_project(D4S2Api.SHARE_DESTINATION, project, to_user,
                               force_send, auth_role=auth_role,
                               user_message=user_message)
:param project: RemoteProject project to share
:param to_user: RemoteUser user to receive email/access
:param auth_role: str project role eg 'project_admin' to give to the user
:param user_message: str message to be sent with the share
:return: str email we share the project with | entailment |
def set_user_project_permission(self, project, user, auth_role):
    """Grant user the auth_role permission on project.

    :param project: RemoteProject project to update permissions on
    :param user: RemoteUser user to receive permissions
    :param auth_role: str project role eg 'project_admin'
    """
    # Delegate straight to the remote store, which talks to DukeDS.
    self.remote_store.set_user_project_permission(project, user, auth_role)
:param project: RemoteProject project to update permissions on
:param user: RemoteUser user to receive permissions
:param auth_role: str project role eg 'project_admin' | entailment |
def deliver(self, project, new_project_name, to_user, share_users, force_send, path_filter, user_message):
    """Hand project over to to_user, optionally copying it first.

    Removes to_user's access, copies the project to new_project_name when
    given, then asks the D4S2 service to email to_user so they can accept.

    :param project: RemoteProject pre-existing project to be delivered
    :param new_project_name: str name of non-existing project to copy to;
        no copy is made when None
    :param to_user: RemoteUser user we are handing the project to
    :param share_users: [RemoteUser] users shared with once to_user accepts
    :param force_send: bool enables resending of email for existing items
    :param path_filter: PathFilter filters which files are delivered
    :param user_message: str message to be sent with the delivery
    :return: str email we sent the delivery notice to
    :raises ShareWithSelfError: when to_user is the current user
    """
    if self._is_current_user(to_user):
        raise ShareWithSelfError(SHARE_WITH_SELF_MESSAGE.format("deliver"))
    if not to_user.email:
        self._raise_user_missing_email_exception("deliver")
    self.remove_user_permission(project, to_user)
    delivered_project = project
    if new_project_name:
        delivered_project = self._copy_project(project, new_project_name,
                                               path_filter)
    return self._share_project(D4S2Api.DELIVER_DESTINATION,
                               delivered_project, to_user, force_send,
                               user_message=user_message,
                               share_users=share_users)
send message to service to email user so they can have access.
:param project: RemoteProject pre-existing project to be delivered
:param new_project_name: str name of non-existing project to copy project_name to, if None we don't copy
:param to_user: RemoteUser user we are handing over the project to
:param share_users: [RemoteUser] who will have project shared with them once to_user accepts the project
:param force_send: boolean enables resending of email for existing projects
:param path_filter: PathFilter: filters what files are shared
:param user_message: str message to be sent with the share
:return: str email we sent deliver to | entailment |
def _share_project(self, destination, project, to_user, force_send, auth_role='', user_message='',
                   share_users=None):
    """Ask the D4S2 service to email/share project with to_user.

    :param destination: str sharing type (SHARE_DESTINATION or
        DELIVER_DESTINATION)
    :param project: RemoteProject project we are sharing
    :param to_user: RemoteUser user we are sharing with
    :param force_send: bool resend the notification even if already sent
    :param auth_role: str project role eg 'project_admin'; the email is
        customized based on this setting
    :param user_message: str message to be sent with the share
    :param share_users: [RemoteUser] users to have this project shared
        with after delivery (delivery only)
    :return: the email the user should receive a message on soon
    """
    sender = self.remote_store.get_current_user()
    share_user_ids = None
    if share_users:
        share_user_ids = [share_user.id for share_user in share_users]
    item = D4S2Item(destination=destination,
                    from_user_id=sender.id,
                    to_user_id=to_user.id,
                    project_id=project.id,
                    project_name=project.name,
                    auth_role=auth_role,
                    user_message=user_message,
                    share_user_ids=share_user_ids)
    item.send(self.api, force_send)
    return to_user.email
:param destination: str which type of sharing we are doing (SHARE_DESTINATION or DELIVER_DESTINATION)
:param project: RemoteProject project we are sharing
:param to_user: RemoteUser user we are sharing with
:param auth_role: str project role eg 'project_admin' email is customized based on this setting.
:param user_message: str message to be sent with the share
:param share_users: [RemoteUser] users to have this project shared with after delivery (delivery only)
:return: the email the user should receive a message on soon | entailment |
def _copy_project(self, project, new_project_name, path_filter):
    """Copy pre-existing project into a new project named new_project_name.

    :param project: remotestore.RemoteProject project to copy from
    :param new_project_name: str name of a not-yet-existing project to
        copy into
    :param path_filter: PathFilter: filters what files are copied
    :return: RemoteProject new project we copied data to
    :raises ValueError: when a project named new_project_name exists
    """
    new_project_name_or_id = ProjectNameOrId.create_from_name(new_project_name)
    # Check for a name collision before creating any scratch space.
    if self.remote_store.fetch_remote_project(new_project_name_or_id):
        raise ValueError("A project with name '{}' already exists.".format(new_project_name))
    activity = CopyActivity(self.remote_store.data_service, project, new_project_name)
    temp_directory = tempfile.mkdtemp()
    try:
        self._download_project(activity, project, temp_directory, path_filter)
        self._upload_project(activity, new_project_name, temp_directory)
        activity.finished()
    finally:
        # Always remove the scratch directory, even when the copy fails,
        # so failed copies do not leak temp space.
        shutil.rmtree(temp_directory)
    return self.remote_store.fetch_remote_project(new_project_name_or_id, must_exist=True)
:param project: remotestore.RemoteProject project to copy from
:param new_project_name: str project to copy to
:param path_filter: PathFilter: filters what files are shared
:return: RemoteProject new project we copied data to | entailment |
def _download_project(self, activity, project, temp_directory, path_filter):
    """Download the project's contents into temp_directory.

    :param activity: CopyActivity: info about the copy we download for
    :param project: remotestore.RemoteProject project to download
    :param temp_directory: str path to a directory we can download into
    :param path_filter: PathFilter: filters what files are downloaded
    """
    self.print_func("Downloading a copy of '{}'.".format(project.name))
    # The pre-processor records provenance for each downloaded file.
    pre_processor = DownloadedFileRelations(activity)
    downloader = ProjectDownload(self.remote_store, project, temp_directory,
                                 path_filter,
                                 file_download_pre_processor=pre_processor)
    downloader.run()
:param activity: CopyActivity: info about the copy activity are downloading for
:param project: remotestore.RemoteProject project to download
:param temp_directory: str path to directory we can download into
:param path_filter: PathFilter: filters what files are shared | entailment |
def _upload_project(self, activity, project_name, temp_directory):
    """Upload the contents of temp_directory into project_name.

    :param activity: CopyActivity: info about the copy we upload for
    :param project_name: str name of the project to upload files into
    :param temp_directory: str path to the directory whose entries we
        will upload
    """
    self.print_func("Uploading to '{}'.".format(project_name))
    base_dir = os.path.abspath(temp_directory)
    paths_to_send = [os.path.join(temp_directory, entry)
                     for entry in os.listdir(base_dir)]
    # The post-processor links each uploaded file back to its source.
    post_processor = UploadedFileRelations(activity)
    uploader = ProjectUpload(self.config,
                             ProjectNameOrId.create_from_name(project_name),
                             paths_to_send,
                             file_upload_post_processor=post_processor)
    uploader.run()
:param activity: CopyActivity: info about the copy activity are uploading for
:param project_name: str project name we will upload files to
:param temp_directory: str path to directory who's files we will upload | entailment |
def _is_current_user(self, some_user):
"""
Is the specified user the current user?
:param some_user: RemoteUser user we want to check against the current user
:return: boolean: True if the current user is the passed in user
"""
current_user = self.remote_store.get_current_user()
return current_user.id == some_user.id | Is the specified user the current user?
:param some_user: RemoteUser user we want to check against the current user
:return: boolean: True if the current user is the passed in user | entailment |
def finished(self):
    """Mark the activity as finished.

    Updates the remote activity record with its start and end timestamps.
    """
    ended = self._current_timestamp_str()
    self.data_service.update_activity(self.id, self.name, self.desc,
                                      started_on=self.started,
                                      ended_on=ended)
def run(self, data_service, project_file):
    """Attach a remote file to the activity with a "used" relationship.

    Also records the file's current version id by its remote path so the
    upload side can later link derived files back to it.

    :param data_service: DataServiceApi: service used to attach relationship
    :param project_file: ProjectFile: contains details about a file we
        will attach
    """
    details = data_service.get_file(project_file.id).json()
    version_id = details['current_version']['id']
    data_service.create_used_relation(self.activity.id, KindType.file_str,
                                      version_id)
    self.activity.remote_path_to_file_version_id[project_file.path] = version_id
:param data_service: DataServiceApi: service used to attach relationship
:param project_file: ProjectFile: contains details about a file we will attach | entailment |
def run(self, data_service, file_details):
    """Attach an uploaded file to the activity with provenance relations.

    Creates a "was generated by" relation for the new file version and a
    "was derived from" relation back to the originally downloaded version.

    :param data_service: DataServiceApi: service used to attach relationships
    :param file_details: dict: response from DukeDS POST to /files/
        containing the current_version id
    """
    new_version_id = file_details['current_version']['id']
    data_service.create_was_generated_by_relation(
        self.activity.id, KindType.file_str, new_version_id)
    source_version_id = self._lookup_used_entity_id(file_details)
    data_service.create_was_derived_from_relation(
        source_version_id, KindType.file_str,
        new_version_id, KindType.file_str)
:param data_service: DataServiceApi: service used to attach relationship
:param file_details: dict: response from DukeDS POST to /files/ containing current_version id | entailment |
def _lookup_used_entity_id(self, file_details):
"""
Return the file_version_id associated with the path from file_details.
The file_version_id is looked up from a dictionary in the activity.
:param file_details: dict: response from DukeDS POST to /files/
:return: str: file_version_id uuid
"""
# Since this uses the response from POST to /files/ this will include the ancestors and not be
# effected by exclude_response_fields that were used when listing the project
name_parts = [ancestor['name'] for ancestor in file_details['ancestors']
if ancestor['kind'] == KindType.folder_str]
name_parts.append(file_details['name'])
remote_path = os.sep.join(name_parts)
return self.activity.remote_path_to_file_version_id[remote_path] | Return the file_version_id associated with the path from file_details.
The file_version_id is looked up from a dictionary in the activity.
:param file_details: dict: response from DukeDS POST to /files/
:return: str: file_version_id uuid | entailment |
def create_config(allow_insecure_config_file=False):
    """Build a Config from /etc/ddsclient.conf and the user config file.

    The user config (~/.ddsclient or $DDSCLIENT_CONF) overrides global
    settings; the default local config file's permissions are verified
    unless explicitly allowed to be insecure.

    :param allow_insecure_config_file: bool: when true we will not check
        ~/.ddsclient permissions.
    :return: Config with the configuration to use for DDSClient.
    """
    config = Config()
    config.add_properties(GLOBAL_CONFIG_FILENAME)
    user_config = get_user_config_filename()
    # Only the default local config location is required to be private.
    if user_config == LOCAL_CONFIG_FILENAME and not allow_insecure_config_file:
        verify_file_private(user_config)
    config.add_properties(user_config)
    return config
:param allow_insecure_config_file: bool: when true we will not check ~/.ddsclient permissions.
:return: Config with the configuration to use for DDSClient. | entailment |
def add_properties(self, filename):
    """Merge top-level YAML properties from filename into this config.

    Missing files are silently ignored; existing values are replaced.

    :param filename: str path to YAML file to pull top level properties
        from (may contain ~)
    """
    expanded = os.path.expanduser(filename)
    if not os.path.exists(expanded):
        return
    with open(expanded, 'r') as yaml_file:
        self.update_properties(yaml.safe_load(yaml_file))
:param filename: str path to YAML file to pull top level properties from | entailment |
def get_portal_url_base(self):
    """Determine the portal hostname of the data service from self.url.

    Strips the leading "api." subdomain and any remaining "api" text from
    the hostname, e.g. "api.dataservice.duke.edu" -> "dataservice.duke.edu".
    Note: returns a bare hostname, not a full URL with scheme.

    :return: str portal hostname of the data service
    """
    api_host = urlparse(self.url).hostname
    # Raw strings avoid the invalid "\." escape warning in the pattern.
    portal_url = re.sub(r'^api\.', '', api_host)
    portal_url = re.sub(r'api', '', portal_url)
    return portal_url
:return: str root url of the data service (eg: https://dataservice.duke.edu) | entailment |
def auth(self):
    """The auth token used when connecting to the data service.

    Falls back to the environment variable named by AUTH_ENV_KEY_NAME
    when no value is configured.
    :return: str auth token or None
    """
    env_token = os.environ.get(AUTH_ENV_KEY_NAME, None)
    return self.values.get(Config.AUTH, env_token)
:return: | entailment |
def upload_bytes_per_chunk(self):
    """Return the number of bytes per chunk sent to the external store.

    :return: int bytes per upload chunk
    """
    raw_value = self.values.get(Config.UPLOAD_BYTES_PER_CHUNK,
                                DDS_DEFAULT_UPLOAD_CHUNKS)
    # The value may be an int or a string like "100MB".
    return Config.parse_bytes_str(raw_value)
:return: int bytes per upload chunk | entailment |
def download_workers(self):
    """Return the number of parallel workers to use when downloading a file.

    :return: int number of workers; None or 1 disables parallel downloading
    """
    # Profiling downloads on different servers showed half the number of
    # CPUs to be the sweet spot for speed.
    half_cpus = int(math.ceil(default_num_workers() / 2))
    return self.values.get(Config.DOWNLOAD_WORKERS, half_cpus)
:return: int number of workers. Specify None or 1 to disable parallel downloading | entailment |
def parse_bytes_str(value):
    """Return the integer number of bytes represented by value.

    A trailing "MB" multiplies the numeric portion by 1024*1024.

    :param value: int or str value to convert
    :return: int number of bytes represented by value.
    """
    # isinstance instead of type() == str: idiomatic and handles
    # str subclasses correctly.
    if isinstance(value, str):
        if "MB" in value:
            return int(value.replace("MB", "")) * MB_TO_BYTES
        return int(value)
    return value
:return: int number of bytes represented by value. | entailment |
def local_error(self, originalValue, calculatedValue):
    """Calculate the absolute error between the two given values.

    :param list originalValue: List containing the values of the original
        data; only the first entry is used.
    :param list calculatedValue: List containing the values of the
        calculated TimeSeries corresponding to originalValue.
    :return: Returns the error measure of the two given values.
    :rtype: numeric
    """
    return abs(originalValue[0] - calculatedValue[0])
:param list originalValue: List containing the values of the original data.
:param list calculatedValue: List containing the values of the calculated TimeSeries that
corresponds to originalValue.
:return: Returns the error measure of the two given values.
:rtype: numeric | entailment |
def describe_instances(self, *instance_ids):
    """Describe the given instances, or all instances when none given.

    @param instance_ids: Optional instance ids to restrict the query to.
    @return: A Deferred firing with the parsed describe_instances result.
    """
    # EC2 expects 1-based InstanceId.N query parameters.
    params = {"InstanceId.%d" % (position + 1): instance_id
              for position, instance_id in enumerate(instance_ids)}
    query = self.query_factory(
        action="DescribeInstances", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.describe_instances)
def run_instances(self, image_id, min_count, max_count,
                  security_groups=None, key_name=None, instance_type=None,
                  user_data=None, availability_zone=None, kernel_id=None,
                  ramdisk_id=None, subnet_id=None, security_group_ids=None):
        """Run new instances.

        Launches between C{min_count} and C{max_count} instances of
        C{image_id}. Either C{subnet_id} together with
        C{security_group_ids} (VPC launch), or C{security_groups}
        (EC2-classic launch), must be supplied.

        TODO: blockDeviceMapping, monitoring
        """
        params = {"ImageId": image_id, "MinCount": str(min_count),
                  "MaxCount": str(max_count)}
        if key_name is not None:
            params["KeyName"] = key_name
        if subnet_id is not None:
            params["SubnetId"] = subnet_id
            # Launching into a subnet (VPC) requires security group *ids*.
            if security_group_ids is not None:
                for i, id in enumerate(security_group_ids):
                    params["SecurityGroupId.%d" % (i + 1)] = id
            else:
                msg = "You must specify the security_group_ids with the subnet_id"
                raise ValueError(msg)
        elif security_groups is not None:
            # EC2-classic launches use security group *names*.
            for i, name in enumerate(security_groups):
                params["SecurityGroup.%d" % (i + 1)] = name
        else:
            msg = ("You must specify either the subnet_id and "
                   "security_group_ids or security_groups")
            raise ValueError(msg)
        if user_data is not None:
            # EC2 requires user data to be base64-encoded.
            params["UserData"] = b64encode(user_data)
        if instance_type is not None:
            params["InstanceType"] = instance_type
        if availability_zone is not None:
            params["Placement.AvailabilityZone"] = availability_zone
        if kernel_id is not None:
            params["KernelId"] = kernel_id
        if ramdisk_id is not None:
            params["RamdiskId"] = ramdisk_id
        query = self.query_factory(
            action="RunInstances", creds=self.creds, endpoint=self.endpoint,
            other_params=params)
        d = query.submit()
        return d.addCallback(self.parser.run_instances)
TODO: blockDeviceMapping, monitoring, subnetId | entailment |
def get_console_output(self, instance_id):
    """Get the console output for a single instance.

    @param instance_id: The id of the instance to query.
    @return: A Deferred firing with the parsed console output.
    """
    query = self.query_factory(
        action="GetConsoleOutput", creds=self.creds,
        endpoint=self.endpoint,
        other_params={"InstanceId": instance_id})
    return query.submit().addCallback(self.parser.get_console_output)
def describe_security_groups(self, *names):
    """Describe security groups.

    @param names: Optionally, a list of security group names to describe.
        Defaults to all security groups in the account.
    @return: A C{Deferred} that will fire with a list of
        L{SecurityGroup}s retrieved from the cloud.
    """
    # EC2 expects 1-based GroupName.N query parameters; empty when no
    # names are given, which asks for every group.
    params = {"GroupName.%d" % (position + 1): group_name
              for position, group_name in enumerate(names)}
    query = self.query_factory(
        action="DescribeSecurityGroups", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.describe_security_groups)
@param names: Optionally, a list of security group names to describe.
Defaults to all security groups in the account.
@return: A C{Deferred} that will fire with a list of L{SecurityGroup}s
retrieved from the cloud. | entailment |
def create_security_group(self, name, description, vpc_id=None):
"""Create security group.
@param name: Name of the new security group.
@param description: Description of the new security group.
@param vpc_id: ID of the VPC to which the security group will belong.
@return: A C{Deferred} that will fire with a truth value for the
success of the operation.
"""
parameters = {"GroupName": name, "GroupDescription": description}
if vpc_id:
parameters["VpcId"] = vpc_id
query = self.query_factory(
action="CreateSecurityGroup", creds=self.creds,
endpoint=self.endpoint, other_params=parameters)
d = query.submit()
return d.addCallback(self.parser.create_security_group) | Create security group.
@param name: Name of the new security group.
@param description: Description of the new security group.
@param vpc_id: ID of the VPC to which the security group will belong.
@return: A C{Deferred} that will fire with a truth value for the
success of the operation. | entailment |
def authorize_security_group(
self, group_name=None, group_id=None, source_group_name="", source_group_owner_id="",
ip_protocol="", from_port="", to_port="", cidr_ip=""):
"""
There are two ways to use C{authorize_security_group}:
1) associate an existing group (source group) with the one that you
are targeting (group_name) with an authorization update; or
2) associate a set of IP permissions with the group you are
targeting with an authorization update.
@param group_name: The group you will be modifying with a new
authorization.
@param group_id: The id of the group you will be modifying with
a new authorization.
Optionally, the following parameters:
@param source_group_name: Name of security group to authorize access to
when operating on a user/group pair.
@param source_group_owner_id: Owner of security group to authorize
access to when operating on a user/group pair.
If those parameters are not specified, then the following must be:
@param ip_protocol: IP protocol to authorize access to when operating
on a CIDR IP.
@param from_port: Bottom of port range to authorize access to when
operating on a CIDR IP. This contains the ICMP type if ICMP is
being authorized.
@param to_port: Top of port range to authorize access to when operating
on a CIDR IP. This contains the ICMP code if ICMP is being
authorized.
@param cidr_ip: CIDR IP range to authorize access to when operating on
a CIDR IP.
@return: A C{Deferred} that will fire with a truth value for the
success of the operation.
"""
if source_group_name and source_group_owner_id:
parameters = {
"SourceSecurityGroupName": source_group_name,
"SourceSecurityGroupOwnerId": source_group_owner_id,
}
elif ip_protocol and from_port and to_port and cidr_ip:
parameters = {
"IpProtocol": ip_protocol,
"FromPort": from_port,
"ToPort": to_port,
"CidrIp": cidr_ip,
}
else:
msg = ("You must specify either both group parameters or "
"all the ip parameters.")
raise ValueError(msg)
if group_id:
parameters["GroupId"] = group_id
elif group_name:
parameters["GroupName"] = group_name
else:
raise ValueError("You must specify either the group name of the group id.")
query = self.query_factory(
action="AuthorizeSecurityGroupIngress", creds=self.creds,
endpoint=self.endpoint, other_params=parameters)
d = query.submit()
return d.addCallback(self.parser.truth_return) | There are two ways to use C{authorize_security_group}:
1) associate an existing group (source group) with the one that you
are targeting (group_name) with an authorization update; or
2) associate a set of IP permissions with the group you are
targeting with an authorization update.
@param group_name: The group you will be modifying with a new
authorization.
@param group_id: The id of the group you will be modifying with
a new authorization.
Optionally, the following parameters:
@param source_group_name: Name of security group to authorize access to
when operating on a user/group pair.
@param source_group_owner_id: Owner of security group to authorize
access to when operating on a user/group pair.
If those parameters are not specified, then the following must be:
@param ip_protocol: IP protocol to authorize access to when operating
on a CIDR IP.
@param from_port: Bottom of port range to authorize access to when
operating on a CIDR IP. This contains the ICMP type if ICMP is
being authorized.
@param to_port: Top of port range to authorize access to when operating
on a CIDR IP. This contains the ICMP code if ICMP is being
authorized.
@param cidr_ip: CIDR IP range to authorize access to when operating on
a CIDR IP.
@return: A C{Deferred} that will fire with a truth value for the
success of the operation. | entailment |
def authorize_group_permission(
self, group_name, source_group_name, source_group_owner_id):
"""
This is a convenience function that wraps the "authorize group"
functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}.
"""
d = self.authorize_security_group(
group_name,
source_group_name=source_group_name,
source_group_owner_id=source_group_owner_id)
return d | This is a convenience function that wraps the "authorize group"
functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}. | entailment |
def authorize_ip_permission(
self, group_name, ip_protocol, from_port, to_port, cidr_ip):
"""
This is a convenience function that wraps the "authorize ip
permission" functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}.
"""
d = self.authorize_security_group(
group_name,
ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
cidr_ip=cidr_ip)
return d | This is a convenience function that wraps the "authorize ip
permission" functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{authorize_security_group}. | entailment |
def revoke_group_permission(
self, group_name, source_group_name, source_group_owner_id):
"""
This is a convenience function that wraps the "authorize group"
functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{revoke_security_group}.
"""
d = self.revoke_security_group(
group_name,
source_group_name=source_group_name,
source_group_owner_id=source_group_owner_id)
return d | This is a convenience function that wraps the "authorize group"
functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{revoke_security_group}. | entailment |
def revoke_ip_permission(
self, group_name, ip_protocol, from_port, to_port, cidr_ip):
"""
This is a convenience function that wraps the "authorize ip
permission" functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{revoke_security_group}.
"""
d = self.revoke_security_group(
group_name,
ip_protocol=ip_protocol, from_port=from_port, to_port=to_port,
cidr_ip=cidr_ip)
return d | This is a convenience function that wraps the "authorize ip
permission" functionality of the C{authorize_security_group} method.
For an explanation of the parameters, see C{revoke_security_group}. | entailment |
def describe_volumes(self, *volume_ids):
"""Describe available volumes."""
volumeset = {}
for pos, volume_id in enumerate(volume_ids):
volumeset["VolumeId.%d" % (pos + 1)] = volume_id
query = self.query_factory(
action="DescribeVolumes", creds=self.creds, endpoint=self.endpoint,
other_params=volumeset)
d = query.submit()
return d.addCallback(self.parser.describe_volumes) | Describe available volumes. | entailment |
def create_volume(self, availability_zone, size=None, snapshot_id=None):
"""Create a new volume."""
params = {"AvailabilityZone": availability_zone}
if ((snapshot_id is None and size is None) or
(snapshot_id is not None and size is not None)):
raise ValueError("Please provide either size or snapshot_id")
if size is not None:
params["Size"] = str(size)
if snapshot_id is not None:
params["SnapshotId"] = snapshot_id
query = self.query_factory(
action="CreateVolume", creds=self.creds, endpoint=self.endpoint,
other_params=params)
d = query.submit()
return d.addCallback(self.parser.create_volume) | Create a new volume. | entailment |
def describe_snapshots(self, *snapshot_ids):
"""Describe available snapshots.
TODO: ownerSet, restorableBySet
"""
snapshot_set = {}
for pos, snapshot_id in enumerate(snapshot_ids):
snapshot_set["SnapshotId.%d" % (pos + 1)] = snapshot_id
query = self.query_factory(
action="DescribeSnapshots", creds=self.creds,
endpoint=self.endpoint, other_params=snapshot_set)
d = query.submit()
return d.addCallback(self.parser.snapshots) | Describe available snapshots.
TODO: ownerSet, restorableBySet | entailment |
def delete_snapshot(self, snapshot_id):
"""Remove a previously created snapshot."""
query = self.query_factory(
action="DeleteSnapshot", creds=self.creds, endpoint=self.endpoint,
other_params={"SnapshotId": snapshot_id})
d = query.submit()
return d.addCallback(self.parser.truth_return) | Remove a previously created snapshot. | entailment |
def attach_volume(self, volume_id, instance_id, device):
"""Attach the given volume to the specified instance at C{device}."""
query = self.query_factory(
action="AttachVolume", creds=self.creds, endpoint=self.endpoint,
other_params={"VolumeId": volume_id, "InstanceId": instance_id,
"Device": device})
d = query.submit()
return d.addCallback(self.parser.attach_volume) | Attach the given volume to the specified instance at C{device}. | entailment |
def describe_keypairs(self, *keypair_names):
"""Returns information about key pairs available."""
keypairs = {}
for index, keypair_name in enumerate(keypair_names):
keypairs["KeyName.%d" % (index + 1)] = keypair_name
query = self.query_factory(
action="DescribeKeyPairs", creds=self.creds,
endpoint=self.endpoint, other_params=keypairs)
d = query.submit()
return d.addCallback(self.parser.describe_keypairs) | Returns information about key pairs available. | entailment |
def create_keypair(self, keypair_name):
"""
Create a new 2048 bit RSA key pair and return a unique ID that can be
used to reference the created key pair when launching new instances.
"""
query = self.query_factory(
action="CreateKeyPair", creds=self.creds, endpoint=self.endpoint,
other_params={"KeyName": keypair_name})
d = query.submit()
return d.addCallback(self.parser.create_keypair) | Create a new 2048 bit RSA key pair and return a unique ID that can be
used to reference the created key pair when launching new instances. | entailment |
def import_keypair(self, keypair_name, key_material):
"""
Import an existing SSH key into EC2. It supports:
* OpenSSH public key format (e.g., the format in
~/.ssh/authorized_keys)
* Base64 encoded DER format
* SSH public key file format as specified in RFC4716
@param keypair_name: The name of the key to create.
@param key_material: The material in one of the supported format.
@return: A L{Deferred} firing with a L{model.Keypair} instance if
successful.
TODO: there is no corresponding method in the 2009-11-30 version
of the ec2 wsdl. Delete this?
"""
query = self.query_factory(
action="ImportKeyPair", creds=self.creds, endpoint=self.endpoint,
other_params={"KeyName": keypair_name,
"PublicKeyMaterial": b64encode(key_material)})
d = query.submit()
return d.addCallback(self.parser.import_keypair, key_material) | Import an existing SSH key into EC2. It supports:
* OpenSSH public key format (e.g., the format in
~/.ssh/authorized_keys)
* Base64 encoded DER format
* SSH public key file format as specified in RFC4716
@param keypair_name: The name of the key to create.
@param key_material: The material in one of the supported format.
@return: A L{Deferred} firing with a L{model.Keypair} instance if
successful.
TODO: there is no corresponding method in the 2009-11-30 version
of the ec2 wsdl. Delete this? | entailment |
def allocate_address(self):
"""
Acquire an elastic IP address to be attached subsequently to EC2
instances.
@return: the IP address allocated.
"""
# XXX remove empty other_params
query = self.query_factory(
action="AllocateAddress", creds=self.creds, endpoint=self.endpoint,
other_params={})
d = query.submit()
return d.addCallback(self.parser.allocate_address) | Acquire an elastic IP address to be attached subsequently to EC2
instances.
@return: the IP address allocated. | entailment |
def release_address(self, address):
"""
Release a previously allocated address returned by C{allocate_address}.
@return: C{True} if the operation succeeded.
"""
query = self.query_factory(
action="ReleaseAddress", creds=self.creds, endpoint=self.endpoint,
other_params={"PublicIp": address})
d = query.submit()
return d.addCallback(self.parser.truth_return) | Release a previously allocated address returned by C{allocate_address}.
@return: C{True} if the operation succeeded. | entailment |
def describe_addresses(self, *addresses):
"""
List the elastic IPs allocated in this account.
@param addresses: if specified, the addresses to get information about.
@return: a C{list} of (address, instance_id). If the elastic IP is not
associated currently, C{instance_id} will be C{None}.
"""
address_set = {}
for pos, address in enumerate(addresses):
address_set["PublicIp.%d" % (pos + 1)] = address
query = self.query_factory(
action="DescribeAddresses", creds=self.creds,
endpoint=self.endpoint, other_params=address_set)
d = query.submit()
return d.addCallback(self.parser.describe_addresses) | List the elastic IPs allocated in this account.
@param addresses: if specified, the addresses to get information about.
@return: a C{list} of (address, instance_id). If the elastic IP is not
associated currently, C{instance_id} will be C{None}. | entailment |
def instances_set(self, root, reservation):
"""Parse instance data out of an XML payload.
@param root: The root node of the XML payload.
@param reservation: The L{Reservation} associated with the instances
from the response.
@return: A C{list} of L{Instance}s.
"""
instances = []
for instance_data in root.find("instancesSet"):
instances.append(self.instance(instance_data, reservation))
return instances | Parse instance data out of an XML payload.
@param root: The root node of the XML payload.
@param reservation: The L{Reservation} associated with the instances
from the response.
@return: A C{list} of L{Instance}s. | entailment |
def instance(self, instance_data, reservation):
"""Parse instance data out of an XML payload.
@param instance_data: An XML node containing instance data.
@param reservation: The L{Reservation} associated with the instance.
@return: An L{Instance}.
TODO: reason, platform, monitoring, subnetId, vpcId, privateIpAddress,
ipAddress, stateReason, architecture, rootDeviceName,
blockDeviceMapping, instanceLifecycle, spotInstanceRequestId.
"""
for group_data in instance_data.find("groupSet"):
group_id = group_data.findtext("groupId")
group_name = group_data.findtext("groupName")
reservation.groups.append((group_id, group_name))
instance_id = instance_data.findtext("instanceId")
instance_state = instance_data.find(
"instanceState").findtext("name")
private_dns_name = instance_data.findtext("privateDnsName")
dns_name = instance_data.findtext("dnsName")
private_ip_address = instance_data.findtext("privateIpAddress")
ip_address = instance_data.findtext("ipAddress")
key_name = instance_data.findtext("keyName")
ami_launch_index = instance_data.findtext("amiLaunchIndex")
products = []
product_codes = instance_data.find("productCodes")
if product_codes is not None:
for product_data in instance_data.find("productCodes"):
products.append(product_data.text)
instance_type = instance_data.findtext("instanceType")
launch_time = instance_data.findtext("launchTime")
placement = instance_data.find("placement").findtext(
"availabilityZone")
kernel_id = instance_data.findtext("kernelId")
ramdisk_id = instance_data.findtext("ramdiskId")
image_id = instance_data.findtext("imageId")
instance = model.Instance(
instance_id, instance_state, instance_type, image_id,
private_dns_name, dns_name, private_ip_address, ip_address,
key_name, ami_launch_index, launch_time, placement, products,
kernel_id, ramdisk_id, reservation=reservation)
return instance | Parse instance data out of an XML payload.
@param instance_data: An XML node containing instance data.
@param reservation: The L{Reservation} associated with the instance.
@return: An L{Instance}.
TODO: reason, platform, monitoring, subnetId, vpcId, privateIpAddress,
ipAddress, stateReason, architecture, rootDeviceName,
blockDeviceMapping, instanceLifecycle, spotInstanceRequestId. | entailment |
def describe_instances(self, xml_bytes):
"""
Parse the reservations XML payload that is returned from an AWS
describeInstances API call.
Instead of returning the reservations as the "top-most" object, we
return the object that most developers and their code will be
interested in: the instances. In instances reservation is available on
the instance object.
The following instance attributes are optional:
* ami_launch_index
* key_name
* kernel_id
* product_codes
* ramdisk_id
* reason
@param xml_bytes: raw XML payload from AWS.
"""
root = XML(xml_bytes)
results = []
# May be a more elegant way to do this:
for reservation_data in root.find("reservationSet"):
# Create a reservation object with the parsed data.
reservation = model.Reservation(
reservation_id=reservation_data.findtext("reservationId"),
owner_id=reservation_data.findtext("ownerId"))
# Get the list of instances.
instances = self.instances_set(
reservation_data, reservation)
results.extend(instances)
return results | Parse the reservations XML payload that is returned from an AWS
describeInstances API call.
Instead of returning the reservations as the "top-most" object, we
return the object that most developers and their code will be
interested in: the instances. In instances reservation is available on
the instance object.
The following instance attributes are optional:
* ami_launch_index
* key_name
* kernel_id
* product_codes
* ramdisk_id
* reason
@param xml_bytes: raw XML payload from AWS. | entailment |
def run_instances(self, xml_bytes):
"""
Parse the reservations XML payload that is returned from an AWS
RunInstances API call.
@param xml_bytes: raw XML bytes with a C{RunInstancesResponse} root
element.
"""
root = XML(xml_bytes)
# Get the security group information.
groups = []
for group_data in root.find("groupSet"):
group_id = group_data.findtext("groupId")
groups.append(group_id)
# Create a reservation object with the parsed data.
reservation = model.Reservation(
reservation_id=root.findtext("reservationId"),
owner_id=root.findtext("ownerId"),
groups=groups)
# Get the list of instances.
instances = self.instances_set(root, reservation)
return instances | Parse the reservations XML payload that is returned from an AWS
RunInstances API call.
@param xml_bytes: raw XML bytes with a C{RunInstancesResponse} root
element. | entailment |
def terminate_instances(self, xml_bytes):
"""Parse the XML returned by the C{TerminateInstances} function.
@param xml_bytes: XML bytes with a C{TerminateInstancesResponse} root
element.
@return: An iterable of C{tuple} of (instanceId, previousState,
currentState) for the ec2 instances that where terminated.
"""
root = XML(xml_bytes)
result = []
# May be a more elegant way to do this:
instances = root.find("instancesSet")
if instances is not None:
for instance in instances:
instanceId = instance.findtext("instanceId")
previousState = instance.find("previousState").findtext(
"name")
currentState = instance.find("currentState").findtext(
"name")
result.append((instanceId, previousState, currentState))
return result | Parse the XML returned by the C{TerminateInstances} function.
@param xml_bytes: XML bytes with a C{TerminateInstancesResponse} root
element.
@return: An iterable of C{tuple} of (instanceId, previousState,
currentState) for the ec2 instances that where terminated. | entailment |
def describe_security_groups(self, xml_bytes):
"""Parse the XML returned by the C{DescribeSecurityGroups} function.
@param xml_bytes: XML bytes with a C{DescribeSecurityGroupsResponse}
root element.
@return: A list of L{SecurityGroup} instances.
"""
root = XML(xml_bytes)
result = []
for group_info in root.findall("securityGroupInfo/item"):
id = group_info.findtext("groupId")
name = group_info.findtext("groupName")
description = group_info.findtext("groupDescription")
owner_id = group_info.findtext("ownerId")
allowed_groups = []
allowed_ips = []
ip_permissions = group_info.find("ipPermissions")
if ip_permissions is None:
ip_permissions = ()
for ip_permission in ip_permissions:
# openstack doesn't handle self authorized groups properly
# XXX this is an upstream problem and should be addressed there
# lp bug #829609
ip_protocol = ip_permission.findtext("ipProtocol")
from_port = ip_permission.findtext("fromPort")
to_port = ip_permission.findtext("toPort")
if from_port:
from_port = int(from_port)
if to_port:
to_port = int(to_port)
for groups in ip_permission.findall("groups/item") or ():
user_id = groups.findtext("userId")
group_name = groups.findtext("groupName")
if user_id and group_name:
if (user_id, group_name) not in allowed_groups:
allowed_groups.append((user_id, group_name))
for ip_ranges in ip_permission.findall("ipRanges/item") or ():
cidr_ip = ip_ranges.findtext("cidrIp")
allowed_ips.append(
model.IPPermission(
ip_protocol, from_port, to_port, cidr_ip))
allowed_groups = [model.UserIDGroupPair(user_id, group_name)
for user_id, group_name in allowed_groups]
security_group = model.SecurityGroup(
id, name, description, owner_id=owner_id,
groups=allowed_groups, ips=allowed_ips)
result.append(security_group)
return result | Parse the XML returned by the C{DescribeSecurityGroups} function.
@param xml_bytes: XML bytes with a C{DescribeSecurityGroupsResponse}
root element.
@return: A list of L{SecurityGroup} instances. | entailment |
def describe_volumes(self, xml_bytes):
"""Parse the XML returned by the C{DescribeVolumes} function.
@param xml_bytes: XML bytes with a C{DescribeVolumesResponse} root
element.
@return: A list of L{Volume} instances.
TODO: attachementSetItemResponseType#deleteOnTermination
"""
root = XML(xml_bytes)
result = []
for volume_data in root.find("volumeSet"):
volume_id = volume_data.findtext("volumeId")
size = int(volume_data.findtext("size"))
snapshot_id = volume_data.findtext("snapshotId")
availability_zone = volume_data.findtext("availabilityZone")
status = volume_data.findtext("status")
create_time = volume_data.findtext("createTime")
create_time = datetime.strptime(
create_time[:19], "%Y-%m-%dT%H:%M:%S")
volume = model.Volume(
volume_id, size, status, create_time, availability_zone,
snapshot_id)
result.append(volume)
for attachment_data in volume_data.find("attachmentSet"):
instance_id = attachment_data.findtext("instanceId")
device = attachment_data.findtext("device")
status = attachment_data.findtext("status")
attach_time = attachment_data.findtext("attachTime")
attach_time = datetime.strptime(
attach_time[:19], "%Y-%m-%dT%H:%M:%S")
attachment = model.Attachment(
instance_id, device, status, attach_time)
volume.attachments.append(attachment)
return result | Parse the XML returned by the C{DescribeVolumes} function.
@param xml_bytes: XML bytes with a C{DescribeVolumesResponse} root
element.
@return: A list of L{Volume} instances.
TODO: attachementSetItemResponseType#deleteOnTermination | entailment |
def create_volume(self, xml_bytes):
"""Parse the XML returned by the C{CreateVolume} function.
@param xml_bytes: XML bytes with a C{CreateVolumeResponse} root
element.
@return: The L{Volume} instance created.
"""
root = XML(xml_bytes)
volume_id = root.findtext("volumeId")
size = int(root.findtext("size"))
snapshot_id = root.findtext("snapshotId")
availability_zone = root.findtext("availabilityZone")
status = root.findtext("status")
create_time = root.findtext("createTime")
create_time = datetime.strptime(
create_time[:19], "%Y-%m-%dT%H:%M:%S")
volume = model.Volume(
volume_id, size, status, create_time, availability_zone,
snapshot_id)
return volume | Parse the XML returned by the C{CreateVolume} function.
@param xml_bytes: XML bytes with a C{CreateVolumeResponse} root
element.
@return: The L{Volume} instance created. | entailment |
def snapshots(self, xml_bytes):
"""Parse the XML returned by the C{DescribeSnapshots} function.
@param xml_bytes: XML bytes with a C{DescribeSnapshotsResponse} root
element.
@return: A list of L{Snapshot} instances.
TODO: ownersSet, restorableBySet, ownerId, volumeSize, description,
ownerAlias.
"""
root = XML(xml_bytes)
result = []
for snapshot_data in root.find("snapshotSet"):
snapshot_id = snapshot_data.findtext("snapshotId")
volume_id = snapshot_data.findtext("volumeId")
status = snapshot_data.findtext("status")
start_time = snapshot_data.findtext("startTime")
start_time = datetime.strptime(
start_time[:19], "%Y-%m-%dT%H:%M:%S")
progress = snapshot_data.findtext("progress")[:-1]
progress = float(progress or "0") / 100.
snapshot = model.Snapshot(
snapshot_id, volume_id, status, start_time, progress)
result.append(snapshot)
return result | Parse the XML returned by the C{DescribeSnapshots} function.
@param xml_bytes: XML bytes with a C{DescribeSnapshotsResponse} root
element.
@return: A list of L{Snapshot} instances.
TODO: ownersSet, restorableBySet, ownerId, volumeSize, description,
ownerAlias. | entailment |
def create_snapshot(self, xml_bytes):
"""Parse the XML returned by the C{CreateSnapshot} function.
@param xml_bytes: XML bytes with a C{CreateSnapshotResponse} root
element.
@return: The L{Snapshot} instance created.
TODO: ownerId, volumeSize, description.
"""
root = XML(xml_bytes)
snapshot_id = root.findtext("snapshotId")
volume_id = root.findtext("volumeId")
status = root.findtext("status")
start_time = root.findtext("startTime")
start_time = datetime.strptime(
start_time[:19], "%Y-%m-%dT%H:%M:%S")
progress = root.findtext("progress")[:-1]
progress = float(progress or "0") / 100.
return model.Snapshot(
snapshot_id, volume_id, status, start_time, progress) | Parse the XML returned by the C{CreateSnapshot} function.
@param xml_bytes: XML bytes with a C{CreateSnapshotResponse} root
element.
@return: The L{Snapshot} instance created.
TODO: ownerId, volumeSize, description. | entailment |
def attach_volume(self, xml_bytes):
"""Parse the XML returned by the C{AttachVolume} function.
@param xml_bytes: XML bytes with a C{AttachVolumeResponse} root
element.
@return: a C{dict} with status and attach_time keys.
TODO: volumeId, instanceId, device
"""
root = XML(xml_bytes)
status = root.findtext("status")
attach_time = root.findtext("attachTime")
attach_time = datetime.strptime(
attach_time[:19], "%Y-%m-%dT%H:%M:%S")
return {"status": status, "attach_time": attach_time} | Parse the XML returned by the C{AttachVolume} function.
@param xml_bytes: XML bytes with a C{AttachVolumeResponse} root
element.
@return: a C{dict} with status and attach_time keys.
TODO: volumeId, instanceId, device | entailment |
def describe_keypairs(self, xml_bytes):
"""Parse the XML returned by the C{DescribeKeyPairs} function.
@param xml_bytes: XML bytes with a C{DescribeKeyPairsResponse} root
element.
@return: a C{list} of L{Keypair}.
"""
results = []
root = XML(xml_bytes)
keypairs = root.find("keySet")
if keypairs is None:
return results
for keypair_data in keypairs:
key_name = keypair_data.findtext("keyName")
key_fingerprint = keypair_data.findtext("keyFingerprint")
results.append(model.Keypair(key_name, key_fingerprint))
return results | Parse the XML returned by the C{DescribeKeyPairs} function.
@param xml_bytes: XML bytes with a C{DescribeKeyPairsResponse} root
element.
@return: a C{list} of L{Keypair}. | entailment |
def create_keypair(self, xml_bytes):
"""Parse the XML returned by the C{CreateKeyPair} function.
@param xml_bytes: XML bytes with a C{CreateKeyPairResponse} root
element.
@return: The L{Keypair} instance created.
"""
keypair_data = XML(xml_bytes)
key_name = keypair_data.findtext("keyName")
key_fingerprint = keypair_data.findtext("keyFingerprint")
key_material = keypair_data.findtext("keyMaterial")
return model.Keypair(key_name, key_fingerprint, key_material) | Parse the XML returned by the C{CreateKeyPair} function.
@param xml_bytes: XML bytes with a C{CreateKeyPairResponse} root
element.
@return: The L{Keypair} instance created. | entailment |
def describe_addresses(self, xml_bytes):
"""Parse the XML returned by the C{DescribeAddresses} function.
@param xml_bytes: XML bytes with a C{DescribeAddressesResponse} root
element.
@return: a C{list} of L{tuple} of (publicIp, instancId).
"""
results = []
root = XML(xml_bytes)
for address_data in root.find("addressesSet"):
address = address_data.findtext("publicIp")
instance_id = address_data.findtext("instanceId")
results.append((address, instance_id))
return results | Parse the XML returned by the C{DescribeAddresses} function.
@param xml_bytes: XML bytes with a C{DescribeAddressesResponse} root
element.
@return: a C{list} of L{tuple} of (publicIp, instancId). | entailment |
def describe_availability_zones(self, xml_bytes):
"""Parse the XML returned by the C{DescribeAvailibilityZones} function.
@param xml_bytes: XML bytes with a C{DescribeAvailibilityZonesResponse}
root element.
@return: a C{list} of L{AvailabilityZone}.
TODO: regionName, messageSet
"""
results = []
root = XML(xml_bytes)
for zone_data in root.find("availabilityZoneInfo"):
zone_name = zone_data.findtext("zoneName")
zone_state = zone_data.findtext("zoneState")
results.append(model.AvailabilityZone(zone_name, zone_state))
return results | Parse the XML returned by the C{DescribeAvailibilityZones} function.
@param xml_bytes: XML bytes with a C{DescribeAvailibilityZonesResponse}
root element.
@return: a C{list} of L{AvailabilityZone}.
TODO: regionName, messageSet | entailment |
def sign(self, hash_type="sha256"):
"""Sign this query using its built in credentials.
@param hash_type: if the SignatureVersion is 2, specify the type of
hash to use, either "sha1" or "sha256". It defaults to the latter.
This prepares it to be sent, and should be done as the last step before
submitting the query. Signing is done automatically - this is a public
method to facilitate testing.
"""
version = self.params["SignatureVersion"]
if version == "2":
self.params["SignatureMethod"] = "Hmac%s" % hash_type.upper()
self.params["Signature"] = self.signature.compute() | Sign this query using its built in credentials.
@param hash_type: if the SignatureVersion is 2, specify the type of
hash to use, either "sha1" or "sha256". It defaults to the latter.
This prepares it to be sent, and should be done as the last step before
submitting the query. Signing is done automatically - this is a public
method to facilitate testing. | entailment |
def submit(self):
"""Submit this query.
@return: A deferred from get_page
"""
self.sign()
url = self.endpoint.get_uri()
method = self.endpoint.method
params = self.signature.get_canonical_query_params()
headers = {}
kwargs = {"method": method}
if method == "POST":
headers["Content-Type"] = "application/x-www-form-urlencoded"
kwargs["postdata"] = params
else:
url += "?%s" % params
if self.endpoint.get_host() != self.endpoint.get_canonical_host():
headers["Host"] = self.endpoint.get_canonical_host()
if headers:
kwargs["headers"] = headers
if self.timeout:
kwargs["timeout"] = self.timeout
d = self.get_page(url, **kwargs)
return d.addErrback(ec2_error_wrapper) | Submit this query.
@return: A deferred from get_page | entailment |
def compute(self):
    """Compute and return the signature for the current parameters."""
    if "Signature" in self.params:
        raise RuntimeError("Existing signature in parameters")
    if self.signature_version is not None:
        version = self.signature_version
    else:
        version = self.params["SignatureVersion"]
    version = str(version)
    if version == "1":
        text = self.old_signing_text()
        hash_type = "sha1"
    elif version == "2":
        text = self.signing_text()
        if self.signature_method is not None:
            signature_method = self.signature_method
        else:
            signature_method = self.params["SignatureMethod"]
        # SignatureMethod looks like "HmacSHA256"; keep only the hash name.
        hash_type = signature_method[len("Hmac"):].lower()
    else:
        raise RuntimeError("Unsupported SignatureVersion: '%s'" % version)
    return self.creds.sign(text, hash_type)
def old_signing_text(self):
    """Return the text needed for signing using SignatureVersion 1.

    Parameters are concatenated as ``key`` + ``value`` pairs, ordered
    case-insensitively by key.
    """
    # A ``key`` function replaces the old ``cmp`` comparator: it is
    # faster (the key is computed once per element) and the ``cmp``
    # argument of sorted() no longer exists in Python 3.
    pairs = sorted(self.params.items(), key=lambda item: item[0].lower())
    return "".join("%s%s" % (key, value) for key, value in pairs)
def signing_text(self):
    """Return the text to be signed when signing the query."""
    # Canonical request: method, canonical host, path and canonical
    # query string, each on its own line.
    parts = (self.endpoint.method,
             self.endpoint.get_canonical_host(),
             self.endpoint.path,
             self.get_canonical_query_params())
    return "%s\n%s\n%s\n%s" % parts
def get_canonical_query_params(self):
    """Return the canonical query params (used in signing)."""
    return "&".join(
        "%s=%s" % (self.encode(key), self.encode(value))
        for key, value in self.sorted_params())
def encode(self, string):
    """Encode a string as per the canonicalisation encoding rules.

    See the AWS dev reference page 186 (2009-11-30 version).

    :return: the encoded string.
    """
    if isinstance(string, unicode):
        # Canonicalisation operates on UTF-8 bytes.
        string = string.encode("utf-8")
    # Per the AWS rules, "~" is the only character besides the
    # unreserved defaults that must not be percent-escaped.
    return quote(string, safe="~")
def from_xml(cls, xml_bytes):
    """Create an instance of this class from XML bytes.

    :param xml_bytes: C{str} bytes of XML to parse.
    :return: an instance of L{MultipartInitiationResponse}.
    """
    root = XML(xml_bytes)
    fields = [root.findtext(tag) for tag in ("Bucket", "Key", "UploadId")]
    return cls(*fields)
def pythag(a, b):
    """Compute c = (a^2 + b^2)^0.5 without destructive underflow or overflow.

    Solves the Pythagorean theorem a^2 + b^2 = c^2 by factoring the
    larger magnitude out of the square root, so the squared ratio never
    overflows or underflows.
    """
    big, small = abs(a), abs(b)
    if big < small:
        big, small = small, big
    if big == 0.0:
        # Both inputs are zero.
        return 0.0
    ratio = small / float(big)
    return big * sqrt(1.0 + ratio ** 2)
def _initialize_with_array(self, data, rowBased=True):
    """Set the matrix values from a two dimensional list."""
    if rowBased:
        # data is a list of rows; validate the dimensions first.
        if len(data) != self._rows:
            raise ValueError("Size of Matrix does not match")
        for row in data:
            if len(row) != self._columns:
                raise ValueError("Size of Matrix does not match")
        # Transpose the row-based input into the internal
        # column-based representation.
        self.matrix = [list(column) for column in zip(*data)]
    else:
        # data is already column-based; validate and deep copy it.
        if len(data) != self._columns:
            raise ValueError("Size of Matrix does not match")
        for col in data:
            if len(col) != self._rows:
                raise ValueError("Size of Matrix does not match")
        self.matrix = copy.deepcopy(data)
def from_timeseries(cls, timeSeries):
    """Create a new Matrix instance from a TimeSeries or
    MultiDimensionalTimeSeries.

    :param TimeSeries timeSeries: The TimeSeries used to create the
        new Matrix.
    :return: A Matrix with the values of the timeSeries.  Each row of
        the Matrix represents one entry of the timeSeries; the
        timestamp of an entry is ignored.
    :rtype: Matrix
    :raise: Raises a :py:exc:`ValueError` if the timeSeries is empty.
    """
    if isinstance(timeSeries, MultiDimensionalTimeSeries):
        width = timeSeries.dimension_count()
    else:
        width = 1
    # One value list per dimension; entry[0] is the timestamp and is
    # skipped.
    matrixData = [[] for _ in range(width)]
    for entry in timeSeries:
        for col, value in enumerate(entry[1:]):
            matrixData[col].append(value)
    if not matrixData[0]:
        raise ValueError("Cannot create Matrix from empty Timeseries")
    return Matrix.from_two_dim_array(len(matrixData), len(matrixData[0]), matrixData)
def from_two_dim_array(cls, cols, rows, twoDimArray):
    """Create a new Matrix instance from a two dimensional array.

    :param integer cols: The number of columns for the Matrix.
    :param integer rows: The number of rows for the Matrix.
    :param list twoDimArray: A two dimensional, column based array with
        the values of the matrix.
    :raise: Raises a :py:exc:`ValueError` if cols < 1, rows < 1, or the
        size of the array does not match the size of the Matrix.
    """
    newMatrix = Matrix(cols, rows, twoDimArray, rowBased=False, isOneDimArray=False)
    return newMatrix
def to_multi_dim_timeseries(self):
    """Return a MultiDimensionalTimeSeries with the values of :py:obj:`self`.

    The index of the row is used as the timestamp.

    :return: A new MultiDimensionalTimeSeries with the values of the
        Matrix.
    :rtype: MultiDimensionalTimeSeries
    """
    series = MultiDimensionalTimeSeries(dimensions=self.get_width())
    for row in range(self.get_height()):
        entry = [self.get_value(col, row) for col in range(self.get_width())]
        series.add_entry(row, entry)
    return series
def get_array(self, rowBased=True):
    """Return a two dimensional list with the values of :py:obj:`self`.

    :param boolean rowBased: True if list[i] should be the i'th row,
        False if list[i] should be the i'th column.
    :return: A list of rows (or columns) where each element is a list
        of the values in that row (or column).
    :rtype: list
    """
    if not rowBased:
        # Internal storage is already column based; hand out a copy so
        # callers cannot mutate the matrix behind our back.
        return copy.deepcopy(self.matrix)
    return [[self.get_value(col, row) for col in range(self._columns)]
            for row in range(self._rows)]
def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True):
    """Create a new Matrix instance from a one dimensional list.

    :note: This method is used to create a Matrix instance using cpython.
    :param integer rows: The height of the Matrix.
    :param integer columns: The width of the Matrix.
    :param matrix_list: A one dimensional list containing the values
        for the Matrix.  Depending on rowBased, either rows or columns
        are stored contiguously.
    :param rowBased: True if matrix_list concatenates rows, False if it
        concatenates columns.
    """
    return Matrix(columns, rows, matrix_list, rowBased)
def set_value(self, column, row, value):
    """Set the value of the Matrix at the specified column and row.

    :param integer column: The index of the column (starting at 0).
    :param integer row: The index of the row (starting at 0).
    :param numeric value: The new value at the given column/row.
    :raise: Raises an :py:exc:`IndexError` if the index is out of range.
    """
    # Internal storage is column based.
    target_column = self.matrix[column]
    target_column[row] = value
def invers(self):
    """Return the inverse matrix, if it can be calculated.

    :return: A new Matrix containing the inverse.
    :rtype: Matrix
    :raise: Raises a :py:exc:`ValueError` if the matrix is not
        invertible.
    :note: Only a squared matrix with a determinant != 0 can be
        inverted.
    :todo: Reduce amount of create and copy operations
    """
    if self._columns != self._rows:
        raise ValueError("A square matrix is needed")
    size = self._columns
    columns = self.get_array(False)
    # Append an identity matrix on the right so that Gauss-Jordan
    # elimination leaves the inverse there.
    for col in range(size):
        identityColumn = [0] * size
        identityColumn[col] = 1
        columns.append(identityColumn)
    extended = Matrix.from_two_dim_array(2 * size, self._rows, columns)
    result = extended.gauss_jordan()
    # Drop the identity matrix that now occupies the left half.
    # TODO Implement slicing directly for Matrix
    result.matrix = result.matrix[size:]
    result._columns = len(result.matrix)
    return result
def matrix_multiplication(self, matrix):
    """Multiply :py:obj:`self` with the given matrix and return the result.

    :param Matrix matrix: The matrix to multiply with.
    :return: A new Matrix with the result of the multiplication.
    :rtype: Matrix
    :note: Make sure that the matrices can be multiplied: the number of
        columns of this Matrix must match the number of rows of the
        given Matrix.  Use is_matrix_mult_possible(matrix) to test.
    """
    result = Matrix(matrix.get_width(), self.get_height())
    # Blockwise multiplication hack: every element may itself be a
    # (blocksize x blocksize) Matrix instead of a number, in which case
    # the accumulator has to be a zero block of the same size.
    blockwise = isinstance(self.get_array()[0][0], Matrix)
    if blockwise:
        blocksize = self.get_array()[0][0].get_width()
    for row in range(self._rows):
        for col in range(matrix.get_width()):
            if blockwise:
                accumulator = Matrix(blocksize, blocksize)
            else:
                accumulator = 0
            for k in range(matrix.get_height()):
                accumulator += self.get_value(k, row) * matrix.get_value(col, k)
            result.set_value(col, row, accumulator)
    return result
def matrix_multiplication_blockwise(self, matrix, blocksize):
    """Multiply two matrices blockwise.

    See http://en.wikipedia.org/wiki/Block_matrix#Block_matrix_multiplication

    :param Matrix matrix: The matrix to multiply with.
    :param integer blocksize: The edge length of the square blocks.
    :return: The flattened product of the two block matrices.
    """
    # Convert both operands to block form, multiply, then flatten back.
    leftBlocks = self.matrix_to_blockmatrix(blocksize)
    rightBlocks = matrix.matrix_to_blockmatrix(blocksize)
    return (leftBlocks * rightBlocks).flatten()
def flatten(self):
    """Flatten a block matrix into a single Matrix.

    If the current Matrix consists of block matrices as elements, merge
    them into one Matrix consisting only of the 2nd level elements::

        [[[1 2]  [[3 4]      [[1 2 3 4]
          [5 6]]  [7 8]]] ->  [5 6 7 8]]
    """
    blocksize = self.get_array()[0][0].get_width()
    width = self.get_width() * blocksize
    flatColumns = [[] for _ in range(width)]
    for blockRow in self.get_array():
        # Each block in the row contributes ``blocksize`` consecutive
        # columns; stacking over block rows extends them downwards.
        for blockIndex, submatrix in enumerate(blockRow):
            for offset, column in enumerate(submatrix.get_array(False)):
                flatColumns[blockIndex * blocksize + offset] += column
    height = len(flatColumns[0])
    return Matrix(width, height, sum(flatColumns, []), rowBased=False)
def matrix_to_blockmatrix(self, blocksize):
    """Turn an n*m Matrix into a (n/blocksize)*(m/blocksize) Matrix.

    Each element of the result is another blocksize*blocksize Matrix.

    :raise: Raises a :py:exc:`ValueError` if width or height is not
        evenly dividable by blocksize.
    """
    if self.get_width() % blocksize or self.get_height() % blocksize:
        raise ValueError("Number of rows and columns have to be evenly dividable by blocksize")
    columns = self.get_array(False)
    blocks = []
    for colStart in range(0, self.get_width() - 1, blocksize):
        for rowStart in range(0, self.get_height() - 1, blocksize):
            blockData = []
            for column in columns[colStart:colStart + blocksize]:
                blockData += column[rowStart:rowStart + blocksize]
            blocks.append(Matrix(blocksize, blocksize, blockData, rowBased=False))
    # Integer division: divisibility was checked above.
    return Matrix(self.get_width() // blocksize, self.get_height() // blocksize,
                  blocks, rowBased=False)
def multiply(self, multiplicator):
    """Return a new Matrix scaled by the given factor.

    :param Number multiplicator: The scalar to multiply every entry
        with.
    :return: A new Matrix containing the scaled values.
    :rtype: Matrix
    """
    scaled = Matrix(self.get_width(), self.get_height())
    for col in range(self.get_width()):
        for row in range(self.get_height()):
            scaled.set_value(col, row, self.get_value(col, row) * multiplicator)
    return scaled
def transform(self):
    """Return a new transposed matrix.

    :return: A new Matrix with rows and columns swapped.
    :rtype: Matrix
    """
    transposed = Matrix(self._rows, self._columns)
    for colIndex, column in enumerate(self.matrix):
        for rowIndex, value in enumerate(column):
            transposed.set_value(rowIndex, colIndex, value)
    return transposed
def gauss_jordan(self):
    """Reduce :py:obj:`self` to row echelon form.

    :return: Returns :py:obj:`self` in row echelon form for convenience.
    :rtype: Matrix
    :raise: Raises an :py:exc:`ValueError` if:
        - the matrix has fewer columns than rows + 1 (an augmented
          matrix wider than tall is required)
        - the matrix is not invertible
        In this case :py:obj:`self` is not changed.
    """
    # Work on a column-based copy; ``self.matrix`` is only replaced at
    # the very end, so a raised ValueError leaves ``self`` unchanged.
    mArray = self.get_array(rowBased=False)
    width = self.get_width()
    height = self.get_height()
    if not height < width:
        raise ValueError("""Not enough rows""")
    # Start with complete matrix and remove in each iteration
    # the first row and the first column
    for offset in xrange(height):
        # Switch lines, if current pivot value is 0: search below for a
        # row with a non-zero entry in the pivot column and swap.
        if mArray[offset][offset] == 0:
            for i in xrange(offset + 1, height):
                if mArray[offset][i] != 0:
                    tmp = []
                    for j in xrange(offset, width):
                        tmp.append(mArray[j][offset])
                    for j in xrange(offset, width):
                        mArray[j][offset] = mArray[j][i]
                        # BUG FIX: tmp starts at column ``offset``, so it
                        # must be indexed relative to it.  The previous
                        # ``tmp[j]`` read the wrong element (or raised an
                        # IndexError) for any swap with offset > 0.
                        mArray[j][i] = tmp[j - offset]
                    break
        currentRow = [mArray[j][offset] for j in xrange(offset, width)]
        devider = float(currentRow[0])
        # If no row with a value != 0 was found, the matrix is not
        # invertible.
        if devider == 0:
            raise ValueError("Matrix is not invertible")
        # Divide the current row by its first (pivot) element.
        transformedRow = []
        for value in currentRow:
            transformedRow.append(value / devider)
        # Put the transformed row back into the matrix.
        for j in xrange(offset, width):
            mArray[j][offset] = transformedRow[j - offset]
        # Subtract multiples of the current row from all remaining rows
        # so the pivot column becomes 0 below the diagonal.
        for i in xrange(offset + 1, height):
            multi = mArray[offset][i]
            for j in xrange(offset, width):
                mArray[j][i] = mArray[j][i] - mArray[j][offset] * multi
    # Back substitution: eliminate the entries above the diagonal.
    for i in xrange(1, height):
        # Subtract multiples of the i'th row from all rows above it.
        for j in xrange(0, i):
            multi = mArray[i][j]
            for col in xrange(i, width):
                mArray[col][j] = mArray[col][j] - mArray[col][i] * multi
    self.matrix = mArray
    return self
def householder(self):
    """Return Matrices u,b,v with self = ubv and b is in bidiagonal form
    The algorithm uses householder transformations.
    :return tuple (u,b,v): A tuple with the Matrix u, b and v.
    and self = ubv (except some rounding errors)
    u is a unitary matrix
    b is a bidiagonal matrix.
    v is a unitary matrix.
    :note: Currently the algorithm only works for squared matrices
    :todo: Make sure, that the bidiagonal matrix is 0.0 except for the bidiagonal.
    Due to rounding errors, this is currently not ensured
    """
    # NOTE(review): ``u`` and ``v`` are first bound in the k == 0
    # iteration; a matrix with width < 2 would leave them unbound at
    # the final return -- confirm callers never pass such matrices.
    # copy instance to transform it to bidiagonal form.
    bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix)
    # build identity matrix, which is used to calculate householder transformations
    identityMatrixRow = Matrix(self.get_height(), self.get_height())
    for i in xrange(self.get_height()):
        identityMatrixRow.set_value(i, i, 1.0)
    identityMatrixCol = Matrix(self.get_width(), self.get_width())
    for i in xrange(self.get_width()):
        identityMatrixCol.set_value(i, i, 1.0)
    # zero out the k'th column and row
    for k in xrange(self.get_width() - 1):
        # vector with the values of the k'th column (first k-1 rows are 0)
        x = Vector(self.get_height())
        y = Vector(self.get_height())
        if k > 0:
            x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
            y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
        s = 0.0
        for i in xrange(k, self.get_height()):
            val = bidiagMatrix.get_value(k, i)
            x.set_value(0, i, val)
            s += (val ** 2)
        # s is now the length of the column part below (and including)
        # row k.
        s = sqrt(s)
        # y must have same length as x
        y.set_value(0, k, s)
        tmp = x - y
        norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
        # calculate w = (x-y)/(|x-y|)
        w = tmp / norm
        # uk is the k'th householder matrix for the column
        uk = identityMatrixRow - 2 * (w * w.transform())
        bidiagMatrix = uk * bidiagMatrix
        if k == 0:
            # set u in first iteration.
            u = uk
        else:
            u = u * uk
        # zero out the the row
        if k < self.get_width() - 2:
            x = Vector(self.get_width())
            y = Vector(self.get_width())
            x.set_value(0, k, bidiagMatrix.get_value(k, k))
            y.set_value(0, k, bidiagMatrix.get_value(k, k))
            s = 0.0
            for i in xrange(k + 1, bidiagMatrix.get_width()):
                val = bidiagMatrix.get_value(i, k)
                x.set_value(0, i, val)
                s += (val ** 2)
            # length of vector x ignoring the k'th value
            s = sqrt(s)
            # y must have same length as x, since k'th value is equal
            # set k+1 value to s
            y.set_value(0, k + 1, s)
            tmp = x - y
            norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
            w = tmp / norm
            # vk is the k'th householder matrix for the row
            vk = identityMatrixCol - (2 * (w * w.transform()))
            bidiagMatrix = bidiagMatrix * vk
            if k == 0:
                # set v in first iteration
                v = vk
            else:
                v = vk * v
    return (u, bidiagMatrix, v)
def svd(self, maxIteration=50):
    """Return the singular value decomposition of the Matrix instance
    :param integer maxIteration: The maximmum number of iterations,
    which are executed in the qr decomposition
    :return: A tuple with Matrices u, sigma, v with
    so that u * sigma * v^T = self
    :rtype: tuple
    :raise: Raises a :py:exc:`ValueError` if the Matrix object has
    more columns than rows
    :note: Translation of the FORTRAN implementation if the SVD given
    in the NUMERICAL RECIPES IN FORTRAN 77. THE ART OF SCIENTIFIC
    COMPUTING.
    The algorithm is not yet numerical stable, so the results may
    not be in all cases as expected.
    """
    if(self.get_width() > self.get_height()):
        raise ValueError("Matrix has more columns than rows.")
    eps = 1.e-15
    tol = 1.e-64 / eps
    # a is the column-based working copy; it is overwritten in place
    # and finally becomes the matrix u.
    a = self.get_array(False)
    m = len(a[0])
    n = len(a)
    v = []
    for k in xrange(n):
        v.append([0.0] * n)
    # output diagonal
    w = [0.0] * n
    # upper diagonal (for bidiagonal form)
    rv1 = [0.0] * n
    # Householder Reduction to bidiagional form
    g = 0.0
    anorm = 0.0
    for i in xrange(n):
        l = i + 1
        rv1[i] = g
        s = 0.0
        # calculate length of relevant row vector in matrix (part of i'th column)
        s = sum(a[i][k] ** 2 for k in xrange(i, m))
        if s <= tol:
            g = 0.0
        else:
            f = a[i][i]
            # square root to get actual length of vector
            g = sqrt(s) if f < 0 else -sqrt(s)
            h = f * g - s
            a[i][i] = f - g
            for j in xrange(l, n):
                s = sum(a[i][k] * a[j][k] for k in xrange(i, m))
                f = s / h
                for k in xrange(i, m):
                    a[j][k] += (f * a[i][k])
        w[i] = g
        # calculate length of relevant column vector in matrix (part of i'th row)
        s = 0.0
        s = sum(a[k][i] ** 2 for k in xrange(l, n))
        if s <= tol:
            g = 0.0
        else:
            f = a[l][i]
            g = sqrt(s) if f < 0 else -sqrt(s)
            h = f * g - s
            a[l][i] = f - g
            for k in xrange(l, n):
                rv1[k] = a[k][i] / h
            for j in xrange(l, m):
                s = sum(a[k][j] * a[k][i] for k in xrange(l, n))
                for k in xrange(l, n):
                    a[k][j] += (s * rv1[k])
        anorm = max(anorm, (abs(w[i]) + abs(rv1[i])))
    # Accumulation of right hand transformations
    for i in xrange(n - 1, -1, -1):
        if g != 0.0:
            for j in xrange(l, n):
                v[i][j] = a[j][i] / (g * a[i + 1][i])
            for j in xrange(l, n):
                s = sum(a[k][i] * v[j][k] for k in xrange(l, n))
                for k in xrange(l, n):
                    v[j][k] += (s * v[i][k])
        for j in xrange(l, n):
            v[j][i] = 0.0
            v[i][j] = 0.0
        v[i][i] = 1.0
        g = rv1[i]
        l = i
    # Accumulation of left hand transformations
    for i in xrange(n - 1, -1, -1):
        l = i + 1
        g = w[i]
        for j in xrange(l, n):
            a[j][i] = 0.0
        if g != 0.0:
            for j in xrange(l, n):
                s = sum(a[i][k] * a[j][k] for k in xrange(l, m))
                f = s / (a[i][i] * g)
                for k in xrange(i, m):
                    a[j][k] += f * a[i][k]
            for j in xrange(i, m):
                a[i][j] /= g
        else:
            for j in xrange(i, m):
                a[i][j] = 0.0
        a[i][i] += 1.0
    eps *= anorm
    # Diagonalization of the bidiagonal form.
    # Loop over singular values and over allowed iterations
    for k in xrange(n - 1, -1, -1):
        for dummy in xrange(maxIteration):
            # Test for splitting: find the smallest l for which the
            # subproblem rv1[l..k] can be handled in isolation.
            for l in xrange(k, -1, -1):
                convergenceTest = False
                if abs(rv1[l]) <= eps:
                    convergenceTest = True
                    break
                if abs(w[l - 1]) <= eps:
                    # convergenceTest = False (already default)
                    break
            if not convergenceTest:
                # Cancellation of rv1[l] if l > 0.
                c = 0.0
                s = 1.0
                nm = l - 1
                for i in xrange(l, k + 1):
                    f = s * rv1[i]
                    rv1[i] = c * rv1[i]
                    if abs(f) <= eps:
                        break
                    g = w[i]
                    h = pythag(f, g)
                    w[i] = h
                    c = g / h
                    s = -f / h
                    for j in xrange(m):
                        y = a[nm][j]
                        z = a[i][j]
                        a[nm][j] = (y * c) + (z * s)
                        a[i][j] = -(y * s) + (z * c)
            z = w[k]
            if l == k:
                # convergence
                if z < 0.0:
                    # Singular values are made non-negative by flipping
                    # the sign of the corresponding column of v.
                    w[k] = -z
                    for j in xrange(n):
                        v[k][j] = -v[k][j]
                break
            # Shift from bottom 2x2 minor.
            x = w[l]
            y = w[k - 1]
            g = rv1[k - 1]
            h = rv1[k]
            f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y)
            g = pythag(f, 1.0)
            # NOTE(review): ``sign`` here is the FORTRAN-style SIGN(a, b)
            # helper defined elsewhere in this module, not Query.sign.
            f = ((x - z) * (x + z) + h * ((y / (f + sign(g, f))) - h)) / x
            # Next QR transformation (Givens rotations).
            c = 1.0
            s = 1.0
            for i in xrange(l + 1, k + 1):
                g = rv1[i]
                y = w[i]
                h = s * g
                g = c * g
                z = pythag(f, h)
                rv1[i - 1] = z
                c = f / z
                s = h / z
                f = (x * c) + (g * s)
                g = -x * s + g * c
                h = y * s
                y = y * c
                for jj in xrange(n):
                    x = v[i - 1][jj]
                    z = v[i][jj]
                    v[i - 1][jj] = (x * c) + (z * s)
                    v[i][jj] = -(x * s) + (z * c)
                z = pythag(f, h)
                w[i - 1] = z
                if z != 0.0:
                    z = 1.0 / z
                    c = f * z
                    s = h * z
                f = (c * g) + (s * y)
                x = -s * g + c * y
                for jj in xrange(m):
                    y = a[i - 1][jj]
                    z = a[i][jj]
                    a[i - 1][jj] = (y * c) + (z * s)
                    a[i][jj] = -(y * s) + (z * c)
            rv1[l] = 0.0
            rv1[k] = f
            w[k] = x
    # Build Matrix instances for the result
    uM = Matrix.from_two_dim_array(len(a), len(a[0]), a)
    diagMatrix = Matrix(len(w), len(w))
    for i in xrange(len(w)):
        diagMatrix.set_value(i, i, w[i])
    vM = Matrix.from_two_dim_array(len(v), len(v[0]), v)
    return uM, diagMatrix, vM
def pseudoinverse(self):
    """Return the pseudoinverse (Moore-Penrose-Inverse).

    The singular value decomposition is used to calculate the
    pseudoinverse.
    """
    # svd() requires at least as many rows as columns, so work on the
    # transpose for wide matrices and transpose the result back.
    wide = self.get_width() > self.get_height()
    source = self.transform() if wide else self
    u, sigma, v = source.svd()
    # Invert sigma by replacing every non-zero diagonal entry with its
    # reciprocal; values within eps of zero (rounding noise) stay as is.
    eps = 1.e-15
    for i in range(min(sigma.get_height(), sigma.get_width())):
        value = sigma.get_value(i, i)
        if value > eps or value < -eps:
            sigma.set_value(i, i, 1 / value)
    pseudo = v * sigma * u.transform()
    return pseudo.transform() if wide else pseudo
def initialize_from_matrix(cls, matrix, column):
    """Create a Vector from one column of a Matrix.

    :param Matrix matrix: The Matrix used to create the vector.
    :param integer column: The column of the matrix that should become
        the new vector.
    :raise: Raises an :py:exc:`IndexError` if the matrix does not have
        the specified column.
    """
    vector = Vector(matrix.get_height())
    for row in range(matrix.get_height()):
        vector.set_value(0, row, matrix.get_value(column, row))
    return vector
def unify(self):
    """Normalise the vector in place so its length becomes 1.

    :return: The instance itself, for convenience.
    :rtype: Vector
    """
    length = float(self.norm())
    for row in range(self.get_height()):
        self.set_value(0, row, self.get_value(0, row) / length)
    return self
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.