# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['GroupArgs', 'Group']
@pulumi.input_type
class GroupArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
assignable_to_role: Optional[pulumi.Input[bool]] = None,
behaviors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
dynamic_membership: Optional[pulumi.Input['GroupDynamicMembershipArgs']] = None,
mail_enabled: Optional[pulumi.Input[bool]] = None,
mail_nickname: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
prevent_duplicate_names: Optional[pulumi.Input[bool]] = None,
provisioning_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
security_enabled: Optional[pulumi.Input[bool]] = None,
theme: Optional[pulumi.Input[str]] = None,
types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
visibility: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Group resource.
:param pulumi.Input[str] display_name: The display name for the group.
:param pulumi.Input[bool] assignable_to_role: Indicates whether this group can be assigned to an Azure Active Directory role. Can only be `true` for security-enabled groups. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] behaviors: A set of behaviors for a Microsoft 365 group. Possible values are `AllowOnlyMembersToPost`, `HideGroupInOutlook`, `SubscribeNewGroupMembers` and `WelcomeEmailDisabled`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for more details. Changing this forces a new resource to be created.
:param pulumi.Input[str] description: The description for the group.
:param pulumi.Input['GroupDynamicMembershipArgs'] dynamic_membership: A `dynamic_membership` block as documented below. Required when `types` contains `DynamicMembership`. Cannot be used with the `members` property.
:param pulumi.Input[bool] mail_enabled: Whether the group is mail-enabled, with a shared group mailbox. At least one of `mail_enabled` or `security_enabled` must be specified. Only Microsoft 365 groups can be mail-enabled (see the `types` property).
:param pulumi.Input[str] mail_nickname: The mail alias for the group, unique in the organisation. Required for mail-enabled groups. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: A set of members who should be present in this group. Supported object types are Users, Groups or Service Principals. Cannot be used with the `dynamic_membership` block.
:param pulumi.Input[Sequence[pulumi.Input[str]]] owners: A set of owners who own this group. Supported object types are Users or Service Principals
:param pulumi.Input[bool] prevent_duplicate_names: If `true`, will return an error if an existing group is found with the same name. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] provisioning_options: A set of provisioning options for a Microsoft 365 group. The only supported value is `Team`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for details. Changing this forces a new resource to be created.
:param pulumi.Input[bool] security_enabled: Whether the group is a security group for controlling access to in-app resources. At least one of `security_enabled` or `mail_enabled` must be specified. A Microsoft 365 group can be security enabled _and_ mail enabled (see the `types` property).
:param pulumi.Input[str] theme: The colour theme for a Microsoft 365 group. Possible values are `Blue`, `Green`, `Orange`, `Pink`, `Purple`, `Red` or `Teal`. By default, no theme is set.
:param pulumi.Input[Sequence[pulumi.Input[str]]] types: A set of group types to configure for the group. Supported values are `DynamicMembership`, which denotes a group with dynamic membership, and `Unified`, which specifies a Microsoft 365 group. Required when `mail_enabled` is true. Changing this forces a new resource to be created.
:param pulumi.Input[str] visibility: The group join policy and group content visibility. Possible values are `Private`, `Public`, or `Hiddenmembership`. Only Microsoft 365 groups can have `Hiddenmembership` visibility and this value must be set when the group is created. By default, security groups will receive `Private` visibility and Microsoft 365 groups will receive `Public` visibility.
"""
pulumi.set(__self__, "display_name", display_name)
if assignable_to_role is not None:
pulumi.set(__self__, "assignable_to_role", assignable_to_role)
if behaviors is not None:
pulumi.set(__self__, "behaviors", behaviors)
if description is not None:
pulumi.set(__self__, "description", description)
if dynamic_membership is not None:
pulumi.set(__self__, "dynamic_membership", dynamic_membership)
if mail_enabled is not None:
pulumi.set(__self__, "mail_enabled", mail_enabled)
if mail_nickname is not None:
pulumi.set(__self__, "mail_nickname", mail_nickname)
if members is not None:
pulumi.set(__self__, "members", members)
if owners is not None:
pulumi.set(__self__, "owners", owners)
if prevent_duplicate_names is not None:
pulumi.set(__self__, "prevent_duplicate_names", prevent_duplicate_names)
if provisioning_options is not None:
pulumi.set(__self__, "provisioning_options", provisioning_options)
if security_enabled is not None:
pulumi.set(__self__, "security_enabled", security_enabled)
if theme is not None:
pulumi.set(__self__, "theme", theme)
if types is not None:
pulumi.set(__self__, "types", types)
if visibility is not None:
pulumi.set(__self__, "visibility", visibility)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
The display name for the group.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="assignableToRole")
def assignable_to_role(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether this group can be assigned to an Azure Active Directory role. Can only be `true` for security-enabled groups. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "assignable_to_role")
@assignable_to_role.setter
def assignable_to_role(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "assignable_to_role", value)
@property
@pulumi.getter
def behaviors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of behaviors for a Microsoft 365 group. Possible values are `AllowOnlyMembersToPost`, `HideGroupInOutlook`, `SubscribeNewGroupMembers` and `WelcomeEmailDisabled`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for more details. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "behaviors")
@behaviors.setter
def behaviors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "behaviors", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description for the group.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="dynamicMembership")
def dynamic_membership(self) -> Optional[pulumi.Input['GroupDynamicMembershipArgs']]:
"""
A `dynamic_membership` block as documented below. Required when `types` contains `DynamicMembership`. Cannot be used with the `members` property.
"""
return pulumi.get(self, "dynamic_membership")
@dynamic_membership.setter
def dynamic_membership(self, value: Optional[pulumi.Input['GroupDynamicMembershipArgs']]):
pulumi.set(self, "dynamic_membership", value)
@property
@pulumi.getter(name="mailEnabled")
def mail_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the group is mail-enabled, with a shared group mailbox. At least one of `mail_enabled` or `security_enabled` must be specified. Only Microsoft 365 groups can be mail-enabled (see the `types` property).
"""
return pulumi.get(self, "mail_enabled")
@mail_enabled.setter
def mail_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "mail_enabled", value)
@property
@pulumi.getter(name="mailNickname")
def mail_nickname(self) -> Optional[pulumi.Input[str]]:
"""
The mail alias for the group, unique in the organisation. Required for mail-enabled groups. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "mail_nickname")
@mail_nickname.setter
def mail_nickname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mail_nickname", value)
@property
@pulumi.getter
def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of members who should be present in this group. Supported object types are Users, Groups or Service Principals. Cannot be used with the `dynamic_membership` block.
"""
return pulumi.get(self, "members")
@members.setter
def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter
def owners(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of owners who own this group. Supported object types are Users or Service Principals
"""
return pulumi.get(self, "owners")
@owners.setter
def owners(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "owners", value)
@property
@pulumi.getter(name="preventDuplicateNames")
def prevent_duplicate_names(self) -> Optional[pulumi.Input[bool]]:
"""
If `true`, will return an error if an existing group is found with the same name. Defaults to `false`.
"""
return pulumi.get(self, "prevent_duplicate_names")
@prevent_duplicate_names.setter
def prevent_duplicate_names(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "prevent_duplicate_names", value)
@property
@pulumi.getter(name="provisioningOptions")
def provisioning_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of provisioning options for a Microsoft 365 group. The only supported value is `Team`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for details. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "provisioning_options")
@provisioning_options.setter
def provisioning_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "provisioning_options", value)
@property
@pulumi.getter(name="securityEnabled")
def security_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the group is a security group for controlling access to in-app resources. At least one of `security_enabled` or `mail_enabled` must be specified. A Microsoft 365 group can be security enabled _and_ mail enabled (see the `types` property).
"""
return pulumi.get(self, "security_enabled")
@security_enabled.setter
def security_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "security_enabled", value)
@property
@pulumi.getter
def theme(self) -> Optional[pulumi.Input[str]]:
"""
The colour theme for a Microsoft 365 group. Possible values are `Blue`, `Green`, `Orange`, `Pink`, `Purple`, `Red` or `Teal`. By default, no theme is set.
"""
return pulumi.get(self, "theme")
@theme.setter
def theme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "theme", value)
@property
@pulumi.getter
def types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of group types to configure for the group. Supported values are `DynamicMembership`, which denotes a group with dynamic membership, and `Unified`, which specifies a Microsoft 365 group. Required when `mail_enabled` is true. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "types")
@types.setter
def types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "types", value)
@property
@pulumi.getter
def visibility(self) -> Optional[pulumi.Input[str]]:
"""
The group join policy and group content visibility. Possible values are `Private`, `Public`, or `Hiddenmembership`. Only Microsoft 365 groups can have `Hiddenmembership` visibility and this value must be set when the group is created. By default, security groups will receive `Private` visibility and Microsoft 365 groups will receive `Public` visibility.
"""
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "visibility", value)
@pulumi.input_type
class _GroupState:
def __init__(__self__, *,
assignable_to_role: Optional[pulumi.Input[bool]] = None,
behaviors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
dynamic_membership: Optional[pulumi.Input['GroupDynamicMembershipArgs']] = None,
mail: Optional[pulumi.Input[str]] = None,
mail_enabled: Optional[pulumi.Input[bool]] = None,
mail_nickname: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
object_id: Optional[pulumi.Input[str]] = None,
onpremises_domain_name: Optional[pulumi.Input[str]] = None,
onpremises_netbios_name: Optional[pulumi.Input[str]] = None,
onpremises_sam_account_name: Optional[pulumi.Input[str]] = None,
onpremises_security_identifier: Optional[pulumi.Input[str]] = None,
onpremises_sync_enabled: Optional[pulumi.Input[bool]] = None,
owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
preferred_language: Optional[pulumi.Input[str]] = None,
prevent_duplicate_names: Optional[pulumi.Input[bool]] = None,
provisioning_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
proxy_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
security_enabled: Optional[pulumi.Input[bool]] = None,
theme: Optional[pulumi.Input[str]] = None,
types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
visibility: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Group resources.
:param pulumi.Input[bool] assignable_to_role: Indicates whether this group can be assigned to an Azure Active Directory role. Can only be `true` for security-enabled groups. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] behaviors: A set of behaviors for a Microsoft 365 group. Possible values are `AllowOnlyMembersToPost`, `HideGroupInOutlook`, `SubscribeNewGroupMembers` and `WelcomeEmailDisabled`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for more details. Changing this forces a new resource to be created.
:param pulumi.Input[str] description: The description for the group.
:param pulumi.Input[str] display_name: The display name for the group.
:param pulumi.Input['GroupDynamicMembershipArgs'] dynamic_membership: A `dynamic_membership` block as documented below. Required when `types` contains `DynamicMembership`. Cannot be used with the `members` property.
:param pulumi.Input[str] mail: The SMTP address for the group.
:param pulumi.Input[bool] mail_enabled: Whether the group is mail-enabled, with a shared group mailbox. At least one of `mail_enabled` or `security_enabled` must be specified. Only Microsoft 365 groups can be mail-enabled (see the `types` property).
:param pulumi.Input[str] mail_nickname: The mail alias for the group, unique in the organisation. Required for mail-enabled groups. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: A set of members who should be present in this group. Supported object types are Users, Groups or Service Principals. Cannot be used with the `dynamic_membership` block.
:param pulumi.Input[str] object_id: The object ID of the group.
:param pulumi.Input[str] onpremises_domain_name: The on-premises FQDN, also called dnsDomainName, synchronised from the on-premises directory when Azure AD Connect is used.
:param pulumi.Input[str] onpremises_netbios_name: The on-premises NetBIOS name, synchronised from the on-premises directory when Azure AD Connect is used.
:param pulumi.Input[str] onpremises_sam_account_name: The on-premises SAM account name, synchronised from the on-premises directory when Azure AD Connect is used.
:param pulumi.Input[str] onpremises_security_identifier: The on-premises security identifier (SID), synchronised from the on-premises directory when Azure AD Connect is used.
:param pulumi.Input[bool] onpremises_sync_enabled: Whether this group is synchronised from an on-premises directory (`true`), no longer synchronised (`false`), or has never been synchronised (`null`).
:param pulumi.Input[Sequence[pulumi.Input[str]]] owners: A set of owners who own this group. Supported object types are Users or Service Principals
:param pulumi.Input[str] preferred_language: The preferred language for a Microsoft 365 group, in ISO 639-1 notation.
:param pulumi.Input[bool] prevent_duplicate_names: If `true`, will return an error if an existing group is found with the same name. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] provisioning_options: A set of provisioning options for a Microsoft 365 group. The only supported value is `Team`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for details. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] proxy_addresses: List of email addresses for the group that direct to the same group mailbox.
:param pulumi.Input[bool] security_enabled: Whether the group is a security group for controlling access to in-app resources. At least one of `security_enabled` or `mail_enabled` must be specified. A Microsoft 365 group can be security enabled _and_ mail enabled (see the `types` property).
:param pulumi.Input[str] theme: The colour theme for a Microsoft 365 group. Possible values are `Blue`, `Green`, `Orange`, `Pink`, `Purple`, `Red` or `Teal`. By default, no theme is set.
:param pulumi.Input[Sequence[pulumi.Input[str]]] types: A set of group types to configure for the group. Supported values are `DynamicMembership`, which denotes a group with dynamic membership, and `Unified`, which specifies a Microsoft 365 group. Required when `mail_enabled` is true. Changing this forces a new resource to be created.
:param pulumi.Input[str] visibility: The group join policy and group content visibility. Possible values are `Private`, `Public`, or `Hiddenmembership`. Only Microsoft 365 groups can have `Hiddenmembership` visibility and this value must be set when the group is created. By default, security groups will receive `Private` visibility and Microsoft 365 groups will receive `Public` visibility.
"""
if assignable_to_role is not None:
pulumi.set(__self__, "assignable_to_role", assignable_to_role)
if behaviors is not None:
pulumi.set(__self__, "behaviors", behaviors)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if dynamic_membership is not None:
pulumi.set(__self__, "dynamic_membership", dynamic_membership)
if mail is not None:
pulumi.set(__self__, "mail", mail)
if mail_enabled is not None:
pulumi.set(__self__, "mail_enabled", mail_enabled)
if mail_nickname is not None:
pulumi.set(__self__, "mail_nickname", mail_nickname)
if members is not None:
pulumi.set(__self__, "members", members)
if object_id is not None:
pulumi.set(__self__, "object_id", object_id)
if onpremises_domain_name is not None:
pulumi.set(__self__, "onpremises_domain_name", onpremises_domain_name)
if onpremises_netbios_name is not None:
pulumi.set(__self__, "onpremises_netbios_name", onpremises_netbios_name)
if onpremises_sam_account_name is not None:
pulumi.set(__self__, "onpremises_sam_account_name", onpremises_sam_account_name)
if onpremises_security_identifier is not None:
pulumi.set(__self__, "onpremises_security_identifier", onpremises_security_identifier)
if onpremises_sync_enabled is not None:
pulumi.set(__self__, "onpremises_sync_enabled", onpremises_sync_enabled)
if owners is not None:
pulumi.set(__self__, "owners", owners)
if preferred_language is not None:
pulumi.set(__self__, "preferred_language", preferred_language)
if prevent_duplicate_names is not None:
pulumi.set(__self__, "prevent_duplicate_names", prevent_duplicate_names)
if provisioning_options is not None:
pulumi.set(__self__, "provisioning_options", provisioning_options)
if proxy_addresses is not None:
pulumi.set(__self__, "proxy_addresses", proxy_addresses)
if security_enabled is not None:
pulumi.set(__self__, "security_enabled", security_enabled)
if theme is not None:
pulumi.set(__self__, "theme", theme)
if types is not None:
pulumi.set(__self__, "types", types)
if visibility is not None:
pulumi.set(__self__, "visibility", visibility)
@property
@pulumi.getter(name="assignableToRole")
def assignable_to_role(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether this group can be assigned to an Azure Active Directory role. Can only be `true` for security-enabled groups. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "assignable_to_role")
@assignable_to_role.setter
def assignable_to_role(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "assignable_to_role", value)
@property
@pulumi.getter
def behaviors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of behaviors for a Microsoft 365 group. Possible values are `AllowOnlyMembersToPost`, `HideGroupInOutlook`, `SubscribeNewGroupMembers` and `WelcomeEmailDisabled`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for more details. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "behaviors")
@behaviors.setter
def behaviors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "behaviors", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description for the group.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name for the group.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="dynamicMembership")
def dynamic_membership(self) -> Optional[pulumi.Input['GroupDynamicMembershipArgs']]:
"""
A `dynamic_membership` block as documented below. Required when `types` contains `DynamicMembership`. Cannot be used with the `members` property.
"""
return pulumi.get(self, "dynamic_membership")
@dynamic_membership.setter
def dynamic_membership(self, value: Optional[pulumi.Input['GroupDynamicMembershipArgs']]):
pulumi.set(self, "dynamic_membership", value)
@property
@pulumi.getter
def mail(self) -> Optional[pulumi.Input[str]]:
"""
The SMTP address for the group.
"""
return pulumi.get(self, "mail")
@mail.setter
def mail(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mail", value)
@property
@pulumi.getter(name="mailEnabled")
def mail_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the group is mail-enabled, with a shared group mailbox. At least one of `mail_enabled` or `security_enabled` must be specified. Only Microsoft 365 groups can be mail-enabled (see the `types` property).
"""
return pulumi.get(self, "mail_enabled")
@mail_enabled.setter
def mail_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "mail_enabled", value)
@property
@pulumi.getter(name="mailNickname")
def mail_nickname(self) -> Optional[pulumi.Input[str]]:
"""
The mail alias for the group, unique in the organisation. Required for mail-enabled groups. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "mail_nickname")
@mail_nickname.setter
def mail_nickname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mail_nickname", value)
@property
@pulumi.getter
def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of members who should be present in this group. Supported object types are Users, Groups or Service Principals. Cannot be used with the `dynamic_membership` block.
"""
return pulumi.get(self, "members")
@members.setter
def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter(name="objectId")
def object_id(self) -> Optional[pulumi.Input[str]]:
"""
The object ID of the group.
"""
return pulumi.get(self, "object_id")
@object_id.setter
def object_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "object_id", value)
@property
@pulumi.getter(name="onpremisesDomainName")
def onpremises_domain_name(self) -> Optional[pulumi.Input[str]]:
"""
The on-premises FQDN, also called dnsDomainName, synchronised from the on-premises directory when Azure AD Connect is used.
"""
return pulumi.get(self, "onpremises_domain_name")
@onpremises_domain_name.setter
def onpremises_domain_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "onpremises_domain_name", value)
@property
@pulumi.getter(name="onpremisesNetbiosName")
def onpremises_netbios_name(self) -> Optional[pulumi.Input[str]]:
"""
The on-premises NetBIOS name, synchronised from the on-premises directory when Azure AD Connect is used.
"""
return pulumi.get(self, "onpremises_netbios_name")
@onpremises_netbios_name.setter
def onpremises_netbios_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "onpremises_netbios_name", value)
@property
@pulumi.getter(name="onpremisesSamAccountName")
def onpremises_sam_account_name(self) -> Optional[pulumi.Input[str]]:
"""
The on-premises SAM account name, synchronised from the on-premises directory when Azure AD Connect is used.
"""
return pulumi.get(self, "onpremises_sam_account_name")
@onpremises_sam_account_name.setter
def onpremises_sam_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "onpremises_sam_account_name", value)
@property
@pulumi.getter(name="onpremisesSecurityIdentifier")
def onpremises_security_identifier(self) -> Optional[pulumi.Input[str]]:
"""
The on-premises security identifier (SID), synchronised from the on-premises directory when Azure AD Connect is used.
"""
return pulumi.get(self, "onpremises_security_identifier")
@onpremises_security_identifier.setter
def onpremises_security_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "onpremises_security_identifier", value)
@property
@pulumi.getter(name="onpremisesSyncEnabled")
def onpremises_sync_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this group is synchronised from an on-premises directory (`true`), no longer synchronised (`false`), or has never been synchronised (`null`).
"""
return pulumi.get(self, "onpremises_sync_enabled")
@onpremises_sync_enabled.setter
def onpremises_sync_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "onpremises_sync_enabled", value)
@property
@pulumi.getter
def owners(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of owners who own this group. Supported object types are Users or Service Principals
"""
return pulumi.get(self, "owners")
@owners.setter
def owners(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "owners", value)
@property
@pulumi.getter(name="preferredLanguage")
def preferred_language(self) -> Optional[pulumi.Input[str]]:
"""
The preferred language for a Microsoft 365 group, in ISO 639-1 notation.
"""
return pulumi.get(self, "preferred_language")
@preferred_language.setter
def preferred_language(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "preferred_language", value)
@property
@pulumi.getter(name="preventDuplicateNames")
def prevent_duplicate_names(self) -> Optional[pulumi.Input[bool]]:
"""
If `true`, will return an error if an existing group is found with the same name. Defaults to `false`.
"""
return pulumi.get(self, "prevent_duplicate_names")
@prevent_duplicate_names.setter
def prevent_duplicate_names(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "prevent_duplicate_names", value)
@property
@pulumi.getter(name="provisioningOptions")
def provisioning_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of provisioning options for a Microsoft 365 group. The only supported value is `Team`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for details. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "provisioning_options")
@provisioning_options.setter
def provisioning_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "provisioning_options", value)
@property
@pulumi.getter(name="proxyAddresses")
def proxy_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of email addresses for the group that direct to the same group mailbox.
"""
return pulumi.get(self, "proxy_addresses")
@proxy_addresses.setter
def proxy_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "proxy_addresses", value)
@property
@pulumi.getter(name="securityEnabled")
def security_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the group is a security group for controlling access to in-app resources. At least one of `security_enabled` or `mail_enabled` must be specified. A Microsoft 365 group can be security enabled _and_ mail enabled (see the `types` property).
"""
return pulumi.get(self, "security_enabled")
@security_enabled.setter
def security_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "security_enabled", value)
@property
@pulumi.getter
def theme(self) -> Optional[pulumi.Input[str]]:
"""
The colour theme for a Microsoft 365 group. Possible values are `Blue`, `Green`, `Orange`, `Pink`, `Purple`, `Red` or `Teal`. By default, no theme is set.
"""
return pulumi.get(self, "theme")
@theme.setter
def theme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "theme", value)
@property
@pulumi.getter
def types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of group types to configure for the group. Supported values are `DynamicMembership`, which denotes a group with dynamic membership, and `Unified`, which specifies a Microsoft 365 group. Required when `mail_enabled` is true. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "types")
@types.setter
def types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "types", value)
@property
@pulumi.getter
def visibility(self) -> Optional[pulumi.Input[str]]:
"""
The group join policy and group content visibility. Possible values are `Private`, `Public`, or `Hiddenmembership`. Only Microsoft 365 groups can have `Hiddenmembership` visibility and this value must be set when the group is created. By default, security groups will receive `Private` visibility and Microsoft 365 groups will receive `Public` visibility.
"""
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "visibility", value)
class Group(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
assignable_to_role: Optional[pulumi.Input[bool]] = None,
behaviors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
dynamic_membership: Optional[pulumi.Input[pulumi.InputType['GroupDynamicMembershipArgs']]] = None,
mail_enabled: Optional[pulumi.Input[bool]] = None,
mail_nickname: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
prevent_duplicate_names: Optional[pulumi.Input[bool]] = None,
provisioning_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
security_enabled: Optional[pulumi.Input[bool]] = None,
theme: Optional[pulumi.Input[str]] = None,
types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
visibility: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a group within Azure Active Directory.
## API Permissions
The following API permissions are required in order to use this resource.
When authenticated with a service principal, this resource requires one of the following application roles: `Group.ReadWrite.All` or `Directory.ReadWrite.All`
If using the `assignable_to_role` property, this resource additionally requires one of the following application roles: `RoleManagement.ReadWrite.Directory` or `Directory.ReadWrite.All`
When authenticated with a user principal, this resource requires one of the following directory roles: `Groups Administrator`, `User Administrator` or `Global Administrator`
## Import
Groups can be imported using their object ID, e.g.
```sh
$ pulumi import azuread:index/group:Group my_group 00000000-0000-0000-0000-000000000000
```
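## Example Usage
A minimal sketch of creating a basic security group; the `pulumi_azuread` import alias is an assumption, and the property names follow the arguments documented below.
```python
import pulumi_azuread as azuread

example = azuread.Group("example",
    display_name="example",
    security_enabled=True)
```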
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] assignable_to_role: Indicates whether this group can be assigned to an Azure Active Directory role. Can only be `true` for security-enabled groups. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] behaviors: A set of behaviors for a Microsoft 365 group. Possible values are `AllowOnlyMembersToPost`, `HideGroupInOutlook`, `SubscribeNewGroupMembers` and `WelcomeEmailDisabled`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for more details. Changing this forces a new resource to be created.
:param pulumi.Input[str] description: The description for the group.
:param pulumi.Input[str] display_name: The display name for the group.
:param pulumi.Input[pulumi.InputType['GroupDynamicMembershipArgs']] dynamic_membership: A `dynamic_membership` block as documented below. Required when `types` contains `DynamicMembership`. Cannot be used with the `members` property.
:param pulumi.Input[bool] mail_enabled: Whether the group is mail-enabled, with a shared group mailbox. At least one of `mail_enabled` or `security_enabled` must be specified. Only Microsoft 365 groups can be mail-enabled (see the `types` property).
:param pulumi.Input[str] mail_nickname: The mail alias for the group, unique in the organisation. Required for mail-enabled groups. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: A set of members who should be present in this group. Supported object types are Users, Groups or Service Principals. Cannot be used with the `dynamic_membership` block.
:param pulumi.Input[Sequence[pulumi.Input[str]]] owners: A set of owners who own this group. Supported object types are Users or Service Principals
:param pulumi.Input[bool] prevent_duplicate_names: If `true`, will return an error if an existing group is found with the same name. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] provisioning_options: A set of provisioning options for a Microsoft 365 group. The only supported value is `Team`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for details. Changing this forces a new resource to be created.
:param pulumi.Input[bool] security_enabled: Whether the group is a security group for controlling access to in-app resources. At least one of `security_enabled` or `mail_enabled` must be specified. A Microsoft 365 group can be security enabled _and_ mail enabled (see the `types` property).
:param pulumi.Input[str] theme: The colour theme for a Microsoft 365 group. Possible values are `Blue`, `Green`, `Orange`, `Pink`, `Purple`, `Red` or `Teal`. By default, no theme is set.
:param pulumi.Input[Sequence[pulumi.Input[str]]] types: A set of group types to configure for the group. Supported values are `DynamicMembership`, which denotes a group with dynamic membership, and `Unified`, which specifies a Microsoft 365 group. Required when `mail_enabled` is true. Changing this forces a new resource to be created.
:param pulumi.Input[str] visibility: The group join policy and group content visibility. Possible values are `Private`, `Public`, or `Hiddenmembership`. Only Microsoft 365 groups can have `Hiddenmembership` visibility and this value must be set when the group is created. By default, security groups will receive `Private` visibility and Microsoft 365 groups will receive `Public` visibility.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a group within Azure Active Directory.
## API Permissions
The following API permissions are required in order to use this resource.
When authenticated with a service principal, this resource requires one of the following application roles: `Group.ReadWrite.All` or `Directory.ReadWrite.All`
If using the `assignable_to_role` property, this resource additionally requires one of the following application roles: `RoleManagement.ReadWrite.Directory` or `Directory.ReadWrite.All`
When authenticated with a user principal, this resource requires one of the following directory roles: `Groups Administrator`, `User Administrator` or `Global Administrator`
## Import
Groups can be imported using their object ID, e.g.
```sh
$ pulumi import azuread:index/group:Group my_group 00000000-0000-0000-0000-000000000000
```
:param str resource_name: The name of the resource.
:param GroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
assignable_to_role: Optional[pulumi.Input[bool]] = None,
behaviors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
dynamic_membership: Optional[pulumi.Input[pulumi.InputType['GroupDynamicMembershipArgs']]] = None,
mail_enabled: Optional[pulumi.Input[bool]] = None,
mail_nickname: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
prevent_duplicate_names: Optional[pulumi.Input[bool]] = None,
provisioning_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
security_enabled: Optional[pulumi.Input[bool]] = None,
theme: Optional[pulumi.Input[str]] = None,
types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
visibility: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GroupArgs.__new__(GroupArgs)
__props__.__dict__["assignable_to_role"] = assignable_to_role
__props__.__dict__["behaviors"] = behaviors
__props__.__dict__["description"] = description
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__.__dict__["display_name"] = display_name
__props__.__dict__["dynamic_membership"] = dynamic_membership
__props__.__dict__["mail_enabled"] = mail_enabled
__props__.__dict__["mail_nickname"] = mail_nickname
__props__.__dict__["members"] = members
__props__.__dict__["owners"] = owners
__props__.__dict__["prevent_duplicate_names"] = prevent_duplicate_names
__props__.__dict__["provisioning_options"] = provisioning_options
__props__.__dict__["security_enabled"] = security_enabled
__props__.__dict__["theme"] = theme
__props__.__dict__["types"] = types
__props__.__dict__["visibility"] = visibility
__props__.__dict__["mail"] = None
__props__.__dict__["object_id"] = None
__props__.__dict__["onpremises_domain_name"] = None
__props__.__dict__["onpremises_netbios_name"] = None
__props__.__dict__["onpremises_sam_account_name"] = None
__props__.__dict__["onpremises_security_identifier"] = None
__props__.__dict__["onpremises_sync_enabled"] = None
__props__.__dict__["preferred_language"] = None
__props__.__dict__["proxy_addresses"] = None
super(Group, __self__).__init__(
'azuread:index/group:Group',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
assignable_to_role: Optional[pulumi.Input[bool]] = None,
behaviors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
dynamic_membership: Optional[pulumi.Input[pulumi.InputType['GroupDynamicMembershipArgs']]] = None,
mail: Optional[pulumi.Input[str]] = None,
mail_enabled: Optional[pulumi.Input[bool]] = None,
mail_nickname: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
object_id: Optional[pulumi.Input[str]] = None,
onpremises_domain_name: Optional[pulumi.Input[str]] = None,
onpremises_netbios_name: Optional[pulumi.Input[str]] = None,
onpremises_sam_account_name: Optional[pulumi.Input[str]] = None,
onpremises_security_identifier: Optional[pulumi.Input[str]] = None,
onpremises_sync_enabled: Optional[pulumi.Input[bool]] = None,
owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
preferred_language: Optional[pulumi.Input[str]] = None,
prevent_duplicate_names: Optional[pulumi.Input[bool]] = None,
provisioning_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
proxy_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
security_enabled: Optional[pulumi.Input[bool]] = None,
theme: Optional[pulumi.Input[str]] = None,
types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
visibility: Optional[pulumi.Input[str]] = None) -> 'Group':
"""
Get an existing Group resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
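A minimal sketch of looking up an existing group by its object ID; the ID below is a placeholder and the `pulumi_azuread` import alias is an assumption.
```python
import pulumi_azuread as azuread

existing = azuread.Group.get("existing", id="00000000-0000-0000-0000-000000000000")
```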
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] assignable_to_role: Indicates whether this group can be assigned to an Azure Active Directory role. Can only be `true` for security-enabled groups. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] behaviors: A set of behaviors for a Microsoft 365 group. Possible values are `AllowOnlyMembersToPost`, `HideGroupInOutlook`, `SubscribeNewGroupMembers` and `WelcomeEmailDisabled`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for more details. Changing this forces a new resource to be created.
:param pulumi.Input[str] description: The description for the group.
:param pulumi.Input[str] display_name: The display name for the group.
:param pulumi.Input[pulumi.InputType['GroupDynamicMembershipArgs']] dynamic_membership: A `dynamic_membership` block as documented below. Required when `types` contains `DynamicMembership`. Cannot be used with the `members` property.
:param pulumi.Input[str] mail: The SMTP address for the group.
:param pulumi.Input[bool] mail_enabled: Whether the group is mail-enabled, with a shared group mailbox. At least one of `mail_enabled` or `security_enabled` must be specified. Only Microsoft 365 groups can be mail-enabled (see the `types` property).
:param pulumi.Input[str] mail_nickname: The mail alias for the group, unique in the organisation. Required for mail-enabled groups. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: A set of members who should be present in this group. Supported object types are Users, Groups or Service Principals. Cannot be used with the `dynamic_membership` block.
:param pulumi.Input[str] object_id: The object ID of the group.
:param pulumi.Input[str] onpremises_domain_name: The on-premises FQDN, also called dnsDomainName, synchronised from the on-premises directory when Azure AD Connect is used.
:param pulumi.Input[str] onpremises_netbios_name: The on-premises NetBIOS name, synchronised from the on-premises directory when Azure AD Connect is used.
:param pulumi.Input[str] onpremises_sam_account_name: The on-premises SAM account name, synchronised from the on-premises directory when Azure AD Connect is used.
:param pulumi.Input[str] onpremises_security_identifier: The on-premises security identifier (SID), synchronised from the on-premises directory when Azure AD Connect is used.
:param pulumi.Input[bool] onpremises_sync_enabled: Whether this group is synchronised from an on-premises directory (`true`), no longer synchronised (`false`), or has never been synchronised (`null`).
:param pulumi.Input[Sequence[pulumi.Input[str]]] owners: A set of owners who own this group. Supported object types are Users or Service Principals
:param pulumi.Input[str] preferred_language: The preferred language for a Microsoft 365 group, in ISO 639-1 notation.
:param pulumi.Input[bool] prevent_duplicate_names: If `true`, will return an error if an existing group is found with the same name. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] provisioning_options: A set of provisioning options for a Microsoft 365 group. The only supported value is `Team`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for details. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] proxy_addresses: List of email addresses for the group that direct to the same group mailbox.
:param pulumi.Input[bool] security_enabled: Whether the group is a security group for controlling access to in-app resources. At least one of `security_enabled` or `mail_enabled` must be specified. A Microsoft 365 group can be security enabled _and_ mail enabled (see the `types` property).
:param pulumi.Input[str] theme: The colour theme for a Microsoft 365 group. Possible values are `Blue`, `Green`, `Orange`, `Pink`, `Purple`, `Red` or `Teal`. By default, no theme is set.
:param pulumi.Input[Sequence[pulumi.Input[str]]] types: A set of group types to configure for the group. Supported values are `DynamicMembership`, which denotes a group with dynamic membership, and `Unified`, which specifies a Microsoft 365 group. Required when `mail_enabled` is true. Changing this forces a new resource to be created.
:param pulumi.Input[str] visibility: The group join policy and group content visibility. Possible values are `Private`, `Public`, or `Hiddenmembership`. Only Microsoft 365 groups can have `Hiddenmembership` visibility and this value must be set when the group is created. By default, security groups will receive `Private` visibility and Microsoft 365 groups will receive `Public` visibility.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _GroupState.__new__(_GroupState)
__props__.__dict__["assignable_to_role"] = assignable_to_role
__props__.__dict__["behaviors"] = behaviors
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["dynamic_membership"] = dynamic_membership
__props__.__dict__["mail"] = mail
__props__.__dict__["mail_enabled"] = mail_enabled
__props__.__dict__["mail_nickname"] = mail_nickname
__props__.__dict__["members"] = members
__props__.__dict__["object_id"] = object_id
__props__.__dict__["onpremises_domain_name"] = onpremises_domain_name
__props__.__dict__["onpremises_netbios_name"] = onpremises_netbios_name
__props__.__dict__["onpremises_sam_account_name"] = onpremises_sam_account_name
__props__.__dict__["onpremises_security_identifier"] = onpremises_security_identifier
__props__.__dict__["onpremises_sync_enabled"] = onpremises_sync_enabled
__props__.__dict__["owners"] = owners
__props__.__dict__["preferred_language"] = preferred_language
__props__.__dict__["prevent_duplicate_names"] = prevent_duplicate_names
__props__.__dict__["provisioning_options"] = provisioning_options
__props__.__dict__["proxy_addresses"] = proxy_addresses
__props__.__dict__["security_enabled"] = security_enabled
__props__.__dict__["theme"] = theme
__props__.__dict__["types"] = types
__props__.__dict__["visibility"] = visibility
return Group(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="assignableToRole")
def assignable_to_role(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether this group can be assigned to an Azure Active Directory role. Can only be `true` for security-enabled groups. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "assignable_to_role")
@property
@pulumi.getter
def behaviors(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A set of behaviors for a Microsoft 365 group. Possible values are `AllowOnlyMembersToPost`, `HideGroupInOutlook`, `SubscribeNewGroupMembers` and `WelcomeEmailDisabled`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for more details. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "behaviors")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description for the group.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
The display name for the group.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="dynamicMembership")
def dynamic_membership(self) -> pulumi.Output[Optional['outputs.GroupDynamicMembership']]:
"""
A `dynamic_membership` block as documented below. Required when `types` contains `DynamicMembership`. Cannot be used with the `members` property.
"""
return pulumi.get(self, "dynamic_membership")
@property
@pulumi.getter
def mail(self) -> pulumi.Output[str]:
"""
The SMTP address for the group.
"""
return pulumi.get(self, "mail")
@property
@pulumi.getter(name="mailEnabled")
def mail_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the group is mail-enabled, with a shared group mailbox. At least one of `mail_enabled` or `security_enabled` must be specified. Only Microsoft 365 groups can be mail-enabled (see the `types` property).
"""
return pulumi.get(self, "mail_enabled")
@property
@pulumi.getter(name="mailNickname")
def mail_nickname(self) -> pulumi.Output[str]:
"""
The mail alias for the group, unique in the organisation. Required for mail-enabled groups. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "mail_nickname")
@property
@pulumi.getter
def members(self) -> pulumi.Output[Sequence[str]]:
"""
A set of members who should be present in this group. Supported object types are Users, Groups or Service Principals. Cannot be used with the `dynamic_membership` block.
"""
return pulumi.get(self, "members")
@property
@pulumi.getter(name="objectId")
def object_id(self) -> pulumi.Output[str]:
"""
The object ID of the group.
"""
return pulumi.get(self, "object_id")
@property
@pulumi.getter(name="onpremisesDomainName")
def onpremises_domain_name(self) -> pulumi.Output[str]:
"""
The on-premises FQDN, also called dnsDomainName, synchronised from the on-premises directory when Azure AD Connect is used.
"""
return pulumi.get(self, "onpremises_domain_name")
@property
@pulumi.getter(name="onpremisesNetbiosName")
def onpremises_netbios_name(self) -> pulumi.Output[str]:
"""
The on-premises NetBIOS name, synchronised from the on-premises directory when Azure AD Connect is used.
"""
return pulumi.get(self, "onpremises_netbios_name")
@property
@pulumi.getter(name="onpremisesSamAccountName")
def onpremises_sam_account_name(self) -> pulumi.Output[str]:
"""
The on-premises SAM account name, synchronised from the on-premises directory when Azure AD Connect is used.
"""
return pulumi.get(self, "onpremises_sam_account_name")
@property
@pulumi.getter(name="onpremisesSecurityIdentifier")
def onpremises_security_identifier(self) -> pulumi.Output[str]:
"""
The on-premises security identifier (SID), synchronised from the on-premises directory when Azure AD Connect is used.
"""
return pulumi.get(self, "onpremises_security_identifier")
@property
@pulumi.getter(name="onpremisesSyncEnabled")
def onpremises_sync_enabled(self) -> pulumi.Output[bool]:
"""
Whether this group is synchronised from an on-premises directory (`true`), no longer synchronised (`false`), or has never been synchronised (`null`).
"""
return pulumi.get(self, "onpremises_sync_enabled")
@property
@pulumi.getter
def owners(self) -> pulumi.Output[Sequence[str]]:
"""
A set of owners who own this group. Supported object types are Users or Service Principals
"""
return pulumi.get(self, "owners")
@property
@pulumi.getter(name="preferredLanguage")
def preferred_language(self) -> pulumi.Output[str]:
"""
The preferred language for a Microsoft 365 group, in ISO 639-1 notation.
"""
return pulumi.get(self, "preferred_language")
@property
@pulumi.getter(name="preventDuplicateNames")
def prevent_duplicate_names(self) -> pulumi.Output[Optional[bool]]:
"""
If `true`, will return an error if an existing group is found with the same name. Defaults to `false`.
"""
return pulumi.get(self, "prevent_duplicate_names")
@property
@pulumi.getter(name="provisioningOptions")
def provisioning_options(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A set of provisioning options for a Microsoft 365 group. The only supported value is `Team`. See [official documentation](https://docs.microsoft.com/en-us/graph/group-set-options) for details. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "provisioning_options")
@property
@pulumi.getter(name="proxyAddresses")
def proxy_addresses(self) -> pulumi.Output[Sequence[str]]:
"""
List of email addresses for the group that direct to the same group mailbox.
"""
return pulumi.get(self, "proxy_addresses")
@property
@pulumi.getter(name="securityEnabled")
def security_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the group is a security group for controlling access to in-app resources. At least one of `security_enabled` or `mail_enabled` must be specified. A Microsoft 365 group can be security enabled _and_ mail enabled (see the `types` property).
"""
return pulumi.get(self, "security_enabled")
@property
@pulumi.getter
def theme(self) -> pulumi.Output[Optional[str]]:
"""
The colour theme for a Microsoft 365 group. Possible values are `Blue`, `Green`, `Orange`, `Pink`, `Purple`, `Red` or `Teal`. By default, no theme is set.
"""
return pulumi.get(self, "theme")
@property
@pulumi.getter
def types(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A set of group types to configure for the group. Supported values are `DynamicMembership`, which denotes a group with dynamic membership, and `Unified`, which specifies a Microsoft 365 group. Required when `mail_enabled` is true. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "types")
@property
@pulumi.getter
def visibility(self) -> pulumi.Output[str]:
"""
The group join policy and group content visibility. Possible values are `Private`, `Public`, or `Hiddenmembership`. Only Microsoft 365 groups can have `Hiddenmembership` visibility and this value must be set when the group is created. By default, security groups will receive `Private` visibility and Microsoft 365 groups will receive `Public` visibility.
"""
return pulumi.get(self, "visibility")
|
from piComm.transmitter import transmitter_main
from piComm.receiver import receiver_main
if __name__ == '__main__':
    # input() returns a string, so the menu options are compared as strings.
    option = input("1 - Transmitter | 2 - Receiver | 0 - Exit")
    while option != '0':
        if option == '1':
            transmitter_main()
        elif option == '2':
            receiver_main()
        option = input("1 - Transmitter | 2 - Receiver | 0 - Exit")
|
#!/usr/bin/env python3
# Note that installing `puccini` will also install `ard`
import sys, puccini.tosca, ard
if len(sys.argv) <= 1:
sys.stderr.write('no URL provided\n')
sys.exit(1)
url = sys.argv[1]
try:
clout = puccini.tosca.compile(url)
ard.write(clout, sys.stdout)
except puccini.tosca.Problems as e:
print('Problems:', file=sys.stderr)
for problem in e.problems:
ard.write(problem, sys.stderr)
sys.exit(1)
|
""" Lexer for pascal. """
import re
from ..common import SourceLocation, Token
from ..tools.baselex import SimpleLexer, on
class Lexer(SimpleLexer):
""" Generates a sequence of token from an input stream """
keywords = ['program',
'type', 'const', 'var',
'begin', 'end',
'case', 'of',
'if', 'then', 'else', 'while', 'for', 'do',
'to', 'downto',
'read', 'readln', 'write', 'writeln']
double_glyphs = (
':=', '<>')
single_glyphs = (
',', ';', '(', ')', '.', ':', '<', '>', '=')
glyphs = double_glyphs + single_glyphs
op_txt = '|'.join(re.escape(g) for g in glyphs)
def __init__(self, diag):
super().__init__()
self.diag = diag
def lex(self, input_file):
filename = input_file.name if hasattr(input_file, 'name') else ''
s = input_file.read()
input_file.close()
self.diag.add_source(filename, s)
self.filename = filename
return self.tokenize(s)
def tokenize(self, text):
""" Keeps track of the long comments """
for token in super().tokenize(text):
yield token
loc = SourceLocation(self.filename, self.line, 0, 0)
yield Token('EOF', 'EOF', loc)
@on(r'[ \t\n]+')
def handle_skip(self, val):
pass
@on(r'[A-Za-z_][A-Za-z\d_]*')
def handle_id(self, val):
val = val.lower()
if val in self.keywords:
typ = val
else:
typ = 'ID'
return typ, val
@on(r"'.*?'")
def handle_string(self, val):
return 'STRING', val[1:-1]
@on(r"\d+")
def handle_number(self, val):
return 'NUMBER', int(val)
    @on(r'\(\*.*\*\)', flags=re.DOTALL, order=-2)
def handle_oldcomment(self, val):
pass
    @on(r'\{.*\}', flags=re.DOTALL, order=-1)
def handle_comment(self, val):
pass
@on(op_txt)
def handle_glyph(self, val):
return val, val
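# Usage sketch (the `diag` object below is an assumption; any diagnostics object
# exposing `add_source(filename, src)`, as used in `lex` above, should work):
#   lexer = Lexer(diag)
#   for token in lexer.lex(open('program.pas')):
#       print(token)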
|
from dataclasses import dataclass
from bindings.csw.triangle_type import TriangleType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class Triangle(TriangleType):
class Meta:
namespace = "http://www.opengis.net/gml"
|
#!/usr/bin/python
from pymongo import MongoClient
client = MongoClient()
db = client.sherlock_database
stocks = [
{
"resource": {
"classname": "Quote",
"fields": {
"name": "ZICOM ELECTRONIC S INR10",
"price": "47.299999",
"symbol": "ZICOM.NS",
"ts": "1467971998",
"type": "equity",
"utctime": "2016-07-08T09:59:58+0000",
"volume": "55415"
}
}
},
{
"resource": {
"classname": "Quote",
"fields": {
"name": "BHARAT HEAVY ELECTRICALS LTD.",
"price": "137.850006",
"symbol": "BHEL.BO",
"ts": "1467973718",
"type": "equity",
"utctime": "2016-07-08T10:28:38+0000",
"volume": "723332"
}
}
}
]
URL="http://finance.yahoo.com/webservice/v1/symbols/"
FORMAT="/quote?format=json&view=detail"
class Stock:
def __init__(self, symbol):
self.symbol = symbol
def get_price(self):
print "%s%s%s" % (URL, self.symbol, FORMAT)
stock = Stock("AMZN")
stock.get_price()
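# A minimal persistence sketch (assumptions: a MongoDB server is reachable with the
# default MongoClient settings above and a collection named "stocks" is acceptable);
# it stores the sample quotes defined at the top of this script.
db.stocks.insert_many(stocks)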
|
import numpy as np
import matplotlib.pyplot as plt
import time
import sys
sys.path.append('../')
from FollyHighLevelControllerV1 import FollyHighLevelControllerV1
# Initialize Molly position
molly_position = np.array([-4, -8])
# Initialize Folly position
folly_position = np.array([-2, -7.5, 0])
# object length
object_length = 2
# line path constraint
l0 = np.array([-10, -4])
l1 = np.array([4, -10])
# simulation
T = 300 # Total time
dt = 0.2 # time per iteration
sigma = 0.02 # simulation noise standard deviation
# initialize MPC Alg
follyHLC = FollyHighLevelControllerV1(molly_position,
folly_position,
object_length,
l0, l1, # constraint path ends
10, # horizon length
dt, # step time
0.1 # maximum speed
)
# constant molly velocity
molly_velocity_command = np.array([0.01, 0.05])
# total deviation
deviation = 0
prev_time = time.time() # time iteration for real time plot
new_time = time.time()
plt.show(block=False)
for t in range(T):
load_length_deviation = np.linalg.norm(folly_position[0:2] - molly_position) - object_length
deviation += np.abs(load_length_deviation)
plt.clf()
# plot current state
plt.plot([l0[0], l1[0]], [l0[1], l1[1]], 'k--', linewidth=1, label='Path') # constraint path
plt.plot([molly_position[0], folly_position[0]], [molly_position[1], folly_position[1]], 'b-', linewidth=2,
label='Load: dev {:.1f}cm'.format(load_length_deviation * 100)) # object lifted
plt.plot(molly_position[0], molly_position[1], '.', color='olive', label='Molly Position') # MollyPosition
plt.plot(folly_position[0], folly_position[1], 'r.', label='Folly Position') # Folly Actual Positions
plt.legend()
plt.axis('equal')
plt.title('Folly High Level Controller V1 {0}/{1}'.format(t, T))
plt.xlabel('[m]')
plt.ylabel('[m]')
plt.pause(0.01)
time.sleep(np.maximum(dt - (new_time - prev_time), 0.01)) # pause to match real time
prev_time = new_time
new_time = time.time()
# actuate molly command with noise
molly_position = molly_position + dt * molly_velocity_command + dt * np.random.normal(0, sigma, 2)
# compute folly command
folly_velocity_command = follyHLC.update_then_calculate_optimal_actuation(molly_position, folly_position,
molly_velocity_command)
# actuate optimal command with noise
folly_position = folly_position + dt * folly_velocity_command + dt * np.random.normal(0, sigma, 3)
print('The average deviation was {:.1f}cm.'.format(deviation / T * 100))
|
#! /usr/bin/env python3
# vim:ts=4:sw=4:ai:et:si:sts=4
import argparse
import logging
import json
from flask_jsonrpc.proxy import ServiceProxy
import os
import sys
import re
import time
import configparser
FORMAT = "%(asctime)s: %(name)s:%(lineno)d (%(threadName)s) - %(levelname)s - %(message)s"
logging.basicConfig(format=FORMAT)
logging.getLogger(None).setLevel(logging.INFO)
logging.captureWarnings(True)
logger = logging.getLogger(__name__)
progname = os.path.basename(sys.argv[0])
if progname == "rpcclient.py" or progname == "common" or len(progname) < 7:
logger.error("This must be run via a symlink")
sys.exit(1)
# Strip the video_ off the beginning
progname = progname[6:]
# Common config file
configFile = os.path.expanduser("~/.video.cfg")
configData = configparser.ConfigParser()
configData.optionxform = str
try:
configData.read(configFile)
except Exception:
pass
config = {}
try:
config.update({k: configData.get("default", k) for k in configData.options("default")})
except Exception:
pass
try:
config.update({k: configData.get(progname, k) for k in configData.options(progname)})
except Exception:
pass
envVars = {
"VID_PROJECT": "project",
"VID_SERVERIP": "serverIP",
}
config.update({v: os.environ.get(k, None) for (k, v) in envVars.items() if k in os.environ})
nonProjectMethods = ["poll", "list_outstanding"]
parameters = {
"common": {
"arguments": [
{
"args": ["--debug", "-d"],
"kwargs": {
"action": "store_true",
"help": "Use debug mode",
}
},
{
"args": ["--verbose", "-v"],
"kwargs": {
"action": "store_true",
"help": "Use verbose mode",
}
},
{
"args": ["--dryrun", "-n"],
"kwargs": {
"action": "store_true",
"help": "Dry run mode - don't contact RPC server",
}
},
{
"args": ["--project", "-p"],
"kwargs": {
"action": "store",
"required": progname not in nonProjectMethods,
"help": "Video project to upload",
}
},
{
"args": ["--serverIP", "-i"],
"kwargs": {
"action": "store",
"required": True,
"help": "Specify the server's IP",
}
},
{
"args": ["--remoteIP", "-I"],
"kwargs": {
"action": "store",
"default": "",
"help": "Override the client IP",
}
},
{
"args": ['--nopoll'],
"kwargs": {
"action": "store_false",
"dest": "poll",
"help": "Disable the poll for completion",
}
},
],
},
"upload_inputs": {
"description": "Upload input video files to server for processing",
"params": ["project", "remoteIP", "force"],
"arguments": [
{
"args": ["--force", "-f"],
"kwargs": {
"action": "store_true",
"help": "Force deletion of old files on target",
}
}
]
},
"convert_inputs": {
"description": "Convert videos to editable and proxy versions",
"params": ["project", "files", "factor"],
"arguments" : [
{
"args": ["--file", "-f"],
"kwargs": {
"action": "append",
"dest": "files",
"help": "Choose specific files to run (one per --file)",
}
},
{
"args": ["--factor", "-F"],
"kwargs": {
"action": "store",
"type": float,
"default": 0.5,
"help": "Set the shrink factor for proxy files (default %(default)s)",
}
}
]
},
"download_editables": {
"description": "Download editable video files from server for editing",
"params": ["project", "remoteIP", "force"],
"arguments": [
{
"include": "upload_inputs"
}
]
},
"download_proxies": {
"description": "Download proxy video files from server for editing",
"params": ["project", "remoteIP", "force"],
"arguments": [
{
"include": "upload_inputs"
}
]
},
"upload_edl": {
"description": "Upload the EDL to the server",
"params": ["project", "remoteIP", "edlfile"],
"arguments": [
{
"args": ["--edlfile", '-e'],
"kwargs": {
"required": True,
"action": "store",
"help": "The EDL File to send",
}
}
]
},
"upload_proxy_edl": {
"description": "Upload the proxy EDL to the server",
"params": ["project", "remoteIP", "edlfile"],
"arguments": [
{
"include": "upload_edl"
}
]
},
"render_edl": {
"description": "Render the EDL file on the server",
"params": ["project", "outfile", "edlfile", "proxy", "mode"],
"arguments": [
{
"include": "upload_edl"
},
{
"args": ["--outfile", '-o'],
"kwargs": {
"action": "store",
"required": True,
"help": "Set the output filename",
}
},
{
"args": ["--proxy", '-P'],
"kwargs": {
"action": "store_true",
"help": "Render using proxy files",
}
},
{
"args": ["--mode", '-m'],
"kwargs": {
"action": "store",
"choices": ["cinelerra", "pitivi"],
"default": "pitivi",
"help": "Editing mode (default %(default)s)",
},
},
]
},
"upload_to_youtube": {
"description": "Upload the output video to YouTube",
"params": ["project", "outfile", "title", "description", "category",
"keywords"],
"arguments": [
{
"args": ["--outfile", '-o'],
"kwargs": {
"action": "store",
"required": True,
"help": "Set the output filename",
}
},
{
"args": ["--title", "-t"],
"kwargs": {
"action": "store",
"required": True,
"help": "Title for the video",
}
},
{
"args": ["--description", "-D"],
"kwargs": {
"action": "store",
"required": True,
"help": "Description for the video",
}
},
{
"args": ["--category", "-c"],
"kwargs": {
"action": "store",
"default": 28,
"type": int,
"help": "Category for the video (default %(default)s)",
}
},
{
"args": ["--keywords", "-k"],
"kwargs": {
"action": "store",
"required": True,
"help": "Keywords for the video (comma separated)",
}
},
]
},
"archive_to_s3": {
"description": "Archive a project to S3",
"params": ["project", "skip", "inputs", "delete", "accelerate"],
"arguments": [
{
"args": ["--skip", '-s'],
"kwargs": {
"action": "store_true",
"help": "Skip uploading",
}
},
{
"args": ["--inputs"],
"kwargs": {
"action": "store_true",
"help": "Archive inputs too",
}
},
{
"args": ["--delete", '-D'],
"kwargs": {
"action": "store_true",
"help": "Delete project locally after upload",
}
},
{
"args": ["--accelerate", '-a'],
"kwargs": {
"action": "store_true",
"help": "Use S3 Transfer Acceleration",
}
},
],
},
"make_slideshow": {
"description": "Create a slideshow from images",
"params": ["project", "duration", "outfile", "files"],
"arguments": [
{
"args": ["--outfile", '-o'],
"kwargs": {
"action": "store",
"required": True,
"help": "Set the output filename",
}
},
{
"args": ["--duration", "-D"],
"kwargs": {
"action": "store",
"help": "Duration of each image in slideshow",
"type": int,
"default": 5,
}
},
{
"args": ["files"],
"kwargs": {
"nargs": argparse.REMAINDER,
"help": "Image files"
}
},
],
},
"poll": {
"description": "Poll for completion of a task",
"params": ["id"],
"arguments": [
{
"args": ["--id", '-u'],
"kwargs": {
"action": "store",
"required": True,
"help": "Set the id to poll for",
}
}
]
},
"list_outstanding": {
"description": "List outstanding tasks",
"params": [],
"arguments": [
]
}
}
def add_parser_args(parser, progname):
arguments = parameters.get(progname, {}).get("arguments", [])
for arg in arguments:
if "include" in arg:
add_parser_args(parser, arg['include'])
continue
args = arg.get('args', [])
kwargs = arg.get('kwargs', {})
type_ = kwargs.get('type', None)
action = kwargs.get('action', "store")
dests = [item.lstrip("-") for item in args]
for item in dests:
value = config.get(item, None)
if value is not None:
if type_ is not None:
value = type_(value)
if action == "store_true":
value = (value == "True")
if action == "store_false":
value = (value != "False")
kwargs["default"] = value
if kwargs.get('required', False):
kwargs.pop("required", None)
break
parser.add_argument(*args, **kwargs)
def print_response(response):
global verbose
if not verbose:
result = response.get('result', None)
if result is not None:
if isinstance(result, dict):
result = result.get('result', "")
if result:
print(result)
return 0
print(json.dumps(response, indent=2))
if "errors" in response:
return 1
return 0
if progname not in parameters:
logger.error("RPC service %s is not defined" % progname)
sys.exit(1)
parser = argparse.ArgumentParser(prog=progname,
description=parameters[progname].get("description", None))
add_parser_args(parser, 'common')
add_parser_args(parser, progname)
args = parser.parse_args()
if progname in nonProjectMethods:
args.poll = False
if args.debug:
logging.getLogger(None).setLevel(logging.DEBUG)
if hasattr(args, "files") and not args.files:
args.files = []
verbose = args.verbose
config.update(args.__dict__)
logger.info("Config: %s" % config)
if args.dryrun:
sys.exit(0)
apiurl = "http://%s:5005/api" % config.get("serverIP", None)
logger.info("Using service at %s" % apiurl)
proxy = ServiceProxy(apiurl)
apifunc = getattr(proxy.App, progname)
params = parameters[progname].get('params', [])
apiparams = {param: config.get(param, None) for param in params}
if progname != "poll":
print(apifunc)
response = apifunc(**apiparams)
retCode = print_response(response)
if not config.get("poll", False):
sys.exit(retCode)
uuid = response['id']
else:
uuid = config.get("id", None)
sleepTime = 0
while True:
sleepTime = max(min(sleepTime * 2, 256), 1)
logger.info("Sleeping for %ss" % sleepTime)
time.sleep(sleepTime)
response = proxy.App.poll(id=uuid)
retCode = print_response(response)
if retCode:
output = None
break
output = response.get("result", {})
if output.get("status", "complete") == "complete":
break
sys.exit(retCode)
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import math
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split
dataset = pd.DataFrame()
model = {}
def load_dataset(filename):
#Load Dataset
global dataset
dataset = pd.read_csv(filename)
def remove_unused_feature(list_features):
global dataset
for feature in list_features:
del dataset[feature]
def replace_nan():
global dataset
dataset = dataset.fillna(-999)
def get_mean(values):
sum = 0
count = 0
for value in values:
sum = sum + value
count = count + 1
return sum/count
def get_std(values):
return np.std(values)
"""
def get_std(values):
sums = 0
sum_power = 0
count = 0
for value in values:
sums = sums + value
sum_power = sum_power + np.power(value,2)
count = count +1
return np.sqrt((count * sums - sum_power)/ (count * (count-1)))
"""
def calculateProbability(x, mean, stdev):
exponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))
return (1/(math.sqrt(2*math.pi)*stdev))*exponent
def build_model(features, labels):
#Summarize Class
print("Class : {0}".format(labels.unique()))
class_prior = dict(zip(labels.value_counts().index, labels.value_counts().values))
data_points = sum(class_prior.values())
class_dict = {}
for class_ in class_prior.keys():
class_dict[class_] = class_prior[class_]
class_prior[class_] = class_prior[class_]/ data_points
model[labels.name] = class_prior
for column in features.columns:
class_details = {}
for class_ in class_prior.keys():
class_detail = features[column][labels.values == class_]
mean = get_mean(class_detail.values)
std = get_std(class_detail.values)
class_details[class_] = {'mean':mean, 'std':std}
model[column] = class_details
return model
def get_predictions(features):
classes = list(model[Y_test.name].keys())
class_predictions = []
for i, row in features.iterrows():
class_prob = []
for class_ in classes:
probabilities = 1
            for index, value in row.items():
try:
mean, std = model[index][class_]['mean'], model[index][class_]['std']
probabilities = probabilities * calculateProbability(value, mean, std)
except:
probabilities = probabilities
probabilities = probabilities * model[Y_test.name][class_]
class_prob.append(probabilities)
index_max = np.argmax(class_prob)
class_predictions.append(classes[index_max])
return class_predictions
load_dataset("Dataset/haberman.csv")
replace_nan()
Y = dataset['SurvivalStatus']
X = dataset.iloc[:,:3]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.5, random_state = 42)
model = build_model(X_train, Y_train)
Y_pred= get_predictions(X_test)
accuracy = accuracy_score(Y_test, Y_pred)
report = classification_report(Y_test, Y_pred)
cm = confusion_matrix(Y_test, Y_pred)
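# A minimal reporting sketch (assumption: plain console output is acceptable);
# prints the evaluation metrics computed above.
print("Accuracy: {0}".format(accuracy))
print(report)
print(cm)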
|
import os
from tqdm import tqdm
import numpy as np
from ..dataloaders.custom_path import Path
def calculate_weigths_labels(dataset, dataloader, num_classes, server='039614'):
# Create an instance from the data loader
z = np.zeros((num_classes,))
# Initialize tqdm
tqdm_batch = tqdm(dataloader)
print('Calculating classes weights')
for sample in tqdm_batch:
y = sample['label']
y = y.detach().cpu().numpy()
mask = (y >= 0) & (y < num_classes)
labels = y[mask].astype(np.uint8)
count_l = np.bincount(labels, minlength=num_classes)
z += count_l
tqdm_batch.close()
total_frequency = np.sum(z)
class_weights = []
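    # Weight each class by the inverse log of its smoothed relative frequency,
    # so that rarer classes receive larger weights.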
for frequency in z:
class_weight = 1 / (np.log(1.02 + (frequency / total_frequency)))
class_weights.append(class_weight)
ret = np.array(class_weights)
classes_weights_path = os.path.join(Path.db_root_dir(dataset, server=server), dataset+'_classes_weights.npy')
np.save(classes_weights_path, ret)
return ret
|
#written by ruth
from os import listdir
from string import punctuation
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk import pos_tag
from json import loads,dumps
from random import seed
seed(1000)
class preProcessor(object):
def __init__(self, inputdir, stops, negtags):
#data source for unprocessed text
self.inputdir = inputdir
#stop words
self.stops = stops
#unwanted parts-of-speech tags
self.negtags = negtags
def removePuncs(self, text):
puncs = [i for i in punctuation]
for i in puncs:
text = text.replace(i,"")
return text
def preProcess(self, text,forumwords):
#remove punctuations
text = self.removePuncs(text)
#split text to words
words = [word.strip().lower() for word in text.split()]
#remove stopwords and numbers
stops = self.stops+forumwords
words = [word for word in words if word not in stops and word.isalpha()]
#pos_tag words
tagged = pos_tag(words)
#remove unwanted tags
tagged = [tag for tag in tagged if tag[1] not in self.negtags]
words = [word[0] for word in tagged]
#stem words
words = [PorterStemmer().stem(word) for word in words]
#join words to form sentence
sentence = " ".join([word.strip() for word in words])
return sentence
def processFile(self, inputfile, outputfile, forumwords):
with open(inputfile, "r+") as infile:
inputdata = loads(infile.read())
infile.close()
print("Collected Unprocessed data \nProcessing Data .....\n")
with open(outputfile, "w+") as outfile:
for data in inputdata:
text = data.get("text")
print(text)
text = self.preProcess(text, forumwords=forumwords)
print(text)
if len(text.split()) > 2:
outfile.write(text+"\n")
else:
pass
outfile.close()
def preProcessTwitter(self):
twitterDir = self.inputdir+"twitter/"
files = listdir(twitterDir)
twitterwords=['post', 'tweet']+[i.replace(".json","").lower() for i in files]
with open("outputfiles/"+"twitter.txt", "w+") as outfile:
for i in files:
try:
inputdir = twitterDir+i
with open(inputdir, "r+") as infile:
inputdata = loads(infile.read())
print(inputdata)
infile.close()
for data in inputdata:
text = data.get("text")
if text:
#remove retweets
if "rt" not in text.lower() and len(text.split()) > 2:
text = self.preProcess(text,forumwords=twitterwords)
outfile.write(text+"\n")
else:
pass
else:
pass
print("Processed "+i)
except:
pass
outfile.close()
return 0
    def preProcessStackOverflow(self):
inputfile = "../DataCollection/Krypton/outputfiles/stackoverflow.json"
outputfile = "outputfiles/stack.txt"
forumwords = ['post','question','answer','questions','answers', 'votes','vote','upvote', 'downvote','up','down']
self.processFile(inputfile=inputfile, outputfile=outputfile, forumwords=forumwords)
return 0
def preProcessCwn(self):
inputfile = "../DataCollection/Krypton/outputfiles/cwn.json"
outputfile = "outputfiles/cwn.txt"
self.processFile(inputfile=inputfile, outputfile=outputfile, forumwords=[])
return 0
def preProcessReddit(self):
inputfile = self.inputdir+"reddit.json"
outputfile = "outputfiles/reddit.txt"
forumwords = ['post', 'votes','vote','upvote', 'downvote','up','down', 'sub', 'reddit','subreddit']
self.processFile(inputfile=inputfile, outputfile=outputfile, forumwords=forumwords)
return 0
    def preProcessHackernews(self):
inputfile = "../DataCollection/Krypton/outputfiles/hackernews.json"
outputfile = "outputfiles/hackernews.txt"
forumwords = ['another','day']
self.processFile(inputfile=inputfile, outputfile=outputfile, forumwords=forumwords)
return 0
if __name__ == "__main__":
inputdir = "../DataCollection/Krypton/outputfiles/"
negtags = ["NN", "NNP", "NNPS", "POS", "PRP", "PRP$", "WP", "WP$", "IN", "EX", "CC", "DT", "PDT", "WDT","TO", "RP","FW", "MD", "SYM"]
stops = [stop.lower() for stop in list(set(stopwords.words("english")))]
pp = preProcessor(inputdir=inputdir, stops=stops,negtags=negtags)
'''
pp.preProcessReddit()
pp.preProcessCwn()
pp.preProcessTwitter()
    pp.preProcessStackOverflow()
'''
    pp.preProcessHackernews()
|
import pso
import os
particles = [5, 10, 20, 30, 40, 50, 70, 75, 100, 150, 200, 500]
for n in particles:
    os.system(
        'mkdir results/p%d_i50 && '
        'python main.py --infile "data/scg_gawad/pat1.txt" --mutations 20 '
        '--mutfile "data/scg_gawad/pat1_mut.txt" --particles %d --iterations 50 '
        '>> results/p%d_i50/result.txt && '
        'mv results/average_particle_time.png results/average_iteration_particle_time.png '
        'results/best.gv results/best.gv.png results/p%d_i50'
        % (n, n, n, n)
    )
|
from flask_restful import current_app
from common.database import db
import datetime
import string
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, Sequence, exc
def create_db():
db.create_all()
def run():
create_db()
|
"""
Simple script generate the static HTML for the Bokeh climate zone map
Requirements:
- python >= 3.7
- bokeh
- ???
"""
import json
from pathlib import Path
from bokeh.embed import autoload_static
from bokeh.models import GeoJSONDataSource, WMTSTileSource
from bokeh.plotting import figure
from bokeh.resources import CDN
COLORS = [
"#e41a1c",
"#377eb8",
"#4daf4a",
"#984ea3",
"#ff7f00",
"#ffff33",
"#f781bf",
"#999999",
"#a65628",
]
FILL_ALPHA = 0.4
def main():
tile_provider = WMTSTileSource(
url=(
"https://stamen-tiles.a.ssl.fastly.net/terrain"
"/{Z}/{X}/{Y}.png"
),
attribution=(
'Map tiles by <a href="http://stamen.com">Stamen Design</a>, '
'under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0'
'</a>. Map data by <a href="http://openstreetmap.org">OpenStreetMap'
'</a>, under <a href="https://creativecommons.org/licenses/by-sa/3.0">'
"CC BY SA</a>"
),
)
fig = figure(
x_axis_type="mercator",
y_axis_type="mercator",
title="Solar Forecast Arbiter Climate Zones",
height=1241,
width=1831,
sizing_mode="scale_width",
x_range=(-14549121.9, -6848522.3),
y_range=(1705458.7, 6924707.2),
tooltips=[("Region", "@region")],
tools="pan,wheel_zoom,box_zoom,reset,hover,help",
)
fig.title.text_font_size = "24px"
fig.add_tile(tile_provider)
for i, area in enumerate(sorted(Path("./assets/climate_zones").glob("*.webm.geojson"))):
geodata = {
"type": "FeatureCollection",
"name": "climate_zones",
"crs": {
"type": "name",
"properties": {"name": "urn:ogc:def:crs:EPSG::3857"},
},
"features": [],
}
region = area.name.split("_")[-1].split(".")[0]
with open(area, "r") as f:
gj = json.load(f)
for feat in gj["features"]:
feat["properties"]["region"] = region
feat["properties"]["geojson"] = str(area)
geodata["features"].append(feat)
geo_source = GeoJSONDataSource(
geojson=json.dumps(geodata), name=f"geo_source_{region}"
)
fig.patches(
xs="xs",
ys="ys",
color=COLORS[i],
legend=f"Region {region}",
source=geo_source,
fill_alpha=FILL_ALPHA,
)
fig.legend.click_policy = "hide"
js_path = Path("assets/climate_zones/climate_zones.js")
js, tag = autoload_static(fig, CDN, str(".." / js_path))
with open(js_path, "w") as f:
f.write(js)
with open("_includes/climate_zones.html", "w") as f:
f.write(tag)
if __name__ == "__main__":
main()
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the implementations of the various numerical integrators.
Higher order methods mostly taken from [1].
References:
[1] Leimkuhler, Benedict and Sebastian Reich. Simulating hamiltonian dynamics.
Vol. 14. Cambridge university press, 2004.
[2] Forest, Etienne and Ronald D. Ruth. Fourth-order symplectic integration.
Physica D: Nonlinear Phenomena 43.1 (1990): 105-117.
[3] Blanes, Sergio and Per Christian Moan. Practical symplectic partitioned
Runge–Kutta and Runge–Kutta–Nyström methods. Journal of Computational and
Applied Mathematics 142.2 (2002): 313-330.
[4] McLachlan, Robert I. On the numerical integration of ordinary differential
equations by symmetric composition methods. SIAM Journal on Scientific
Computing 16.1 (1995): 151-168.
[5] Yoshida, Haruo. Construction of higher order symplectic integrators.
Physics letters A 150.5-7 (1990): 262-268.
[6] Süli, Endre; Mayers, David (2003), An Introduction to Numerical Analysis,
Cambridge University Press, ISBN 0-521-00794-1.
[7] Hairer, Ernst; Nørsett, Syvert Paul; Wanner, Gerhard (1993), Solving
ordinary differential equations I: Nonstiff problems, Berlin, New York:
Springer-Verlag, ISBN 978-3-540-56670-0.
"""
from typing import Callable, Dict, Optional, Sequence, Tuple, TypeVar, Union
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
import jax
from jax import lax
from jax.experimental import ode
import jax.numpy as jnp
import numpy as np
M = TypeVar("M")
TM = TypeVar("TM")
TimeInterval = Union[jnp.ndarray, Tuple[float, float]]
# _____ _
# / ____| | |
# | | __ ___ _ __ ___ _ __ __ _| |
# | | |_ |/ _ \ '_ \ / _ \ '__/ _` | |
# | |__| | __/ | | | __/ | | (_| | |
# \_____|\___|_| |_|\___|_| \__,_|_|
# _____ _ _ _
# |_ _| | | | | (_)
# | | _ __ | |_ ___ __ _ _ __ __ _| |_ _ ___ _ __
# | | | '_ \| __/ _ \/ _` | '__/ _` | __| |/ _ \| '_ \
# _| |_| | | | || __/ (_| | | | (_| | |_| | (_) | | | |
# |_____|_| |_|\__\___|\__, |_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
GeneralTangentFunction = Callable[
[
Optional[Union[float, jnp.ndarray]], # t
M # y
],
TM # dy_dt
]
GeneralIntegrator = Callable[
[
GeneralTangentFunction,
Optional[Union[float, jnp.ndarray]], # t
M, # y
jnp.ndarray, # dt
],
M # y_next
]
def solve_ivp_dt(
fun: GeneralTangentFunction,
y0: M,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
method: Union[str, GeneralIntegrator],
num_steps: Optional[int] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, M]:
"""Solve an initial value problem for a system of ODEs using explicit method.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
All of the solvers supported here are explicit and non-adaptive. This makes
them easy to run with a fixed amount of computation and ensures solutions are
easily differentiable.
Args:
fun: callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here `t` is a scalar representing the time instance. `y` can be any
type `M`, including a flat array, that is registered as a
pytree. In addition, there is a type denoted as `TM` that represents
the tangent space to `M`. It is assumed that any element of `TM` can be
multiplied by arrays and scalars, can be added to other `TM` instances
as well as they can be right added to an element of `M`, that is
add(M, TM) exists. The function should return an element of `TM` that
defines the time derivative of `y`.
y0: an instance of `M`
Initial state at `t_span[0]`.
t0: float or array.
The initial time point of integration.
dt: array
Array containing all consecutive increments in time, at which the integral
to be evaluated. The size of this array along axis 0 defines the number of
steps that the integrator would do.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* general_euler - see `GeneralEuler`
* rk2 - see `RungaKutta2`
* rk4 - see `RungaKutta4`
* rk38 - see `RungaKutta38`
num_steps: Optional int.
If provided the `dt` will be treated as the same per step time interval,
applied for this many steps. In other words setting this argument is
equivalent to replicating `dt` num_steps times and stacking over axis=0.
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
      the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt`.
    use_scan: bool
      Whether the loop should use `lax.scan` or a python loop.
ode_int_kwargs: dict
Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
if method == "adaptive":
ndim = y0.q.ndim if isinstance(y0, phase_space.PhaseSpace) else y0.ndim
signs = jnp.asarray(jnp.sign(dt))
signs = signs.reshape([-1] + [1] * (ndim - 1))
if isinstance(dt, float) or dt.ndim == 0:
true_t_eval = t0 + dt * np.arange(1, num_steps + 1)
else:
true_t_eval = t0 + dt[None] * np.arange(1, num_steps + 1)[:, None]
if isinstance(dt, float):
dt = np.asarray(dt)
if isinstance(dt, np.ndarray) and dt.ndim > 0:
      if not np.all(np.abs(dt) == np.abs(dt[0])):
        raise ValueError("Not all values of `dt` were the same.")
elif isinstance(dt, jnp.ndarray) and dt.ndim > 0:
raise ValueError("The code here works only when `dy_dt` is time "
"independent and `np.abs(dt)` is the same. For this we "
"allow calling this only with numpy (not jax.numpy) "
"arrays.")
dt: jnp.ndarray = jnp.abs(jnp.asarray(dt))
dt = dt.reshape([-1])[0]
t_eval = t0 + dt * np.arange(num_steps + 1)
outputs = ode.odeint(
func=lambda y_, t_: fun(None, y_) * signs,
y0=y0,
t=jnp.abs(t_eval - t0),
**(ode_int_kwargs or dict())
)
# Note that we do not return the initial point
return true_t_eval, jax.tree_map(lambda x: x[1:], outputs)
method = get_integrator(method)
if num_steps is not None:
dt = jnp.repeat(jnp.asarray(dt)[None], repeats=num_steps, axis=0)
t_eval = t0 + jnp.cumsum(dt, axis=0)
t0 = jnp.ones_like(t_eval[..., :1]) * t0
t = jnp.concatenate([t0, t_eval[..., :-1]], axis=-1)
def loop_body(y_: M, t_dt: Tuple[jnp.ndarray, jnp.ndarray]) -> Tuple[M, M]:
t_, dt_ = t_dt
dt_: jnp.ndarray = dt_ / steps_per_dt
for _ in range(steps_per_dt):
y_ = method(fun, t_, y_, dt_)
t_ = t_ + dt_
return y_, y_
if use_scan:
return t_eval, lax.scan(loop_body, init=y0, xs=(t, dt))[1]
else:
y = [y0]
for t_and_dt_i in zip(t, dt):
y.append(loop_body(y[-1], t_and_dt_i)[0])
# Note that we do not return the initial point
return t_eval, jax.tree_multimap(lambda *args: jnp.stack(args, axis=0),
*y[1:])
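# Usage sketch (the tangent function `dy_dt` below is hypothetical, shown purely
# for illustration):
#   t, y = solve_ivp_dt(fun=dy_dt, y0=jnp.zeros(3), t0=0.0, dt=0.1,
#                       method="rk4", num_steps=100)
# Here `t` has shape [100] and every leaf of `y` gains a leading axis of 100
# steps; as noted above, the initial point `y0` itself is not part of the output.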
def solve_ivp_dt_two_directions(
fun: GeneralTangentFunction,
y0: M,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
method: Union[str, GeneralIntegrator],
num_steps_forward: int,
num_steps_backward: int,
include_y0: bool = True,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> M:
"""Equivalent to `solve_ivp_dt` but you can specify unrolling the problem for a fixed number of steps in both time directions."""
yt = []
if num_steps_backward > 0:
yt_bck = solve_ivp_dt(
fun=fun,
y0=y0,
t0=t0,
dt=- dt,
method=method,
num_steps=num_steps_backward,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)[1]
yt.append(jax.tree_map(lambda x: jnp.flip(x, axis=0), yt_bck))
if include_y0:
yt.append(jax.tree_map(lambda x: x[None], y0))
if num_steps_forward > 0:
yt_fwd = solve_ivp_dt(
fun=fun,
y0=y0,
t0=t0,
dt=dt,
method=method,
num_steps=num_steps_forward,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)[1]
yt.append(yt_fwd)
if len(yt) > 1:
return jax.tree_multimap(lambda *a: jnp.concatenate(a, axis=0), *yt)
else:
return yt[0]
def solve_ivp_t_eval(
fun: GeneralTangentFunction,
t_span: TimeInterval,
y0: M,
method: Union[str, GeneralIntegrator],
t_eval: Optional[jnp.ndarray] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, M]:
"""Solve an initial value problem for a system of ODEs using an explicit method.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
  All of the solvers supported here are explicit and non-adaptive. This in
  turn makes them easy to run with a fixed amount of computation and ensures
  the solutions are easily differentiable.
Args:
fun: callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here `t` is a scalar representing the time instance. `y` can be any
type `M`, including a flat array, that is registered as a
pytree. In addition, there is a type denoted as `TM` that represents
the tangent space to `M`. It is assumed that any element of `TM` can be
multiplied by arrays and scalars, can be added to other `TM` instances
as well as they can be right added to an element of `M`, that is
add(M, TM) exists. The function should return an element of `TM` that
defines the time derivative of `y`.
t_span: 2-tuple of floats
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf.
y0: an instance of `M`
Initial state at `t_span[0]`.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* general_euler - see `GeneralEuler`
* rk2 - see `RungaKutta2`
* rk4 - see `RungaKutta4`
* rk38 - see `RungaKutta38`
t_eval: array or None.
Times at which to store the computed solution. Must be sorted and lie
within `t_span`. If None then t_eval = [t_span[-1]]
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
      the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt`.
    use_scan: bool
      Whether the loop should use `lax.scan` or a python loop.
ode_int_kwargs: dict
Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
# Check for t_eval
if t_eval is None:
t_eval = np.asarray([t_span[-1]])
if isinstance(t_span[0], float) and isinstance(t_span[1], float):
t_span = np.asarray(t_span)
elif isinstance(t_span[0], float) and isinstance(t_span[1], jnp.ndarray):
t_span = (np.full_like(t_span[1], t_span[0]), t_span[1])
t_span = np.stack(t_span, axis=0)
elif isinstance(t_span[1], float) and isinstance(t_span[0], jnp.ndarray):
t_span = (t_span[0], jnp.full_like(t_span[0], t_span[1]))
t_span = np.stack(t_span, axis=0)
else:
t_span = np.stack(t_span, axis=0)
def check_span(span, ts):
# Verify t_span and t_eval
if span[0] < span[1]:
# Forward in time
if not np.all(np.logical_and(span[0] <= ts, ts <= span[1])):
raise ValueError("Values in `t_eval` are not within `t_span`.")
if not np.all(ts[:-1] < ts[1:]):
raise ValueError("Values in `t_eval` are not properly sorted.")
else:
# Backward in time
if not np.all(np.logical_and(span[0] >= ts, ts >= span[1])):
raise ValueError("Values in `t_eval` are not within `t_span`.")
if not np.all(ts[:-1] > ts[1:]):
raise ValueError("Values in `t_eval` are not properly sorted.")
if t_span.ndim == 1:
check_span(t_span, t_eval)
elif t_span.ndim == 2:
if t_eval.ndim != 2:
raise ValueError("t_eval should have rank 2.")
for i in range(t_span.shape[1]):
check_span(t_span[:, i], t_eval[:, i])
t = np.concatenate([t_span[:1], t_eval[:-1]], axis=0)
return solve_ivp_dt(
fun=fun,
y0=y0,
t0=t_span[0],
dt=t_eval - t,
method=method,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)
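# Usage sketch (again with a hypothetical `dy_dt`, purely for illustration):
#   t, y = solve_ivp_t_eval(fun=dy_dt, t_span=(0.0, 1.0), y0=jnp.zeros(3),
#                           method="rk4", t_eval=np.linspace(0.1, 1.0, 10))
# evaluates the solution at the ten requested `t_eval` points.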
class RungaKutta(GeneralIntegrator):
"""A general Runga-Kutta integrator defined using a Butcher tableau."""
def __init__(
self,
a_tableau: Sequence[Sequence[float]],
b_tableau: Sequence[float],
c_tableau: Sequence[float],
order: int):
if len(b_tableau) != len(c_tableau) + 1:
raise ValueError("The length of b_tableau should be exactly one more than"
" the length of c_tableau.")
if len(b_tableau) != len(a_tableau) + 1:
raise ValueError("The length of b_tableau should be exactly one more than"
" the length of a_tableau.")
self.a_tableau = a_tableau
self.b_tableau = b_tableau
self.c_tableau = c_tableau
self.order = order
def __call__(
self,
tangent_func: GeneralTangentFunction,
t: jnp.ndarray,
y: M,
dt: jnp.ndarray
) -> M: # pytype: disable=invalid-annotation
k = [tangent_func(t, y)]
zero = jax.tree_map(jnp.zeros_like, k[0])
# We always broadcast opposite to numpy (e.g. leading dims (batch) count)
if dt.ndim > 0:
dt = dt.reshape(dt.shape + (1,) * (y.ndim - dt.ndim))
if t.ndim > 0:
t = t.reshape(t.shape + (1,) * (y.ndim - t.ndim))
for c_n, a_n_row in zip(self.c_tableau, self.a_tableau):
t_n = t + dt * c_n
products = [a_i * k_i for a_i, k_i in zip(a_n_row, k) if a_i != 0.0]
delta_n = sum(products, zero)
y_n = y + dt * delta_n
k.append(tangent_func(t_n, y_n))
products = [b_i * k_i for b_i, k_i in zip(self.b_tableau, k) if b_i != 0.0]
delta = sum(products, zero)
return y + dt * delta
class GeneralEuler(RungaKutta):
"""The standard Euler method (for general ODE problems)."""
def __init__(self):
super().__init__(
a_tableau=[],
b_tableau=[1.0],
c_tableau=[],
order=1
)
class RungaKutta2(RungaKutta):
"""The second order Runga-Kutta method corresponding to the mid-point rule."""
def __init__(self):
super().__init__(
a_tableau=[[1.0 / 2.0]],
b_tableau=[0.0, 1.0],
c_tableau=[1.0 / 2.0],
order=2
)
class RungaKutta4(RungaKutta):
"""The fourth order Runga-Kutta method from [6]."""
def __init__(self):
super().__init__(
a_tableau=[[1.0 / 2.0],
[0.0, 1.0 / 2.0],
[0.0, 0.0, 1.0]],
b_tableau=[1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0],
c_tableau=[1.0 / 2.0, 1.0 / 2.0, 1.0],
order=4
)
class RungaKutta38(RungaKutta):
"""The fourth order 3/8 rule Runga-Kutta method from [7]."""
def __init__(self):
super().__init__(
a_tableau=[[1.0 / 3.0],
[-1.0 / 3.0, 1.0],
[1.0, -1.0, 1.0]],
b_tableau=[1.0 / 8.0, 3.0 / 8.0, 3.0 / 8.0, 1.0 / 8.0],
c_tableau=[1.0 / 3.0, 2.0 / 3.0, 1.0],
order=4
)
# _____ _ _ _
# / ____| | | | | (_)
# | (___ _ _ _ __ ___ _ __ | | ___ ___| |_ _ ___
# \___ \| | | | '_ ` _ \| '_ \| |/ _ \/ __| __| |/ __|
# ____) | |_| | | | | | | |_) | | __/ (__| |_| | (__
# |_____/ \__, |_| |_| |_| .__/|_|\___|\___|\__|_|\___|
# __/ | | |
# |___/ |_|
# _____ _ _ _
# |_ _| | | | | (_)
# | | _ __ | |_ ___ __ _ _ __ __ _| |_ _ ___ _ __
# | | | '_ \| __/ _ \/ _` | '__/ _` | __| |/ _ \| '_ \
# _| |_| | | | || __/ (_| | | | (_| | |_| | (_) | | | |
# |_____|_| |_|\__\___|\__, |_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
SymplecticIntegrator = Callable[
[
phase_space.SymplecticTangentFunction,
jnp.ndarray, # t
phase_space.PhaseSpace, # (q, p)
jnp.ndarray, # dt
],
phase_space.PhaseSpace # (q_next, p_next)
]
def solve_hamiltonian_ivp_dt(
hamiltonian: phase_space.HamiltonianFunction,
y0: phase_space.PhaseSpace,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
method: Union[str, SymplecticIntegrator],
num_steps: Optional[int] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, phase_space.PhaseSpace]:
"""Solve an initial value problem for a Hamiltonian system.
This function numerically integrates a Hamiltonian system given an
initial value::
dq / dt = dH / dp
dp / dt = - dH / dq
q(t0), p(t0) = y0.q, y0.p
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function H(t, q, p) determines the value of the Hamiltonian.
The goal is to find q(t) and p(t) approximately satisfying the differential
equations, given an initial values q(t0), p(t0) = y0.q, y0.p
  All of the solvers supported here are explicit and non-adaptive. This in
  turn makes them easy to run with a fixed amount of computation and ensures
  the solutions are easily differentiable.
Args:
hamiltonian: callable
The Hamiltonian function. The calling signature is ``h(t, s)``, where
`s` is an instance of `PhaseSpace`.
y0: an instance of `M`
Initial state at t=t0.
t0: float or array.
The initial time point of integration.
dt: array
Array containing all consecutive increments in time, at which the integral
to be evaluated. The size of this array along axis 0 defines the number of
steps that the integrator would do.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* symp_euler - see `SymplecticEuler`
* symp_euler_q - a `SymplecticEuler` with position_first=True
* symp_euler_p - a `SymplecticEuler` with position_first=False
* leap_frog - see `LeapFrog`
* leap_frog_q - a `LeapFrog` with position_first=True
* leap_frog_p - a `LeapFrog` with position_first=False
* stormer_verlet - same as leap_frog
* stormer_verlet_q - same as leap_frog_q
* stormer_verlet_p - same as leap_frog_p
* ruth4 - see `Ruth4`,
* sym4 - see `Symmetric4`
* sym6 - see `Symmetric6`
* so4 - see `SymmetricSo4`
* so4_q - a `SymmetricSo4` with position_first=True
* so4_p - a `SymmetricSo4` with position_first=False
* so6 - see `SymmetricSo6`
* so6_q - a `SymmetricSo6` with position_first=True
* so6_p - a `SymmetricSo6` with position_first=False
* so8 - see `SymmetricSo8`
* so8_q - a `SymmetricSo8` with position_first=True
* so8_p - a `SymmetricSo8` with position_first=False
num_steps: Optional int.
If provided the `dt` will be treated as the same per step time interval,
applied for this many steps. In other words setting this argument is
equivalent to replicating `dt` num_steps times and stacking over axis=0.
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
      the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt`.
    use_scan: bool
      Whether the loop should use `lax.scan` or a python loop.
ode_int_kwargs: dict
Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
if not isinstance(y0, phase_space.PhaseSpace):
raise ValueError("The initial state must be an instance of `PhaseSpace`.")
dy_dt = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
return solve_ivp_dt(
fun=dy_dt,
y0=y0,
t0=t0,
dt=dt,
method=method,
num_steps=num_steps,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)
def solve_hamiltonian_ivp_t_eval(
hamiltonian: phase_space.HamiltonianFunction,
t_span: TimeInterval,
y0: phase_space.PhaseSpace,
method: Union[str, SymplecticIntegrator],
t_eval: Optional[jnp.ndarray] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, phase_space.PhaseSpace]:
"""Solve an initial value problem for a Hamiltonian system.
This function numerically integrates a Hamiltonian system given an
initial value::
dq / dt = dH / dp
dp / dt = - dH / dq
q(t0), p(t0) = y0.q, y0.p
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function H(t, q, p) determines the value of the Hamiltonian.
The goal is to find q(t) and p(t) approximately satisfying the differential
equations, given an initial values q(t0), p(t0) = y0.q, y0.p
  All of the solvers supported here are explicit and non-adaptive. This in
  turn makes them easy to run with a fixed amount of computation and ensures
  the solutions are easily differentiable.
Args:
hamiltonian: callable
The Hamiltonian function. The calling signature is ``h(t, s)``, where
`s` is an instance of `PhaseSpace`.
t_span: 2-tuple of floats
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf.
y0: an instance of `M`
Initial state at `t_span[0]`.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* symp_euler - see `SymplecticEuler`
* symp_euler_q - a `SymplecticEuler` with position_first=True
* symp_euler_p - a `SymplecticEuler` with position_first=False
* leap_frog - see `LeapFrog`
* leap_frog_q - a `LeapFrog` with position_first=True
* leap_frog_p - a `LeapFrog` with position_first=False
* stormer_verlet - same as leap_frog
* stormer_verlet_q - same as leap_frog_q
* stormer_verlet_p - same as leap_frog_p
* ruth4 - see `Ruth4`,
* sym4 - see `Symmetric4`
* sym6 - see `Symmetric6`
* so4 - see `SymmetricSo4`
* so4_q - a `SymmetricSo4` with position_first=True
* so4_p - a `SymmetricSo4` with position_first=False
* so6 - see `SymmetricSo6`
* so6_q - a `SymmetricSo6` with position_first=True
* so6_p - a `SymmetricSo6` with position_first=False
* so8 - see `SymmetricSo8`
* so8_q - a `SymmetricSo8` with position_first=True
* so8_p - a `SymmetricSo8` with position_first=False
t_eval: array or None.
Times at which to store the computed solution. Must be sorted and lie
within `t_span`. If None then t_eval = [t_span[-1]]
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
      the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt`.
    use_scan: bool
      Whether the loop should use `lax.scan` or a python loop.
    ode_int_kwargs: dict
      Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
if not isinstance(y0, phase_space.PhaseSpace):
raise ValueError("The initial state must be an instance of `PhaseSpace`.")
dy_dt = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
if method == "adaptive":
dy_dt = phase_space.transform_symplectic_tangent_function_using_array(dy_dt)
return solve_ivp_t_eval(
fun=dy_dt,
t_span=t_span,
y0=y0,
method=method,
t_eval=t_eval,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)
class CompositionSymplectic(SymplecticIntegrator):
"""A generalized symplectic integrator based on compositions.
Simulates Hamiltonian dynamics using a composition of symplectic steps:
q_{0} = q_init, p_{0} = p_init
for i in [1, n]:
p_{i+1} = p_{i} - c_{i} * dH/dq(q_{i}) * dt
q_{i+1} = q_{i} + d_{i} * dH/dp(p_{i+1}) * dt
q_next = q_{n}, p_next = p_{n}
This integrator always starts with updating the momentum.
The order argument is used mainly for testing to estimate the error when
integrating various systems.
"""
def __init__(
self,
momentum_coefficients: Sequence[float],
position_coefficients: Sequence[float],
order: int):
if len(position_coefficients) != len(momentum_coefficients):
raise ValueError("The number of momentum_coefficients and "
"position_coefficients must be the same.")
if not np.allclose(sum(position_coefficients), 1.0):
raise ValueError("The sum of the position_coefficients "
"must be equal to 1.")
if not np.allclose(sum(momentum_coefficients), 1.0):
raise ValueError("The sum of the momentum_coefficients "
"must be equal to 1.")
self.momentum_coefficients = momentum_coefficients
self.position_coefficients = position_coefficients
self.order = order
def __call__(
self,
tangent_func: phase_space.SymplecticTangentFunction,
t: jnp.ndarray,
y: phase_space.PhaseSpace,
dt: jnp.ndarray
) -> phase_space.PhaseSpace:
q, p = y.q, y.p
# This is intentional to prevent a bug where one uses y later
del y
# We always broadcast opposite to numpy (e.g. leading dims (batch) count)
if dt.ndim > 0:
dt = dt.reshape(dt.shape + (1,) * (q.ndim - dt.ndim))
if t.ndim > 0:
t = t.reshape(t.shape + (1,) * (q.ndim - t.ndim))
t_q = t
t_p = t
for c, d in zip(self.momentum_coefficients, self.position_coefficients):
# Update momentum
if c != 0.0:
dp_dt = tangent_func(t_p, phase_space.PhaseSpace(q, p)).p
p = p + c * dt * dp_dt
t_p = t_p + c * dt
# Update position
if d != 0.0:
dq_dt = tangent_func(t_q, phase_space.PhaseSpace(q, p)).q
q = q + d * dt * dq_dt
t_q = t_q + d * dt
return phase_space.PhaseSpace(position=q, momentum=p)
class SymplecticEuler(CompositionSymplectic):
"""The symplectic Euler method (for Hamiltonian systems).
If position_first = True:
q_{t+1} = q_{t} + dH/dp(p_{t}) * dt
p_{t+1} = p_{t} - dH/dq(q_{t+1}) * dt
else:
p_{t+1} = p_{t} - dH/dq(q_{t}) * dt
q_{t+1} = q_{t} + dH/dp(p_{t+1}) * dt
"""
def __init__(self, position_first=True):
if position_first:
super().__init__(
momentum_coefficients=[0.0, 1.0],
position_coefficients=[1.0, 0.0],
order=1
)
else:
super().__init__(
momentum_coefficients=[1.0],
position_coefficients=[1.0],
order=1
)
class SymmetricCompositionSymplectic(CompositionSymplectic):
"""A generalized composition integrator that is symmetric.
The integrators produced are always of the form:
[update_q, update_p, ..., update_p, update_q]
or
[update_p, update_q, ..., update_q, update_p]
based on the position_first argument. The method will expect which ever is
updated first to have one more coefficient.
"""
def __init__(
self,
momentum_coefficients: Sequence[float],
position_coefficients: Sequence[float],
position_first: bool,
order: int):
position_coefficients = list(position_coefficients)
momentum_coefficients = list(momentum_coefficients)
if position_first:
if len(position_coefficients) != len(momentum_coefficients) + 1:
raise ValueError("The number of position_coefficients must be one more "
"than momentum_coefficients when position_first=True.")
momentum_coefficients = [0.0] + momentum_coefficients
else:
if len(position_coefficients) + 1 != len(momentum_coefficients):
raise ValueError("The number of momentum_coefficients must be one more "
"than position_coefficients when position_first=True.")
position_coefficients = position_coefficients + [0.0]
super().__init__(
position_coefficients=position_coefficients,
momentum_coefficients=momentum_coefficients,
order=order
)
def symmetrize_coefficients(
coefficients: Sequence[float],
odd_number: bool
) -> Sequence[float]:
"""Symmetrizes the coefficients for an integrator."""
coefficients = list(coefficients)
if odd_number:
final = 1.0 - 2.0 * sum(coefficients)
return coefficients + [final] + coefficients[::-1]
else:
final = 0.5 - sum(coefficients)
return coefficients + [final, final] + coefficients[::-1]
class LeapFrog(SymmetricCompositionSymplectic):
"""The standard Leap-Frog method (also known as Stormer-Verlet).
If position_first = True:
q_half = q_{t} + dH/dp(p_{t}) * dt / 2
p_{t+1} = p_{t} - dH/dq(q_half) * dt
q_{t+1} = q_half + dH/dp(p_{t+1}) * dt / 2
else:
p_half = p_{t} - dH/dq(q_{t}) * dt / 2
q_{t+1} = q_{t} + dH/dp(p_half) * dt
p_{t+1} = p_half - dH/dq(q_{t+1}) * dt / 2
"""
def __init__(self, position_first=False):
if position_first:
super().__init__(
position_coefficients=[0.5, 0.5],
momentum_coefficients=[1.0],
position_first=True,
order=2
)
else:
super().__init__(
position_coefficients=[1.0],
momentum_coefficients=[0.5, 0.5],
position_first=False,
order=2
)
class Ruth4(SymmetricCompositionSymplectic):
"""The Fourth order method from [2]."""
def __init__(self):
cbrt_2 = float(np.cbrt(2.0))
c = [1.0 / (2.0 - cbrt_2)]
# 3: [c1, 1.0 - 2*c1, c1]
c = symmetrize_coefficients(c, odd_number=True)
d = [1.0 / (4.0 - 2.0 * cbrt_2)]
# 4: [d1, 0.5 - d1, 0.5 - d1, d1]
d = symmetrize_coefficients(d, odd_number=False)
super().__init__(
position_coefficients=d,
momentum_coefficients=c,
position_first=True,
order=4
)
class Symmetric4(SymmetricCompositionSymplectic):
"""The fourth order method from Table 6.1 in [1] (originally from [3])."""
def __init__(self):
c = [0.0792036964311957, 0.353172906049774, -0.0420650803577195]
# 7 : [c1, c2, c3, 1.0 - c1 - c2 - c3, c3, c2, c1]
c = symmetrize_coefficients(c, odd_number=True)
d = [0.209515106613362, -0.143851773179818]
# 6: [d1, d2, 0.5 - d1, 0.5 - d1, d2, d1]
d = symmetrize_coefficients(d, odd_number=False)
super().__init__(
position_coefficients=d,
momentum_coefficients=c,
position_first=False,
order=4
)
class Symmetric6(SymmetricCompositionSymplectic):
"""The sixth order method from Table 6.1 in [1] (originally from [3])."""
def __init__(self):
c = [0.0502627644003922, 0.413514300428344, 0.0450798897943977,
-0.188054853819569, 0.541960678450780]
# 11 : [c1, c2, c3, c4, c5, 1.0 - sum(ci), c5, c4, c3, c2, c1]
c = symmetrize_coefficients(c, odd_number=True)
d = [0.148816447901042, -0.132385865767784, 0.067307604692185,
0.432666402578175]
# 10: [d1, d2, d3, d4, 0.5 - sum(di), 0.5 - sum(di), d4, d3, d2, d1]
d = symmetrize_coefficients(d, odd_number=False)
super().__init__(
position_coefficients=d,
momentum_coefficients=c,
position_first=False,
        order=6
)
def coefficients_based_on_composing_second_order(
weights: Sequence[float]
) -> Tuple[Sequence[float], Sequence[float]]:
"""Constructs the coefficients for methods based on second-order schemes."""
coefficients_0 = []
coefficients_1 = []
coefficients_0.append(weights[0] / 2.0)
for i in range(len(weights) - 1):
coefficients_1.append(weights[i])
coefficients_0.append((weights[i] + weights[i + 1]) / 2.0)
coefficients_1.append(weights[-1])
coefficients_0.append(weights[-1] / 2.0)
return coefficients_0, coefficients_1
class SymmetricSo4(SymmetricCompositionSymplectic):
"""The fourth order method from Table 6.2 in [1] (originally from [4])."""
def __init__(self, position_first: bool = False):
w = [0.28, 0.62546642846767004501]
# 5
w = symmetrize_coefficients(w, odd_number=True)
c0, c1 = coefficients_based_on_composing_second_order(w)
c_q, c_p = (c0, c1) if position_first else (c1, c0)
super().__init__(
position_coefficients=c_q,
momentum_coefficients=c_p,
position_first=position_first,
order=4
)
class SymmetricSo6(SymmetricCompositionSymplectic):
"""The sixth order method from Table 6.2 in [1] (originally from [5])."""
def __init__(self, position_first: bool = False):
w = [0.78451361047755726382, 0.23557321335935813368,
-1.17767998417887100695]
# 7
w = symmetrize_coefficients(w, odd_number=True)
c0, c1 = coefficients_based_on_composing_second_order(w)
c_q, c_p = (c0, c1) if position_first else (c1, c0)
super().__init__(
position_coefficients=c_q,
momentum_coefficients=c_p,
position_first=position_first,
order=6
)
class SymmetricSo8(SymmetricCompositionSymplectic):
"""The eighth order method from Table 6.2 in [1] (originally from [4])."""
def __init__(self, position_first: bool = False):
w = [0.74167036435061295345, -0.40910082580003159400,
0.19075471029623837995, -0.57386247111608226666,
0.29906418130365592384, 0.33462491824529818378,
0.31529309239676659663]
# 15
w = symmetrize_coefficients(w, odd_number=True)
c0, c1 = coefficients_based_on_composing_second_order(w)
c_q, c_p = (c0, c1) if position_first else (c1, c0)
super().__init__(
position_coefficients=c_q,
momentum_coefficients=c_p,
position_first=position_first,
order=8
)
general_integrators = dict(
general_euler=GeneralEuler(),
rk2=RungaKutta2(),
rk4=RungaKutta4(),
rk38=RungaKutta38()
)
symplectic_integrators = dict(
symp_euler=SymplecticEuler(position_first=True),
symp_euler_q=SymplecticEuler(position_first=True),
symp_euler_p=SymplecticEuler(position_first=False),
leap_frog=LeapFrog(position_first=False),
leap_frog_q=LeapFrog(position_first=True),
leap_frog_p=LeapFrog(position_first=False),
stormer_verlet=LeapFrog(position_first=False),
stormer_verlet_q=LeapFrog(position_first=True),
stormer_verlet_p=LeapFrog(position_first=False),
ruth4=Ruth4(),
sym4=Symmetric4(),
sym6=Symmetric6(),
so4=SymmetricSo4(position_first=False),
so4_q=SymmetricSo4(position_first=True),
so4_p=SymmetricSo4(position_first=False),
so6=SymmetricSo6(position_first=False),
so6_q=SymmetricSo6(position_first=True),
so6_p=SymmetricSo6(position_first=False),
so8=SymmetricSo8(position_first=False),
so8_q=SymmetricSo8(position_first=True),
so8_p=SymmetricSo8(position_first=False),
)
def get_integrator(
name_or_callable: Union[str, GeneralIntegrator]
) -> GeneralIntegrator:
"""Returns any integrator with the provided name or the argument."""
if isinstance(name_or_callable, str):
if name_or_callable in general_integrators:
return general_integrators[name_or_callable]
elif name_or_callable in symplectic_integrators:
return symplectic_integrators[name_or_callable]
else:
raise ValueError(f"Unrecognized integrator with name {name_or_callable}.")
if not callable(name_or_callable):
raise ValueError(f"Expected a callable, but got {type(name_or_callable)}.")
return name_or_callable
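# Minimal usage sketch (assumes this module is imported where the classes above are defined):
#   integrator = get_integrator("so6")          # look up a registered symplectic method
#   integrator = get_integrator(RungaKutta4())  # or pass an integrator instance straight through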
|
from bll import GameControl
class GameView:
def __init__(self):
self.controller = GameControl()
def show_menu(self):
print("按下w上滑")
print("按下s下滑")
print("按下a左滑")
print("按下d右滑")
def select_menu(self, map):
result = input("请输入你的选项")
if result == "w":
self.controller.upslide(map)
if result == "a":
self.controller.leftslide(map)
if result == "s":
self.controller.downslide(map)
if result == "d":
self.controller.rightslide(map)
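# Usage sketch (GameControl and the map object come from the bll module; `game_map` is illustrative):
#   view = GameView()
#   view.show_menu()
#   view.select_menu(game_map)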
|
#from .symbol_vgg import *
from .symbol_ssh import *
|
inp = input('European floor? ')
usf = int(inp) + 1
print('US floor number is:', usf)
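# Example run: entering 0 (the European ground floor) prints "US floor number is: 1",
# and entering 2 prints "US floor number is: 3".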
|
from django.contrib import admin
from .models import *
class EventAdmin(admin.ModelAdmin):
model = Event
list_display = ['event_cod', 'event_nom', 'event_date_init', 'even_date_end', 'event_site', 'event_url']
class MagazineAdmin(admin.ModelAdmin):
model = Magazine
list_display = ['magazine_code', 'magazine_name', 'magazine_date', 'magazine_arb', 'magazine_cenditel', 'magazine_url']
class CourseAdmin(admin.ModelAdmin):
model = Course
list_display = ['course_code', 'course_name', 'course_date_init', 'course_date_end']
class ProjectAdmin(admin.ModelAdmin):
model = Project
list_display = ['project_code', 'project_name', 'project_date_init', 'project_date_end', 'project_poa']
class BookAdmin(admin.ModelAdmin):
    model = Book
list_display = ['book_code', 'book_name', 'book_date', 'book_cenditel', 'book_url']
class ParticipantAdmin(admin.ModelAdmin):
    model = Participant
list_display = ['participant_code', 'participant_document', 'participant_name', 'participant_lastname', 'participant_email']
class ResearcherAdmin(admin.ModelAdmin):
    model = Researcher
list_display = ['researcher_code', 'researcher_document', 'researcher_name', 'researcher_lastname', 'reseracher_active']
admin.site.register(Event, EventAdmin)
admin.site.register(Magazine, MagazineAdmin)
admin.site.register(Course, CourseAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(Participant, ParticipantAdmin)
admin.site.register(Researcher, ResearcherAdmin)
|
import time
import random
import logging
logger = logging.getLogger(__name__)
from bs4 import BeautifulSoup
import zlib
from fbscraper.settings import (
DEFAULT_MAX_TRY_TIMES,
DEFAULT_SHOULD_USE_ORIGINAL_URL,
STATUS_SUCCESS,
)
import fbscraper.facebook as fb
class DiscoverCrawler:
def __init__(
self,
site_url,
site_id,
browser,
existing_article_urls,
db,
limit_sec,
max_try_times=DEFAULT_MAX_TRY_TIMES,
should_use_original_url=DEFAULT_SHOULD_USE_ORIGINAL_URL,
):
self.site_url = site_url
self.site_id = site_id
self.browser = browser
self.existing_article_urls = existing_article_urls
self.max_try_times = max_try_times if max_try_times else DEFAULT_MAX_TRY_TIMES
self.db = db
self.should_use_original_url = should_use_original_url
self.limit_sec = limit_sec
self.start_at = None
def crawl_and_save(self):
self.start_at = int(time.time())
self.enter_site()
self.expand_page_and_insert_article()
def enter_site(self):
post_root_url = self.site_url
if not self.should_use_original_url:
if post_root_url.endswith("/"):
post_root_url += "posts"
else:
post_root_url += "/posts"
self.browser.get(post_root_url)
fb.raise_if_security_check(self.browser)
time.sleep(random.uniform(2, 3))
def expand_page_and_insert_article(self):
viewed_count = 0
new_count = 0
empty_count = 0
while (
int(time.time()) - self.start_at
) < self.limit_sec and empty_count < self.max_try_times:
self.log_crawler(
viewed_count, new_count, len(self.existing_article_urls), empty_count
)
self.browser.execute_script(
"window.scrollTo(0, document.body.scrollHeight);"
)
time.sleep(random.uniform(2, 3))
post_urls = self.get_post_urls_from_html(html=self.browser.page_source)
viewed_count = len(post_urls)
new_post_urls = self.remove_old_post_urls(post_urls)
new_count = len(new_post_urls)
if new_count == 0:
if viewed_count < len(self.existing_article_urls):
continue
else:
empty_count += 1
else:
for p_url in new_post_urls:
if p_url:
article_id = self.insert_article(p_url)
self.log_pipeline(article_id)
# reset empty count check when new_count > 0
empty_count = 0
self.existing_article_urls += new_post_urls
crawled_time = int(time.time()) - self.start_at
time_status = f"[discover crawler - expand_page_and_insert_article] LimitSec: {self.limit_sec}, Crawled: {crawled_time}. is_over_limit_sec={self.limit_sec < crawled_time}"
logger.debug(time_status)
def remove_old_post_urls(self, post_urls):
return list(set(post_urls) - set(self.existing_article_urls))
def get_post_urls_from_html(self, html):
soup = BeautifulSoup(html, "html.parser")
post_elements = soup.find_all("div", {"class": "userContentWrapper"})
return [self.extract_post_url_from_element(post) for post in post_elements]
@staticmethod
def extract_post_url_from_element(post):
result = None
anchors = post.select('[data-testid="story-subtitle"] a')
for index, anchor in enumerate(anchors):
hasTimestamp = len(anchor.select("abbr > span.timestampContent")) > 0
if hasTimestamp:
url = anchor.get("href")
if url:
url_info = fb.get_facebook_url_info(url)
if url_info["permalink"]:
result = url_info["permalink"]
break
elif url_info["original_url"]:
result = url_info["original_url"]
return result
def insert_article(self, article_url):
article = dict()
article["first_snapshot_at"] = 0
article["last_snapshot_at"] = 0
article["next_snapshot_at"] = -1
article["snapshot_count"] = 0
article["url_hash"] = zlib.crc32(article_url.encode())
article["url"] = article_url
article["site_id"] = self.site_id
article["article_type"] = "FBPost"
article["created_at"] = int(time.time())
article["redirect_to"] = None
article_id = self.db.insert_article(article)
return article_id
@staticmethod
def log_crawler(viewed_count, new_count, existing_count, empty_count):
timestamp = f"crawler: viewed {viewed_count} posts, add {new_count} new posts, existing {existing_count} posts in database, empty response count #{empty_count} \n"
logger.debug(timestamp)
@staticmethod
def log_pipeline(article_id):
message = f"pipeline: [{STATUS_SUCCESS}] insert Article #{article_id} \n"
logger.info(message)
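# Usage sketch (the Selenium browser, db wrapper and settings come from the surrounding fbscraper app;
# the URL below is illustrative):
#   crawler = DiscoverCrawler(site_url="https://www.facebook.com/somepage", site_id=1,
#                             browser=driver, existing_article_urls=[], db=db, limit_sec=600)
#   crawler.crawl_and_save()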
|
# import the ZeroMQ and time libraries
import zmq
import time
# create the ZeroMQ context
context = zmq.Context()
# create the publisher socket and bind it to the server IP
sock = context.socket(zmq.PUB)
sock.bind("tcp://192.168.1.18:5680")
id = 0
while True:
    # sleep for 1 second
time.sleep(1)
    # update the id and set now to the current time
id, now = id+1, time.ctime()
    # send the id and current time when option_len = 1
message = "1-Update! >> #{id} >> {time}".format(id=id, time=now)
sock.send(message.encode('ascii'))
    # send the id and current time when option_len = 2
message = "2-Update! >> #{id} >> {time}".format(id=id, time=now)
sock.send(message.encode('ascii'))
id += 1
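# A matching subscriber sketch (the address mirrors the bind above; the "1-"/"2-" message
# prefixes exist so subscribers can filter by topic prefix):
#   sub = context.socket(zmq.SUB)
#   sub.connect("tcp://192.168.1.18:5680")
#   sub.setsockopt_string(zmq.SUBSCRIBE, "1")   # receive only the "1-Update!" stream
#   print(sub.recv().decode('ascii'))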
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import os
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from io_storages.serializers import ImportStorageSerializer, ExportStorageSerializer
from io_storages.redis.models import RedisImportStorage, RedisExportStorage
class RedisImportStorageSerializer(ImportStorageSerializer):
type = serializers.ReadOnlyField(default=os.path.basename(os.path.dirname(__file__)))
class Meta:
model = RedisImportStorage
fields = '__all__'
def validate(self, data):
data = super(RedisImportStorageSerializer, self).validate(data)
storage = RedisImportStorage(**data)
try:
storage.validate_connection()
except Exception as exc:
raise ValidationError(exc)
return data
class RedisExportStorageSerializer(ExportStorageSerializer):
type = serializers.ReadOnlyField(default=os.path.basename(os.path.dirname(__file__)))
class Meta:
model = RedisExportStorage
fields = '__all__'
|
#
# @lc app=leetcode id=237 lang=python3
#
# [237] Delete Node in a Linked List
#
# https://leetcode.com/problems/delete-node-in-a-linked-list/description/
#
# algorithms
# Easy (68.67%)
# Likes: 2914
# Dislikes: 9579
# Total Accepted: 641.1K
# Total Submissions: 932.4K
# Testcase Example: '[4,5,1,9]\n5'
#
# Write a function to delete a node in a singly-linked list. You will not be
# given access to the head of the list, instead you will be given access to the
# node to be deleted directly.
#
# It is guaranteed that the node to be deleted is not a tail node in the
# list.
#
#
# Example 1:
#
#
# Input: head = [4,5,1,9], node = 5
# Output: [4,1,9]
# Explanation: You are given the second node with value 5, the linked list
# should become 4 -> 1 -> 9 after calling your function.
#
#
# Example 2:
#
#
# Input: head = [4,5,1,9], node = 1
# Output: [4,5,9]
# Explanation: You are given the third node with value 1, the linked list
# should become 4 -> 5 -> 9 after calling your function.
#
#
# Example 3:
#
#
# Input: head = [1,2,3,4], node = 3
# Output: [1,2,4]
#
#
# Example 4:
#
#
# Input: head = [0,1], node = 0
# Output: [1]
#
#
# Example 5:
#
#
# Input: head = [-3,5,-99], node = -3
# Output: [5,-99]
#
#
#
# Constraints:
#
#
# The number of the nodes in the given list is in the range [2, 1000].
# -1000 <= Node.val <= 1000
# The value of each node in the list is unique.
# The node to be deleted is in the list and is not a tail node
#
#
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
return
# @lc code=end
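# Local sanity check (not part of the LeetCode harness): a throwaway ListNode is defined
# here so the in-place deletion above can be exercised directly.
if __name__ == '__main__':
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None
    head = ListNode(4)
    head.next = ListNode(5)
    head.next.next = ListNode(1)
    head.next.next.next = ListNode(9)
    Solution().deleteNode(head.next)  # delete the node holding 5
    values = []
    node = head
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected: [4, 1, 9]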
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import bpy
class MHC_OT_MarkAsClothesOperator(bpy.types.Operator):
"""Mark this object to be used as clothes"""
bl_idname = "makeclothes.mark_as_clothes"
bl_label = "Mark selected object as clothes"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(self, context):
return context.active_object is not None and context.active_object.type == 'MESH'
def execute(self, context):
context.active_object.MhObjectType = "Clothes"
self.report({'INFO'}, "Object marked as clothes")
return {'FINISHED'}
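# Registration sketch (normally done in the add-on's register()/unregister() hooks):
#   bpy.utils.register_class(MHC_OT_MarkAsClothesOperator)
#   bpy.ops.makeclothes.mark_as_clothes()   # invoke on the active mesh object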
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from basic_class.basic_class import basic_class
class realtime_inspector(basic_class):
def __init__(self):
super().__init__()
def log_this(self, *message, new_line=True):
for sub_mesg in message:
print(sub_mesg, end='')
if new_line:
print("")
def add_break(self, n_row, n_col=72, char="%"):
for idx_row in range(n_row):
for idx_col in range(n_col):
self.log_this(char, new_line=False)
self.log_this("")
|
from ._BoxBoundsCoercionISPSpecifier import BoxBoundsCoercionISPSpecifier
from ._MaskBoundsCoercionISPSpecifier import MaskBoundsCoercionISPSpecifier
|
from django.apps import AppConfig
class ScenicConfig(AppConfig):
name = 'scenic'
|
import threading
import time
import socket, subprocess,sys
from datetime import datetime
import thread
import shelve
'''section 1 '''
subprocess.call('clear',shell=True)
shelf = shelve.open("mohit.raj")
data=(shelf['desc'])
#shelf.sync()
'''section 2 '''
class myThread (threading.Thread):
def __init__(self, threadName,rmip,r1,r2,c):
threading.Thread.__init__(self)
self.threadName = threadName
self.rmip = rmip
self.r1 = r1
self.r2 = r2
self.c =c
def run(self):
scantcp(self.threadName,self.rmip,self.r1,self.r2,self.c)
'''section 3 '''
def scantcp(threadName,rmip,r1,r2,c):
try:
for port in range(r1,r2):
sock= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#sock= socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
            sock.settimeout(c)  # apply the timeout to this socket directly
result = sock.connect_ex((rmip,port))
if result==0:
print "Port Open:---->\t", port,"--", data.get(port, "Not in Database")
sock.close()
except KeyboardInterrupt:
print "You stop this "
sys.exit()
except socket.gaierror:
print "Hostname could not be resolved"
sys.exit()
except socket.error:
print "could not connect to server"
sys.exit()
shelf.close()
'''section 4 '''
print "*"*60
print " \tWelcome this is the Port scanner of Mohit\n "
d=raw_input("\t Press D for Domain Name or Press I for IP Address\t")
if (d=='D' or d=='d'):
rmserver = raw_input("\t Enter the Domain Name to scan:\t")
rmip = socket.gethostbyname(rmserver)
elif(d=='I' or d=='i'):
rmip = raw_input("\t Enter the IP Address to scan: ")
else:
print "Wrong input"
#rmip = socket.gethostbyname(rmserver)
r11 = int(raw_input("\t Enter the start port number\t"))
r21 = int (raw_input("\t Enter the last port number\t"))
conect=raw_input("For low connectivity press L and High connectivity Press H\t")
if (conect=='L' or conect=='l'):
c =1.5
elif(conect =='H' or conect=='h'):
c=0.5
else:
print "\t wrong Input"
print "\n Mohit's Scanner is working on ",rmip
print "*"*60
t1= datetime.now()
tp=r21-r11
tn =30
# tn is the number of ports handled by one thread
tnum=tp/tn # tnum is the number of threads
if (tp%tn != 0):
tnum= tnum+1
if (tnum > 300):
tn = tp/300
tn= tn+1
tnum=tp/tn
if (tp%tn != 0):
tnum= tnum+1
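# Worked example (illustrative numbers): scanning 1000 ports with tn = 30 gives
# tnum = 1000/30 = 33, plus 1 for the remainder, i.e. 34 threads; the tnum > 300
# branch only kicks in for ranges of roughly 9000 ports or more, where tn is enlarged
# so that at most about 300 threads are spawned.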
'''section 5'''
threads= []
try:
for i in range(tnum):
#print "i is ",i
k=i
r2=r11+tn
# thread=str(i)
thread = myThread("T1",rmip,r11,r2,c)
thread.start()
threads.append(thread)
r11=r2
except:
print "Error: unable to start thread"
print "\t Number of Threads active:", threading.activeCount()
for t in threads:
t.join()
print "Exiting Main Thread"
t2= datetime.now()
total =t2-t1
print "scanning complete in " , total
print "\n*****Thanks for using Mohit's Port Scanner****"
print "You can update database file"
print "use command python > python updata.py"
print "Give feedback to mohitraj.cs@gmail.com"
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 02:37:25 2013
@author: roel
"""
import unittest
import pandas as pd
import opengrid as og
from opengrid.library import weather
from opengrid.library.exceptions import *
dfw = og.datasets.get('weather_2016_hour')
class WeatherTest(unittest.TestCase):
def test_compute_degree_days(self):
res = weather.compute_degree_days(ts=dfw['temperature'],
heating_base_temperatures=[13, 16.5],
cooling_base_temperatures=[16.5, 24])
self.assertListEqual(sorted(['temp_equivalent', 'HDD_16.5', 'HDD_13', 'CDD_16.5', 'CDD_24']),
sorted(res.columns.tolist()))
def test_compute_degree_days_raises(self):
df_twodaily = dfw.resample(rule='2D').mean()
self.assertRaises(UnexpectedSamplingRate, weather.compute_degree_days, df_twodaily, [16], [16])
if __name__ == '__main__':
unittest.main()
|
import json
import requests
import time
import auth
import util
from instance import Instance
from exceptions import AlaudaServerError
MAX_RETRY_NUM = 10
INSTANCE_SIZES = ['XXS', 'XS', 'S', 'M', 'L', 'XL']
class Service(object):
def __init__(self, name, image_name, image_tag, target_num_instances=1, instance_size='XS', run_command='',
instance_ports=[], instance_envvars={}, volumes=[], links=[], details='', namespace=None,
scaling_mode='MANUAL', autoscaling_config={}, custom_domain_name='', region_name=None):
self.name = name
self.image_name = image_name
self.image_tag = image_tag
self.target_num_instances = target_num_instances
self.instance_size = instance_size
if instance_size not in INSTANCE_SIZES:
raise AlaudaServerError(400, 'instance_size must be one of {}'.format(INSTANCE_SIZES))
self.run_command = run_command
self.instance_envvars = instance_envvars
self.instance_ports = instance_ports
self.volumes = volumes
self.links = links
self.details = details
self.custom_domain_name = custom_domain_name
self.api_endpoint, self.token, self.username = auth.load_token()
self.headers = auth.build_headers(self.token)
self.namespace = namespace or self.username
self.scaling_mode = scaling_mode
self.autoscaling_config = autoscaling_config
self.region_name = region_name
def _update_envvars_with_links(self, instance_envvars, links, namespace=None):
linked_to = {}
if links is not None:
for link in links:
service_name = link[0]
alias = link[1]
linked_to[service_name] = alias
retry_num = 0
while retry_num < MAX_RETRY_NUM:
linked_service = Service.fetch(service_name, namespace)
linked_service_data = json.loads(linked_service.details)
linked_service_ports = linked_service_data['instance_ports']
if len(linked_service_ports) == 0:
break
# linked_service_envvars = json.loads(linked_service_data['instance_envvars'])
# linked_service_addr = linked_service_envvars['__DEFAULT_DOMAIN_NAME__']
key = '{0}_PORT'.format(alias).upper()
for port in linked_service_ports:
service_port = port.get('service_port')
if service_port is None:
retry_num = retry_num + 1
time.sleep(1)
break
retry_num = MAX_RETRY_NUM + 1
url = '{0}://{1}:{2}'.format(port['protocol'], port['default_domain'], service_port)
if key not in instance_envvars.keys():
instance_envvars[key] = url
pattern = '{0}_PORT_{1}_{2}'.format(alias, port['container_port'], port['protocol']).upper()
instance_envvars[pattern] = url
instance_envvars[pattern + '_ADDR'] = port['default_domain']
instance_envvars[pattern + '_PORT'] = str(service_port)
instance_envvars[pattern + '_PROTO'] = port['protocol']
if retry_num == MAX_RETRY_NUM:
raise AlaudaServerError(500, 'Timed out waiting for {} to acquire service port'.format(service_name))
return linked_to
def _create_remote(self, target_state):
linked_to = self._update_envvars_with_links(self.instance_envvars, self.links, self.namespace)
util.expand_environment(self.instance_envvars)
url = self.api_endpoint + 'services/{}/'.format(self.namespace)
payload = {
"app_name": self.name,
"target_num_instances": self.target_num_instances,
"image_name": self.image_name,
"image_tag": self.image_tag,
"instance_size": self.instance_size,
"scaling_mode": "MANUAL",
"target_state": target_state,
"run_command": self.run_command,
"instance_envvars": self.instance_envvars,
"instance_ports": self.instance_ports,
'linked_to_apps': linked_to,
"volumes": self.volumes,
'scaling_mode': self.scaling_mode,
'autoscaling_config': self.autoscaling_config,
'custom_domain_name': self.custom_domain_name
}
if self.region_name:
payload['region_name'] = self.region_name
r = requests.post(url, headers=self.headers, data=json.dumps(payload))
util.check_response(r)
@classmethod
def fetch(cls, name, namespace=None):
api_endpoint, token, username = auth.load_token()
url = api_endpoint + 'services/{}/'.format(namespace or username) + name
headers = auth.build_headers(token)
r = requests.get(url, headers=headers)
util.check_response(r)
data = json.loads(r.text)
service = cls(name=data['service_name'],
image_name=data['image_name'],
image_tag=data['image_tag'],
target_num_instances=data['target_num_instances'],
instance_size=data['instance_size'],
details=r.text,
namespace=data['namespace'])
return service
@classmethod
def list(cls, namespace, page):
api_endpoint, token, username = auth.load_token()
url = api_endpoint + 'services/{}/?page={}'.format(namespace or username, page)
headers = auth.build_headers(token)
r = requests.get(url, headers=headers)
util.check_response(r)
service_list = []
services = json.loads(r.text)
services = services.get('results', [])
for data in services:
try:
service = Service.fetch(data['service_name'], namespace)
service_list.append(service)
except AlaudaServerError:
continue
return service_list
@classmethod
def remove(cls, name, namespace=None):
print '[alauda] Removing service "{}"'.format(name)
api_endpoint, token, username = auth.load_token()
url = api_endpoint + 'services/{}/'.format(namespace or username) + name
headers = auth.build_headers(token)
try:
r = requests.delete(url, headers=headers)
util.check_response(r)
except AlaudaServerError as ex:
if ex.status_code == 404:
print '[alauda] Service "{}" does not exist'.format(name)
else:
raise ex
def create(self):
print '[alauda] Creating service "{}"'.format(self.name)
self._create_remote('STOPPED')
def run(self):
print '[alauda] Creating and starting service "{}"'.format(self.name)
self._create_remote('STARTED')
def inspect(self):
if not self.details:
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name
r = requests.get(url, headers=self.headers)
util.check_response(r)
self.details = r.text
return self.details
def start(self):
print '[alauda] Starting service "{}"'.format(self.name)
self.target_state = 'STARTED'
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name + '/start/'
r = requests.put(url, headers=self.headers)
util.check_response(r)
def stop(self):
print '[alauda] Stopping service "{}"'.format(self.name)
self.target_state = 'STOPPED'
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name + '/stop/'
r = requests.put(url, headers=self.headers)
util.check_response(r)
def scale(self, target_num_instances):
self.target_num_instances = target_num_instances
print '[alauda] Scaling service: {0} -> {1}'.format(self.name, self.target_num_instances)
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name
payload = {
"app_name": self.name,
"target_num_instances": self.target_num_instances,
}
r = requests.put(url, headers=self.headers, data=json.dumps(payload))
util.check_response(r)
def enable_autoscaling(self, autoscaling_config):
print '[alauda] Enabling auto-scaling for {0}'.format(self.name)
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name
payload = {
"scaling_mode": 'AUTO',
"autoscaling_config": autoscaling_config,
'app_name': self.name
}
r = requests.put(url, headers=self.headers, data=json.dumps(payload))
util.check_response(r)
def disable_autoscaling(self, target_num_instances):
if target_num_instances is not None:
self.target_num_instances = target_num_instances
print '[alauda] Disabling auto-scaling for {0}. Target number of instances: {1}'.format(self.name, self.target_num_instances)
url = self.api_endpoint + 'services/{}/'.format(self.namespace) + self.name
payload = {
"app_name": self.name,
"target_num_instances": self.target_num_instances,
'scaling_mode': 'MANUAL'
}
r = requests.put(url, headers=self.headers, data=json.dumps(payload))
util.check_response(r)
def logs(self, start_time, end_time):
start, end = util.parse_time(start_time, end_time)
url = self.api_endpoint + 'services/{0}/{1}/logs?start_time={2}&end_time={3}'.format(self.namespace, self.name, start, end)
r = requests.get(url, headers=self.headers)
util.check_response(r)
return r.text
def get_run_command(self):
data = json.loads(self.details)
run_command = data['run_command']
if not run_command:
run_command = ' '
return run_command
def get_state(self):
data = json.loads(self.details)
return data.get('current_status')
def get_ports(self):
ports = ''
data = json.loads(self.details)
if not data['instance_ports']:
return ' '
for port in data['instance_ports']:
instance_envvars = json.loads(data['instance_envvars'])
ports = ports + '{0}:{1}->{2}/{3}, '.format(instance_envvars['__DEFAULT_DOMAIN_NAME__'],
port.get('service_port', ''),
port['container_port'],
port['protocol'])
return ports[:len(ports) - 2]
def get_instance(self, id):
url = self.api_endpoint + 'services/{0}/{1}/instances/{2}'.format(self.namespace, self.name, id)
r = requests.get(url, headers=self.headers)
util.check_response(r)
data = json.loads(r.text)
instance = Instance(service=self, uuid=data['uuid'], details=r.text)
return instance
def list_instances(self):
url = self.api_endpoint + 'services/{0}/{1}/instances/'.format(self.namespace, self.name)
r = requests.get(url, headers=self.headers)
util.check_response(r)
data = json.loads(r.text)
instance_list = []
for instance in data:
instance = Instance(service=self, uuid=instance['uuid'], details=json.dumps(instance))
instance_list.append(instance)
return instance_list
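# Usage sketch (credentials must already be configured for auth.load_token(); the image
# name and instance counts below are illustrative):
#   svc = Service(name='web', image_name='library/nginx', image_tag='latest', target_num_instances=1)
#   svc.run()        # create the service and start it
#   svc.scale(3)     # later, scale to three instances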
|
from mlhub.pkg import azkey, azrequest, mlask, mlcat
mlcat("Text Classification of MultiNLI Sentences Using BERT", """\
To run through the demo quickly the QUICK_RUN flag
is set to True and so uses a small subset of the data and
a smaller number of epochs.
The table below provides some reference running times on two
machine configurations for the full dataset.
|QUICK_RUN |Machine Configurations |Running time|
|----------|----------------------------------------|------------|
|False |4 CPUs, 14GB memory | ~19.5 hours|
|False |1 NVIDIA Tesla K80 GPUs, 12GB GPU memory| ~ 1.5 hours|
To avoid CUDA out-of-memory errors the BATCH_SIZE and MAX_LEN are reduced,
resulting in a compromised model performance but one that can be computed
on a typical user's laptop. For best model performance this same script can
be run on cloud compute (Azure) with the parameters set to their usual values.
The first part of this demo will load a pre-built model, extend it
with new data, and then test the performance on the new data.
""")
QUICK_RUN = True
import sys
import os
import json
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report, accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from utils_nlp.dataset.multinli import load_pandas_df
from utils_nlp.eval.classification import eval_classification
from utils_nlp.models.bert.sequence_classification import BERTSequenceClassifier
from utils_nlp.models.bert.common import Language, Tokenizer
from utils_nlp.common.timer import Timer
# Ignore warnings - generally not a good thing to do, but for the
# user experience we do so here.
import warnings
warnings.filterwarnings("ignore")
mlask(end="\n")
mlcat("Introduction", """\
First we will fine-tune and evaluate a pretrained BERT model on a
subset of the MultiNLI dataset.
A sequence classifier is used to wrap Hugging Face's PyTorch
implementation of Google's BERT.
""")
mlask(end="\n")
TRAIN_DATA_USED_FRACTION = 1
TEST_DATA_USED_FRACTION = 1
NUM_EPOCHS = 1
if QUICK_RUN:
TRAIN_DATA_USED_FRACTION = 0.01
TEST_DATA_USED_FRACTION = 0.01
NUM_EPOCHS = 1
if torch.cuda.is_available():
BATCH_SIZE = 32
else:
BATCH_SIZE = 8
DATA_FOLDER = "./temp"
BERT_CACHE_DIR = "./temp"
LANGUAGE = Language.ENGLISH
TO_LOWER = True
MAX_LEN = 50 # 150
BATCH_SIZE_PRED = 256 # 512
TRAIN_SIZE = 0.6
LABEL_COL = "genre"
TEXT_COL = "sentence1"
mlcat("Read the Dataset", """\
We start by loading a subset of the data. The following function also
downloads and extracts the files, if they don't exist in the data
folder.
The MultiNLI dataset is mainly used for natural language inference (NLI)
tasks, where the inputs are sentence pairs and the labels are entailment
indicators. The sentence pairs are also classified into _genres_ that
allow for more coverage and better evaluation of NLI models.
For our classification task, we use the first sentence only as the text
input, and the corresponding genre as the label. We select the examples
corresponding to one of the entailment labels (_neutral_ in this case)
to avoid duplicate rows, as the sentences are not unique, whereas the
sentence pairs are.
Below we show the first few sentences from the dataset.
""")
df = load_pandas_df(DATA_FOLDER, "train")
df = df[df["gold_label"]=="neutral"] # get unique sentences
print(df[[LABEL_COL, TEXT_COL]].head())
mlask(begin="\n", end="\n")
mlcat("Genres", """\
The examples in the dataset are grouped into 5 genres:
""")
print(df[LABEL_COL].value_counts())
mlask(begin="\n", end="\n")
mlcat("Train and Test Datasets", """
We split the data for training and testing, and encode the class labels:
""")
# split
df_train, df_test = train_test_split(df, train_size = TRAIN_SIZE, random_state=0)
df_train = df_train.sample(frac=TRAIN_DATA_USED_FRACTION).reset_index(drop=True)
df_test = df_test.sample(frac=TEST_DATA_USED_FRACTION).reset_index(drop=True)
# encode labels
label_encoder = LabelEncoder()
labels_train = label_encoder.fit_transform(df_train[LABEL_COL])
labels_test = label_encoder.transform(df_test[LABEL_COL])
num_labels = len(np.unique(labels_train))
print("Number of unique labels: {}".format(num_labels))
print("Number of training examples: {}".format(df_train.shape[0]))
print("Number of testing examples: {}".format(df_test.shape[0]))
mlask(begin="\n", end="\n")
mlcat("Tokenize and Preprocess", """\
Before training, we tokenize the text documents and convert them to
lists of tokens. We instantiate a BERT tokenizer given the language,
and tokenize the text of the training and testing sets.
""")
tokenizer = Tokenizer(LANGUAGE, to_lower=TO_LOWER, cache_dir=BERT_CACHE_DIR)
tokens_train = tokenizer.tokenize(list(df_train[TEXT_COL]))
tokens_test = tokenizer.tokenize(list(df_test[TEXT_COL]))
mlask(begin="\n", end="\n")
mlcat("PreProcessing Steps", """\
We perform the following preprocessing steps:
- Convert the tokens into token indices corresponding to the BERT
tokenizer's vocabulary
- Add the special tokens [CLS] and [SEP] to mark the beginning and end
of a sentence
- Pad or truncate the token lists to the specified max length
- Return mask lists that indicate paddings' positions
- Return token type id lists that indicate which sentence the tokens
belong to (not needed for one-sequence classification)
See the original implementation for more information on BERT's input
format.
""")
mlask(end="\n")
tokens_train, mask_train, _ = tokenizer.preprocess_classification_tokens(
tokens_train, MAX_LEN
)
tokens_test, mask_test, _ = tokenizer.preprocess_classification_tokens(
tokens_test, MAX_LEN
)
mlcat("Create Model", """\
Next, we create a sequence classifier that loads a pre-trained BERT
model, given the language and number of labels.
""")
classifier = BERTSequenceClassifier(
language=LANGUAGE, num_labels=num_labels, cache_dir=BERT_CACHE_DIR
)
mlask(end="\n")
mlcat("Train", """\
We train the classifier using the training examples. This involves
fine-tuning the BERT Transformer and learning a linear classification
layer on top of that.
""")
with Timer() as t:
classifier.fit(
token_ids=tokens_train,
input_mask=mask_train,
labels=labels_train,
num_epochs=NUM_EPOCHS,
batch_size=BATCH_SIZE,
verbose=True,
)
print("[Training time: {:.3f} hrs]".format(t.interval / 3600))
mlask(begin="\n", end="\n")
mlcat("Score", """\
We now score the test set using the trained classifier to obtain an estimate
of how well the model performs.
""")
preds = classifier.predict(token_ids=tokens_test,
input_mask=mask_test,
batch_size=BATCH_SIZE_PRED)
mlask(begin="\n", end="\n")
mlcat("Evaluate Results", """\
Finally, we compute the accuracy, precision, recall, and F1 metrics of
the evaluation on the test set.
""")
report = classification_report(labels_test, preds, target_names=label_encoder.classes_, output_dict=True)
accuracy = accuracy_score(labels_test, preds)
print("accuracy: {:.2}\n".format(accuracy))
for g in report:
    print(g)
    # with output_dict=True the 'accuracy' entry is a single float, not a dict of metrics
    if not isinstance(report[g], dict):
        print("    {}".format(round(report[g], 2)))
        print("\n")
        continue
    for m in report[g]:
        print("    {} = {}".format(m, round(report[g][m], 2)), end="")
    print("\n")
mlask()
|
from functools import cmp_to_key
class Player:
def __init__(self, name, score):
self.name=name
self.score=score
def __repr__(self):
return {'name':self.name,'score':self.score}
@staticmethod
def comparator(a, b):
if a.score>b.score:
return -1
if a.score<b.score:
return 1
if a.name < b.name:
return -1
if a.name > b.name:
return 1
return 0
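# Typical usage sketch (the judge supplies the real input; names below are illustrative):
#   players = [Player("amy", 100), Player("david", 100), Player("heraldo", 50)]
#   players.sort(key=cmp_to_key(Player.comparator))
#   for p in players:
#       print(p.name, p.score)   # amy 100, david 100, heraldo 50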
|
#!/usr/bin/env python3
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import uuid
import argparse
import json
import asyncio
import rclpy
from rclpy.node import Node
from rclpy.parameter import Parameter
from rclpy.qos import qos_profile_system_default
from rclpy.qos import QoSProfile
from rclpy.qos import QoSHistoryPolicy as History
from rclpy.qos import QoSDurabilityPolicy as Durability
from rclpy.qos import QoSReliabilityPolicy as Reliability
from rmf_task_msgs.msg import ApiRequest, ApiResponse
###############################################################################
class TaskRequester(Node):
def __init__(self, argv=sys.argv):
super().__init__('task_requester')
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--pickups', required=True,
type=str, nargs='+',
help="Pickup names")
parser.add_argument('-d', '--dropoffs', required=True,
type=str, nargs='+',
help="Dropoff names")
parser.add_argument('-ph', '--pickup_handlers', required=True,
type=str, nargs='+',
help="Pickup handler names")
parser.add_argument('-dh', '--dropoff_handlers', required=True,
type=str, nargs='+',
help="Dropoffs handler names")
parser.add_argument('-pp', '--pickup_payloads',
type=str, nargs='+', default=[],
help="Pickup payload [sku,quantity sku2,qty...]")
parser.add_argument('-dp', '--dropoff_payloads',
type=str, nargs='+', default=[],
help="Dropoff payload [sku,quantity sku2,qty...]")
parser.add_argument('-F', '--fleet', type=str,
help='Fleet name, should define tgt with robot')
parser.add_argument('-R', '--robot', type=str,
help='Robot name, should define tgt with fleet')
parser.add_argument('-st', '--start_time',
help='Start time from now in secs, default: 0',
type=int, default=0)
parser.add_argument('-pt', '--priority',
help='Priority value for this request',
type=int, default=0)
parser.add_argument("--use_sim_time", action="store_true",
help='Use sim time, default: false')
self.args = parser.parse_args(argv[1:])
self.response = asyncio.Future()
# check user delivery arg inputs
if (len(self.args.pickups) != len(self.args.pickup_handlers)):
self.get_logger().error(
"Invalid pickups, [-p] should have the same length as [-ph]")
parser.print_help()
sys.exit(1)
if (len(self.args.dropoffs) != len(self.args.dropoff_handlers)):
self.get_logger().error(
"Invalid dropoffs, [-d] should have the same length as [-dh]")
parser.print_help()
sys.exit(1)
transient_qos = QoSProfile(
history=History.KEEP_LAST,
depth=1,
reliability=Reliability.RELIABLE,
durability=Durability.TRANSIENT_LOCAL)
self.pub = self.create_publisher(
ApiRequest, 'task_api_requests', transient_qos)
# enable ros sim time
if self.args.use_sim_time:
self.get_logger().info("Using Sim Time")
param = Parameter("use_sim_time", Parameter.Type.BOOL, True)
self.set_parameters([param])
# Construct task
msg = ApiRequest()
msg.request_id = "delivery_" + str(uuid.uuid4())
payload = {}
if self.args.fleet and self.args.robot:
self.get_logger().info("Using 'robot_task_request'")
payload["type"] = "robot_task_request"
payload["robot"] = self.args.robot
payload["fleet"] = self.args.fleet
else:
self.get_logger().info("Using 'dispatch_task_request'")
payload["type"] = "dispatch_task_request"
request = {}
# Set task request start time
now = self.get_clock().now().to_msg()
now.sec = now.sec + self.args.start_time
start_time = now.sec * 1000 + round(now.nanosec/10**6)
request["unix_millis_earliest_start_time"] = start_time
def __create_pickup_desc(index):
if index < len(self.args.pickup_payloads):
sku_qty = self.args.pickup_payloads[index].split(',')
assert len(sku_qty) == 2, \
"please specify sku and qty for pickup payload"
payload = [{"sku": sku_qty[0],
"quantity": int(sku_qty[1])}]
else:
payload = []
return {
"place": self.args.pickups[index],
"handler": self.args.pickup_handlers[index],
"payload": payload
}
def __create_dropoff_desc(index):
if index < len(self.args.dropoff_payloads):
sku_qty = self.args.dropoff_payloads[index].split(',')
assert len(sku_qty) == 2, \
"please specify sku and qty for dropoff payload"
payload = [{"sku": sku_qty[0],
"quantity": int(sku_qty[1])}]
else:
payload = []
return {
"place": self.args.dropoffs[index],
"handler": self.args.dropoff_handlers[index],
"payload": payload
}
# Use standard delivery task type
if len(self.args.pickups) == 1 and len(self.args.dropoffs) == 1:
request["category"] = "delivery"
description = {
"pickup": __create_pickup_desc(0),
"dropoff": __create_dropoff_desc(0)
}
else:
# Define multi_delivery with request category compose
request["category"] = "compose"
# Define task request description with phases
description = {} # task_description_Compose.json
description["category"] = "multi_delivery"
description["phases"] = []
activities = []
# Add each pickup
for i in range(0, len(self.args.pickups)):
activities.append({
"category": "pickup",
"description": __create_pickup_desc(i)})
# Add each dropoff
for i in range(0, len(self.args.dropoffs)):
activities.append({
"category": "dropoff",
"description": __create_dropoff_desc(i)})
# Add activities to phases
description["phases"].append(
{"activity": {
"category": "sequence",
"description": {"activities": activities}}})
request["description"] = description
payload["request"] = request
msg.json_msg = json.dumps(payload)
def receive_response(response_msg: ApiResponse):
if response_msg.request_id == msg.request_id:
self.response.set_result(json.loads(response_msg.json_msg))
self.sub = self.create_subscription(
ApiResponse, 'task_api_responses', receive_response, 10
)
print(f"Json msg payload: \n{json.dumps(payload, indent=2)}")
self.pub.publish(msg)
###############################################################################
def main(argv=sys.argv):
rclpy.init(args=sys.argv)
args_without_ros = rclpy.utilities.remove_ros_args(sys.argv)
task_requester = TaskRequester(args_without_ros)
rclpy.spin_until_future_complete(
task_requester, task_requester.response, timeout_sec=5.0)
if task_requester.response.done():
print(f'Got response:\n{task_requester.response.result()}')
else:
print('Did not get a response')
rclpy.shutdown()
if __name__ == '__main__':
main(sys.argv)
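# Example invocation (the script name and place/handler names are illustrative; the flags
# come from the argparse parser above):
#   python3 dispatch_delivery.py -p pantry -ph coke_dispenser -d hardware_2 -dh coke_ingestor \
#       -pp coke,1 -dp coke,1 --use_sim_time
# The script publishes an ApiRequest on 'task_api_requests' and waits up to 5 s for the
# matching ApiResponse on 'task_api_responses'.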
|
#MenuTitle: Transfer Hints to First Master
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from builtins import str
__doc__="""
Moves PostScript (stem and ghost) hints from the current layer to the first master layer, provided the paths are compatible.
"""
from GlyphsApp import TOPGHOST, BOTTOMGHOST, STEM, TTANCHOR, TTSTEM, TTALIGN, TTINTERPOLATE, TTDIAGONAL, TTDELTA
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
firstMaster = thisFont.masters[0]
firstMasterId = firstMaster.id
supportedHintTypes = (TOPGHOST, BOTTOMGHOST, STEM, TTANCHOR, TTSTEM, TTALIGN, TTINTERPOLATE, TTDIAGONAL, TTDELTA, )
def deleteHintsOnLayer(thisLayer):
for i in range(len(thisLayer.hints))[::-1]:
if thisLayer.hints[i].type in supportedHintTypes:
del thisLayer.hints[i]
def transferHintsFromTo( sourceLayer, targetLayer ):
# clean slate in targetLayer:
deleteHintsOnLayer(targetLayer)
# go through all hints in source layer:
for thisHint in sourceLayer.hints:
# if it is a recognized hint type...
if thisHint.type in supportedHintTypes and thisHint.originNode:
# ... create hint for target layer:
pathIndex = sourceLayer.paths.index(thisHint.originNode.parent)
originNodeIndex = thisHint.originNode.index
newHint = GSHint()
newHint.type = thisHint.type
newHint.originNode = targetLayer.paths[pathIndex].nodes[originNodeIndex]
newHint.horizontal = thisHint.horizontal
# ... look for optional nodes:
if thisHint.targetNode:
targetNodeIndex = thisHint.targetNode.index
targetPathIndex = sourceLayer.paths.index(thisHint.targetNode.parent)
newHint.targetNode = targetLayer.paths[targetPathIndex].nodes[targetNodeIndex]
if thisHint.otherNode1:
targetNodeIndex = thisHint.otherNode1.index
targetPathIndex = sourceLayer.paths.index(thisHint.otherNode1.parent)
newHint.otherNode1 = targetLayer.paths[targetPathIndex].nodes[targetNodeIndex]
if thisHint.otherNode2:
targetNodeIndex = thisHint.otherNode2.index
targetPathIndex = sourceLayer.paths.index(thisHint.otherNode2.parent)
newHint.otherNode2 = targetLayer.paths[targetPathIndex].nodes[targetNodeIndex]
# ... and add to target layer:
targetLayer.hints.append(newHint)
# ... delete hints in source layer:
deleteHintsOnLayer(sourceLayer)
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
# brings macro window to front and clears its log:
Glyphs.clearLog()
for thisLayer in selectedLayers:
thisGlyph = thisLayer.parent
if thisLayer.layerId != firstMasterId:
firstLayer = thisGlyph.layers[firstMasterId]
if thisGlyph.mastersCompatibleForLayers_([thisLayer,firstLayer]):
print("Transfering hints in: %s" % thisGlyph.name)
thisGlyph.beginUndo() # begin undo grouping
transferHintsFromTo( thisLayer, firstLayer )
thisGlyph.endUndo() # end undo grouping
else:
Glyphs.showMacroWindow()
print("%s: layers incompatible." % thisGlyph.name)
else:
Glyphs.showMacroWindow()
print("%s: layer '%s' is already the first master layer." % (thisGlyph.name,thisLayer.name))
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
|
"""
This module is the test suite of leggedsnake.
It uses unit test.
"""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Project:
## Author: Oliver Watts - owatts@staffmail.ed.ac.uk
import sys
import os
import glob
import math
import tempfile
import struct
import re
import copy
import h5py
from argparse import ArgumentParser
import numpy
import numpy as np
# import pylab
#import scipy.signal
from segmentaxis import segment_axis
from speech_manip import get_speech, read_wave
from mulaw2 import lin2mu
from label_manip import extract_quinphone
from util import splice_data, unsplice, safe_makedir, readlist
import const
from const import label_delimiter, vuv_stream_names, label_length_diff_tolerance, target_rep_widths
import resample
import resample_labels
import varying_filter
NORMWAVE = False
def locate_stream_directories(directories, streams):
'''
For each stream in streams, find a subdirectory for some directory in
directories, directory/stream. Make sure that there is only 1 such subdirectory
named after the stream. Return dict mapping from stream names to directory locations.
'''
stream_directories = {}
for stream in streams:
for directory in directories:
candidate_dir = os.path.join(directory, stream)
if os.path.isdir(candidate_dir):
## check unique:
if stream in stream_directories:
sys.exit('Found at least 2 directories for stream %s: %s and %s'%(stream, stream_directories[stream], candidate_dir))
stream_directories[stream] = candidate_dir
## check we found a location for each stream:
for stream in streams:
if stream not in stream_directories:
sys.exit('No subdirectory found under %s for stream %s'%(','.join(directories), stream))
return stream_directories
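# Illustrative example (paths are hypothetical):
#   locate_stream_directories(['/data/natural', '/data/resynth'], ['mgc', 'lf0'])
#   -> {'mgc': '/data/natural/mgc', 'lf0': '/data/resynth/lf0'}
# assuming each stream name appears as a subdirectory of exactly one of the given directories.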
def main_work(config, overwrite_existing_data=False):
## (temporary) assertions:-
config['standardise_target_data'] = True
assert config['standardise_target_data'] == True
config['joincost_features'] = True ## want to use self. here, but no class defined...
if config['target_representation'] == 'sample':
config['joincost_features'] = False
database_fname = get_data_dump_name(config)
if os.path.isfile(database_fname):
if not overwrite_existing_data:
sys.exit('Data already exists at %s -- run with -X to overwrite it'%(database_fname))
else:
os.system('rm '+database_fname)
n_train_utts = config['n_train_utts']
target_feat_dirs = config['target_datadirs']
datadims_target = config['datadims_target']
stream_list_target = config['stream_list_target']
## get dicts mapping e.g. 'mgc': '/path/to/mgc/' : -
target_stream_dirs = locate_stream_directories(target_feat_dirs, stream_list_target)
if config['joincost_features']:
join_feat_dirs = config['join_datadirs']
datadims_join = config['datadims_join']
stream_list_join = config['stream_list_join']
## get dicts mapping e.g. 'mgc': '/path/to/mgc/' : -
join_stream_dirs = locate_stream_directories(join_feat_dirs, stream_list_join)
# for stream in stream_list_target:
# stream_dir = os.path.join(target_feat_dir, stream)
# assert os.path.isdir(stream_dir), 'Directory %s not accessible'%(stream_dir)
# for stream in stream_list_join:
# stream_dir = os.path.join(join_feat_dir, stream)
# assert os.path.isdir(stream_dir), 'Directory %s not accessible'%(stream_dir)
## First, work out initial list of training utterances based on files present in first stream subdir:
first_stream = stream_list_target[0] ## <-- typically, mgc
utt_list = sorted(glob.glob(target_stream_dirs[first_stream] +'/*.' + first_stream))
flist = [os.path.split(fname)[-1].replace('.'+first_stream,'') for fname in utt_list]
## Next, limit training utterances by number or by pattern:
if type(n_train_utts) == int:
if (n_train_utts == 0 or n_train_utts > len(flist)):
n_train_utts = len(flist)
flist = flist[:n_train_utts]
elif type(n_train_utts) == str:
match_expression = n_train_utts
flist = [name for name in flist if match_expression in name]
print 'Selected %s utts with pattern %s'%(len(flist), match_expression)
## Also filter for test material, in case they are in same directory:
test_flist = []
for fname in flist:
for pattern in config['test_patterns']:
if pattern in fname:
test_flist.append(fname)
flist = [name for name in flist if name not in test_flist]
## Finally, only take utterances which occur in train_list, if it is given in config:
if 'train_list' in config:
assert os.path.isfile(config['train_list']), 'File %s does not exist'%(config['train_list'])
train_list = readlist(config['train_list'])
train_list = dict(zip(train_list, train_list))
flist = [name for name in flist if name in train_list]
assert len(flist) > 0
## 1A) First pass: get mean and std per stream for each of {target,join}
(mean_vec_target, std_vec_target) = get_mean_std(target_stream_dirs, stream_list_target, datadims_target, flist)
if config['joincost_features']:
(mean_vec_join, std_vec_join) = get_mean_std(join_stream_dirs, stream_list_join, datadims_join, flist)
## Get std of (transformed) waveform if doing sample synthesis
if config['target_representation'] == 'sample':
wave_mu_sigma = get_wave_mean_std(config['wav_datadir'], flist, config['sample_rate'], nonlin_wave=config['nonlin_wave'])
## 1B) Initialise HDF5; store mean and std in HDF5:
f = h5py.File(database_fname, "w")
mean_target_dset = f.create_dataset("mean_target", np.shape(mean_vec_target), dtype='f', track_times=False)
std_target_dset = f.create_dataset("std_target", np.shape(std_vec_target), dtype='f', track_times=False)
if config['joincost_features']:
mean_join_dset = f.create_dataset("mean_join", np.shape(mean_vec_join), dtype='f', track_times=False)
std_join_dset = f.create_dataset("std_join", np.shape(std_vec_join), dtype='f', track_times=False)
mean_target_dset[:] = mean_vec_target[:]
std_target_dset[:] = std_vec_target[:]
if config['joincost_features']:
mean_join_dset[:] = mean_vec_join[:]
std_join_dset[:] = std_vec_join[:]
## Set some values....
target_dim = mean_vec_target.shape[0]
if config['joincost_features']:
join_dim = mean_vec_join.shape[0]
target_rep_size = target_dim * target_rep_widths[config.get('target_representation', 'twopoint')]
if config.get('add_duration_as_target', False):
target_rep_size += 1
fshift_seconds = (0.001 * config['frameshift_ms'])
fshift = int(config['sample_rate'] * fshift_seconds)
samples_per_frame = fshift
print 'go through data to find number of units:- '
n_units = 0
if config.get('add_duration_as_target', False):
duration_data = {}
if config['target_representation'] in ['epoch', 'sample']:
new_flist = []
print target_stream_dirs
first_stream, first_streamdir = sorted(target_stream_dirs.items())[0]
for base in flist:
featfile = os.path.join(first_streamdir, base + '.' + first_stream)
if not os.path.exists(featfile):
print 'skipping %s'%(featfile)
continue
speech = get_speech(featfile, datadims_target[first_stream])
npoint, _ = speech.shape
n_units += npoint
new_flist.append(base)
flist = new_flist
else:
for base in flist:
labfile = os.path.join(config['label_datadir'], base + '.' + config['lab_extension'])
label = read_label(labfile, config['quinphone_regex'])
n_states = len(label)
assert n_states % 5 == 0
n_halfphones = (n_states / 5) * 2
n_units += n_halfphones
if config.get('add_duration_as_target', False):
vals = get_halfphone_lengths(label)
for (val, dur) in vals:
if val not in duration_data:
duration_data[val] = []
duration_data[val].append(dur)
if config.get('add_duration_as_target', False):
duration_stats = {}
for (phone, vals) in duration_data.items():
vals = np.array(vals)
duration_stats[phone] = (vals.mean(), max(vals.std(), 0.001)) ## variance floor
if config['target_representation'] == 'sample':
n_units *= (config['sample_rate']*fshift_seconds)
print '%s units (%s)'%(n_units, config['target_representation'])
## 2) get ready to store data in HDF5:
total_target_dim = target_rep_size
if config['target_representation'] == 'sample':
total_target_dim = config['wave_context_length'] + target_rep_size
## maxshape makes a dataset resizable
train_dset = f.create_dataset("train_unit_features", (n_units, total_target_dim), maxshape=(n_units, total_target_dim), dtype='f', track_times=False)
if config['target_representation'] == 'sample':
#wavecontext_dset = f.create_dataset("wavecontext", (n_units, config['wave_context_length']), maxshape=(n_units,config['wave_context_length']), dtype='i')
nextsample_dset = f.create_dataset("nextsample", (n_units, 1), maxshape=(n_units,1), dtype='f', track_times=False)
else:
phones_dset = f.create_dataset("train_unit_names", (n_units,), maxshape=(n_units,), dtype='|S50', track_times=False)
filenames_dset = f.create_dataset("filenames", (n_units,), maxshape=(n_units,), dtype='|S50', track_times=False)
unit_index_within_sentence_dset = f.create_dataset("unit_index_within_sentence_dset", (n_units,), maxshape=(n_units,), dtype='i', track_times=False)
if config['target_representation'] == 'epoch':
cutpoints_dset = f.create_dataset("cutpoints", (n_units,3), maxshape=(n_units,3), dtype='i', track_times=False)
else:
cutpoints_dset = f.create_dataset("cutpoints", (n_units,2), maxshape=(n_units,2), dtype='i', track_times=False)
    # hardcoded for pitch sync cost, unless epoch selection, in which case natural 2:
if config['target_representation'] == 'epoch':
join_dim *= 2
join_contexts_dset = f.create_dataset("join_contexts", (n_units + 1, join_dim), maxshape=(n_units + 1, join_dim), dtype='f', track_times=False)
if config.get('store_full_magphase', False):
mp_mag_dset = f.create_dataset("mp_mag", (n_units, 513), maxshape=(n_units, 513), dtype='f', track_times=False)
mp_imag_dset = f.create_dataset("mp_imag", (n_units, 513), maxshape=(n_units, 513), dtype='f', track_times=False)
mp_real_dset = f.create_dataset("mp_real", (n_units, 513), maxshape=(n_units, 513), dtype='f', track_times=False)
mp_fz_dset = f.create_dataset("mp_fz", (n_units, 1), maxshape=(n_units, 1), dtype='f', track_times=False)
## Optionally dump some extra data which can be used for training a better join cost:-
if config.get('dump_join_data', False):
join_database_fname = get_data_dump_name(config, joindata=True)
fjoin = h5py.File(join_database_fname, "w")
halfwin = config['join_cost_halfwidth']
start_join_feats_dset = fjoin.create_dataset("start_join_feats", (n_units, halfwin*join_dim), maxshape=(n_units, halfwin*join_dim), dtype='f', track_times=False)
end_join_feats_dset = fjoin.create_dataset("end_join_feats", (n_units, halfwin*join_dim), maxshape=(n_units, halfwin*join_dim), dtype='f', track_times=False)
## Standardise data (within streams), compose, add VUV, fill F0 gaps with utterance mean voiced value:
start = 0
print 'Composing ....'
print flist
new_flist = []
for base in flist:
print base
pm_file = os.path.join(config['pm_datadir'], base + '.pm')
## only actually need wave in sample case:-
if config['target_representation'] == 'sample':
wname = os.path.join(config['wav_datadir'], base + '.wav')
if not os.path.isfile(wname):
print 'Warning: no wave -- skip!'
continue
if not(os.path.isfile(pm_file)):
print 'Warning: no pm -- skip!'
continue
## Get pitchmarks (to join halfphones on detected GCIs):-
pms_seconds = read_pm(pm_file)
if pms_seconds.shape == (1,1):
print 'Warning: trouble reading pm file -- skip!'
continue
### Get speech params for target cost (i.e. probably re-generated speech for consistency):
t_speech = compose_speech(target_stream_dirs, base, stream_list_target, datadims_target)
# print t_speech
# print t_speech.shape
# sys.exit('sedvsbvsfrb')
if t_speech.shape == [1,1]: ## bad return value
continue
### upsample before standardisation (inefficient, but standardisation rewrites uv values?? TODO: check this)
if config['target_representation'] == 'sample':
nframes, _ = t_speech.shape
        ### originally:
#len_wave = int(config['sample_rate'] * fshift_seconds * nframes)
wavecontext, nextsample = get_waveform_fragments(wname, config['sample_rate'], config['wave_context_length'], nonlin_wave=config['nonlin_wave'], norm=wave_mu_sigma, wave_context_type=config.get('wave_context_type', 0))
len_wave, _ = wavecontext.shape
t_speech = resample.upsample(len_wave, config['sample_rate'], fshift_seconds, t_speech, f0_dim=-1, convention='world')
if t_speech.size == 0:
print 'Warning: trouble upsampling -- skip!'
continue
if config['standardise_target_data']:
t_speech = standardise(t_speech, mean_vec_target, std_vec_target)
if config['target_representation'] == 'sample':
t_speech = np.hstack([wavecontext, t_speech])
if config['joincost_features']:
### Get speech params for join cost (i.e. probably natural speech).
### These are now expected to have already been resampled so that they are pitch-synchronous.
j_speech = compose_speech(join_stream_dirs, base, stream_list_join, datadims_join)
print 'j shape'
print j_speech.shape
if j_speech.size == 1: ## bad return value
continue
if config.get('standardise_join_data', True):
j_speech = standardise(j_speech, mean_vec_join, std_vec_join)
j_frames, j_dim = j_speech.shape
if j_frames != len(pms_seconds):
print (j_frames, len(pms_seconds))
print 'Warning: number of rows in join cost features not same as number of pitchmarks:'
print 'these features should be pitch synchronous. Skipping utterance!'
continue
if config['target_representation'] == 'epoch':
t_frames, t_dim = t_speech.shape
print 't shape'
print t_speech.shape
if j_frames != len(pms_seconds):
print (t_frames, len(pms_seconds))
print 'Warning: number of rows in target cost features not same as number of pitchmarks:'
print 'these features should be pitch synchronous (when target_representation == epoch). Skipping utterance!'
continue
# j_speech = j_speech[1:-1,:] ## remove first and last frames corresponding to terminal pms
# j_frames -= 2
if not config['target_representation'] in ['epoch', 'sample']: ### TODO: pitch synchronise labels...
### get labels:
labfile = os.path.join(config['label_datadir'], base + '.' + config['lab_extension'])
labs = read_label(labfile, config['quinphone_regex']) ### __pp: pitch sync label?
label_frames = labs[-1][0][1] ## = How many (5msec) frames does label correspond to?
## Has silence been trimmed from either t_speech or j_speech?
## Assume pitch synch join features are not silence trimmed
# if config.get('untrim_silence_join_speech', False):
# print 'Add trimmed silence back to join cost speech features'
# j_speech = reinsert_terminal_silence(j_speech, labs)
if config.get('untrim_silence_target_speech', False):
print 'Add trimmed silence back to target cost speech features'
t_speech = reinsert_terminal_silence(t_speech, labs)
# ### TODO: Length of T and J does not quite match here :-( need to debug.
# print 'T'
# print t_speech.shape
# print 'J'
# print j_speech.shape
# print 'L'
# print label_frames
## Pad or trim speech to match the length of the labels (within a certain tolerance):-
t_speech = pad_speech_to_length(t_speech, labs)
if DODEBUG:
check_pitch_sync_speech(j_speech, labs, pms_seconds)
#j_speech = pad_speech_to_length(j_speech, labs) ## Assume pitch synch join features are all OK
## Discard sentences where length of speech and labels differs too much:-
if t_speech.size==1:
print 'Skip utterance'
continue
# if j_speech.size==1:
# print 'Skip utterance'
# continue
sample_rate = config.get('sample_rate', 48000)
if config['target_representation'] == 'sample':
unit_features = t_speech
elif config['target_representation'] == 'epoch':
## Get representations of half phones to use in target cost:-
unit_features = t_speech[1:-1, :]
## Find 'cutpoints': pitchmarks which are considered to be the boundaries of units, and where those
## units will be concatenated:
#cutpoints, cutpoint_indices = get_cutpoints(timings, pms_seconds)
pms_samples = np.array(pms_seconds * sample_rate, dtype=int)
cutpoints = segment_axis(pms_samples, 3, overlap=2, axis=0)
#context_data = j_speech[1:-1, :]
m,n = j_speech.shape
context_data = segment_axis(j_speech, 2, overlap=1, axis=0).reshape((m-1, n*2))
ADD_PHONETIC_EPOCH = False
if ADD_PHONETIC_EPOCH:
labfile = os.path.join(config['label_datadir'], base + '.' + config['lab_extension'])
labs = read_label(labfile, config['quinphone_regex'])
unit_names = resample_labels.pitch_synchronous_resample_label(sample_rate, 0.005, pms_samples, labs)
unit_names = unit_names[1:-1]
else:
unit_names = np.array(['_']*(t_speech.shape[0]-2))
else:
## Get representations of half phones to use in target cost:-
unit_names, unit_features, timings = get_halfphone_stats(t_speech, labs, config.get('target_representation', 'twopoint'))
if config.get('add_duration_as_target', False):
norm_durations = get_norm_durations(unit_names, timings, duration_stats)
unit_features = np.hstack([unit_features, norm_durations])
## Find 'cutpoints': pitchmarks which are considered to be the boundaries of units, and where those
## units will be concatenated:
cutpoints, cutpoint_indices = get_cutpoints(timings, pms_seconds, sample_rate)
#context_data = get_contexts_for_natural_joincost(j_speech, timings, width=2)
context_data = get_contexts_for_pitch_synchronous_joincost(j_speech, cutpoint_indices)
m,n = unit_features.shape
if config['joincost_features']: ## i.e. don't store this in sample-based case
filenames = [base] * len(cutpoints)
o,p = context_data.shape
# if config['target_representation'] == 'epoch':
# assert o == m, (o, m)
# else:
assert o == m+1, (o, m)
unit_index_within_sentence = np.arange(m)
if config.get('dump_join_data', False):
start_join_feats, end_join_feats = get_join_data_AL(j_speech, cutpoint_indices, config['join_cost_halfwidth'])
CHECK_MAGPHASE_SIZES = False
if CHECK_MAGPHASE_SIZES: # config.get('store_full_magphase', False):
print 'CHECK_MAGPHASE_SIZES'
for extn in ['mag','imag','real','f0']:
direc = extn + '_full'
if extn == 'f0':
sdim = 1
else:
sdim = 513
fname = os.path.join(config['full_magphase_dir'], direc, base+'.'+extn)
full_stream = get_speech(fname, sdim)
#full_stream = full_stream[1:-1,:]
print direc
print full_stream.shape
if config.get('store_full_magphase', False):
mp_data = []
for extn in ['mag','imag','real','f0']:
direc = extn + '_full'
if extn == 'f0':
sdim = 1
else:
sdim = 513
fname = os.path.join(config['full_magphase_dir'], direc, base+'.'+extn)
full_stream = get_speech(fname, sdim)
full_stream = full_stream[1:-1,:]
print direc
print full_stream.shape
mp_data.append(full_stream)
## Add everything to database:
train_dset[start:start+m, :] = unit_features
if config['joincost_features']:
phones_dset[start:start+m] = unit_names
filenames_dset[start:start+m] = filenames
unit_index_within_sentence_dset[start:start+m] = unit_index_within_sentence
cutpoints_dset[start:start+m,:] = cutpoints
join_contexts_dset[start:start+m, :] = context_data[:-1,:]
if config.get('dump_join_data', False):
start_join_feats_dset[start:start+m, :] = start_join_feats
end_join_feats_dset[start:start+m, :] = end_join_feats
if config['target_representation'] == 'sample':
#wavecontext_dset[start:start+m, :] = wavecontext
nextsample_dset[start:start+m, :] = nextsample
if config.get('store_full_magphase', False):
(mp_mag, mp_imag, mp_real, mp_fz) = mp_data
mp_mag_dset[start:start+m, :] = mp_mag
mp_imag_dset[start:start+m, :] = mp_imag
mp_real_dset[start:start+m, :] = mp_real
mp_fz_dset[start:start+m, :] = mp_fz
start += m
new_flist.append(base)
if config['target_representation'] not in ['epoch', 'sample']:
## add database final join context back on (kind of messy)
join_contexts_dset[m, :] = context_data[-1,:]
## Number of units was computed before without considering dropped utterances, actual number
## will be smaller. Resize the data:
actual_nframes = start
print '\n\n\nNumber of units actually written:'
print actual_nframes
print
train_dset.resize(actual_nframes, axis=0)
if config['joincost_features']:
phones_dset.resize(actual_nframes, axis=0)
filenames_dset.resize(actual_nframes, axis=0)
unit_index_within_sentence_dset.resize(actual_nframes, axis=0)
cutpoints_dset.resize(actual_nframes, axis=0)
join_contexts_dset.resize(actual_nframes+1, axis=0)
if config['target_representation'] == 'sample':
# wavecontext_dset.resize(actual_nframes, axis=0)
nextsample_dset.resize(actual_nframes, axis=0)
## Store waveform standardisation info:
wave_mu_sigma_dset = f.create_dataset("wave_mu_sigma", np.shape(wave_mu_sigma), dtype='f', track_times=False)
wave_mu_sigma_dset[:] = wave_mu_sigma
if config.get('store_full_magphase', False):
mp_mag_dset.resize(actual_nframes, axis=0)
mp_imag_dset.resize(actual_nframes, axis=0)
mp_real_dset.resize(actual_nframes, axis=0)
mp_fz_dset.resize(actual_nframes, axis=0)
if config.get('add_duration_as_target', False):
duration_stats = duration_stats.items()
duration_stats.sort()
duration_monophones = np.array([k for (k,v) in duration_stats])
duration_stats = np.array([v for (k,v) in duration_stats])
f.create_dataset("duration_monophones", duration_monophones.shape, dtype='|S50', track_times=False)
f["duration_monophones"][:] = duration_monophones
f.create_dataset("duration_stats", duration_stats.shape, dtype='f', track_times=False)
f["duration_stats"][:,:] = duration_stats
print
print 'Storing hybrid voice data:'
for thing in f.values():
print thing
# print '-------a'
# t = f["train_unit_features"][:,:]
# print np.mean(t, axis=0).tolist()
# print np.std(t, axis=0).tolist()
# print np.min(t, axis=0).tolist()
# print np.max(t, axis=0).tolist()
# sys.exit('uuuuuuuu')
f.close()
print 'Stored training data for %s sentences to %s'%(n_train_utts, database_fname)
if config.get('dump_join_data', False):
start_join_feats_dset.resize(actual_nframes, axis=0)
end_join_feats_dset.resize(actual_nframes, axis=0)
print
print 'Storing data for learning join cost:'
for thing in fjoin.values():
print thing
fjoin.close()
def check_pitch_sync_speech(j_speech, labs, pms_seconds):
print '-----------------------'
print 'check_pitch_sync_speech'
print j_speech.shape
print labs[-1]
print len(pms_seconds)
print
def reinsert_terminal_silence(speech, labels, silence_symbols=['#']):
initial_silence_end = 0
final_silence_start = -1
for ((s,e), quinphone) in labels:
if quinphone[2] in silence_symbols:
initial_silence_end = e
else:
break
for ((s,e), quinphone) in reversed(labels):
if quinphone[2] in silence_symbols:
final_silence_start = s
else:
break
m,n = speech.shape
label_frames = labels[-1][0][1]
end_sil_length = label_frames - final_silence_start
start_sil_length = initial_silence_end
padded_speech = numpy.vstack([numpy.zeros((start_sil_length, n)) , speech , numpy.zeros((end_sil_length, n))])
# padded_speech = numpy.zeros((label_frames, n))
# print speech.shape
# print padded_speech.shape
# print initial_silence_end, final_silence_start
# print padded_speech[initial_silence_end:final_silence_start, :].shape
# padded_speech[initial_silence_end:final_silence_start, :] = speech
return padded_speech
def get_mean(flist, dim, exclude_uv=False):
'''
Take mean over each coeff, to centre their trajectories around zero.
'''
frame_sum = np.zeros(dim)
frame_count = 0
for fname in flist:
if not os.path.isfile(fname):
continue
print 'mean: ' + fname
speech = get_speech(fname, dim)
if np.sum(np.isnan(speech)) + np.sum(np.isinf(speech)) > 0:
print 'EXCLUDE ' + fname
continue
if exclude_uv:
## remove speech where first column is <= 0.0
speech = speech[speech[:,0]>0.0, :]
frame_sum += speech.sum(axis=0)
m,n = np.shape(speech)
frame_count += m
mean_vec = frame_sum / float(frame_count)
return mean_vec, frame_count
def get_std(flist, dim, mean_vec, exclude_uv=False):
'''
Unlike the mean, use a single std value over all coeffs in a stream, to preserve the relative differences in range of the coeffs within that stream.
The value we use is the largest std across the coeffs, which means that when the stream is normalised,
the widest coeff will have a std of 1.0 and the remaining coeffs proportionally smaller stds.
Reduplicate this single value to a vector the width of the stream.
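For example (illustrative numbers, not from real data): if the per-coeff stds of a
stream were roughly [0.5, 2.0, 1.0], the whole stream is divided by 2.0, so the
normalised per-coeff stds become approximately [0.25, 1.0, 0.5].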
'''
diff_sum = np.zeros(dim)
frame_count = 0
for fname in flist:
if not os.path.isfile(fname):
continue
print 'std: ' + fname
speech = get_speech(fname, dim)
if np.sum(np.isnan(speech)) + np.sum(np.isinf(speech)) > 0:
print 'EXCLUDE ' + fname
continue
if exclude_uv:
## remove speech where first column is <= 0.0
speech = speech[speech[:,0]>0.0, :]
m,n = np.shape(speech)
#mean_mat = np.tile(mean_vec,(m,1))
mean_vec = mean_vec.reshape((1,-1))
sq_diffs = (speech - mean_vec) ** 2
diff_sum += sq_diffs.sum(axis=0)
frame_count += m
max_diff_sum = diff_sum.max()
print mean_vec.tolist()
print max_diff_sum.tolist()
std_val = (max_diff_sum / float(frame_count)) ** 0.5
std_vec = np.ones((1,dim)) * std_val
return std_vec
def standardise(speech, mean_vec, std_vec):
m,n = np.shape(speech)
### record where unvoiced values are with Boolean array, so we can revert them later:
uv_positions = (speech==const.special_uv_value)
mean_vec = mean_vec.reshape((1,-1))
## standardise:-
speech = (speech - mean_vec) / std_vec
uv_values = std_vec * -1.0 * const.uv_scaling_factor
for column in range(n):
# print speech[:,column].shape
# print uv_positions[:,column].shape
# print speech[:,column]
# print uv_positions[:,column]
# print column
#if True in uv_positions[:,column]:
speech[:,column][uv_positions[:,column]] = uv_values[0, column]
## leave weighting till later!
return speech
def destandardise(speech, mean_vec, std_vec):
m,n = np.shape(speech)
mean_vec = mean_vec.reshape((1,-1))
#std_mat = np.tile(std_vec,(m,1))
#weight_mat = np.tile(weight_vec,(m,1))
## standardise:-
speech = (speech * std_vec) + mean_vec
## leave weighting till later!
# speech = speech * weight_mat
return speech
DODEBUG = False
def debug(msg):
if DODEBUG:
print msg
def compose_speech(feat_dir_dict, base, stream_list, datadims, ignore_streams=['triphone']):
'''
where there is trouble, signal this by returning a 1 x 1 matrix
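Otherwise, load each requested stream for this utterance, set non-positive (unvoiced)
values of streams listed in vuv_stream_names to const.special_uv_value, trim all
streams to the length of the shortest one, and return them horizontally stacked
(n_frames x sum of stream dims).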
'''
stream_list = [stream for stream in stream_list if stream not in ignore_streams]
# mgc_fn = os.path.join(indir, 'mgc', base+'.mgc' )
# f0_fn = os.path.join(indir, 'f0', base+'.f0' )
# ap_fn = os.path.join(indir, 'ap', base+'.ap' )
stream_data_list = []
for stream in stream_list:
stream_fname = os.path.join(feat_dir_dict[stream], base+'.'+stream )
if not os.path.isfile(stream_fname):
print stream_fname + ' does not exist'
return np.zeros((1,1))
stream_data = get_speech(stream_fname, datadims[stream])
if stream == 'aef':
stream_data = np.vstack([np.zeros((1,datadims[stream])), stream_data, np.zeros((1,datadims[stream]))])
### previously:
# if stream in vuv_stream_names:
# uv_ix = np.arange(stream_data.shape[0])[stream_data[:,0]<=0.0]
# vuv = np.ones(stream_data.shape)
# vuv[uv_ix, :] = 0.0
# ## set F0 to utterance's voiced frame mean in unvoiced frames:
# voiced = stream_data[stream_data>0.0]
# if voiced.size==0:
# voiced_mean = 100.0 ### TODO: fix arbitrary number!
# else:
# voiced_mean = voiced.mean()
# stream_data[stream_data<=0.0] = voiced_mean
# stream_data_list.append(stream_data)
# stream_data_list.append(vuv)
### Now, just set unvoiced frames to -1.0 (they will be specially weighted later):
if stream in vuv_stream_names:
# uv_ix = np.arange(stream_data.shape[0])[stream_data[:,0]<=0.0]
# vuv = np.ones(stream_data.shape)
# vuv[uv_ix, :] = 0.0
## set F0 to utterance's voiced frame mean in unvoiced frames:
# voiced = stream_data[stream_data>0.0]
# if voiced.size==0:
# voiced_mean = 100.0 ### TODO: fix arbitrary number!
# else:
# voiced_mean = voiced.mean()
stream_data[stream_data<=0.0] = const.special_uv_value
stream_data_list.append(stream_data)
# stream_data_list.append(vuv)
else:
stream_data_list.append(stream_data)
## where data has different number of frames per stream, chop off the extra frames:
frames = [np.shape(data)[0] for data in stream_data_list]
nframe = min(frames)
stream_data_list = [data[:nframe,:] for data in stream_data_list]
speech = np.hstack(stream_data_list)
return speech
def read_pm(fname):
f = open(fname, 'r')
lines = f.readlines()
f.close()
for (i,line) in enumerate(lines):
if line.startswith('EST_Header_End'):
start = i+1
break
lines = lines[start:]
lines = [float(re.split('\s+',line)[0]) for line in lines]
lines = np.array(lines)
## debug: make sure monotonic increase
start_end = segment_axis(lines, 2, overlap=1)
diffs = start_end[:,1] - start_end[:,0]
neg_diffs = (diffs < 0.0)
if sum(neg_diffs) > 0:
print ('WARNING: pitch marks not monotonically increasing in %s'%(fname))
return np.ones((1,1))
return lines
def get_data_dump_name(config, joindata=False, joinsql=False, searchtree=False):
safe_makedir(os.path.join(config['workdir'], 'data_dumps'))
condition = make_train_condition_name(config)
assert not (joindata and joinsql)
if joindata:
last_part = '.joindata.hdf5'
elif joinsql:
last_part = '.joindata.sql'
elif searchtree:
last_part = '.searchtree.hdf5'
else:
last_part = '.hdf5'
database_fname = os.path.join(config['workdir'], "data_dumps", condition + last_part)
return database_fname
def make_train_condition_name(config):
'''
condition name including any important hyperparams
'''
### N-train_utts doesn't account for exclusions due to train_list, bad data etc. TODO - fix?
if not config['target_representation'] == 'sample':
jstreams = '-'.join(config['stream_list_join'])
tstreams = '-'.join(config['stream_list_target'])
return '%s_utts_jstreams-%s_tstreams-%s_rep-%s'%(config['n_train_utts'], jstreams, tstreams, config.get('target_representation', 'twopoint'))
else:
streams = '-'.join(config['stream_list_target'])
return '%s_utts_streams-%s_rep-%s'%(config['n_train_utts'], streams, config.get('target_representation', 'twopoint'))
def read_label(labfile, quinphone_regex):
'''
Return a list with entries like: ((start_frame, end_frame), [ll, l, c, r, rr, state_number]).
In typical input labels, the end frame of the item at t-1 is the same as the start frame of the item at t.
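Start and end times in the label file are in HTK 100ns units; dividing by 50000
converts them to 5msec frame indices (e.g. 250000 -> frame 5).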
'''
f = open(labfile, 'r')
lines = f.readlines()
f.close()
outlabel = []
for line in lines:
start,end,lab = re.split('\s+', line.strip(' \n'))[:3]
quinphone = extract_quinphone(lab, quinphone_regex) # lab.split('/5:')[0] # (':')[0]
state = lab.strip(']').split('[')[-1]
newlab = list(quinphone) + [state]
#for thing in newlab:
# assert label_delimiter not in thing, 'quinphone plus state (%s) contains label_delimiter (%s)'%(newlab, label_delimiter)
#newlab = label_delimiter.join(newlab)
start_frame = int(start) / 50000
end_frame = (int(end) / 50000) ## TODO: de-hardcode frameshift
#length_in_frames = (int(end) - int(start)) / 50000
#print length_in_frames
outlabel.append(((start_frame, end_frame), newlab))
return outlabel
def get_cutpoints(timings, pms, sample_rate):
'''
Find GCIs which are nearest to the start and end of each unit.
Also return indices of GCIs so we can map easily to pitch-synchronous features.
'''
cutpoints = []
indices = []
for (start, end) in timings:
start_sec = start * 0.005 # TODO: unhardcode frameshift and rate
end_sec = (end) * 0.005
start_closest_ix = numpy.argmin(numpy.abs(pms - start_sec))
end_closest_ix = numpy.argmin(numpy.abs(pms - end_sec))
indices.append((start_closest_ix, end_closest_ix))
cutpoints.append((pms[start_closest_ix], pms[end_closest_ix]))
indices = np.array(indices, dtype=int)
cutpoints = np.array(cutpoints)
cutpoints *= sample_rate
cutpoints = np.array(cutpoints, dtype=int)
return (cutpoints, indices)
def get_halfphone_stats(speech, labels, representation_type='twopoint'):
'''
Where there are N halfphones in an utt, return (names, features, timings) where
-- names is N-element array like (array(['xx~xx-#_L+p=l', 'xx~xx-#_R+p=l', 'xx~#-p_L+l=i', ...
-- timings is N-element list like [(0, 40), (41, 60), (61, 62), ...
-- features is N x D array, where D is size of feature vector
To get from a 5-state alignment for a phone to 2 halfphones, we arbitrarily
assign states 1 & 2 to halfphone 1, and states 3, 4 and 5 to halfphone 2.
Given this division, various types of representation are possible. A unit can
be represented by:
-- onepoint: middle frame appearing in it
-- twopoint: first and last frames appearing in it
-- threepoint: first, middle, and last frames appearing in it
We use the state alignment, in effect, to do downsampling which is non-linear in time.
Hence, the 'middle' point is not necessarily equidistant from the start and end
of a unit, but rather the last frame in state 2 (for first halfphone) or in state
5 (for second halfphone). Other choices for middle frame are possible.
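The feature dimension D therefore depends on the representation: D = dim for
'onepoint', 2*dim for 'twopoint', and 3*dim for 'threepoint', where dim is the
width of the input speech matrix.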
'''
if 0:
print speech
print labels
print speech.shape
print len(labels)
sys.exit('stop here 8293438472938')
if representation_type not in ['onepoint', 'twopoint', 'threepoint']:
sys.exit('Unknown halfphone representation type: %s '%(representation_type))
m,dim = speech.shape
assert len(labels) % 5 == 0, 'There must be 5 states for each phone in label'
nphones = len(labels) / 5
features = numpy.zeros((nphones*2, dim*2))
names = []
starts = []
middles = []
ends = []
halfphone_counter = 0
for ((s,e),lab) in labels:
#print ((s,e),lab)
if e > m-1:
e = m-1
assert len(lab) == 6
#quinphone_plus_state = lab.split(label_delimiter) # lab.split('_')[0]
quinphone = lab[:5]
state = lab[-1]
debug( '%s/%s halfphones' %(halfphone_counter, nphones*2) )
debug( 's,e: %s %s'%(s,e) )
if state == '2':
halfphone_name = copy.copy(quinphone)
halfphone_name[2] += '_L'
assert label_delimiter not in ''.join(halfphone_name), 'delimiter %s occurs in one or more name element (%s)'%(label_delimiter, halfphone_name)
halfphone_name = label_delimiter.join(halfphone_name)
names.append(halfphone_name)
#features[halfphone_counter, :dim] = speech[s,:]
#if representation_type in ['twopoint', 'threepoint']:
starts.append(s)
#if representation_type in ['onepoint', 'threepoint']:
middles.append(e)
elif state == '3':
#features[halfphone_counter, dim:] = speech[e,:]
# if representation_type in ['twopoint', 'threepoint']:
ends.append(e)
#halfphone_counter += 1
elif state == '4':
halfphone_name = copy.copy(quinphone)
halfphone_name[2] += '_R'
assert label_delimiter not in ''.join(halfphone_name), 'delimiter %s occurs in one or more name element (%s)'%(label_delimiter, halfphone_name)
halfphone_name = label_delimiter.join(halfphone_name)
names.append(halfphone_name)
#features[halfphone_counter, :dim] = speech[s,:]
# if representation_type in ['twopoint', 'threepoint']:
starts.append(s)
elif state == '5':
# if representation_type in ['onepoint', 'threepoint']:
middles.append(e)
elif state == '6':
#features[halfphone_counter, dim:] = speech[e,:]
# if representation_type in ['twopoint', 'threepoint']:
ends.append(e)
#halfphone_counter += 1
else:
sys.exit('bad state number')
assert len(names) == nphones*2 == len(starts) == len(ends) == len(middles)
# if representation_type in ['twopoint', 'threepoint']:
# assert len(names) == len(starts) == len(ends)
# if representation_type in ['onepoint', 'threepoint']:
# assert len(names) == len(middles)
names = np.array(names)
timings = zip(starts,ends)
### construct features with advanced indexing:--
if representation_type == 'onepoint':
features = speech[middles, :]
elif representation_type == 'twopoint':
features = np.hstack([speech[starts,:], speech[ends,:]])
elif representation_type == 'threepoint':
features = np.hstack([speech[starts,:], speech[middles,:], speech[ends,:]])
else:
sys.exit('eifq2o38rf293f')
return (names, features, timings)
def get_prosody_targets(speech, timings, ene_dim=0, lf0_dim=-1, fshift_sec=0.005):
'''
Return list of triplets, containing (dur,ene,lf0) where these are averages over
the halfphone. If all speech in the halfphone is unvoiced, return negative lf0
value, else the mean of the voiced frames.
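E.g. with the default fshift_sec of 0.005, a halfphone spanning 40 frames gets a
duration of 40 * 0.005 * 1000 = 200.0 msec.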
'''
prosody_targets = []
for (start, end) in timings:
energy = speech[start:end, ene_dim].mean()
pitch = speech[start:end, lf0_dim]
voiced = pitch[pitch>0.0]
if voiced.size==0:
pitch = -1000.0
else:
pitch = voiced.mean()
duration = (end - start) * fshift_sec
duration = duration * 1000.0 ## to msec
prosody_targets.append((duration, energy, pitch))
return prosody_targets
def get_halfphone_lengths(label):
'''
Take state timings, return monophone labels and halfphone durations.
We arbitrarily assign states 1 & 2 to halfphone 1, and states 3, 4 and 5 to halfphone 2.
'''
vals = []
curr_dur = 0
for ((start,end), quinphone) in label:
dur = end - start
monophone = quinphone[2]
state = quinphone[-1]
curr_dur += dur
if state == '3': ## NB: HTK numbering starts at 2
vals.append((monophone + '_L', curr_dur))
curr_dur = 0
if state == '6':
vals.append((monophone + '_R', curr_dur))
curr_dur = 0
return vals
def get_norm_durations(unit_names, timings, duration_stats, oov_stats=(5.0, 5.0)):
'''
Take quinphone names and timings for halfphones, return frame normed durations
'''
monophones = [name.split('/')[2] for name in unit_names]
durations = [e-s for (s,e) in timings]
means = [duration_stats.get(mono, oov_stats)[0] for mono in monophones]
stds = [duration_stats.get(mono, oov_stats)[1] for mono in monophones]
norm_durations = [(duration-mean)/std for (duration, mean, std) in zip(durations, means, stds)]
norm_durations = np.array(norm_durations).reshape((-1,1))
return norm_durations
def get_contexts_for_pitch_synchronous_joincost(speech, pm_indices):
'''
pm_indices: start and end indices of pitchmarks considered to be unit cutpoints:
[[ 0 1]
[ 1 5]
[ 5 12]
[ 12 17] ...
Because speech features used for join cost are expected to be already pitch synchronous or
synchronised, we can index rows of speech directly with these.
Where pm_indices gives indices for n units (n rows), return (n+1 x dim) matrix, each row of which
gives a 'join context' for the end of a unit. Row p gives the start join context for
unit p, and the end join context for unit p-1.
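E.g. for the pm_indices shown above, starts becomes [0, 1, 5, 12, ...] plus the final
end index, and the returned matrix is speech indexed by those rows, so consecutive
units share the context frame at their common cutpoint.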
'''
# enforce that t end is same as t+1 start -- TODO: do this check sooner, on the labels?
assert (pm_indices[1:, 0] == pm_indices[:-1, 1]).all()
## convert n -> n+1 with shared indices:
last_end = pm_indices[-1][1]
starts = np.array([s for (s,e) in pm_indices] + [last_end])
# print '=========='
# print speech.shape
# print starts
context_frames = speech[starts, :]
return context_frames
def get_contexts_for_natural_joincost(speech, timings, width=2):
'''
TODO: defaults to natural2
timings: start and end frame indices: [(0, 2), (2, 282), (282, 292), (292, 297), (297, 302)]
Where timings gives times for n units, return (n+1 x dim) matrix, each row of which
gives a 'join context' for the end of a unit. Row p gives the start join context for
unit p, and the end join context for unit p-1. Explain 'natural join' ...
'''
assert width % 2 == 0, 'context width for natural joincost should be even valued'
label_frames = timings[-1][1]
speech_frames, dim = speech.shape
## Note: small mismatches happen a lot with Blizzard STRAIGHT data, but never with world data prepared by Oliver
## Where they occur, use zero padding:
### ===== This should no longer be necessary (done in a previous step) =====
# if label_frames > speech_frames:
# padding_length = label_frames - speech_frames
# speech = np.vstack([speech, np.zeros((padding_length, dim))])
## starting parts
last_end = timings[-1][1]
starts = np.array([s for (s,e) in timings] + [last_end])
### reduplicate start and end frames -- assuming silence at end of utts, this gives a reasonable context
halfwidth = width / 2
prepadding = numpy.tile(speech[0,:], (halfwidth, 1))
postpadding = numpy.tile(speech[-1,:], (halfwidth, 1))
speech = numpy.vstack([prepadding, speech, postpadding])
frames = segment_axis(speech, width, overlap=width-1, axis=0)
context_frames = frames[starts,:,:]
## flatten the last 2 dimensions of the data:--
context_frames = context_frames.reshape((-1, width*dim))
return context_frames
def get_join_data_AL(speech, pm_indices, halfwidth):
'''
Newer version: pitch synchronous features.
Output of this operation can be used by later scripts for actively learning a join cost.
pm_indices: start and end indices of pitchmarks considered to be unit cutpoints:
[[ 0 1]
[ 1 5]
[ 5 12]
[ 12 17] ...
Because speech features used for join cost are expected to be already pitch synchronous or
synchronised, we can index rows of speech directly with these.
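The returned pair (start_contexts, end_contexts) contains two matrices, each with one
row per unit; each row holds halfwidth consecutive frames flattened into a vector of
length halfwidth * dim.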
'''
# enforce that t end is same as t+1 start -- TODO: do this check sooner, on the labels?
assert (pm_indices[1:, 0] == pm_indices[:-1, 1]).all()
starts = pm_indices[:,0]
ends = pm_indices[:,1]
start_speech = copy.copy(speech)
if starts[-1] + halfwidth > ends[-1]:
difference = starts[-1] + halfwidth - ends[-1]
padding = speech[-1,:].reshape((1,-1))
start_speech = np.vstack([start_speech] + difference * [padding])
start_speech = segment_axis(start_speech, halfwidth, overlap=halfwidth-1, axis=0)
start_contexts = start_speech[starts,:,:].reshape((len(starts), -1))
end_speech = copy.copy(speech)
if ends[0] - (halfwidth+1) < 0:
difference = (ends[0] - (halfwidth+1)) * -1
padding = speech[0,:].reshape((1,-1))
end_speech = np.vstack(difference * [padding] + [end_speech])
ends -= (halfwidth+1)
end_speech = segment_axis(end_speech, halfwidth, overlap=halfwidth-1, axis=0)
end_contexts = end_speech[ends,:,:].reshape((len(ends), -1))
return (start_contexts, end_contexts)
##### fixed framerate version:
# def get_join_data_AL(speech, timings, halfwidth):
# '''
# Output of this operation is not yet used -- it will be used for actively learned join cost
# '''
# print speech
# print timings
# sys.exit('wefswrb545')
# # pylab.plot(speech)
# # pylab.show()
# ## starting parts
# starts = [s for (s,e) in timings]
# ## do we need to pad the end of the speech?
# m,n = speech.shape
# ##print 'N'
# ##print n
# if max(starts) + halfwidth > m:
# diff = (max(starts) + halfwidth) - m
# start_speech = np.vstack([speech, np.zeros((diff, n))])
# debug('correct start')
# else:
# start_speech = speech
# #print start_speech.shape
# frames = segment_axis(start_speech, halfwidth, overlap=halfwidth-1, axis=0)
# #print frames.shape
# start_frames = frames[starts,:,:]
# #print start_frames.shape
# ends = np.array([e for (s,e) in timings])
# ## do we need to pad the start of the speech?
# if min(ends) - halfwidth < 0:
# diff = 0 - (min(ends) - halfwidth)
# end_speech = np.vstack([np.zeros((diff, n)), speech])
# ends += diff
# debug('correct end')
# else:
# end_speech = speech
# ends -= halfwidth ### to get starting point of end segments
# frames = segment_axis(end_speech, halfwidth, overlap=halfwidth-1, axis=0)
# #print frames.shape
# end_frames = frames[ends,:,:]
# ## flatten the last 2 dimensions of the data:--
# #print start_frames.shape
# #print halfwidth, n
# start_frames = start_frames.reshape((-1, halfwidth*n))
# end_frames = end_frames.reshape((-1, halfwidth*n))
# return (start_frames, end_frames)
def get_mean_std(feat_dir_dict, stream_list, datadims, flist):
means = {}
stds = {}
for stream in stream_list:
stream_files = [os.path.join(feat_dir_dict[stream], base+'.'+stream) for base in flist]
if stream in vuv_stream_names:
means[stream], _ = get_mean(stream_files, datadims[stream], exclude_uv=True)
stds[stream] = get_std(stream_files, datadims[stream], means[stream], exclude_uv=True)
else:
means[stream], nframe = get_mean(stream_files, datadims[stream])
stds[stream] = get_std(stream_files, datadims[stream], means[stream])
mean_vec = []
for stream in stream_list:
mean_vec.append(means[stream])
# if stream in vuv_stream_names: ## add fake stats for VUV which will leave values unaffected
# mean_vec.append(numpy.zeros(means[stream].shape))
std_vec = []
for stream in stream_list:
std_vec.append(stds[stream])
# if stream in vuv_stream_names: ## add fake stats for VUV which will leave values unaffected
# std_vec.append(numpy.ones(stds[stream].shape))
mean_vec = np.hstack(mean_vec)
std_vec = np.hstack(std_vec)
return mean_vec, std_vec
def pad_speech_to_length(speech, labels):
'''
Small mismatches happen a lot with Blizzard STRAIGHT data, so we need some hacks to handle them.
This is rarely/never an issue with world data and labels prepared by Ossian
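Mismatches of up to label_length_diff_tolerance frames are repaired by zero-padding
(speech shorter than labels) or trimming (speech longer than labels); larger
mismatches return a 1 x 1 matrix so the caller can skip the utterance.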
'''
m,dim = speech.shape
nframe = labels[-1][0][1]
if math.fabs(nframe - m) > label_length_diff_tolerance:
print 'Warning: number of frames in target cost speech and label do not match (%s vs %s)'%(m, nframe)
return numpy.array([[0.0]])
## Note: small mismatches happen a lot with Blizzard STRAIGHT data, but never with world data prepared by Oliver
## Where they occur, use zero padding:
if nframe > m:
padding_length = nframe - m
speech = np.vstack([speech, np.zeros((padding_length, dim))])
elif nframe < m:
speech = speech[:nframe,:]
return speech
def get_waveform_fragments(wave_fname, rate, context_length, nonlin_wave=True, norm=np.zeros((0)), wave_context_type=0):
'''
wave_context_type: 0 = use the raw preceding context_length samples as the context (no filtering)
if wave_context_type == 1: a dilated filter is applied, so leftmost output values will correspond to rightmost (most recent) waveform samples
'''
wave, fs = read_wave(wave_fname)
assert fs == rate
if wave_context_type == 0:
wavefrag_length = context_length
elif wave_context_type == 1:
DILATION_FACTOR = 1.2
filter_matrix = varying_filter.make_filter_01(DILATION_FACTOR, context_length)
wavefrag_length, nfeats = filter_matrix.shape
assert nfeats == context_length
else:
sys.exit('unknown wave_context_type: %s'%(wave_context_type))
wave = np.concatenate([np.zeros(wavefrag_length), wave])
# print 'Linear wave stats:'
# print wave.mean()
# print wave.std()
if nonlin_wave:
wave = lin2mu(wave)
# print 'Prenormed omed mulaw wave stats:'
# print wave.mean()
# print wave.std()
if NORMWAVE:
if norm.size > 0:
assert norm.size == 2
(mu, sigma) = norm
# import pylab
# pylab.subplot(211)
# pylab.plot(wave)
#print type(wave[0])
wave = (wave - mu) / sigma
#print wave[:10]
# pylab.subplot(212)
# pylab.plot(wave)
# pylab.show()
#sys.exit('esdvsvsdfbv0000')
# print 'Nomed mulaw wave stats:'
# print wave.mean()
# print wave.std()
# print 'Normed with:'
# print (mu, sigma)
frags = segment_axis(wave, wavefrag_length+1, overlap=wavefrag_length, axis=0)
context = frags[:,:-1]
next_sample = frags[:,-1].reshape((-1,1))
if wave_context_type > 0:
context = np.dot(context, filter_matrix)
return (context, next_sample)
def get_wave_mean_std(wav_datadir, flist, rate, nonlin_wave=True, nutts=100):
'''
By default, find mean and std of 1st 100 sentences only
'''
waves = []
for fname in flist[:min(nutts,len(flist))]:
wave_fname = os.path.join(wav_datadir, fname + '.wav')
wave, fs = read_wave(wave_fname)
assert fs == rate
if nonlin_wave:
wave = lin2mu(wave)
waves.append(wave)
waves = np.concatenate(waves)
mu = waves.mean()
sigma = waves.std()
return np.array([mu, sigma])
if __name__ == '__main__':
#################################################
# ======== process command line ==========
a = ArgumentParser()
a.add_argument('-c', dest='config_fname', required=True)
a.add_argument('-X', dest='overwrite_existing_data', action='store_true', \
help= "clear any previous training data first")
opts = a.parse_args()
config = {}
execfile(opts.config_fname, config)
del config['__builtins__']
print config
main_work(config, overwrite_existing_data=opts.overwrite_existing_data)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Verify various configuration methods
"""
# externals
import pyre
# the protocols
class functor(pyre.protocol, family="quad.functors"):
@pyre.provides
def eval(self, z):
"""evaluate a function at the given argument {z}"""
# components
# functors
class const(pyre.component, family="quad.functors.const"):
c = pyre.properties.float()
@pyre.export
def eval(self, z): return self.c
class line(pyre.component, family="quad.functors.line"):
α = pyre.properties.float()
β = pyre.properties.float()
@pyre.export
def eval(self, z): return self.α * z + self.β
class integrator(pyre.component, family="quad.integrator"):
integrand = functor(default=const)
# the tests
def test():
# print the configuration
# pyre.executive.configurator.dump(pattern='quad')
# for error in pyre.executive.errors: print(error)
# check the class defaults from the configuration file
# const
assert const.c == 1.0
# line
assert line.α == 1.0
assert line.β == 2.0
# integrator
assert integrator.integrand.pyre_family() == line.pyre_family()
# instantiations
zero = const(name='zero')
assert zero.c == 0
two = const(name='two')
assert two.c == 2.0
# a default integrator
nameless = integrator(name='nameless')
assert nameless.pyre_name == 'nameless'
assert nameless.pyre_family == integrator.pyre_family
assert nameless.integrand.pyre_name == 'nameless.integrand'
assert nameless.integrand.pyre_family() == line.pyre_family()
assert nameless.integrand.α == line.α
assert nameless.integrand.β == line.β
# a named one
special = integrator(name='special')
assert special.pyre_name == 'special'
assert special.pyre_family == integrator.pyre_family
assert special.integrand.pyre_name == 'special.integrand'
assert special.integrand.pyre_family() == const.pyre_family()
assert special.integrand.c == 3.0
# another named one
qualified = integrator(name='qualified')
assert qualified.pyre_name == 'qualified'
assert qualified.pyre_family == integrator.pyre_family
assert qualified.integrand.pyre_name == 'qualified.integrand'
assert qualified.integrand.pyre_family() == line.pyre_family()
assert qualified.integrand.α == 0.5
assert qualified.integrand.β == 1.5
# a named one with an explicitly named integrand
explicit = integrator(name='explicit')
assert explicit.pyre_name == 'explicit'
assert explicit.pyre_family == integrator.pyre_family
assert explicit.integrand.pyre_name == 'two'
assert explicit.integrand.pyre_family == const.pyre_family
assert explicit.integrand.c == 2.0
# all done
return
# main
if __name__ == "__main__":
test()
# end of file
|
# Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
'''An implementation of the Google BigQuery Standard SQL syntax.
https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax
Differences from this grammar occur for two reasons.
- The original grammar is left recursive, and this is a recursive descent
parser, so we need to juggle things to make that work.
- In addition, some expressions have their own unique grammar; these are not necessarily implemented.
In a recursive descent parser: each grammar rule corresponds to a function. Each function
takes a list of tokens, and returns a pair, of the identified node and the remaining unparsed
tokens, or None and all the tokens if the rule doesn't match.
To simplify the grammar, we allow rules to be specified in a few ways that are not Python functions.
A rule that is a literal string means a rule matching that string.
A rule that is a tuple of rules means all of the contained rules must match in order.
A rule that is a list of rules means exactly one of the rules must match.
A rule that is None matches zero tokens; it's a placeholder to allow, with lists, for rules to
be optional.
This logic is implemented by the apply_rule function. All rules must use this function to apply
a rule to input tokens.
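For example, the maybe_limit rule defined further below,
    [('LIMIT', literal, [('OFFSET', literal), None]), None],
is an optional rule (the outer list ending in None) whose tuple matches the token
LIMIT followed by a literal value, optionally followed by OFFSET and another literal.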
Given this, we still implement some rules as functions. This is for one of two reasons.
- The function uses parsing logic other than recursive descent; data_source is an example.
- The rule depends on other rules that also depend on this rule, so we define
one as a function to allow the mutual recursion (since Python doesn't have let rec).
Note that this file is a mix of 'constants' and functions. All grammar rules are notionally
functions because this is a recursive descent parser, but the apply_rule helper function lets us
write simpler rules as constants (tuples, lists, etc). More complex rules are defined as actual
functions. Therefore constants are formatted the same as functions here, because they may become
functions as they grow in complexity.
'''
from typing import List, cast # noqa: F401
from .bq_abstract_syntax_tree import EMPTY_NODE, Field
from .bq_operator import binary_operator_expression_rule
from .dataframe_node import QueryExpression, Select, SetOperation, TableReference, Unnest
from .evaluatable_node import (Array, Array_agg, Case, Cast, Count, Exists, Extract, FunctionCall,
If, InCheck, Not, NullCheck, Selector, StarSelector, Struct,
UnaryNegation)
from .join import DataSource, FromItemType, Join
from .query_helper import AppliedRuleOutputType # noqa: F401
from .query_helper import apply_rule, separated_sequence, wrap
from .terminals import grammar_literal, identifier, literal
from .type_grammar import array_type, scalar_type, struct_type
def field(tokens):
# type: (List[str]) -> AppliedRuleOutputType
'''A field is a column reference in the format TableName.ColumnName or just ColumnName.
Args:
tokens: Parts of the user's query (split by spaces into tokens) that
are not yet parsed
Returns:
A tuple of the Field node representing the result of applying the rule
to the tokens, and the remaining unmatched tokens
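For example (table and column names here are illustrative), the tokens
['orders', '.', 'price'] match the TableName.ColumnName alternative and yield a
Field node for orders.price, while ['price'] alone yields a Field for just the column.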
'''
# TableName.ColumnName rule: (identifier, '.', identifier)
# ColumnName rule: identifier
field_path, new_tokens = apply_rule(
[(identifier, '.', identifier), identifier],
tokens)
# Field initializer always expects a tuple, but identifier() will return a
# string
if not field_path:
return None, tokens
elif not isinstance(field_path, tuple):
field_path = (field_path,)
return Field(field_path), new_tokens
def core_expression(tokens):
# type: (List[str]) -> AppliedRuleOutputType
"""Grammar rule for a core set of expressions that can be nested inside other expressions.
The current set of handled expressions are:
- Array
- Array_agg
- Count
- Function call
- A field (column)
- Case
- A literal (number, string, etc)
- If
- Cast
- Exists
- Extract
- Another expression nested in parentheses
- Not
- Unary negation
"""
return apply_rule(
[
# COUNT(*), COUNT(DISTINCT expression), COUNT(expression)
wrap(Count.create_count_function_call,
('COUNT', '(', ['*', (['DISTINCT', None], expression)], ')', [over_clause, None])),
wrap(Array_agg.create_function_call,
('ARRAY_AGG', '(', ['DISTINCT', None], expression,
[(['IGNORE', 'RESPECT'], 'NULLS'), None],
[(grammar_literal('ORDER', 'BY'),
separated_sequence(identifier, ['ASC', 'DESC', None], ',')),
None],
[('LIMIT', literal), None],
')',
[over_clause, None])),
wrap(FunctionCall.create,
(identifier, '(', [separated_sequence(expression, ','), None], ')', [over_clause,
None])),
field,
(Case, [expression, None], 'WHEN',
separated_sequence((expression, 'THEN', expression), 'WHEN'),
[('ELSE', expression), None], 'END'),
literal,
(If, '(', expression, ',', expression, ',', expression, ')'),
(Cast, '(', expression, 'AS', scalar_type, ')'),
(Exists, '(', query_expression, ')'),
array_expression,
(Extract, '(', identifier, 'FROM', expression, ')'),
# Typeless struct syntax
wrap(Struct.create_from_typeless,
('STRUCT', '(', separated_sequence((expression, [('AS', identifier), None]), ','),
')',)),
# Typed struct syntax
wrap(Struct.create_from_typed,
(struct_type, '(', separated_sequence(expression, ','), ')')),
# Tuple syntax for structs (anonymous column names, implicit types)
wrap(Struct.create_from_tuple,
('(', expression, ',', separated_sequence(expression, ','), ')')),
('(', expression, ')'),
(Not, expression),
(UnaryNegation, '-', expression),
],
tokens)
def post_expression(tokens):
"""Grammar rule for expressions that occur only after a core expression.
For example:
<core_expression> IS NULL
<core_expression> IN (a, b, c)
If the query has none of these, it can still match a plain `core_expression`,
the last item in the list. [Currently this is the only thing implemented.]
"""
return apply_rule(
[
(NullCheck, core_expression, [grammar_literal('IS', 'NULL'),
grammar_literal('IS', 'NOT', 'NULL')]),
(InCheck, core_expression, ['IN', grammar_literal('NOT', 'IN')],
'(', separated_sequence(expression, ','), ')'),
core_expression,
],
tokens)
# Grammar rule for expressions that can be nested inside other expressions.
# It can be a plain core_expression, a post_expression (core_expression with
# additional content at the end), or a sequence of post_expressions separated by
# binary operators.
expression = binary_operator_expression_rule(post_expression)
# Grammar rule for an array-typed expression
array_expression = (
Array, [array_type, None], '[', [separated_sequence(expression, ','), None], ']')
# Grammar rule for a clause added to analytic expressions to specify what window the function
# is evaluated over.
#
# See full syntax here:
# https://cloud.google.com/bigquery/docs/reference/standard-sql/analytic-function-concepts#analytic-function-syntax
# Window identifiers and window frames are not supported.
over_clause = ('OVER',
'(',
[('PARTITION', 'BY', separated_sequence(expression, ',')), None],
[('ORDER', 'BY', separated_sequence((expression, ['ASC', 'DESC', None]), ',')),
None],
')')
# [Optional] "ORDER BY" followed by some number of expressions to order by, and a sort direction
maybe_order_by = [('ORDER', 'BY',
separated_sequence((expression, ['ASC', 'DESC', None]), ',')),
None]
# [Optional] "LIMIT" followed by the number of rows to return, and an optional
# "OFFSET" to indicate which row to start at
maybe_limit = [('LIMIT', literal, [('OFFSET', literal), None]), None]
# A set operator, combining the results of separate queries.
set_operator = [
grammar_literal('UNION', 'ALL'),
grammar_literal('UNION', 'DISTINCT'),
grammar_literal('INTERSECT', 'DISTINCT'),
grammar_literal('EXCEPT', 'DISTINCT')]
def query_expression(tokens):
# type: (List[str]) -> AppliedRuleOutputType
'''This is the highest-level grammar method. It is called by query.execute_query().
The "raw" rule syntax is supposedly this:
https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax
query_expression = ([with_expression, None],
[select,
('(', query_expression, ')'),
(query_expression, set_operator, query_expression)],
maybe_order_by,
maybe_limit)
However, (a) this is left-recursive, so we would need to juggle things to avoid
infinite recursion, and (b) this permits things like
WITH query1 as (SELECT 1) WITH query2 as (SELECT 2) SELECT 3 UNION ALL SELECT 4
that (1) seem kind of strange (two successive with clauses)
and (2) give an error on prod BigQuery.
So we use the following simpler syntax.
Args:
tokens: Parts of the user's query (split by spaces into tokens) that
are not yet parsed
Returns:
A tuple of the Abstract Syntax Tree nodes representing the result of
applying the rule to the tokens, and the remaining unmatched tokens.
'''
core_query_expression = (
QueryExpression,
[with_expression, None],
[select,
('(', query_expression, ')')],
# order_by
maybe_order_by,
# limit
maybe_limit)
return apply_rule(
[(SetOperation, core_query_expression, set_operator, query_expression),
core_query_expression],
tokens)
with_expression = ('WITH', separated_sequence((identifier, 'AS', '(', query_expression, ')'), ','))
def alias(tokens):
# type: (List[str]) -> AppliedRuleOutputType
'''An optional alias to rename a field or table.
Args:
tokens: Parts of the user's query (split by spaces into tokens) that
are not yet parsed
Returns:
A tuple of the matched alias identifier (if any), and the remaining tokens.
'''
alias_node, new_tokens = apply_rule([
(['AS', None], identifier),
None
], tokens)
if alias_node == EMPTY_NODE:
return EMPTY_NODE, tokens
if not (isinstance(alias_node, tuple) and len(alias_node) == 2):
raise RuntimeError("Internal parse error: alias rule returned {!r}".format(alias_node))
# The alias node will be a tuple of ('AS', new_name), so we get rid of the
# 'AS' and just return the new name (alias identifier)
as_token, alias_identifier = alias_node
return alias_identifier, new_tokens
def select(tokens):
# type: (List[str]) -> AppliedRuleOutputType
'''Grammar rule matching a select clause.
This rule is adapted from here:
https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select-list
Args:
tokens: Parts of the user's query (split by spaces into tokens) that
are not yet parsed
Returns:
A tuple of the Abstract Syntax Tree nodes representing the result of
applying the rule to the tokens, and the remaining unmatched tokens.
'''
return apply_rule(
(Select,
# modifier
['ALL', 'DISTINCT', None],
# fields
separated_sequence(
# The selector either contains a * or is just an expression
[
# If selector's expression includes a '*', it can also include
# exception and replacement
(
StarSelector,
# expression - can be just * or field.* or table.field.*, etc
[(expression, '.'), None],
'*',
# exception
[('EXCEPT', '(', separated_sequence(identifier, ','), ')'), None],
# replacement
[('REPLACE',
'(',
separated_sequence((expression, ['AS', None], identifier), ','),
')'),
None],
),
# The selector does not include a * and therefore cannot have
# an exception or replacement
(Selector, expression, alias),
],
# separator for separated_sequence()
','
),
# from
[('FROM', data_source), None],
# where
[('WHERE', expression), None],
# group by
[('GROUP', 'BY', separated_sequence([field, literal], ',')), None],
# having
[('HAVING', expression), None],
),
tokens)
# Expression following "FROM": a table or another query, followed optionally by an AS alias
# Examples: "SomeTable", "(SELECT a from SomeTable)", "SomeTable AS t"
from_item = ([
(TableReference, separated_sequence(identifier, '.')),
('(', query_expression, ')'),
(Unnest, '(', array_expression, ')'),
], alias)
join_type = ['INNER',
'CROSS',
grammar_literal('FULL', 'OUTER'), 'FULL',
grammar_literal('LEFT', 'OUTER'), 'LEFT',
grammar_literal('RIGHT', 'OUTER'), 'RIGHT',
None]
def data_source(orig_tokens):
# type: (List[str]) -> AppliedRuleOutputType
'''Includes the initial FROM expression as well as any following JOINs.
This describes everything that comes after a FROM, essentially in the form:
from_item JOIN from_item ON on_expression JOIN from_item ON on_expression JOIN ...
The first from_item is called first_from and is required.
After that is any number of repetitions of ('JOIN', from_item, (ON, on_expression)),
and is parsed into an array (possibly empty) called joins.
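For example, the tokens for "my_table JOIN other_table ON a = b" (table names here
are illustrative) parse into a DataSource whose first_from is the from_item for
my_table and whose joins list holds a single Join carrying the ON condition.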
Args:
orig_tokens: Parts of the user's query (split by spaces into tokens)
that are not yet parsed
Returns:
A tuple of the Abstract Syntax Tree nodes representing the result of
applying the rule to the tokens, and the remaining unmatched tokens.
'''
first_from, tokens = apply_rule(from_item, orig_tokens)
if not first_from:
return None, tokens
joins = []
while True:
next_join, tokens = apply_rule([(',', from_item), # shorthand for cross-join
(join_type,
'JOIN',
from_item,
[('ON', expression),
('USING', '(', separated_sequence(identifier, ','), ')'),
None])],
tokens)
if next_join:
# This case is triggered by the shorthand cross-join above where the table to be joined
# is just specified separated by a comma, with no join type or condition specified.
if not isinstance(next_join, tuple):
raise RuntimeError("Internal error; join rule above must result in tuple not {}"
.format(next_join))
if len(next_join) == 2:
joins.append(Join('CROSS', cast(FromItemType, next_join), EMPTY_NODE))
else:
joins.append(Join(*next_join))
else:
break
return DataSource(cast(FromItemType, first_from), joins), tokens
|
import csv
# TODO
# Below are measurements for failing transparent http proxies:
# https://explorer.ooni.org/measurement/20170509T041918Z_AS5384_fSeP50M6LS3lUhIarj2WhbQNIQS8mKtvhuxEhwJOhgheEL7EsZ?input=http:%2F%2Fanonym.to
# https://explorer.ooni.org/measurement/20180914T010619Z_AS11427_d3lligD9zAEneBLYeI8Mt2JUpVBC2y5zFZ1EG3XNLo1smBQa48?input=http:%2F%2Fmoqavemat.ir
# though they don't look like a blockpage
# TODO(explorer error)
# https://explorer.ooni.org/measurement/20170722T013346Z_AS14340_QXOwhyfxJUPRGWsCqanoOycTnbHcpU4CW3NBNXUMMbxbi3Q6I3?input=http:%2F%2Fwww.blubster.com
#https://explorer.ooni.org/measurement/20181118T123404Z_AS7922_pgeD0Ka5ySsyl55RBYXO07V82WoH0uggCYFJsvlcp2d55Tju3i?input=http:%2F%2Fwww.acdi-cida.gc.ca
# blocking locality: global > country > isp > local
# unclassified locality is named "general"
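# For example, the first "AE" entry below is intended to match responses whose Server
# header begins with "Protected by WireFilter", and tags them with country-level locality.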
fingerprints = {
"AE": [
{
"header_name": "Server",
"header_prefix": "Protected by WireFilter",
"locality": "country",
},
# https://github.com/ooni/pipeline/issues/163
# https://explorer.ooni.org/measurement/20170423T142350Z_AS0_5EG4lO5Z8KHN2jwbqB5hQqxlC44iXq2A2YCxoASGvY5Z05KrGL?input=http:%2F%2Fwww.foxnews.com
{
"header_name": "Location",
"header_prefix": "http://www.bluecoat.com/notify-NotifyUser1",
"locality": "country"
}
],
"AF": [
# https://github.com/ooni/pipeline/issues/155#issue-437678102
# https://explorer.ooni.org/measurement/20171130T100203Z_AS38742_DmzL8KtUBadHNFU6wchlAgOy7MBwpDH75ZWdNzs0e55ArgFffx?input=http:%2F%2Fhowtogrowmarijuana.com
{
"body_match": "content=\"Access Denied - Afghan Wireless Communication Company",
"locality": "isp",
}
],
"AR": [
# https://github.com/ooni/pipeline/issues/178
# https://explorer.ooni.org/measurement/20170312T225529Z_AS0_hV7ESTIV4phKrhXxhO9NTxc5XrcXsgzPqZzEkYbrSjgPv9Xyor?input=http:%2F%2Fwww.erogeschichten.com
{
"body_match": "<title>Notificación: política: filtrado de URL</title>",
#"header_full": "1.1 FC-WSA-FibertelZone3.int.fibercorp.com.ar:80 (Cisco-WSA/10.1.0-204)",
#"header_name": "Via",
"locality": "isp",
}
],
"AU": [
# https://github.com/ooni/pipeline/issues/179
# https://explorer.ooni.org/measurement/20171106T070150Z_AS9426_z1R8gTMEhKzX69ZVymQrubL1f5FliuHXuFy6Z7TRomXpC5w1jr?input=http:%2F%2Fwww.rollitup.org
{
"body_match": "<title>Notification: Policy: URL Filtering</title>",
#"header_full": "1.1 ac2106-wsag2.wsmartwifi.com.au:80 (Cisco-WSA/10.1.1-235)",
#"header_name": "Via",
"locality": "local",
},
# https://explorer.ooni.org/measurement/20171119T095401Z_AS1221_sSYzvupLp9kaEiQiBBS4nlqpHzsO59Eh5SIyp60Z83ah5uRXtM?input=http:%2F%2Fwww.twistedinternet.com
{
"header_name": "Location",
"header_prefix": "https://go.telstra.com.au/broadbandprotect/networkprotectionstandard",
"locality": "isp",
},
],
"BE": [
{
"body_match": "that is considered illegal according to Belgian legislation",
"locality": "country",
},
# https://github.com/ooni/pipeline/issues/168
# https://explorer.ooni.org/measurement/20171106T080745Z_AS5432_m0LkLhXH3oxwworJZUpHGmPeJV2Abk3TMxr2dz8wu04ziIUAGA?input=http:%2F%2Fguardster.com%2F
{
"header_full": "1.1 webfilter.stjohns.net (http_scan_byf/3.5.16)",
"header_name": "Via",
"locality": "local",
},
],
"BR": [
# https://github.com/ooni/pipeline/issues/180
# https://explorer.ooni.org/measurement/20171206T160707Z_AS10429_4pfvLyNqYHpbLQI9tIpuQr7CPgvOOGaZbbnm7gMIZKdBE4oXJ9?input=http:%2F%2Fcrackspider.net
{
"header_full": "1.1 wsa07.grupoamil.com.br:80 (Cisco-WSA/9.1.2-010)",
"header_name": "Via",
"locality": "local",
},
# https://explorer.ooni.org/measurement/20181023T171547Z_AS262318_cr2e8wzvNXORo80y7pOT9iqxLDOaHAboXakfU8qnQkWh50K0cs?input=http:%2F%2Fwww.pandora.com
{
"header_full": "SonicWALL",
"header_name": "Server",
"locality": "local",
}
],
"CA": [
# https://explorer.ooni.org/measurement/20171026T125929Z_AS0_nkYRKqxCJy1PZQ9yBcsFuG61hFzZRYeio3N21CEBwot7MiikfZ?input=http:%2F%2Fwww.schwarzreport.org
# https://explorer.ooni.org/measurement/20181010T185819Z_AS5664_EeT6QJ84dSl7QaHu9Dwb5TcByIY2qiGrdtcyZlSFotmQlc53Hg?input=http:%2F%2Fwww.sportingbet.com
# https://explorer.ooni.org/measurement/20170604T114135Z_AS852_pZtNoyGV6fO5K97OJwwhM3ShmlWnuxKHLGrWbjSi4omt9KvyIi?input=http:%2F%2Fwww.xroxy.com
{
"body_match": " <title>Notification: Policy: URL Filtering</title>",
"locality": "local"
}
],
"CH": [
# https://github.com/ooni/pipeline/issues/191
# https://explorer.ooni.org/measurement/43xnHPgN3gi0kt6EhmF2VbIOfQSV5CN9TXicU0A5ChlYejSGjT24Y1noM2DJgdk8?input=http:%2F%2Fwww.wetplace.com
{
"header_name": "Location",
"header_prefix": "https://192.168.88.1/sgerror.php",
"locality": "local",
}
],
"CL": [
# https://github.com/ooni/pipeline/issues/196
# https://explorer.ooni.org/measurement/20170413T224353Z_AS0_Izup11aUZt39zCD1TSFUC1uvOmg7tO1bhwRHWEYsk2WgNrgObZ?input=http:%2F%2Fshareaza.com%2F
{
"header_full": "Kerio Control Embedded Web Server",
"header_name": "Server",
"locality": "local",
}
],
"CO": [
# https://github.com/ooni/pipeline/issues/156
# https://explorer.ooni.org/measurement/20190114T112310Z_AS262928_N3ChIPo5QSMMZ3qgA25G9QHE55suubZbPPkAfmNLkmOXuh9ZXJ?input=http:%2F%2Fwww.eurogrand.com%2F
{
"body_match": "Esta página ha sido bloqueada por disposición del Ministerio de las TIC,",
"locality": "country"
}
],
"CY": [
{
"body_match": "nba.com.cy/Eas/eas.nsf/All/6F7F17A7790A55C8C2257B130055C86F",
"locality": "country",
}
],
"DE": [
# https://github.com/ooni/pipeline/issues/181
# https://explorer.ooni.org/measurement/20180312T143953Z_AS3320_Kumlzdy0NSKyLoB0bt9PHKXp4OKItFMuYqMCw4ouQJapQFVUNR?input=http:%2F%2Fwww.hackhull.com
# https://explorer.ooni.org/measurement/20170506T130315Z_AS47610_cpNzdaQAx60UxJxEdXh54qBxxEKLexnoPdNam8KDJ181yCbZir?input=http:%2F%2Fspys.ru
{
"body_match": "<title>Notification: Policy: URL Filtering</title>",
#"header_full": "1.1 s690-10.noc.rwth-aachen.de:80 (Cisco-WSA/10.1.1-230)",
#"header_name": "Via",
"locality": "local",
},
# https://explorer.ooni.org/measurement/20190113T190405Z_AS60068_P7XDllvakD4djyFssTl9xVyVJI5bxSVl6mCxsGFsPLd94ohP8U?input=http:%2F%2Foccupystreams.org
{
"header_full": "https://blocked.netalerts.io",
"header_name": "X-App-Url",
"locality": "isp",
},
],
"DK": [
{"body_match": "lagt at blokere for adgang til siden.", "locality": "country"},
# https://github.com/ooni/pipeline/issues/182
# https://explorer.ooni.org/measurement/20171121T193103Z_AS41746_lcM0SY6VKKQ9SnL3SUrh6aiH2hgJUkCs1zGtWuEjYTgVHqW7Lz?input=http:%2F%2Fwww.wetplace.com
{
#"header_full": "1.1 dsbpx001.dsb.dk:25 (Cisco-WSA/9.1.1-074)",
#"header_name": "Via",
"body_match": "<title>Notification: Policy: URL Filtering</title>",
"locality": "local",
},
],
"EG": [
# https://github.com/ooni/pipeline/issues/193
# https://explorer.ooni.org/measurement/20171126T053414Z_AS36992_8F2afEp0cM9V7Who1mEJUpqX2fGVbvgoZ2DfVGi3Nv3lQzypzV?input=http:%2F%2Fkh-press.com
{
"header_name": "Location",
"header_prefix": "http://notification.etisalat.com.eg/etisalat/notification/redirectionactions.html?",
"locality": "isp",
}
],
"ES": [
# https://github.com/ooni/pipeline/issues/82#issue-260659726
# https://explorer.ooni.org/measurement/20170925T151843Z_AS12338_JMQ1OWOJQQ4WsPmSNRi6HsR5w5tMSX2IgNeXhLN5wUCB7051jX?input=http:%2F%2Fwww.ref1oct.eu%2F
{
"body_match": "<title>Dominio-No-Disponible</title>",
"locality": "global",
},
# https://explorer.ooni.org/measurement/20180523T140922Z_AS12430_ZRebsyxxswrlcQhz1wEs6apHw5Br7FWNc1LenCsVR6Rkl1OCSD?input=http:%2F%2Fwww.marijuana.com
{
"header_name": "Server",
"header_full": "V2R2C00-IAE/1.0",
"locality": "local",
},
],
"FR": [
{"body_match": 'xtpage = "page-blocage-terrorisme"', "locality": "country"},
# https://github.com/ooni/pipeline/issues/184
# https://explorer.ooni.org/measurement/20170213T143932Z_AS57584_U1gMCvIFLNWIKnSrpWTLrN9oewaHiT0DdKjLaT3MiMqnBk3J3U?input=http:%2F%2Fwww.23.org
{
#"header_full": "1.1 proxy2.rmc.local:80 (Cisco-WSA/9.1.1-074)",
#"header_name": "Via",
"body_match": "<title>Notification: Policy: URL Filtering</title>",
"locality": "local",
},
],
"GB": [
# https://github.com/ooni/pipeline/issues/188
# https://explorer.ooni.org/measurement/20160401T061018Z_ehdZlDMzbUsYhYSGSWalmqKnqGIynfVHQhKvocrcKEmFeynXHd?input=Trans500.com
{
"header_name": "Location",
"header_prefix": "http://blocked.nb.sky.com",
"locality": "isp",
},
# https://explorer.ooni.org/measurement/20181118T215552Z_AS12576_LXCqBnsH90yHeMcE6LNDYwOl6IHnop0dTroWxA5NE7AhJ8vFn9?input=http:%2F%2Fwww.globalfire.tv
{
"header_full": "http://ee.co.uk/help/my-account/corporate-content-lock",
"header_name": "Location",
"locality": "isp",
},
# https://explorer.ooni.org/measurement/20181120T092944Z_AS199335_6V7Di7t3qUP7qVBYDlOHo9nxgle5NMQIDGHV50wtmCLuZTVPzU?input=http:%2F%2Fwww.microsofttranslator.com
{
"header_name": "Location",
"header_prefix": "http://Filter7external.schoolsbroadband.co.uk/access",
"locality": "isp",
},
# http://localhost:3100/measurement/20170605T124503Z_AS0_eVo3z6wXAYDVrAZDsgqiM7pPlLuKR7l4zNF8oEUrGmZ62HWU4l?input=http:%2F%2Frockettube.com
{
"header_name": "Location",
"header_prefix": "https://account.nowtv.com/broadband-buddy/blocked-pages/",
"locality": "isp"
},
# https://explorer.ooni.org/measurement/20180127T090022Z_AS5378_NK5M8lt6WbL1LOdiIooPX5PXla2DQDlAkGIK4HimIWbUpoMlj8?input=http:%2F%2Fwww.imageshack.us
{
"header_full": "www.vodafone.co.uk/contentcontrolpage/vfb-category-blocklist.html",
"header_name": "Location",
"locality": "isp",
},
# https://explorer.ooni.org/measurement/20190203T142614Z_AS60339_PIMtdDSw9QEc2C9hDgA8tx5LZDrdmUP8ZFsSkdGRX2QNnisqaM?input=http:%2F%2Fwww.peacefire.org%2Fcircumventor%2Fsimple-circumventor-instructions.html
{
"header_full": "http://three.co.uk/mobilebroadband_restricted",
"header_name": "Location",
"locality": "isp",
},
# https://explorer.ooni.org/measurement/20170404T062436Z_AS60339_QJa9jOkZrduHeY2fH5oQmChKFtqwM1RzB3RG2AAjE1IMDMXp59?input=http:%2F%2Fwww.naughty.com
{
"body_match": '<a href="https://smobile.three.co.uk/837/">Age Validation</a>',
"locality": "isp"
}
],
"GF": [{"body_match": 'xtpage = "page-blocage-terrorisme"', "locality": "country"}],
"GR": [
{
"body_match": "www.gamingcommission.gov.gr/index.php/forbidden-access-black-list/",
"locality": "country",
},
# https://github.com/ooni/pipeline/issues/187
# https://explorer.ooni.org/measurement/20140703T083044Z_xCnGLSGTjBtyHfCTRvswNcQAjkhIwkpDWJrXqyTCjeMVCECmnv?input=http:%2F%2Fwww.royalvegascasino.com%2F
{
"header_prefix": "http://1.2.3.50/ups/no_access",
"header_name": "Location",
"locality": "isp",
}
],
"HU": [
# https://github.com/ooni/pipeline/issues/223
# https://explorer.ooni.org/measurement/20190905T104808Z_AS50261_Y03a7meiLbuSNLjxa55pvOAYug9r0yjsFjlmfe64MzbDuGJ2UM?input=http://www.realbeer.com/
{
"body_match": "<title>Oops! Website blocked!</title>",
"locality": "local"
},
# https://explorer.ooni.org/measurement/20190904T091911Z_AS1955_V5wcmlVrMPVZzi9sQcgAiopUjZr8umfv8XY3zHY8rOQvbTssHU?input=http://guardster.com/
{
"body_match": "<title>Web Page Blocked</title>",
"locality": "local"
}
],
"ID": [
{
"header_name": "Location",
"header_prefix": "http://internet-positif.org",
"locality": "country",
},
# https://explorer.ooni.org/measurement/20170826T071505Z_AS24526_40Tlz7tR7m0UVaTRZi5EUtsuAjHnYuAgDlNjkec64t70Tujbo9?input=http:%2F%2Fwww.budweiser.com
{
"body_match": '<title>Blocked Page Bina Nusantara Networks</title>',
"locality": "isp",
},
],
"IN": [
# https://github.com/ooni/pipeline/issues/25#issue-154919607
# https://explorer.ooni.org/measurement/7AEt2OwqdZUzFMzyZd3bFwnCwXp7FqYCezpaoBWdkBIfxtLtX84mXBZnlPLaTUqI?input=http:%2F%2Fthepiratebay.se
{
"body_match": "The page you have requested has been blocked",
"locality": "country",
},
# TODO: maybe we would like to support defining a fingerprint by regexp?
# https://github.com/ooni/pipeline/issues/25#issuecomment-487589504
# https://explorer.ooni.org/measurement/20170915T154832Z_AS55824_qEg9opZCyJqfZJ5qFHWMR390Y1uA6eHw7j6Fx1qtU5EPE4Jnp2?input=http:%2F%2Fwww.http-tunnel.com
{
"header_prefix": "1.1 ironport1.iitj.ac.in:80 (Cisco-WSA/",
"header_name": "Via",
"locality": "local",
},
# https://github.com/ooni/pipeline/issues/25#issuecomment-487589504
# https://explorer.ooni.org/measurement/20170331T050949Z_AS55824_HYIy5Ddu5UgfGq8UYBJ8aCkuz6EYAQUivivIYXDMCt6Dr6CCPU?input=http:%2F%2Fwww.babylon-x.com
{
"header_prefix": "1.1 ironport2.iitj.ac.in:80 (Cisco-WSA/",
"header_name": "Via",
"locality": "local",
},
# TODO this looks more like a captive portal, do we want to classify it differently?
# https://explorer.ooni.org/measurement/20180723T000524Z_AS135201_lOpTIwn8aK4gWsbfmV9v3hlTy3ZKVYHRIA8dNBAafTxmFa6hVP?input=http:%2F%2Fwww.xbox.com
# https://github.com/ooni/pipeline/issues/25#issuecomment-489452067
{
"header_full": "GoAhead-Webs",
"header_name": "Server",
"locality": "local",
},
],
"IR": [{"body_match": 'iframe src="http://10.10', "locality": "country"}],
"IT": [
{
"body_match": "GdF Stop Page",
"locality": "country"
},
# https://explorer.ooni.org/measurement/20170216T161517Z_AS203469_NhQfyO3SkGoX5gdyzo1VRQTrZv1HcQgzudlItMI4YxuSUfgLib?input=http:%2F%2Fwww.pokerstars.net
{
"header_name": "Server",
"header_full": "V2R2C00-IAE/1.0",
"locality": "local",
},
# The following is not necessarily a blockpage
# https://explorer.ooni.org/measurement/3N5bdjWAdVjZubaIyAxCCAg0HiZYWfT1YLgz6cI0zRq1XTjHzBmGg49AbRxOGILi?input=http:%2F%2Fwarc.jalb.de
{
"header_full": "WebProxy/1.0 Pre-Alpha",
"header_name": "Server",
"locality": "local",
}
],
# https://github.com/ooni/pipeline/issues/192
# https://explorer.ooni.org/measurement/20180611T174527Z_AS33771_xGoXddliTIGLP3NJUBkEnEL1ukvMZKs7YvbB7RNFb3tW4OKZR7?input=http:%2F%2Fprotectionline.org%2F
#"KE": [
# {
# "header_name": "Location",
# "header_prefix": "http://159.89.232.4/alert",
# "locality": "country",
# }
#],
"KR": [
# https://github.com/ooni/pipeline/issues/131
# https://explorer.ooni.org/measurement/20181204T023538Z_AS4766_Q7tnDXKYbZxJAArIYzQwgd4y91BDuYsPZvA0MrYvUZzEVX6Olz?input=http:%2F%2Fwww.torproject.org
{
"body_match": "cm/cheongshim/img/cheongshim_block.png",
"locality": "local"
},
{
"body_match": "http://warning.or.kr",
"locality": "country"
},
{
"header_full": "http://www.warning.or.kr",
"header_name": "Location",
"locality": "country",
},
],
"KG": [
# https://github.com/ooni/pipeline/issues/122
# https://explorer.ooni.org/measurement/20180126T000430Z_AS8449_pk15Mr2LgOhNOk9NfI2EarhUAM64DZ3R85nh4Z3q2m56hflUGh?input=http:%2F%2Farchive.org
{
"header_name": "Location",
"header_full": "http://homeline.kg/access/blockpage.html",
"locality": "isp"
}
],
"KW": [
# https://github.com/ooni/pipeline/issues/174
# https://explorer.ooni.org/measurement/20170804T144746Z_AS42961_nClauBGJlQ5BgV1lAVD72Gw8omqphSogfCSLAc55zTAdlcpzTA?input=http:%2F%2Fwww.radioislam.org
{
"header_name": "Location",
"header_prefix": "http://restrict.kw.zain.com",
"locality": "isp"
}
],
"MX": [
# https://github.com/ooni/pipeline/issues/159
# https://explorer.ooni.org/measurement/20190306T161639Z_AS22908_1iCBIVT3AGu4mDvEtpl0ECfxp1oSw8UYXSg82JFQ1gOIqOvw8y?input=http:%2F%2Fwww.pornhub.com%2F
{
"header_name": "Server",
"header_full": "V2R2C00-IAE/1.0",
"locality": "local",
}
],
"MY": [
# https://github.com/ooni/pipeline/issues/35#issue-169100725
# https://explorer.ooni.org/measurement/20160802T205955Z_AS4788_3omRbM1JA9BYIMF5O5uiKEsdmUqy4kdunnKn7exzBlM2ebboDh?input=http:%2F%2Fwww.sarawakreport.org
# TODO check if this triggers false positives, which may be the case according to: https://github.com/ooni/pipeline/issues/35#issuecomment-237997890
{"body_match": "Makluman/Notification", "locality": "country"},
# TODO add support for DNS based fingerprints
# https://explorer.ooni.org/measurement/20160817T033110Z_AS4788_jk5ghw4QwieT2JOFiIqto9Z2LzCFhP05v3U0sCcaetBr50NxuU?input=http:%2F%2Fwww.sarawakreport.org%2Ftag%2F1mdb
# {"dns_match": "175.139.142.25", "locality": "country"},
],
"NO": [
{
"header_full": "http://block-no.altibox.net/",
"header_name": "Location",
"locality": "country",
}
],
"PA": [
# https://github.com/ooni/pipeline/issues/170
# https://explorer.ooni.org/measurement/20170302T184253Z_AS11556_LrU69C7D1dqVTi05dMN0jPphf601DuAzMnFBrehfFvR4ccwfoe?input=http:%2F%2Flifestream.aol.com
{
"body_match": "<p>Redirecting you to Barracuda Web Filter.</p>",
"locality": "local",
}
],
"PH": [
# https://explorer.ooni.org/measurement/20180114T101054Z_AS0_SptN5g552QQ9wpfhEBsOTeuOkrpYOQgCBc4JQXNy9GFGxarbEf?input=http:%2F%2Fhightimes.com
{
"header_name": "Location",
"header_prefix": "http://surfalert.globe.com.ph/usedpromo?dest_url",
"locality": "isp",
},
# TODO this is actually a captive portal like scenario
# https://explorer.ooni.org/measurement/20171014T140144Z_AS10139_gQG3LIHnMZH3IsSJuPmMlLM8qDj3kKfHxJJGyPblDQ1AOFFyBX?input=http:%2F%2Fwww.bittorrent.com
{
"header_full": "http://cube.sunbroadband.ph:8020/balanceToOffer/init",
"header_name": "Location",
"locality": "isp",
},
],
"PK": [
# https://github.com/ooni/pipeline/issues/160
# https://explorer.ooni.org/measurement/20180721T184612Z_AS45773_w5kQp1GbCQUbIv3VDzezjxNx1nCt3IaW7WpvrZTNvayksz9FBK?input=http:%2F%2Fwww.xbox.com
{
"header_name": "Server",
"header_full": "V2R2C00-IAE/1.0",
"locality": "local",
},
],
"PL": [
# https://github.com/ooni/pipeline/issues/224
# https://explorer.ooni.org/measurement/20190911T110527Z_AS6830_kkm1ZGUCJI4dSrRV4xl6QwG77o7EeI2PDKbwt9SPL9BBJHUsTr?input=http://www.eurogrand.com/
{
"header_full": "http://www.finanse.mf.gov.pl/inne-podatki/podatek-od-gier-gry-hazardowe/komunikat",
"header_name": "Location",
"locality": "country"
},
{
"header_prefix": "http://80.50.144.142/UserCheck/PortalMain",
"header_name": "Location",
"locality": "isp"
}
],
"PT": [
# https://github.com/ooni/pipeline/issues/225
# https://explorer.ooni.org/measurement/20190911T103335Z_AS2860_4aKH0micNlcrjnWcRknF9ghAykfMdxMkWGhXWracX2FIBY6UQb?input=http://www.roxypalace.com/
{
"body_match": "<title>Bloqueado por ordem judicial</title>",
"locality": "isp"
},
# https://explorer.ooni.org/measurement/20180910T235247Z_AS3243_LpUldhcfbVGUIyfOxF6TFfLNT1wSAMwHy54LBz6owWe0cofJIK?input=http://www.luckynugget.com
{
"body_match": "<title>Acesso bloqueado por entidade judiciária</title>",
"locality": "isp"
},
{
"header_full": "http://mobilegen.vodafone.pt/denied/dn",
"header_name": "Location",
"locality": "isp",
}
],
"QA": [
# https://explorer.ooni.org/measurement/bE35lS71t9vU2Swm2gxSdNPl9DWcaZpRizWrxyGEV7rh8srASwPnuwQIkdVoph0b?input=http:%2F%2Fanonym.to%2F
# https://github.com/ooni/pipeline/issues/66#issuecomment-307233015
{
"header_full": "http://www.vodafone.qa/alu.cfm",
"header_name": "Location",
"locality": "isp",
}
],
"RO": [
{
"body_match": "Accesul dumneavoastr\u0103 c\u0103tre acest site a fost restric\u021bionat",
"locality": "country",
}
],
"RU": [
# https://explorer.ooni.org/measurement/fDYllw7vRf71n2l4g2V2ahIlPxmd6nrpsjWemcJDWX1UDN0AT5Z5uBh4HhAdFdGB?input=http:%2F%2Fthepiratebay.se%2F
# https://github.com/ooni/pipeline/issues/26
{
"body_match": "Доступ к сайту ограничен в соответствии с Федеральными законами",
"locality": "country"
},
# https://github.com/ooni/pipeline/issues/115
# https://explorer.ooni.org/measurement/20180315T230339Z_AS15378_1OT3ZGTyarLfiET0jYHJZigX2B1oQDdJKdrjkfzq5Zqr30Lvlp?input=http:%2F%2Fqha.com.ua%2F
{
"body_match": "распространение которой в Российской Федерации запрещено! Данный ресурс включен в ЕДИНЫЙ РЕЕСТР доменных имен, указателей страниц сайтов в сети «Интернет» и сетевых адресов, позволяющих идентифицировать",
"locality": "country"
},
# https://explorer.ooni.org/measurement/20180315T100250Z_AS6789_KYIQLsidOroJuPSP8DNkM8WFYOwNEAKebucOYOHhr9bwel8Yl3?input=http:%2F%2Fqha.com.ua%2F
{
"header_name": "Location",
"header_prefix": "http://erblock.crimea.com/",
"locality": "country",
},
# https://explorer.ooni.org/measurement/20180315T160455Z_AS8359_t1zebVigtFmS7kxCOCe3P77uClvciZHmKIwutI13x3Wcpe5t9V?input=http:%2F%2Futronews.org
{
"body_match": "<iframe src=\"http://subblock.mts.ru/api",
"locality": "isp"
},
# https://explorer.ooni.org/measurement/20180315T174422Z_AS12688_7Iy8vwd6JYREOl2E6E1PJnNeCVGVKlORQNYhUJ2tKyiWjaEFkY?input=http:%2F%2Fmaidanua.org%2F
# {"dns_match": "62.33.207.196", "locality": "country"},
# {"dns_match": "62.33.207.197", "locality": "country"},
# https://explorer.ooni.org/measurement/20181229T072204Z_AS39289_xlYTdPez910QvwBFCYyD3sgv0hQq1WBgbhW3lMjIs3MVMUdGtg?input=http:%2F%2Fblackberry.com
{
# Using the Location header is a possible alternative
# "header_prefix": "http://89.185.75.227/451/",
# "header_name": "Location",
"body_match": "<h1>Доступ к запрашиваемому ресурсу закрыт.",
"locality": "country"
},
{
"body_match": "http://eais.rkn.gov.ru/",
"locality": "country"
},
{
"header_name": "Location",
"header_prefix": "http://warning.rt.ru",
"locality": "country",
},
],
"SA": [
        # TODO maybe we would want to classify this as knowing the vendor
# https://github.com/ooni/pipeline/issues/164
# https://explorer.ooni.org/measurement/20180717T084426Z_AS15505_0NtuQmtvJpAZG5I4V8QtVrS5PeUnqplLxvm3zDflzPm7ywFmX0?input=http:%2F%2Fwww.163.com
{
"header_name": "Location",
"header_prefix": "http://notify.bluecoat.com/notify-Notify",
"locality": "local"
},
{
"header_name": "Server",
"header_prefix": "Protected by WireFilter",
"locality": "country",
}
],
"SD": [
{
"header_full": "http://196.1.211.6:8080/alert/",
"header_name": "Location",
"locality": "isp",
},
# https://explorer.ooni.org/measurement/20181221T173618Z_AS33788_NH383fTiPbg28uZGbH9Huk4jEPJZ00IBUNrqZWLoEpbl9sx3VQ?input=http:%2F%2Fwww.pokerstars.com
{
"header_prefix": "http://196.29.164.27/ntc/ntcblock.html",
"header_name": "Location",
"locality": "isp"
},
# https://explorer.ooni.org/measurement/20181223T053338Z_AS37211_SNSyW5GQvxWDuQ2tQpJNZKqK5IyQFfXAVgTQynYjVzUvZn2FDK?input=http:%2F%2Fwww.bglad.com
{
"body_match": "<title>gateprotect Content Filter Message</title>",
"locality": "local"
}
],
"SG": [
# https://github.com/ooni/pipeline/issues/195
# https://explorer.ooni.org/measurement/20170905T231542Z_AS9874_3TJ6zyJeL17MVkTArLLsVfDxuMEPzfWB2rm4UbxiDuwtSiuNf3?input=http:%2F%2Fwww.playboy.com
{
"header_full": "http://www.starhub.com:80/personal/broadband/value-added-services/safesurf/mda-blocked.html",
"header_name": "Location",
"locality": "isp",
}
],
"TR": [
{
"body_match": "<title>Telekom\u00fcnikasyon \u0130leti\u015fim Ba\u015fkanl\u0131\u011f\u0131</title>",
"locality": "country",
},
# https://github.com/ooni/pipeline/issues/161
# https://explorer.ooni.org/measurement/20170210T045710Z_AS201411_Xu0QrPJeKuNvYdTpTs3Uv9u4usmdcNACeLPi3wtiqxzBtpOLMf?input=http:%2F%2Fwww.365gay.com
{
"body_match": "<p class=\"sub-message\">Bu <a href=\"https://www.goknet.com.tr/iletisim.html\">link</a>'e tıklayarak bize ulaşabilir, daha detaylı bilgi alabilirsiniz.</p>",
"locality": "isp",
},
# https://github.com/ooni/pipeline/issues/117
# https://explorer.ooni.org/measurement/20180403T183403Z_AS9121_FfHjDmPkC0E5UoU3JMZoXJ2KRrMVyqdeTkHchmGEqAonEU64u4?input=http:%2F%2Fbeeg.com
{
"body_match": "class=\"yazi3_1\">After technical analysis and legal consideration based on the law nr. 5651, administration measure has been taken for this website",
"locality": "country"
}
# https://explorer.ooni.org/measurement/20180403T183403Z_AS9121_FfHjDmPkC0E5UoU3JMZoXJ2KRrMVyqdeTkHchmGEqAonEU64u4?input=http:%2F%2Fbeeg.com
# {"dns_match": "195.175.254.2", "locality": "country"},
],
"UA": [
# https://github.com/ooni/pipeline/issues/121
# https://explorer.ooni.org/measurement/20180615T125414Z_AS35362_GaNRQSk6HlZ1Aa2ZD9UoGiIgu3KcLMM5M5yk5dEswhEkIprbnA?input=http:%2F%2Fvk.com%2Fminitrue
{
"body_match": "Відвідування даного ресурсу заборонено",
"locality": "country",
},
# https://explorer.ooni.org/measurement/20190228T101440Z_AS13188_LUWIsztkSQlApx6cliGdGzztCM6Hs2MFI3ybFEYuaIG5W8cQS6?input=http:%2F%2Freporter-crimea.ru
{
"header_prefix": "http://blocked.triolan.com.ua",
"header_name": "Location",
"locality": "isp"
}
],
"US": [
{
"header_full": "1.1 MLD-C-Barracuda.mld.org (http_scan_byf/3.5.16)",
"header_name": "Via",
"locality": "country",
},
{
"header_full": "1.1 forcepoint-wcg.chambersburg.localnet",
"header_name": "Via",
"locality": "country",
},
{
"header_name": "Location",
"header_prefix": "http://filter.esu9.org:8080/webadmin/deny/index.php",
"locality": "country",
},
# https://explorer.ooni.org/measurement/20170411T170124Z_AS46491_ntYaNL2kdnuHFhOpkSgje3aadGsvRW8oadtjIC71DuiX06z4yy?input=http:%2F%2Fwww.exgay.com
{
"header_name": "Location",
"header_prefix": "http://reporter.dublinschools.net/block/restricted.html",
"locality": "local",
},
# https://explorer.ooni.org/measurement/20170504T143706Z_AS16657_2LnvAcQgpCrjBG46Fb5EKr50PIL40W0ppwNcXp9WCCatbPboXK?input=http:%2F%2Fchinadaily.com.cn
# https://explorer.ooni.org/measurement/20170915T220312Z_AS22935_6FKfune3ZuFavfORPXNul209Ffwv3jL7RyjzMJppxYju2caAoE?input=http:%2F%2Fwww.breastenlargementmagazine.com
{
"header_name": "Location",
"header_prefix": "http://ibossreporter.edutech.org/block/bp.html",
"locality": "local",
},
# https://explorer.ooni.org/measurement/20170628T182856Z_AS25605_Lev8VClbbZplNkYfBGujzPiKFI7rHxERx9SNwOfuR1M8WCTBSZ?input=http:%2F%2Fwww.aceshigh.com
{
"header_name": "Location",
"header_prefix": "http://alert.scansafe.net/alert/process",
"locality": "local",
},
# https://explorer.ooni.io/measurement/20170722T013346Z_AS14340_QXOwhyfxJUPRGWsCqanoOycTnbHcpU4CW3NBNXUMMbxbi3Q6I3?input=http:%2F%2Fwww.blubster.com
{
"header_name": "Location",
"header_prefix": "http://184.168.221.96:6080/php/urlblock.php",
"locality": "local",
},
{
"header_name": "Location",
"header_prefix": "https://gateway.wifast.com:443/wifidog/login/",
"locality": "local",
},
# https://explorer.ooni.org/measurement/20190205T191943Z_AS26638_QbFGhgqZ8sqXmCQrZNrNgB0RWB6EUfjFYPbKYOgaihiWLv5xNb?input=http:%2F%2Ftwitter.com%2F
{
"header_name": "Location",
"header_prefix": "https://mpswebfilterwashbu.mpls.k12.mn.us:6082/php/uid.php",
"locality": "local",
},
# https://explorer.ooni.org/measurement/20180406T152727Z_AS39942_lquPnt0vjeXydfleOdSkxyjst6VTiUWb58f3x5qlFKTSlTIQLG?input=http:%2F%2Fflirtylingerie.com%2F
# vendor: forcepoint
{
"body_match": "<title>Access to this site is blocked</title>",
"locality": "local"
},
# https://explorer.ooni.org/measurement/20171129T155619Z_AS11714_vJUMktHjy0cQGKqYqY3fgOQQLVNfnxb1V11fvP6jTXTbbTX60e?input=http:%2F%2Fwww.pandora.com
# vendor: netsweeper
{
"body_match": "It is a good idea to check to see if the NetSweeper restriction is coming from the cache of your web browser",
"locality": "local"
},
],
"VN": [
# https://github.com/ooni/pipeline/issues/186
# https://explorer.ooni.org/measurement/20130506T043500Z_IBzthJbAAPsdFZQLPrjMbAwlELzGqtabqMatKpBqqBSWnUyQnA?input=http:%2F%2Fwww.sos-reporters.net
{
"header_name": "Location",
"header_prefix": "http://ezxcess.antlabs.com/login/index.ant?url",
"locality": "local",
}
],
"ZZ": [
{
"header_name": "Location",
"header_prefix": "http://1.2.3.50/ups/no_access",
"locality": "isp",
},
# https://github.com/ooni/pipeline/issues/179
# https://explorer.ooni.org/measurement/20180125T012951Z_AS0_gyX2DUR5Q1W5V7gAlvUwnnEAH5tbEkEexlUu5qO8ZphH2uEjk6?input=http:%2F%2F8thstreetlatinas.com
{
"header_name": "Location",
"header_prefix": "http://www.webscanningservice.com/WebServicesAlertPage/WebURLAlert.aspx",
"locality": "isp",
},
# https://github.com/ooni/pipeline/issues/169
# https://explorer.ooni.org/measurement/20180724T151542Z_AS577_YiqZVd01jKCgmtm4Ixf6z2uzSBcFSsXkeN6NIDjHl1dtWZ6VrX?input=http:%2F%2Fwww.genderandaids.org
# https://github.com/ooni/pipeline/issues/171
# https://explorer.ooni.org/measurement/20171029T080350Z_AS209_xtJYWXrUShSnXnvStZUPWsVpqVhT0hOzR749tbJgzxF9OkR1Bn?input=http:%2F%2Fwww.vanguardnewsnetwork.com
# https://explorer.ooni.org/measurement/20170814T120242Z_AS8447_g6CoCriPHXMWjXJHwZ9kjJmTusVWVPYEsOKOOhF1HLwrHR29hp?input=http:%2F%2Fwww.aleph.to
# https://explorer.ooni.org/measurement/20180215T142531Z_AS31543_HutYcy6eALgop44KgGsXAsaF2i7v4feM6DP5vb2hST8nZdmWta?input=http:%2F%2Fanonymizer.secuser.com
{
"header_name": "Server",
"header_full": "Barracuda/NGFirewall",
"locality": "local"
},
# https://explorer.ooni.org/measurement/20180123T210247Z_AS7385_VLsJ9z4UYNZIMUK3Nc4cyaXctKPnnIsxIhuU97X4XWvSAubgZs?input=http:%2F%2Fwww.figleaves.com
# https://explorer.ooni.org/measurement/20171203T130750Z_AS14434_iviy15O0b7YXVRoTMaqS4O0w1w9NfVaOL4l3KoV4Y9mJEq33X0?input=http:%2F%2Fcompany.wizards.com%2F
{
"header_name": "Server",
"header_full": "BarracudaHTTP 4.0",
"locality": "local"
},
],
}
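# A minimal sketch (not part of the original pipeline) of how a single entry of
# the fingerprint dict above could be checked against a fetched HTTP response.
# The response shape (a body string plus a header dict) and the helper name
# `fingerprint_matches` are assumptions made purely for illustration.
def fingerprint_matches(fp, body, headers):
    if "body_match" in fp:
        return fp["body_match"] in body
    value = headers.get(fp.get("header_name", ""), "")
    if "header_full" in fp:
        return value == fp["header_full"]
    if "header_prefix" in fp:
        return value.startswith(fp["header_prefix"])
    return False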
def read_fingerprints_csv():
with open("fingerprints.csv", newline="", encoding="utf-8") as f:
reader = csv.reader(f)
fingerprints = {}
for row in reader:
num, cc, body_match, header_name, header_prefix, header_full = row
if cc not in fingerprints:
fingerprints[cc] = []
d = {}
if body_match:
d["body_match"] = body_match
else:
d["header_name"] = header_name
if header_full:
d["header_full"] = header_full
else:
d["header_prefix"] = header_prefix
fingerprints[cc].append(d)
print(fingerprints)
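# Note (added): each row of the assumed fingerprints.csv layout mirrors the
# unpacking above, i.e. num,cc,body_match,header_name,header_prefix,header_full;
# a body-match-only row could look like: 1,IT,GdF Stop Page,,,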
def mock_out_long_strings(d, maxlen): # noqa
# Used for debugging
if isinstance(d, list):
for q in d:
mock_out_long_strings(q, maxlen)
return
for k, v in d.items():
if isinstance(v, dict):
mock_out_long_strings(v, maxlen)
elif isinstance(v, str):
if len(v) > maxlen:
d[k] = "..."
elif isinstance(v, list):
q = v
for v in q:
if isinstance(v, dict):
mock_out_long_strings(v, maxlen)
elif isinstance(v, str):
if len(v) > maxlen:
d[k] = "..."
|
import sys
import time
import machine
import st7789
import uos
import random
import ftext
def pick_item(sequence):
div = 0x3fffffff // len(sequence)
return sequence[random.getrandbits(30) // div]
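# Note (added for clarity): pick_item maps a 30-bit random number onto a valid
# index, e.g. pick_item(fonts) further below returns one of the font file names
# roughly uniformly at random.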
bl = machine.Pin(4, machine.Pin.OUT)
bl.value(1)
spi = machine.SPI(
2,
baudrate=30000000,
polarity=1,
phase=1,
sck=machine.Pin(18),
mosi=machine.Pin(19))
display = st7789.ST7789(
spi, 135, 240,
reset=machine.Pin(23, machine.Pin.OUT),
cs=machine.Pin(5, machine.Pin.OUT),
dc=machine.Pin(16, machine.Pin.OUT))
display.init()
display.fill(st7789.BLACK)
fonts = ["astrol.fnt", "cyrilc.fnt", "gotheng.fnt", "greeks.fnt",
"italicc.fnt", "italiccs.fnt", "meteo.fnt", "music.fnt",
"romanc.fnt", "romancs.fnt", "romand.fnt", "romanp.fnt",
"romans.fnt", "romant.fnt", "scriptc.fnt", "scripts.fnt"]
row = 0
while True:
color = st7789.color565(
random.getrandbits(8),
random.getrandbits(8),
random.getrandbits(8))
row += 32
font_file = "/fonts/" + pick_item(fonts)
ftext.text(display, font_file, "Hello!", row, 0, color)
if row > 192:
display.fill(st7789.BLACK)
row = 0
|
DATABASE_ENGINE = 'sqlite3'
ROOT_URLCONF = 'database_files.urls'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'database_files',
'database_files.tests',
]
DEFAULT_FILE_STORAGE = 'database_files.storage.DatabaseStorage'
|
import configparser
import logging
import pandas as pd
try:
from sklearn.model_selection import KFold, GridSearchCV
except ImportError: # sklearn 0.17
from sklearn.cross_validation import KFold
from sklearn.grid_search import GridSearchCV
from . import utils as eutils
log = logging.getLogger(name=__name__)
def evaluate_model(model, X, y, folds, scoring):
"""
Evaluate a single model
Arguments:
* model: a quantgov.ml.CandidateModel
* X: array-like of document vectors with shape [n_samples x n_features]
* y: array-like of labels with shape [n_samples X n_labels]
* folds: folds to use in cross-validation
* scoring: scoring method
Returns: pandas DataFrame with model evaluation results
"""
log.info('Evaluating {}'.format(model.name))
if hasattr(y[0], '__getitem__'):
cv = KFold(folds, shuffle=True)
if '_' not in scoring:
log.warning("No averaging method specified, assuming macro")
scoring += '_macro'
else:
cv = KFold(folds, shuffle=True)
gs = GridSearchCV(
estimator=model.model,
param_grid=model.parameters,
cv=cv,
scoring=scoring,
verbose=100,
refit=False
)
gs.fit(X, y)
return pd.DataFrame(gs.cv_results_).assign(model=model.name)
def evaluate_all_models(models, X, y, folds, scoring):
"""
Evaluate a number of models
Arguments:
* models: a sequence of quantgov.ml.CandidateModel objects
* X: array-like of document vectors with shape [n_samples x n_features]
* y: array-like of labels with shape [n_samples X n_labels]
* folds: folds to use in cross-validation
* scoring: scoring method
Returns: pandas DataFrame with model evaluation results
"""
results = pd.concat(
[evaluate_model(model, X, y, folds, scoring) for model in models],
ignore_index=True
)
results = results[
['model', 'mean_test_score', 'std_test_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time']
+ sorted(i for i in results if i.startswith('param_'))
+ sorted(i for i in results
if i.startswith('split')
and '_train_' not in i
)
+ ['params']
]
return results
def write_suggestion(results, file):
"""
Given results, write the best performer to a config file.
Arguments:
        * **results**: a DataFrame as returned by `evaluate_all_models`
* **file**: an open file-like object
"""
best_model = results.loc[results['mean_test_score'].idxmax()]
config = configparser.ConfigParser()
config.optionxform = str
config['Model'] = {'name': best_model['model']}
config['Parameters'] = {i: j for i, j in best_model['params'].items()}
config.write(file)
def evaluate(modeldefs, trainers, labels, folds, scoring, results_file,
suggestion_file):
"""
Evaluate Candidate Models and write out a suggestion
Arguments:
* **modeldefs**: Path to a python module containing a list of
`quantgov.ml.CandidateModel` objects in a module-level
            variable named `models`.
* **trainers**: a `quantgov.ml.Trainers` object
* **labels**: a `quantgov.ml.Labels` object
* **folds**: folds to use in cross-validation
* **scoring**: scoring method to use
* **results_file**: open file object to which results should be written
* **suggestion_file**: open file object to which the model suggestion
should be written
"""
assert labels.index == trainers.index
models = eutils.load_models(modeldefs)
results = evaluate_all_models(
models, trainers.vectors, labels.labels, folds, scoring)
results.to_csv(results_file, index=False)
write_suggestion(results, suggestion_file)
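# Illustrative sketch (not from the quantgov codebase) of the `modeldefs` module
# consumed by evaluate() above. It assumes only what evaluate_model() relies on:
# each entry exposes `name`, `model` (an estimator) and `parameters` (a search
# grid). The namedtuple stand-in and the estimators/grids below are assumptions.
#
#     from collections import namedtuple
#     from sklearn.linear_model import LogisticRegression
#     from sklearn.ensemble import RandomForestClassifier
#
#     CandidateModel = namedtuple('CandidateModel', ('name', 'model', 'parameters'))
#     models = [
#         CandidateModel('logit', LogisticRegression(max_iter=1000), {'C': [0.1, 1, 10]}),
#         CandidateModel('rf', RandomForestClassifier(), {'n_estimators': [100, 300]}),
#     ]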
|
class Movie:
filename = 'films/{title} ({year})'
def __init__(self, title, year):
self.title = title
self.year = year
@property
def path(self):
return self.filename.format_map(self.__dict__)
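if __name__ == "__main__":
    # Quick usage sketch (not part of the original file): the `path` property
    # fills the filename template from the instance's __dict__ via format_map.
    print(Movie("Blade Runner", 1982).path)  # -> films/Blade Runner (1982)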
|
"""aque rm - Remove tasks from the queue.
Removes given tasks, or tasks matching given statuses. By default only removes
tasks submitted by the current user, but may operate on all tasks via `-x`.
"""
import os
import sys
from aque.commands.main import command, argument
from aque.queue import Queue
@command(
argument('-e', '--error', action='store_true', help='remove all tasks which errored'),
argument('-s', '--success', action='store_true', help='remove all tasks which succeeded'),
argument('-k', '--killed', action='store_true', help='remove all tasks which were killed'),
argument('-p', '--pending', action='store_true', help='remove all tasks which are pending'),
argument('-c', '--complete', action='store_true', help='remove all tasks which completed (same as `-esk`)'),
argument('-a', '--all', action='store_true', help='remove all tasks (same as `-eskp`)'),
argument('-x', '--all-users', action='store_true', help='affect tasks of other users as well'),
argument('-v', '--verbose', action='store_true'),
argument('tids', nargs='*', metavar='TID', help='specific task(s) to remove'),
help='remove tasks from the queue',
description=__doc__,
)
def rm(args):
statuses = set()
if args.pending:
statuses.add('pending')
if args.error:
statuses.add('error')
if args.success:
statuses.add('success')
if args.killed:
statuses.add('killed')
if args.complete:
statuses.add('error')
statuses.add('success')
statuses.add('killed')
if args.all:
statuses = (None, )
if not statuses and not args.tids:
exit(1)
base_filter = {}
if not args.all_users:
base_filter['user'] = os.getlogin()
to_delete = [int(x) for x in args.tids]
for status in statuses:
filter_ = base_filter.copy()
if status:
filter_['status'] = status
for task in args.broker.search(filter_, ['id']):
if args.verbose:
                print(task['id'])
to_delete.append(task['id'])
args.broker.delete(to_delete)
if not args.verbose:
        print('removed', len(to_delete), 'tasks')
|
#Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
import tweepy
import webbrowser
import json
#Variables that contains the user credentials to access Twitter API
access_token = ""
access_token_secret = ""
consumer_key = ""
consumer_secret = ""
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
def on_data(self, data):
data = json.loads(data)
print(data['text'])
return True
def on_error(self, status):
print(status)
if __name__ == '__main__':
    #This handles Twitter authentication and the connection to the Twitter Streaming API
l = StdOutListener()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = tweepy.Stream(auth, l)
    #This line filters the Twitter stream to capture English tweets matching the track keywords below
#stream.filter(track=['bitcoin', 'ethereum', 'bitcoin cash', 'ripple', 'litecoin', 'cardano', 'iota', 'dash', 'nem', 'bitcoin gold', 'monero', 'stellar'], languages=["en"])
stream.filter(languages=["en"], track=["Santa Claus Village", "Arctic Circle", "Lapland", "Finland"])
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
def get_package_data():
paths = [os.path.join('js', '*.js'), os.path.join('css', '*.css')]
return {'astropy.extern': paths}
|
import pandas as pd
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
# age = 1
# jobStr = 2
# maritalStr = 3
# educationStr = 4
# default = 5
# balance = 6
# carloan = 7
# communicationStr = 8
# lastContactDay = 9
# lastContactMonthStr = 10
# noOfContacts = 11
# prevAttempts = 12
# OutoComeStr = 13
# callStart = 14
# callEnd = 15
# preparing data
dataset = pd.read_csv('Machine_Learning/MachineStudy/Data/Car_Insurance.csv')
# filling the empty rows of each categorical column
dataset['Job'].fillna('no Job', inplace= True)
dataset['Marital'].fillna('no marital', inplace= True)
dataset['Education'].fillna('no Education', inplace= True)
dataset['Communication'].fillna('no comunication', inplace= True)
dataset['LastContactMonth'].fillna('No LastContactMonth', inplace= True)
dataset['Outcome'].fillna('No OutCome', inplace= True)
'''print(dataset.corr())'''
# splitting into independent and dependent variables
X = dataset.iloc[:,[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]].values
y = dataset.iloc[:, -1].values
# filling the empty rows of the numeric columns with each column's mean
from sklearn.impute import SimpleImputer
imputer = SimpleImputer()
X[:, [0,4,5,6,8,10,11]] = imputer.fit_transform(X[:, [0,4,5,6,8,10,11]])
# converting the categorical columns into binary (one-hot) matrices
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
columnTransformer = ColumnTransformer([('enconder', OneHotEncoder(), [1,2,3,7,9,12])], remainder='passthrough')
X = np.array(columnTransformer.fit_transform(X), dtype=str)
# computing the elapsed time between the two time columns
new_column = []
for i in X[:, [-2, -1]]:
data1 = i[0].split(':')
data2 = i[1].split(':')
data1 = dt.datetime(1, 1, 1, int(data1[0]), int(data1[1]), int(data1[2]))
data2 = dt.datetime(1, 1, 1, int(data2[0]), int(data2[1]), int(data2[2]))
seconds = (data2 - data1).total_seconds()
new_column.append(seconds)
# deleting the last two columns and adding the new duration column
X = np.delete( X, np.s_[-1], axis=1)
X = np.delete( X, np.s_[-1], axis=1)
X = np.insert(X, 5, new_column, axis=1)
# splitting the data into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.20, random_state= 0)
# standardizing the data so the features are on a comparable scale
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler().fit(X_train)
X_train = sc_x.fit_transform(X_train)
X_test = sc_x.transform(X_test)
# Extra Trees classifier algorithm
from sklearn.ensemble import ExtraTreesClassifier
classificationExtraTree = ExtraTreesClassifier(n_estimators= 200, random_state= 0)
classificationExtraTree.fit(X_train, y_train)
# predicting on the test set
y_pred = classificationExtraTree.predict(X_test)
# confusion matrix of hits and misses
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
print('ExtraTree: {}%'.format(classificationExtraTree.score(X_test, y_test)))
# Random Forest algorithm
from sklearn.ensemble import RandomForestClassifier
classificationRandomForest = RandomForestClassifier(n_estimators=200, random_state= 0)
classificationRandomForest.fit(X_train, y_train)
# predicting on the test set
y_pred = classificationRandomForest.predict(X_test)
# confusion matrix of hits and misses
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print('-------------------------')
print(cm)
print('Random forest: {}%'.format(classificationRandomForest.score(X_test, y_test)))
# logistic regression algorithm
from sklearn.linear_model import LogisticRegression
classificationLogistic = LogisticRegression(random_state= 0)
classificationLogistic.fit(X_train, y_train)
# predicting on the test set
y_pred = classificationLogistic.predict(X_test)
# confusion matrix of hits and misses
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print('-------------------------')
print(cm)
print('Logistic: {}%'.format(classificationLogistic.score(X_test, y_test)))
|
import math
import cv2
import matplotlib.cm
import numpy as np
from scipy.ndimage.filters import gaussian_filter, maximum_filter
from scipy.ndimage.morphology import generate_binary_structure
# It is better to use 0.1 as the threshold for evaluation, but 0.3 for demo
# purposes.
cmap = matplotlib.cm.get_cmap('hsv')
# Heatmap indices to find each limb (joint connection). Eg: limb_type=1 is
# Neck->LShoulder, so joint_to_limb_heatmap_relationship[1] represents the
# indices of heatmaps to look for joints: neck=1, LShoulder=5
joint_to_limb_heatmap_relationship = [
[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10],
[1, 11], [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17],
[2, 16], [5, 17]]
# PAF indices containing the x and y coordinates of the PAF for a given limb.
# Eg: limb_type=1 is Neck->LShoulder, so
# PAFneckLShoulder_x=paf_xy_coords_per_limb[1][0] and
# PAFneckLShoulder_y=paf_xy_coords_per_limb[1][1]
paf_xy_coords_per_limb = [
[12, 13], [20, 21], [14, 15], [16, 17], [22, 23],
[24, 25], [0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [28, 29],
[30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27]]
# Color code used to plot different joints and limbs (eg: joint_type=3 and
# limb_type=3 will use colors[3])
colors = [
[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],
[85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],
[0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],
[255, 0, 255], [255, 0, 170], [255, 0, 85], [255, 0, 0]]
NUM_JOINTS = 18
NUM_LIMBS = len(joint_to_limb_heatmap_relationship)
def find_peaks(param, img):
"""
Given a (grayscale) image, find local maxima whose value is above a given
threshold (param['thre1'])
:param img: Input image (2d array) where we want to find peaks
:return: 2d np.array containing the [x,y] coordinates of each peak found
in the image
"""
peaks_binary = (maximum_filter(img, footprint=generate_binary_structure(
2, 1)) == img) * (img > param['thre1'])
# Note reverse ([::-1]): we return [[x y], [x y]...] instead of [[y x], [y
# x]...]
return np.array(np.nonzero(peaks_binary)[::-1]).T
def compute_resized_coords(coords, resizeFactor):
"""
Given the index/coordinates of a cell in some input array (e.g. image),
provides the new coordinates if that array was resized by making it
resizeFactor times bigger.
E.g.: image of size 3x3 is resized to 6x6 (resizeFactor=2), we'd like to
know the new coordinates of cell [1,2] -> Function would return [2.5,4.5]
:param coords: Coordinates (indices) of a cell in some input array
:param resizeFactor: Resize coefficient = shape_dest/shape_source. E.g.:
resizeFactor=2 means the destination array is twice as big as the
original one
:return: Coordinates in an array of size
shape_dest=resizeFactor*shape_source, expressing the array indices of the
closest point to 'coords' if an image of size shape_source was resized to
shape_dest
"""
# 1) Add 0.5 to coords to get coordinates of center of the pixel (e.g.
# index [0,0] represents the pixel at location [0.5,0.5])
# 2) Transform those coordinates to shape_dest, by multiplying by resizeFactor
# 3) That number represents the location of the pixel center in the new array,
# so subtract 0.5 to get coordinates of the array index/indices (revert
# step 1)
return (np.array(coords, dtype=float) + 0.5) * resizeFactor - 0.5
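# Worked example (follows directly from the docstring above): for a 3x3 array
# resized to 6x6 (resizeFactor=2), cell [1, 2] maps to (1 + 0.5) * 2 - 0.5 = 2.5
# and (2 + 0.5) * 2 - 0.5 = 4.5, i.e. compute_resized_coords([1, 2], 2) gives
# array([2.5, 4.5]).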
def NMS(param, heatmaps, upsampFactor=1., bool_refine_center=True, bool_gaussian_filt=False):
"""
NonMaximaSuppression: find peaks (local maxima) in a set of grayscale images
:param heatmaps: set of grayscale images on which to find local maxima (3d np.array,
with dimensions image_height x image_width x num_heatmaps)
:param upsampFactor: Size ratio between CPM heatmap output and the input image size.
Eg: upsampFactor=16 if original image was 480x640 and heatmaps are 30x40xN
:param bool_refine_center: Flag indicating whether:
- False: Simply return the low-res peak found upscaled by upsampFactor (subject to grid-snap)
- True: (Recommended, very accurate) Upsample a small patch around each low-res peak and
fine-tune the location of the peak at the resolution of the original input image
:param bool_gaussian_filt: Flag indicating whether to apply a 1d-GaussianFilter (smoothing)
to each upsampled patch before fine-tuning the location of each peak.
:return: a NUM_JOINTS x 4 np.array where each row represents a joint type (0=nose, 1=neck...)
and the columns indicate the {x,y} position, the score (probability) and a unique id (counter)
"""
# MODIFIED BY CARLOS: Instead of upsampling the heatmaps to heatmap_avg and
# then performing NMS to find peaks, this step can be sped up by ~25-50x by:
    # (9-10ms [with GaussFilt] or 5-6ms [without GaussFilt] vs 250-280ms on RoG)
# 1. Perform NMS at (low-res) CPM's output resolution
# 1.1. Find peaks using scipy.ndimage.filters.maximum_filter
# 2. Once a peak is found, take a patch of 5x5 centered around the peak, upsample it, and
# fine-tune the position of the actual maximum.
# '-> That's equivalent to having found the peak on heatmap_avg, but much faster because we only
# upsample and scan the 5x5 patch instead of the full (e.g.) 480x640
joint_list_per_joint_type = []
cnt_total_joints = 0
# For every peak found, win_size specifies how many pixels in each
# direction from the peak we take to obtain the patch that will be
# upsampled. Eg: win_size=1 -> patch is 3x3; win_size=2 -> 5x5
# (for BICUBIC interpolation to be accurate, win_size needs to be >=2!)
win_size = 2
for joint in range(NUM_JOINTS):
map_orig = heatmaps[:, :, joint]
peak_coords = find_peaks(param, map_orig)
peaks = np.zeros((len(peak_coords), 4))
for i, peak in enumerate(peak_coords):
if bool_refine_center:
x_min, y_min = np.maximum(0, peak - win_size)
x_max, y_max = np.minimum(
np.array(map_orig.T.shape) - 1, peak + win_size)
# Take a small patch around each peak and only upsample that
# tiny region
patch = map_orig[y_min:y_max + 1, x_min:x_max + 1]
map_upsamp = cv2.resize(
patch, None, fx=upsampFactor, fy=upsampFactor, interpolation=cv2.INTER_CUBIC)
# Gaussian filtering takes an average of 0.8ms/peak (and there might be
# more than one peak per joint!) -> For now, skip it (it's
# accurate enough)
map_upsamp = gaussian_filter(
map_upsamp, sigma=3) if bool_gaussian_filt else map_upsamp
# Obtain the coordinates of the maximum value in the patch
location_of_max = np.unravel_index(
map_upsamp.argmax(), map_upsamp.shape)
# Remember that peaks indicates [x,y] -> need to reverse it for
# [y,x]
location_of_patch_center = compute_resized_coords(
peak[::-1] - [y_min, x_min], upsampFactor)
# Calculate the offset wrt to the patch center where the actual
# maximum is
refined_center = (location_of_max - location_of_patch_center)
peak_score = map_upsamp[location_of_max]
else:
refined_center = [0, 0]
# Flip peak coordinates since they are [x,y] instead of [y,x]
peak_score = map_orig[tuple(peak[::-1])]
peaks[i, :] = tuple([int(round(x)) for x in compute_resized_coords(
peak_coords[i], upsampFactor) + refined_center[::-1]]) + (peak_score, cnt_total_joints)
cnt_total_joints += 1
joint_list_per_joint_type.append(peaks)
return joint_list_per_joint_type
def find_connected_joints(param, paf_upsamp, joint_list_per_joint_type, num_intermed_pts=10):
"""
For every type of limb (eg: forearm, shin, etc.), look for every potential
pair of joints (eg: every wrist-elbow combination) and evaluate the PAFs to
determine which pairs are indeed body limbs.
:param paf_upsamp: PAFs upsampled to the original input image resolution
:param joint_list_per_joint_type: See 'return' doc of NMS()
:param num_intermed_pts: Int indicating how many intermediate points to take
between joint_src and joint_dst, at which the PAFs will be evaluated
:return: List of NUM_LIMBS rows. For every limb_type (a row) we store
a list of all limbs of that type found (eg: all the right forearms).
For each limb (each item in connected_limbs[limb_type]), we store 5 cells:
# {joint_src_id,joint_dst_id}: a unique number associated with each joint,
# limb_score_penalizing_long_dist: a score of how good a connection
of the joints is, penalized if the limb length is too long
# {joint_src_index,joint_dst_index}: the index of the joint within
all the joints of that type found (eg: the 3rd right elbow found)
"""
connected_limbs = []
# Auxiliary array to access paf_upsamp quickly
limb_intermed_coords = np.empty((4, num_intermed_pts), dtype=np.intp)
for limb_type in range(NUM_LIMBS):
# List of all joints of type A found, where A is specified by limb_type
# (eg: a right forearm starts in a right elbow)
joints_src = joint_list_per_joint_type[joint_to_limb_heatmap_relationship[limb_type][0]]
# List of all joints of type B found, where B is specified by limb_type
# (eg: a right forearm ends in a right wrist)
joints_dst = joint_list_per_joint_type[joint_to_limb_heatmap_relationship[limb_type][1]]
if len(joints_src) == 0 or len(joints_dst) == 0:
# No limbs of this type found (eg: no right forearms found because
# we didn't find any right wrists or right elbows)
connected_limbs.append([])
else:
connection_candidates = []
# Specify the paf index that contains the x-coord of the paf for
# this limb
limb_intermed_coords[2, :] = paf_xy_coords_per_limb[limb_type][0]
# And the y-coord paf index
limb_intermed_coords[3, :] = paf_xy_coords_per_limb[limb_type][1]
for i, joint_src in enumerate(joints_src):
# Try every possible joints_src[i]-joints_dst[j] pair and see
# if it's a feasible limb
for j, joint_dst in enumerate(joints_dst):
# Subtract the position of both joints to obtain the
# direction of the potential limb
limb_dir = joint_dst[:2] - joint_src[:2]
# Compute the distance/length of the potential limb (norm
# of limb_dir)
limb_dist = np.sqrt(np.sum(limb_dir**2)) + 1e-8
limb_dir = limb_dir / limb_dist # Normalize limb_dir to be a unit vector
# Linearly distribute num_intermed_pts points from the x
# coordinate of joint_src to the x coordinate of joint_dst
limb_intermed_coords[1, :] = np.round(np.linspace(
joint_src[0], joint_dst[0], num=num_intermed_pts))
limb_intermed_coords[0, :] = np.round(np.linspace(
joint_src[1], joint_dst[1], num=num_intermed_pts)) # Same for the y coordinate
intermed_paf = paf_upsamp[limb_intermed_coords[0, :],
limb_intermed_coords[1, :], limb_intermed_coords[2:4, :]].T
score_intermed_pts = intermed_paf.dot(limb_dir)
score_penalizing_long_dist = score_intermed_pts.mean(
) + min(0.5 * paf_upsamp.shape[0] / limb_dist - 1, 0)
# Criterion 1: At least 80% of the intermediate points have
# a score higher than thre2
criterion1 = (np.count_nonzero(
score_intermed_pts > param['thre2']) > 0.8 * num_intermed_pts)
# Criterion 2: Mean score, penalized for large limb
# distances (larger than half the image height), is
# positive
criterion2 = (score_penalizing_long_dist > 0)
if criterion1 and criterion2:
# Last value is the combined paf(+limb_dist) + heatmap
# scores of both joints
connection_candidates.append(
[i, j, score_penalizing_long_dist, score_penalizing_long_dist + joint_src[2] + joint_dst[2]])
# Sort connection candidates based on their
# score_penalizing_long_dist
connection_candidates = sorted(
connection_candidates, key=lambda x: x[2], reverse=True)
connections = np.empty((0, 5))
# There can only be as many limbs as the smallest number of source
# or destination joints (eg: only 2 forearms if there's 5 wrists
# but 2 elbows)
max_connections = min(len(joints_src), len(joints_dst))
# Traverse all potential joint connections (sorted by their score)
for potential_connection in connection_candidates:
i, j, s = potential_connection[0:3]
# Make sure joints_src[i] or joints_dst[j] haven't already been
# connected to other joints_dst or joints_src
if i not in connections[:, 3] and j not in connections[:, 4]:
# [joint_src_id, joint_dst_id, limb_score_penalizing_long_dist, joint_src_index, joint_dst_index]
connections = np.vstack(
[connections, [joints_src[i][3], joints_dst[j][3], s, i, j]])
# Exit if we've already established max_connections
# connections (each joint can't be connected to more than
# one joint)
if len(connections) >= max_connections:
break
connected_limbs.append(connections)
return connected_limbs
def group_limbs_of_same_person(connected_limbs, joint_list):
"""
Associate limbs belonging to the same person together.
:param connected_limbs: See 'return' doc of find_connected_joints()
:param joint_list: unravel'd version of joint_list_per_joint [See 'return' doc of NMS()]
:return: 2d np.array of size num_people x (NUM_JOINTS+2). For each person found:
# First NUM_JOINTS columns contain the index (in joint_list) of the joints associated
with that person (or -1 if their i-th joint wasn't found)
# 2nd-to-last column: Overall score of the joints+limbs that belong to this person
# Last column: Total count of joints found for this person
"""
person_to_joint_assoc = []
for limb_type in range(NUM_LIMBS):
joint_src_type, joint_dst_type = joint_to_limb_heatmap_relationship[limb_type]
for limb_info in connected_limbs[limb_type]:
person_assoc_idx = []
for person, person_limbs in enumerate(person_to_joint_assoc):
if person_limbs[joint_src_type] == limb_info[0] or person_limbs[joint_dst_type] == limb_info[1]:
person_assoc_idx.append(person)
# If one of the joints has been associated to a person, and either
# the other joint is also associated with the same person or not
# associated to anyone yet:
if len(person_assoc_idx) == 1:
person_limbs = person_to_joint_assoc[person_assoc_idx[0]]
# If the other joint is not associated to anyone yet,
if person_limbs[joint_dst_type] != limb_info[1]:
# Associate it with the current person
person_limbs[joint_dst_type] = limb_info[1]
# Increase the number of limbs associated to this person
person_limbs[-1] += 1
# And update the total score (+= heatmap score of joint_dst
# + score of connecting joint_src with joint_dst)
person_limbs[-2] += joint_list[limb_info[1]
.astype(int), 2] + limb_info[2]
elif len(person_assoc_idx) == 2: # if found 2 and disjoint, merge them
person1_limbs = person_to_joint_assoc[person_assoc_idx[0]]
person2_limbs = person_to_joint_assoc[person_assoc_idx[1]]
membership = ((person1_limbs >= 0) & (person2_limbs >= 0))[:-2]
if not membership.any(): # If both people have no same joints connected, merge them into a single person
# Update which joints are connected
person1_limbs[:-2] += (person2_limbs[:-2] + 1)
# Update the overall score and total count of joints
# connected by summing their counters
person1_limbs[-2:] += person2_limbs[-2:]
# Add the score of the current joint connection to the
# overall score
person1_limbs[-2] += limb_info[2]
person_to_joint_assoc.pop(person_assoc_idx[1])
else: # Same case as len(person_assoc_idx)==1 above
person1_limbs[joint_dst_type] = limb_info[1]
person1_limbs[-1] += 1
person1_limbs[-2] += joint_list[limb_info[1]
.astype(int), 2] + limb_info[2]
else: # No person has claimed any of these joints, create a new person
# Initialize person info to all -1 (no joint associations)
row = -1 * np.ones(20)
# Store the joint info of the new connection
row[joint_src_type] = limb_info[0]
row[joint_dst_type] = limb_info[1]
# Total count of connected joints for this person: 2
row[-1] = 2
# Compute overall score: score joint_src + score joint_dst + score connection
# {joint_src,joint_dst}
row[-2] = sum(joint_list[limb_info[:2].astype(int), 2]
) + limb_info[2]
person_to_joint_assoc.append(row)
# Delete people who have very few parts connected
people_to_delete = []
for person_id, person_info in enumerate(person_to_joint_assoc):
if person_info[-1] < 3 or person_info[-2] / person_info[-1] < 0.2:
people_to_delete.append(person_id)
# Traverse the list in reverse order so we delete indices starting from the
# last one (otherwise, removing item for example 0 would modify the indices of
# the remaining people to be deleted!)
for index in people_to_delete[::-1]:
person_to_joint_assoc.pop(index)
# Appending items to a np.array can be very costly (allocating new memory, copying over the array, then adding new row)
# Instead, we treat the set of people as a list (fast to append items) and
# only convert to np.array at the end
return np.array(person_to_joint_assoc)
def plot_pose(img_orig, joint_list, person_to_joint_assoc, bool_fast_plot=True, plot_ear_to_shoulder=False):
canvas = img_orig.copy() # Make a copy so we don't modify the original image
# to_plot is the location of all joints found overlaid on top of the
# original image
to_plot = canvas.copy() if bool_fast_plot else cv2.addWeighted(
img_orig, 0.3, canvas, 0.7, 0)
limb_thickness = 4
# Last 2 limbs connect ears with shoulders and this looks very weird.
# Disabled by default to be consistent with original rtpose output
which_limbs_to_plot = NUM_LIMBS if plot_ear_to_shoulder else NUM_LIMBS - 2
for limb_type in range(which_limbs_to_plot):
for person_joint_info in person_to_joint_assoc:
joint_indices = person_joint_info[joint_to_limb_heatmap_relationship[limb_type]].astype(
int)
if -1 in joint_indices:
# Only draw actual limbs (connected joints), skip if not
# connected
continue
# joint_coords[:,0] represents Y coords of both joints;
# joint_coords[:,1], X coords
joint_coords = joint_list[joint_indices, 0:2]
for joint in joint_coords: # Draw circles at every joint
cv2.circle(canvas, tuple(joint[0:2].astype(
int)), 4, (255, 255, 255), thickness=-1)
# mean along the axis=0 computes meanYcoord and meanXcoord -> Round
# and make int to avoid errors
coords_center = tuple(
np.round(np.mean(joint_coords, 0)).astype(int))
# joint_coords[0,:] is the coords of joint_src; joint_coords[1,:]
# is the coords of joint_dst
limb_dir = joint_coords[0, :] - joint_coords[1, :]
limb_length = np.linalg.norm(limb_dir)
# Get the angle of limb_dir in degrees using atan2(limb_dir_x,
# limb_dir_y)
angle = math.degrees(math.atan2(limb_dir[1], limb_dir[0]))
# For faster plotting, just plot over canvas instead of constantly
# copying it
cur_canvas = canvas if bool_fast_plot else canvas.copy()
polygon = cv2.ellipse2Poly(
coords_center, (int(limb_length / 2), limb_thickness), int(angle), 0, 360, 1)
cv2.fillConvexPoly(cur_canvas, polygon, colors[limb_type])
if not bool_fast_plot:
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
return to_plot, canvas
def decode_pose(img_orig, heatmaps, pafs):
param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
# Bottom-up approach:
# Step 1: find all joints in the image (organized by joint type: [0]=nose,
# [1]=neck...)
joint_list_per_joint_type = NMS(param,
heatmaps, img_orig.shape[0] / float(heatmaps.shape[0]))
# joint_list is an unravel'd version of joint_list_per_joint, where we add
# a 5th column to indicate the joint_type (0=nose, 1=neck...)
joint_list = np.array([tuple(peak) + (joint_type,) for joint_type,
joint_peaks in enumerate(joint_list_per_joint_type) for peak in joint_peaks])
# Step 2: find which joints go together to form limbs (which wrists go
# with which elbows)
paf_upsamp = cv2.resize(
pafs, (img_orig.shape[1], img_orig.shape[0]), interpolation=cv2.INTER_CUBIC)
connected_limbs = find_connected_joints(param,
paf_upsamp, joint_list_per_joint_type)
# Step 3: associate limbs that belong to the same person
person_to_joint_assoc = group_limbs_of_same_person(
connected_limbs, joint_list)
# (Step 4): plot results
to_plot, canvas = plot_pose(img_orig, joint_list, person_to_joint_assoc)
return to_plot, canvas, joint_list, person_to_joint_assoc
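# Usage sketch (illustrative; the pose network producing the inputs is not part
# of this file): given the original BGR image `img_orig`, joint heatmaps of shape
# (H, W, >=18) and PAFs of shape (H, W, 38) at the CPM output resolution,
#     to_plot, canvas, joint_list, person_to_joint_assoc = decode_pose(img_orig, heatmaps, pafs)
# returns the annotated images plus the detected joints and their per-person grouping.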
|
"""
Code for optimizers
"""
from typing import Union, Optional
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
# -- tests
def one_test():
pass
if __name__ == '__main__':
print('Done, success!\a')
|
# Minimum and Maximum
playerOneScore = 10
playerTwoScore = 4
print(min(playerOneScore, playerTwoScore))
print(max(1, 35, 1, 34))
print(min("Kathryn", "Katie"))
print(min("Angela", "Bob"))
print(max(playerOneScore, playerTwoScore))
|
# ------------------------------------------------------------------------------
#
class Callback (object) :
# FIXME: sigs
pass
# ------------------------------------------------------------------------------
#
|
import os
from .exception import *
from .linux import ShortCutterLinux
from tempfile import NamedTemporaryFile
import subprocess
class ShortCutterMacOS(ShortCutterLinux):
def _get_menu_folder(self):
return os.path.join('/', 'Applications')
def _create_shortcut_file(self, target_name, target_path, shortcut_directory):
"""
Creates a MacOS app which opens an executable via the terminal
        Returns the file path of the shortcut created
"""
shortcut_file_path = os.path.join(shortcut_directory, target_name + ".app")
# create the AppleScript script
sf = NamedTemporaryFile(mode = "w")
sf.write('tell application "Terminal"\n')
sf.write('activate\n')
sf.write('do script "{}"\n'.format(target_path))
sf.write('end tell\n')
sf.flush()
# compile the script into an application
result = subprocess.run(["osacompile", "-o", shortcut_file_path, sf.name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if len(result.stderr):
            raise ShortcutError("Error occurred creating app - {}".format(str(result.stderr)))
sf.close()
return shortcut_file_path
|
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0408493,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.234774,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.220204,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.27366,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.473879,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.271783,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.01932,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.23674,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.72802,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0416013,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00992037,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.087046,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0733672,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.128647,
'Execution Unit/Register Files/Runtime Dynamic': 0.0832876,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.22153,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.569335,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.3121,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00186919,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00186919,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00163322,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000635072,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00105393,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00642552,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0177371,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0705298,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.4863,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.213532,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.239551,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.92588,
'Instruction Fetch Unit/Runtime Dynamic': 0.547776,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0761817,
'L2/Runtime Dynamic': 0.0140614,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.67543,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.19386,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.078885,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.078885,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.04946,
'Load Store Unit/Runtime Dynamic': 1.66178,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.194517,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.389034,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0690347,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0700817,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.278942,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0352932,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.556788,
'Memory Management Unit/Runtime Dynamic': 0.105375,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 21.898,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.145138,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0157399,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.140299,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.301177,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 4.94227,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0177828,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.216656,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0938998,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.124219,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.200361,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.101135,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.425716,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.127674,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.2393,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0177397,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00521031,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0444212,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0385334,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0621609,
'Execution Unit/Register Files/Runtime Dynamic': 0.0437437,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0980297,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.255123,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.34662,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00111941,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00111941,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00100571,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000406116,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000553536,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00379806,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00963597,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0370432,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.35627,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.110886,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.125815,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.68914,
'Instruction Fetch Unit/Runtime Dynamic': 0.287178,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0401139,
'L2/Runtime Dynamic': 0.0076918,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.56296,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.646918,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.042894,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0428939,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.76551,
'Load Store Unit/Runtime Dynamic': 0.90135,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.105769,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.211538,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0375379,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0380633,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.146504,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0184061,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.367096,
'Memory Management Unit/Runtime Dynamic': 0.0564694,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.6906,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0466647,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00617233,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0627772,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.115614,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.71492,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0272392,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.224084,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.142575,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0958895,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.154666,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0780703,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.328626,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0878108,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.24838,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0269355,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00402204,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0394662,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0297454,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0664017,
'Execution Unit/Register Files/Runtime Dynamic': 0.0337675,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0899556,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.234122,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.22598,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000501425,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000501425,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00044855,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.0001801,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000427295,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0018787,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00438568,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.028595,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.81889,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0779486,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0971216,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.12568,
'Instruction Fetch Unit/Runtime Dynamic': 0.20993,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0449894,
'L2/Runtime Dynamic': 0.00989063,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.31186,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.531958,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0347704,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0347704,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.47605,
'Load Store Unit/Runtime Dynamic': 0.738205,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0857378,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.171476,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0304286,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0310789,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.113092,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.012854,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.321472,
'Memory Management Unit/Runtime Dynamic': 0.0439329,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.8061,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0708553,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00518857,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0479699,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.124014,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.35195,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.014382,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.213985,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.072817,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0732598,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.118165,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0596459,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.251071,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0726249,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.09592,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0137567,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00307284,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0278023,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0227256,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.041559,
'Execution Unit/Register Files/Runtime Dynamic': 0.0257984,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.062168,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.166287,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.06252,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000411852,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000411852,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000367122,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000146713,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000326455,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00151728,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0036487,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0218467,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.38964,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0559541,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0742011,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.67559,
'Instruction Fetch Unit/Runtime Dynamic': 0.157168,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0396406,
'L2/Runtime Dynamic': 0.0097391,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.03742,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.399971,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0258918,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0258916,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.15969,
'Load Store Unit/Runtime Dynamic': 0.553552,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0638447,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.127689,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0226587,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.023246,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0864024,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00919651,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.281435,
'Memory Management Unit/Runtime Dynamic': 0.0324425,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.8417,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0361879,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00374568,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.037235,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0771686,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.89259,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 6.273627845793811,
'Runtime Dynamic': 6.273627845793811,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.313869,
'Runtime Dynamic': 0.0854326,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 66.5503,
'Peak Power': 99.6626,
'Runtime Dynamic': 11.9872,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 66.2364,
'Total Cores/Runtime Dynamic': 11.9017,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.313869,
'Total L3s/Runtime Dynamic': 0.0854326,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
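# A minimal sketch (added for illustration, not part of the original dump): the
# `power` dict above uses McPAT-style slash-separated component paths, with the
# per-core breakdowns stored under the 'Core' list. Assuming only the keys
# visible above, the headline per-core figures can be aggregated like this:
def summarize_core_power(power_dict):
    """Sum the top-level power figures across every entry in 'Core'."""
    totals = {'Peak Dynamic': 0.0,
              'Runtime Dynamic': 0.0,
              'Subthreshold Leakage': 0.0}
    for core in power_dict['Core']:
        for key in totals:
            totals[key] += core[key]
    return totals

# Usage: summarize_core_power(power) sums the four cores' figures; the result
# can be checked against power['Processor']['Total Cores/Runtime Dynamic'].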
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright Blaze 2021.
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
https://www.boost.org/LICENSE_1_0.txt)
"""
from typing import Callable, Dict, cast, List, TypeVar, Union
from datetime import datetime
from pprint import pformat
import time
from web3.types import FilterParams, LogReceipt, TxData
from gevent.pool import Pool
import simplejson as json
from web3 import Web3
import gevent
from syn.utils.data import SYN_DATA, LOGS_REDIS_URL, TOKEN_DECIMALS
from syn.utils.helpers import get_gas_stats_for_tx, handle_decimals, \
get_airdrop_value_for_block, convert, parse_logs_out, parse_tx_in
from syn.utils.explorer.data import TOPICS, Direction
from syn.utils.contract import get_bridge_token_info
_start_blocks = {
# 'ethereum': 13136427, # 2021-09-01
'ethereum': 13033669,
'arbitrum': 657404,
'avalanche': 3376709,
'bsc': 10065475,
'fantom': 18503502,
'polygon': 18026806,
'harmony': 18646320,
'boba': 16188,
'moonriver': 890949,
'optimism': 30718,
'aurora': 56092179,
'moonbeam': 173355,
}
airdrop_ranges = {
'polygon': {
# +------------------------- The airdrop value in the chain's native
# | token (used for paying gas fees).
# |
# | +----------------- Shows this is the bridge's initial fee.
# | |
# | | +------------ Airdrop was 0.0003 till this block
# | | | (including this block).
# | | |
# v v v
0.0003: [None, 20335948],
# +-------------------- Airdrop was 0.02 starting from this
# | block (including this block).
# |
# | +---------- Shows this is the airdrop value currently.
# | |
# v v
0.02: [20335949, None],
},
'bsc': {
0.001: [None, 12038426],
0.002: [12038427, None],
},
'avalanche': {
0.05: [None, 7164612],
0.025: [7164613, None],
},
'fantom': {
0.4: [None, None],
},
'moonriver': {
0.1: [None, 914404],
0.002: [914405, None],
},
'ethereum': {
0: [None, None],
},
'arbitrum': {
0: [None, 3393884],
0.003: [3393885, None],
},
'harmony': {
0.1: [None, None],
},
'boba': {
0.005: [None, None],
},
'optimism': {
0: [None, 541401],
0.002: [541402, None],
},
'aurora': {
        # Currently 0 gas is needed for txs on Aurora.
0: [None, None],
},
'moonbeam': {
0: [None, None],
},
}
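# Illustrative helper (a sketch of the semantics documented in the comments above;
# the real lookup is `get_airdrop_value_for_block`, imported from syn.utils.helpers):
# each airdrop value maps to an inclusive `[start, end]` block range, with `None`
# meaning the range is open-ended on that side.
def _airdrop_value_sketch(ranges: Dict[float, list], block: int) -> float:
    for value, (start, end) in ranges.items():
        if (start is None or block >= start) and (end is None or block <= end):
            return value
    raise ValueError(f'no airdrop range covers block {block}')
# e.g. _airdrop_value_sketch(airdrop_ranges['polygon'], 20335948) -> 0.0003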
pool = Pool(size=64)
MAX_BLOCKS = 5000
T = TypeVar('T')
def bridge_callback(chain: str, address: str, log: LogReceipt,
first_run: bool) -> None:
w3: Web3 = SYN_DATA[chain]['w3']
tx_hash = log['transactionHash']
block_n = log['blockNumber']
timestamp = w3.eth.get_block(block_n)['timestamp'] # type: ignore
date = datetime.utcfromtimestamp(timestamp).date()
topic = cast(str, convert(log['topics'][0]))
if topic not in TOPICS:
raise RuntimeError(f'sanity check? got invalid topic: {topic}')
args: Dict[str, Union[int, str]]
direction = TOPICS[topic]
if direction == Direction.OUT:
# For OUT transactions the bridged asset
# and its amount are stored in the logs data
args = parse_logs_out(log)
elif direction == Direction.IN:
# For IN transactions the bridged asset
# and its amount are stored in the tx.input
tx_data: TxData = w3.eth.get_transaction(tx_hash)
# All IN transactions are guaranteed to be
# from validators to Bridge contract
args = parse_tx_in(tx_data)
else:
raise RuntimeError(f'sanity check? got {direction}')
if 'token' not in args:
raise RuntimeError(
f'No token: chain = {chain}, tx_hash = {convert(tx_hash)}')
asset = cast(str, args['token']).lower()
if 'chain_id' in args:
_chain = f':{args["chain_id"]}'
else:
_chain = ''
if asset not in TOKEN_DECIMALS[chain]:
ret = get_bridge_token_info(chain, asset)
if not ret:
# Someone tried to bridge an unsupported token - ignore it.
return
else:
print(f'new token {chain} {asset} {ret}')
TOKEN_DECIMALS[chain].update({asset.lower(): ret[2]})
decimals = TOKEN_DECIMALS[chain][asset]
# Amount is in nUSD/nETH/SYN/etc
value = {'amount': handle_decimals(args['amount'], decimals), 'txCount': 1}
if direction == Direction.IN:
# All `IN` txs are from the validator;
# let's track how much gas they pay.
receipt = w3.eth.wait_for_transaction_receipt(tx_hash, timeout=60)
gas_stats = get_gas_stats_for_tx(chain, w3, tx_hash, receipt)
value['validator'] = gas_stats
# Let's also track how much fees the user paid for the bridge tx
value['fees'] = handle_decimals(args['fee'], 18)
        # All `IN` txs include an airdrop amount - on most chains, at least.
if chain in airdrop_ranges:
value['airdrops'] = get_airdrop_value_for_block(
airdrop_ranges[chain], block_n)
else:
raise RuntimeError(f'{chain} is not in `airdrop_ranges`')
# Just in case we ever need that later for debugging
# value['txs'] = f'[{convert(tx_hash)}]'
key = f'{chain}:bridge:{date}:{asset}:{direction}{_chain}'
if (ret := LOGS_REDIS_URL.get(key)) is not None:
ret = json.loads(ret, use_decimal=True)
if direction == Direction.IN:
if 'validator' not in ret:
raise RuntimeError(
f'No validator for key = {key}, ret = {pformat(ret, indent=2)}'
)
if 'validator' not in value:
raise RuntimeError(
f'No validator: chain = {chain}, tx_hash = {convert(tx_hash)}'
)
if chain in airdrop_ranges:
ret['airdrops'] += value['airdrops']
ret['validator']['gas_price'] += value['validator']['gas_price']
ret['validator']['gas_paid'] += value['validator']['gas_paid']
ret['fees'] += value['fees']
ret['amount'] += value['amount']
ret['txCount'] += 1
# Just in case we ever need that later for debugging
# ret['txs'] += ' ' + value['txs']
LOGS_REDIS_URL.set(key, json.dumps(ret))
else:
        # NOTE: we push this into the bridge callback rather than its own
        # callback to save some RPC calls - why can't they be free? *sigh*.
        # First bridge tx of the day; store this block so we can later map
        # a date to a block, which is a limitation of the eth RPC. This should
        # not be confused with the FIRST block of the day - it is the first
        # block of the day which contains a bridge event.
_key = f'{chain}:date2block:{date}'
LOGS_REDIS_URL.setnx(
_key, json.dumps({
'block': block_n,
'timestamp': timestamp,
}))
LOGS_REDIS_URL.set(key, json.dumps(value))
LOGS_REDIS_URL.set(f'{chain}:logs:{address}:MAX_BLOCK_STORED',
log['blockNumber'])
LOGS_REDIS_URL.set(f'{chain}:logs:{address}:TX_INDEX',
log['transactionIndex'])
def get_logs(
chain: str,
callback: Callable[[str, str, LogReceipt, bool], None],
address: str,
start_block: int = None,
till_block: int = None,
max_blocks: int = MAX_BLOCKS,
topics: List[str] = list(TOPICS),
key_namespace: str = 'logs',
start_blocks: Dict[str, int] = _start_blocks,
prefer_db_values: bool = True,
) -> None:
w3: Web3 = SYN_DATA[chain]['w3']
_chain = f'[{chain}]'
chain_len = max(len(c) for c in SYN_DATA) + 2
tx_index = -1
if start_block is None or prefer_db_values:
_key_block = f'{chain}:{key_namespace}:{address}:MAX_BLOCK_STORED'
_key_index = f'{chain}:{key_namespace}:{address}:TX_INDEX'
if (ret := LOGS_REDIS_URL.get(_key_block)) is not None:
_start_block = max(int(ret), start_blocks[chain])
if (ret := LOGS_REDIS_URL.get(_key_index)) is not None:
tx_index = int(ret)
else:
_start_block = start_blocks[chain]
if start_block is not None and prefer_db_values:
# We don't want to go back in blocks we already checked.
start_block = max(_start_block, start_block)
else:
start_block = _start_block
if till_block is None:
till_block = w3.eth.block_number
print(
f'{key_namespace} | {_chain:{chain_len}} starting from {start_block} '
f'with block height of {till_block}')
jobs: List[gevent.Greenlet] = []
_start = time.time()
x = 0
total_events = 0
initial_block = start_block
first_run = True
while start_block < till_block:
to_block = min(start_block + max_blocks, till_block)
params: FilterParams = {
'fromBlock': start_block,
'toBlock': to_block,
'address': w3.toChecksumAddress(address),
'topics': [topics], # type: ignore
}
logs: List[LogReceipt] = w3.eth.get_logs(params)
        # Apparently, some RPC nodes don't bother sorting events in
        # chronological order, so sort them ourselves: by block number
        # (oldest to newest) and, within the same block, by transaction
        # index (ascending).
logs = sorted(
logs,
key=lambda k: (k['blockNumber'], k['transactionIndex'])
)
for log in logs:
# Skip transactions from the very first block
# that are already in the DB
if log['blockNumber'] == initial_block \
and log['transactionIndex'] <= tx_index:
continue
try:
callback(chain, address, log, first_run)
except Exception as e:
print(chain, log)
raise e
if first_run:
first_run = False
start_block += max_blocks + 1
y = time.time() - _start
total_events += len(logs)
percent = 100 * (to_block - initial_block) \
/ (till_block - initial_block)
print(f'{key_namespace} | {_chain:{chain_len}} elapsed {y:5.1f}s'
f' ({y - x:5.1f}s), found {total_events:5} events,'
f' {percent:4.1f}% done: so far at block {start_block}')
x = y
gevent.joinall(jobs)
print(f'{_chain:{chain_len}} it took {time.time() - _start:.1f}s!')
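# Usage sketch: one way `get_logs` might be driven for a single chain with
# `bridge_callback`. Requires live RPC endpoints and Redis configured via
# syn.utils.data; the address below is a hypothetical placeholder, not a real
# bridge deployment.
if __name__ == '__main__':
    _EXAMPLE_BRIDGE_ADDRESS = '0x0000000000000000000000000000000000000000'
    get_logs('ethereum', bridge_callback, _EXAMPLE_BRIDGE_ADDRESS)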
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2D linear elasticity example
Solve the equilibrium equation -\nabla \cdot \sigma(x) = f(x) for x\in\Omega
with the strain-displacement equation:
\epsilon = 1/2(\nabla u + \nabla u^T)
and the constitutive law:
\sigma = 2*\mu*\epsilon + \lambda*(\nabla\cdot u)I,
where \mu and \lambda are Lame constants, I is the identity tensor.
Dirichlet boundary conditions: u(x)=\hat{u} for x\in\Gamma_D
Neumann boundary conditions: \sigma n = \hat{t} for x\in \Gamma_N,
where n is the normal vector.
For this example:
\Omega is a rectangle with corners at (0,0) and (8,2)
Dirichlet boundary conditions for x=0:
u(x,y) = P/(6*E*I)*y*((2+nu)*(y^2-W^2/4))
v(x,y) = -P/(6*E*I)*(3*nu*y^2*L)
and parabolic traction at x=8
p(x,y) = P*(y^2 - y*W)/(2*I)
    where P=2 is the maximum traction
E = 1e3 is Young's modulus
nu = 0.25 is the Poisson ratio
I = W^3/12 is second moment of area of the cross-section
"""
import tensorflow as tf
import numpy as np
import time
from utils.tfp_loss import tfp_function_factory
import scipy.optimize
from utils.scipy_loss import scipy_function_factory
from utils.Geom_examples import Quadrilateral
from utils.Solvers import Elasticity2D_coll_dist
from utils.Plotting import plot_pts
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
#make figures bigger on HiDPI monitors
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 200
np.random.seed(42)
tf.random.set_seed(42)
class Elast_TimoshenkoBeam(Elasticity2D_coll_dist):
'''
Class including the boundary conditions for the Timoshenko beam problem
'''
def __init__(self, layers, train_op, num_epoch, print_epoch, model_data, data_type):
super().__init__(layers, train_op, num_epoch, print_epoch, model_data, data_type)
@tf.function
def dirichletBound(self, X, xPhys, yPhys):
# multiply by x,y for strong imposition of boundary conditions
u_val = X[:,0:1]
v_val = X[:,1:2]
self.W = 2.0
self.L = 8.0
self.I = self.W**3/12
self.P = 2.0
self.pei = self.P/(6*self.Emod*self.I)
y_temp = yPhys - self.W/2
        u_left = self.pei*y_temp*((2+self.nu)*(y_temp**2-self.W**2/4))
        v_left = -self.pei*(3*self.nu*y_temp**2*self.L)
u_val = xPhys*u_val + u_left
v_val = xPhys*v_val + v_left
return u_val, v_val
#define the input and output data set
beam_length = 8.
beam_width = 2.
domainCorners = np.array([[0., 0.], [0, beam_width], [beam_length, 0.], [beam_length, beam_width]])
geomDomain = Quadrilateral(domainCorners)
numPtsU = 80
numPtsV = 40
#xPhys, yPhys = myQuad.getRandomIntPts(numPtsU*numPtsV)
xPhys, yPhys = geomDomain.getUnifIntPts(numPtsU,numPtsV,[0,0,0,0])
data_type = "float32"
Xint = np.concatenate((xPhys,yPhys),axis=1).astype(data_type)
Yint = np.zeros_like(Xint).astype(data_type)
# prepare boundary points in the format Xbnd = [Xcoord, Ycoord, dir] and
# Ybnd = [trac], where Xcoord, Ycoord are the x and y coordinates of the point,
# dir=0 for the x-component of the traction and dir=1 for the y-component of
# the traction
#bottom boundary, include both x and y directions
xPhysBndB, yPhysBndB, xNormB, yNormB = geomDomain.getUnifEdgePts(numPtsU, numPtsV, [1,0,0,0])
dirB0 = np.zeros_like(xPhysBndB)
dirB1 = np.ones_like(xPhysBndB)
XbndB0 = np.concatenate((xPhysBndB, yPhysBndB, xNormB, yNormB, dirB0), axis=1).astype(data_type)
XbndB1 = np.concatenate((xPhysBndB, yPhysBndB, xNormB, yNormB, dirB1), axis=1).astype(data_type)
#boundary for x=beam_length, include both the x and y directions
xPhysBndC, yPhysBndC, xNormC, yNormC = geomDomain.getUnifEdgePts(numPtsU, numPtsV, [0,1,0,0])
dirC0 = np.zeros_like(xPhysBndC)
dirC1 = np.ones_like(xPhysBndC)
XbndC0 = np.concatenate((xPhysBndC, yPhysBndC, xNormC, yNormC, dirC0), axis=1).astype(data_type)
XbndC1 = np.concatenate((xPhysBndC, yPhysBndC, xNormC, yNormC, dirC1), axis=1).astype(data_type)
#boundary for y=beam_width, include both the x and y directions
xPhysBndD, yPhysBndD, xNormD, yNormD = geomDomain.getUnifEdgePts(numPtsU, numPtsV, [0,0,1,0])
dirD0 = np.zeros_like(xPhysBndD)
dirD1 = np.ones_like(xPhysBndD)
XbndD0 = np.concatenate((xPhysBndD, yPhysBndD, xNormD, yNormD, dirD0), axis=1).astype(data_type)
XbndD1 = np.concatenate((xPhysBndD, yPhysBndD, xNormD, yNormD, dirD1), axis=1).astype(data_type)
# concatenate all the boundaries
Xbnd = np.concatenate((XbndB0, XbndB1, XbndC0, XbndC1, XbndD0, XbndD1), axis=0)
#plot the collocation points
plot_pts(Xint, Xbnd[:,0:2])
model_data = dict()
model_data["E"] = 1e3
model_data["nu"] = 0.25
model_data["state"] = "plane stress"
#define loading
pressure = 2.
YbndB0 = np.zeros_like(xPhysBndB).astype(data_type)
YbndB1 = np.zeros_like(xPhysBndB).astype(data_type)
YbndC0 = np.zeros_like(xPhysBndC).astype(data_type)
inert = beam_width**3/12
YbndC1 = (pressure*(yPhysBndC**2 - yPhysBndC*beam_width)/(2*inert)).astype(data_type)
YbndD0 = np.zeros_like(xPhysBndD).astype(data_type)
YbndD1 = np.zeros_like(xPhysBndD).astype(data_type)
Ybnd = np.concatenate((YbndB0, YbndB1, YbndC0, YbndC1, YbndD0, YbndD1), axis=0)
#define the model
tf.keras.backend.set_floatx(data_type)
l1 = tf.keras.layers.Dense(20, "swish")
l2 = tf.keras.layers.Dense(20, "swish")
l3 = tf.keras.layers.Dense(20, "swish")
l4 = tf.keras.layers.Dense(2, None)
train_op = tf.keras.optimizers.Adam()
train_op2 = "TFP-BFGS"
num_epoch = 15000
print_epoch = 100
pred_model = Elast_TimoshenkoBeam([l1, l2, l3, l4], train_op, num_epoch,
print_epoch, model_data, data_type)
#convert the training data to tensors
Xint_tf = tf.convert_to_tensor(Xint)
Yint_tf = tf.convert_to_tensor(Yint)
Xbnd_tf = tf.convert_to_tensor(Xbnd)
Ybnd_tf = tf.convert_to_tensor(Ybnd)
#training
t0 = time.time()
print("Training (ADAM)...")
pred_model.network_learn(Xint_tf, Yint_tf, Xbnd_tf, Ybnd_tf )
t1 = time.time()
print("Time taken (ADAM)", t1-t0, "seconds")
if train_op2 == "SciPy-LBFGS-B":
print("Training (SciPy-LBFGS-B)...")
loss_func = scipy_function_factory(pred_model, Xint_tf, Yint_tf, Xbnd_tf, Ybnd_tf)
init_params = np.float64(tf.dynamic_stitch(loss_func.idx, pred_model.trainable_variables).numpy())
results = scipy.optimize.minimize(fun=loss_func, x0=init_params, jac=True, method='L-BFGS-B',
options={'disp': None, 'maxls': 50, 'iprint': -1,
'gtol': 1e-6, 'eps': 1e-6, 'maxiter': 50000, 'ftol': 1e-6,
'maxcor': 50, 'maxfun': 50000})
    # after training, the final optimized parameters are still in results.x,
    # so we have to manually put them back into the model
loss_func.assign_new_model_parameters(results.x)
else:
print("Training (TFP-BFGS)...")
loss_func = tfp_function_factory(pred_model, Xint_tf, Yint_tf, Xbnd_tf, Ybnd_tf)
# convert initial model parameters to a 1D tf.Tensor
init_params = tf.dynamic_stitch(loss_func.idx, pred_model.trainable_variables)
# train the model with L-BFGS solver
results = tfp.optimizer.bfgs_minimize(
value_and_gradients_function=loss_func, initial_position=init_params,
max_iterations=10000, tolerance=1e-14)
# after training, the final optimized parameters are still in results.position
# so we have to manually put them back to the model
loss_func.assign_new_model_parameters(results.position)
t2 = time.time()
print("Time taken (BFGS)", t2-t1, "seconds")
print("Time taken (all)", t2-t0, "seconds")
#define the exact displacements
def exact_disp(x,y):
E = model_data["E"]
nu = model_data["nu"]
    inert = beam_width**3/12
    pei = pressure/(6*E*inert)
    y_temp = y - beam_width/2  # shift y so the origin sits at the beam's mid-height
x_disp = pei*y_temp*((6*beam_length-3*x)*x+(2+nu)*(y_temp**2-beam_width**2/4))
y_disp =-pei*(3*nu*y_temp**2*(beam_length-x)+(4+5*nu)*beam_width**2*x/4+(3*beam_length-x)*x**2)
return x_disp, y_disp
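# Sanity check on the exact solution: at the mid-height of the clamped edge
# (x=0, y=beam_width/2) both displacement components vanish, since y_temp = 0.
_u0, _v0 = exact_disp(0.0, beam_width/2)
assert abs(_u0) < 1e-12 and abs(_v0) < 1e-12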
print("Testing...")
numPtsUTest = 2*numPtsU
numPtsVTest = 2*numPtsV
xPhysTest, yPhysTest = geomDomain.getUnifIntPts(numPtsUTest, numPtsVTest, [1,1,1,1])
XTest = np.concatenate((xPhysTest,yPhysTest),axis=1).astype(data_type)
XTest_tf = tf.convert_to_tensor(XTest)
YTest = pred_model(XTest_tf).numpy()
xPhysTest2D = np.resize(XTest[:,0], [numPtsVTest, numPtsUTest])
yPhysTest2D = np.resize(XTest[:,1], [numPtsVTest, numPtsUTest])
YTest2D_x = np.resize(YTest[:,0], [numPtsVTest, numPtsUTest])
YTest2D_y = np.resize(YTest[:,1], [numPtsVTest, numPtsUTest])
plt.contourf(xPhysTest2D, yPhysTest2D, YTest2D_x, 255, cmap=plt.cm.jet)
plt.colorbar()
plt.title("Computed x-displacement")
plt.axis('equal')
plt.show()
plt.contourf(xPhysTest2D, yPhysTest2D, YTest2D_y, 255, cmap=plt.cm.jet)
plt.colorbar()
plt.title("Computed y-displacement")
plt.axis('equal')
plt.show()
# comparison with exact solution
ux_exact, uy_exact = exact_disp(xPhysTest, yPhysTest)
ux_test = YTest[:,0:1]
uy_test = YTest[:,1:2]
err_norm = np.sqrt(np.sum((ux_exact-ux_test)**2+(uy_exact-uy_test)**2))
ex_norm = np.sqrt(np.sum(ux_exact**2 + uy_exact**2))
rel_err_l2 = err_norm/ex_norm
print("Relative L2 error: ", rel_err_l2)
YExact2D_x = np.resize(ux_exact, [numPtsVTest, numPtsUTest])
YExact2D_y = np.resize(uy_exact, [numPtsVTest, numPtsUTest])
plt.contourf(xPhysTest2D, yPhysTest2D, YExact2D_x, 255, cmap=plt.cm.jet)
plt.colorbar()
plt.title("Exact x-displacement")
plt.axis('equal')
plt.show()
plt.contourf(xPhysTest2D, yPhysTest2D, YExact2D_y, 255, cmap=plt.cm.jet)
plt.colorbar()
plt.title("Exact y-displacement")
plt.axis('equal')
plt.show()
plt.contourf(xPhysTest2D, yPhysTest2D, YExact2D_x-YTest2D_x, 255, cmap=plt.cm.jet)
plt.colorbar()
plt.title("Error for x-displacement")
plt.axis('equal')
plt.show()
plt.contourf(xPhysTest2D, yPhysTest2D, YExact2D_y-YTest2D_y, 255, cmap=plt.cm.jet)
plt.colorbar()
plt.title("Error for y-displacement")
plt.axis('equal')
plt.show()
|
class Product:
def __init__(self, id, name, cost):
self.__id = id
self.__name = name
self.__cost = cost
self.__quantity = 1
self.__stock = 0
def add_quantity(self):
self.__quantity += 1
def remove_quantity(self):
self.__quantity -= 1
def get_id(self):
return self.__id
def set_id(self, id):
self.__id = id
def get_name(self):
return self.__name
def set_name(self, name):
self.__name = name
def get_cost(self):
return self.__cost
def set_cost(self, cost):
self.__cost = cost
def get_quantity(self):
return self.__quantity
def get_stock(self):
return self.__stock
def set_stock(self, stock):
self.__stock = stock
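# Usage sketch: create a product and exercise the quantity/stock accessors.
if __name__ == '__main__':
    widget = Product(1, 'widget', 9.99)
    widget.add_quantity()
    widget.set_stock(10)
    print(widget.get_name(), widget.get_quantity(), widget.get_stock())  # widget 2 10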
|
import ctypes
from vector2 import Vector2
class float4(ctypes.Structure):
_fields_ = [
('m', ctypes.c_float * 4)
]
class m4(ctypes.Structure):
_fields_ = [
('m00', ctypes.c_float), ('m01', ctypes.c_float),
('m10', ctypes.c_float), ('m11', ctypes.c_float)
]
class columns(ctypes.Structure):
_fields_ = [
('c0', Vector2),
('c1', Vector2)
]
class Matrix2(ctypes.Union):
_anonymous_ = ['columns', 's1', 's2']
_fields_ = [
('columns', columns),
('s1', float4),
('s2', m4)
]
def __str__(self):
values = [float(x) for x in self.s1.m]
mstr = f'''Matrix2:
[{values[0]:.4f}, {values[1]:.4f}]
[{values[2]:.4f}, {values[3]:.4f}]'''
return mstr
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
cols = (
Vector2(1.0, 0.0),
Vector2(0.0, 1.0))
self.columns = cols
if __name__ == '__main__':
m2 = Matrix2()
print(m2)
|
#!/usr/bin/python3
"""
Compute the circumference and area of a circle from its radius.
version: 0.1
author: icro
"""
import math
r = float(input('Radius of the circle = '))
p = 2 * math.pi * r
a = math.pi * r * r
print('Circumference: %.2f' % p)
print('Area: %.2f' % a)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 17 13:27:49 2021
@author: Hatlab-RRK
"""
import numpy as np
import matplotlib.pyplot as plt
#visualizing phase offset in a mixer
I_0 = lambda t: np.cos(t)
Q_0 = lambda t: np.sin(t)
delay = np.pi/6
I_delayed = lambda t: np.cos(t+delay)
t = np.linspace(0, 2*np.pi-np.pi/32, 64, endpoint = False)
plt.plot(I_0(t), Q_0(t), label = 'base')
plt.plot(I_delayed(t), Q_0(t), label = 'I delayed')
plt.legend()
plt.gca().set_aspect(1)
#the phase offset skews the IQ circle into an ellipse. Now compare the magnitudes
plt.figure()
plt.plot(t, I_0(t)**2+Q_0(t)**2, label = 'base')
plt.plot(t, I_delayed(t)**2+Q_0(t)**2, label = 'I delayed')
plt.legend()
plt.gca().set_aspect(1)
#notice the delayed version has higher maximum amplitude
#now we can try to correct the delay
I_corrected_by_delay = I_delayed(t-delay)
# def orthogonalization(I_delayed, Q_0):
# I_corrected, Q_corrected
# for I_val, Q_val in I_delayed, Q_0:
plt.figure()
plt.plot(I_0(t), Q_0(t), label = 'base')
plt.plot(I_delayed(t), Q_0(t), label = 'I delayed')
I_corrected = lambda t, Q, delay: (I_0(t)-Q*np.sin(delay))/np.cos(delay)
I_corrected_then_run_through_system = I_corrected(t-delay, Q_0(t), delay)
# plt.plot(I_corrected(t, Q_0(t), delay), Q_0(t), label = 'I corrected by function')
plt.plot(I_corrected_then_run_through_system, Q_0(t), label = 'I corrected by function then run through system')
# plt.plot(I_corrected_by_delay, Q_0(t), label = 'I corrected by delay')
plt.legend()
plt.gca().set_aspect(1)
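# Algebraic recovery of the undelayed I (sketch): using the identity
#   cos(t) = (cos(t + delay) + sin(t)*sin(delay)) / cos(delay),
# the skewed pair (I_delayed, Q_0) determines the original I exactly.
I_recovered = (I_delayed(t) + Q_0(t)*np.sin(delay)) / np.cos(delay)
print('max recovery error:', np.max(np.abs(I_recovered - I_0(t))))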
|
"""Unit tests for _properties/compatibility.py."""
# pylint: disable=protected-access
# pylint: disable=invalid-name,missing-docstring
import unittest
from unittest import mock
import keras
import hmmlearn
from pyhumour._properties.language_models import HMMHelper
class TestHMM(unittest.TestCase):
def setUp(self) -> None:
        self._hmm = HMMHelper(["this is funny", "so damn funny"] + ["not funny", "so not funny"])
def test_score(self):
result = self._hmm.get_hmm_score("damn funny")
self.assertEqual(round(result,3), -7.687)
    def test_tokenizer(self):
        self.assertIsInstance(self._hmm.get_tokenizer(123),
                              keras.preprocessing.text.Tokenizer)
    def test_hmmObject(self):
        self.assertIsInstance(self._hmm.get_hmm(), hmmlearn.hmm.GaussianHMM)
|
from typing import Union, Tuple, Optional, List
import torch
from ....torchio import DATA
from ....utils import is_image_dict, to_tuple
from .. import RandomTransform
class RandomFlip(RandomTransform):
"""Reverse the order of elements in an image along the given axes.
Args:
axes: Axis or tuple of axes along which the image will be flipped.
flip_probability: Probability that the image will be flipped. This is
computed on a per-axis basis.
seed: See :py:class:`~torchio.transforms.augmentation.RandomTransform`.
"""
def __init__(
self,
axes: Union[int, Tuple[int, ...]] = 0,
flip_probability: float = 0.5,
seed: Optional[int] = None,
):
super().__init__(seed=seed)
self.axes = self.parse_axes(axes)
self.flip_probability = self.parse_probability(
flip_probability,
'flip_probability',
)
def apply_transform(self, sample: dict) -> dict:
axes_to_flip_hot = self.get_params(self.axes, self.flip_probability)
sample['random_flip'] = axes_to_flip_hot
for image_dict in sample.values():
if not is_image_dict(image_dict):
continue
tensor = image_dict[DATA]
dims = []
for dim, flip_this in enumerate(axes_to_flip_hot):
if not flip_this:
continue
actual_dim = dim + 1 # images are 4D
dims.append(actual_dim)
tensor = torch.flip(tensor, dims=dims)
image_dict[DATA] = tensor
return sample
@staticmethod
def get_params(axes: Tuple[int, ...], probability: float) -> List[bool]:
axes_hot = [False, False, False]
for axis in axes:
random_number = torch.rand(1)
flip_this = bool(probability > random_number)
axes_hot[axis] = flip_this
return axes_hot
@staticmethod
def parse_axes(axes: Union[int, Tuple[int, ...]]):
axes_tuple = to_tuple(axes)
for axis in axes_tuple:
is_int = isinstance(axis, int)
if not is_int or axis not in (0, 1, 2):
raise ValueError('All axes must be 0, 1 or 2')
return axes_tuple
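# Standalone sketch exercising the static helpers (no torchio sample dict
# needed): with flip probability 1.0 every requested axis is flagged.
if __name__ == '__main__':
    _axes = RandomFlip.parse_axes((0, 2))
    _axes_hot = RandomFlip.get_params(_axes, probability=1.0)
    print(_axes, _axes_hot)  # (0, 2) [True, False, True]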
|
# Copyright 2012 NagiosQL-API authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#encoding: utf-8
from django.db import models
class TblCommand(models.Model):
command_name = models.CharField(unique=True, max_length=255)
command_line = models.TextField()
command_type = models.IntegerField()
register = models.CharField(max_length=3)
active = models.CharField(max_length=3)
last_modified = models.DateTimeField()
access_group = models.IntegerField()
config_id = models.IntegerField(unique=False)
class Meta:
db_table = u'tbl_command'
#
# class TblConfigtarget(models.Model):
# id = models.IntegerField(primary_key=True)
# target = models.TextField(unique=True)
# alias = models.CharField(max_length=765)
# server = models.CharField(max_length=765)
# method = models.CharField(max_length=765)
# user = models.CharField(max_length=765)
# password = models.CharField(max_length=765)
# ssh_key_path = models.CharField(max_length=765)
# basedir = models.CharField(max_length=765)
# hostconfig = models.CharField(max_length=765)
# serviceconfig = models.CharField(max_length=765)
# backupdir = models.CharField(max_length=765)
# hostbackup = models.CharField(max_length=765)
# servicebackup = models.CharField(max_length=765)
# nagiosbasedir = models.CharField(max_length=765)
# importdir = models.CharField(max_length=765)
# picturedir = models.CharField(max_length=765)
# commandfile = models.CharField(max_length=765)
# binaryfile = models.CharField(max_length=765)
# pidfile = models.CharField(max_length=765)
# conffile = models.CharField(max_length=765)
# version = models.IntegerField()
# access_group = models.IntegerField()
# active = models.CharField(max_length=3)
# nodelete = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# class Meta:
# db_table = u'tbl_configtarget'
#
# class TblContact(models.Model):
# id = models.IntegerField(primary_key=True)
# contact_name = models.TextField(unique=True)
# alias = models.CharField(max_length=765)
# contactgroups = models.IntegerField()
# contactgroups_tploptions = models.IntegerField()
# host_notifications_enabled = models.IntegerField()
# service_notifications_enabled = models.IntegerField()
# host_notification_period = models.IntegerField()
# service_notification_period = models.IntegerField()
# host_notification_options = models.CharField(max_length=60)
# service_notification_options = models.CharField(max_length=60)
# host_notification_commands = models.IntegerField()
# host_notification_commands_tploptions = models.IntegerField()
# service_notification_commands = models.IntegerField()
# service_notification_commands_tploptions = models.IntegerField()
# can_submit_commands = models.IntegerField()
# retain_status_information = models.IntegerField()
# retain_nonstatus_information = models.IntegerField()
# email = models.CharField(max_length=765)
# pager = models.CharField(max_length=765)
# address1 = models.CharField(max_length=765)
# address2 = models.CharField(max_length=765)
# address3 = models.CharField(max_length=765)
# address4 = models.CharField(max_length=765)
# address5 = models.CharField(max_length=765)
# address6 = models.CharField(max_length=765)
# name = models.CharField(max_length=765)
# use_variables = models.IntegerField()
# use_template = models.IntegerField()
# use_template_tploptions = models.IntegerField()
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# class Meta:
# db_table = u'tbl_contact'
#
# class TblContactgroup(models.Model):
# id = models.IntegerField(primary_key=True)
# contactgroup_name = models.TextField(unique=True)
# alias = models.CharField(max_length=765)
# members = models.IntegerField()
# contactgroup_members = models.IntegerField()
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# class Meta:
# db_table = u'tbl_contactgroup'
#
# class TblContacttemplate(models.Model):
# id = models.IntegerField(primary_key=True)
# template_name = models.TextField(unique=True)
# alias = models.CharField(max_length=765)
# contactgroups = models.IntegerField()
# contactgroups_tploptions = models.IntegerField()
# host_notifications_enabled = models.IntegerField()
# service_notifications_enabled = models.IntegerField()
# host_notification_period = models.IntegerField()
# service_notification_period = models.IntegerField()
# host_notification_options = models.CharField(max_length=60)
# service_notification_options = models.CharField(max_length=60)
# host_notification_commands = models.IntegerField()
# host_notification_commands_tploptions = models.IntegerField()
# service_notification_commands = models.IntegerField()
# service_notification_commands_tploptions = models.IntegerField()
# can_submit_commands = models.IntegerField()
# retain_status_information = models.IntegerField()
# retain_nonstatus_information = models.IntegerField()
# email = models.CharField(max_length=765)
# pager = models.CharField(max_length=765)
# address1 = models.CharField(max_length=765)
# address2 = models.CharField(max_length=765)
# address3 = models.CharField(max_length=765)
# address4 = models.CharField(max_length=765)
# address5 = models.CharField(max_length=765)
# address6 = models.CharField(max_length=765)
# use_variables = models.IntegerField()
# use_template = models.IntegerField()
# use_template_tploptions = models.IntegerField()
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# class Meta:
# db_table = u'tbl_contacttemplate'
#
# class TblDatadomain(models.Model):
# id = models.IntegerField(primary_key=True)
# domain = models.TextField(unique=True)
# alias = models.CharField(max_length=765)
# targets = models.IntegerField()
# version = models.IntegerField()
# enable_common = models.IntegerField()
# utf8_decode = models.IntegerField()
# access_group = models.IntegerField()
# active = models.CharField(max_length=3)
# nodelete = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# class Meta:
# db_table = u'tbl_datadomain'
#
# class TblGroup(models.Model):
# id = models.IntegerField(primary_key=True)
# groupname = models.CharField(max_length=765)
# description = models.CharField(max_length=765)
# users = models.IntegerField()
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# class Meta:
# db_table = u'tbl_group'
#
class TblHost(models.Model):
host_name = models.CharField(unique=True, max_length=255)
alias = models.CharField(max_length=765)
display_name = models.CharField(max_length=765)
address = models.CharField(max_length=765)
parents = models.IntegerField()
parents_tploptions = models.IntegerField()
hostgroups = models.IntegerField()
hostgroups_tploptions = models.IntegerField()
check_command = models.TextField()
use_template = models.IntegerField()
use_template_tploptions = models.IntegerField()
initial_state = models.CharField(max_length=60)
max_check_attempts = models.IntegerField(null=True, blank=True)
check_interval = models.IntegerField(null=True, blank=True)
retry_interval = models.IntegerField(null=True, blank=True)
active_checks_enabled = models.IntegerField()
passive_checks_enabled = models.IntegerField()
check_period = models.IntegerField()
obsess_over_host = models.IntegerField()
check_freshness = models.IntegerField()
freshness_threshold = models.IntegerField(null=True, blank=True)
event_handler = models.IntegerField()
event_handler_enabled = models.IntegerField()
low_flap_threshold = models.IntegerField(null=True, blank=True)
high_flap_threshold = models.IntegerField(null=True, blank=True)
flap_detection_enabled = models.IntegerField()
flap_detection_options = models.CharField(max_length=60)
process_perf_data = models.IntegerField()
retain_status_information = models.IntegerField()
retain_nonstatus_information = models.IntegerField()
contacts = models.IntegerField()
contacts_tploptions = models.IntegerField()
contact_groups = models.IntegerField()
contact_groups_tploptions = models.IntegerField()
notification_interval = models.IntegerField(null=True, blank=True)
notification_period = models.IntegerField()
first_notification_delay = models.IntegerField(null=True, blank=True)
notification_options = models.CharField(max_length=60)
notifications_enabled = models.IntegerField()
stalking_options = models.CharField(max_length=60)
notes = models.CharField(max_length=765)
notes_url = models.CharField(max_length=765)
action_url = models.CharField(max_length=765)
icon_image = models.CharField(max_length=1500)
icon_image_alt = models.CharField(max_length=765)
vrml_image = models.CharField(max_length=765)
statusmap_image = models.CharField(max_length=765)
number_2d_coords = models.CharField(max_length=765, db_column=u'2d_coords') # Field renamed because it wasn't a valid Python identifier.
number_3d_coords = models.CharField(max_length=765, db_column=u'3d_coords') # Field renamed because it wasn't a valid Python identifier.
use_variables = models.IntegerField()
name = models.CharField(max_length=765)
register = models.CharField(max_length=3)
active = models.CharField(max_length=3)
last_modified = models.DateTimeField()
access_group = models.IntegerField()
config_id = models.IntegerField(unique=False)
class Meta:
db_table = u'tbl_host'
@property
def act(self):
return self.active == "1"
def __str__(self):
return 'Host(%s)' % self.host_name
#
# class TblHostdependency(models.Model):
# id = models.IntegerField(primary_key=True)
# config_name = models.TextField(unique=True)
# dependent_host_name = models.IntegerField()
# dependent_hostgroup_name = models.IntegerField()
# host_name = models.IntegerField()
# hostgroup_name = models.IntegerField()
# inherits_parent = models.IntegerField()
# execution_failure_criteria = models.CharField(max_length=60)
# notification_failure_criteria = models.CharField(max_length=60)
# dependency_period = models.IntegerField()
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# import_hash = models.CharField(max_length=765)
# class Meta:
# db_table = u'tbl_hostdependency'
#
# class TblHostescalation(models.Model):
# id = models.IntegerField(primary_key=True)
# config_name = models.TextField(unique=True)
# host_name = models.IntegerField()
# hostgroup_name = models.IntegerField()
# contacts = models.IntegerField()
# contact_groups = models.IntegerField()
# first_notification = models.IntegerField(null=True, blank=True)
# last_notification = models.IntegerField(null=True, blank=True)
# notification_interval = models.IntegerField(null=True, blank=True)
# escalation_period = models.IntegerField()
# escalation_options = models.CharField(max_length=60)
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# import_hash = models.CharField(max_length=765)
# class Meta:
# db_table = u'tbl_hostescalation'
#
# class TblHostextinfo(models.Model):
# id = models.IntegerField(primary_key=True)
# host_name = models.IntegerField(unique=True)
# notes = models.CharField(max_length=765)
# notes_url = models.CharField(max_length=765)
# action_url = models.CharField(max_length=765)
# statistik_url = models.CharField(max_length=765)
# icon_image = models.CharField(max_length=1500)
# icon_image_alt = models.CharField(max_length=765)
# vrml_image = models.CharField(max_length=765)
# statusmap_image = models.CharField(max_length=765)
# number_2d_coords = models.CharField(max_length=765, db_column=u'2d_coords') # Field renamed because it wasn't a valid Python identifier.
# number_3d_coords = models.CharField(max_length=765, db_column=u'3d_coords') # Field renamed because it wasn't a valid Python identifier.
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# class Meta:
# db_table = u'tbl_hostextinfo'
#
# class TblHostgroup(models.Model):
# id = models.IntegerField(primary_key=True)
# hostgroup_name = models.TextField(unique=True)
# alias = models.CharField(max_length=765)
# members = models.IntegerField()
# hostgroup_members = models.IntegerField()
# notes = models.CharField(max_length=765)
# notes_url = models.CharField(max_length=765)
# action_url = models.CharField(max_length=765)
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# class Meta:
# db_table = u'tbl_hostgroup'
#
class TblHosttemplate(models.Model):
template_name = models.CharField(unique=True, max_length=255)
alias = models.CharField(max_length=765)
parents = models.IntegerField()
parents_tploptions = models.IntegerField()
hostgroups = models.IntegerField()
hostgroups_tploptions = models.IntegerField()
check_command = models.TextField()
use_template = models.IntegerField()
use_template_tploptions = models.IntegerField()
initial_state = models.CharField(max_length=60)
max_check_attempts = models.IntegerField(null=True, blank=True)
check_interval = models.IntegerField(null=True, blank=True)
retry_interval = models.IntegerField(null=True, blank=True)
active_checks_enabled = models.IntegerField()
passive_checks_enabled = models.IntegerField()
check_period = models.IntegerField()
obsess_over_host = models.IntegerField()
check_freshness = models.IntegerField()
freshness_threshold = models.IntegerField(null=True, blank=True)
event_handler = models.IntegerField()
event_handler_enabled = models.IntegerField()
low_flap_threshold = models.IntegerField(null=True, blank=True)
high_flap_threshold = models.IntegerField(null=True, blank=True)
flap_detection_enabled = models.IntegerField()
flap_detection_options = models.CharField(max_length=60)
process_perf_data = models.IntegerField()
retain_status_information = models.IntegerField()
retain_nonstatus_information = models.IntegerField()
contacts = models.IntegerField()
contacts_tploptions = models.IntegerField()
contact_groups = models.IntegerField()
contact_groups_tploptions = models.IntegerField()
notification_interval = models.IntegerField(null=True, blank=True)
notification_period = models.IntegerField()
first_notification_delay = models.IntegerField(null=True, blank=True)
notification_options = models.CharField(max_length=60)
notifications_enabled = models.IntegerField()
stalking_options = models.CharField(max_length=60)
notes = models.CharField(max_length=765)
notes_url = models.CharField(max_length=765)
action_url = models.CharField(max_length=765)
icon_image = models.CharField(max_length=1500)
icon_image_alt = models.CharField(max_length=765)
vrml_image = models.CharField(max_length=765)
statusmap_image = models.CharField(max_length=765)
number_2d_coords = models.CharField(max_length=765, db_column=u'2d_coords') # Field renamed because it wasn't a valid Python identifier.
number_3d_coords = models.CharField(max_length=765, db_column=u'3d_coords') # Field renamed because it wasn't a valid Python identifier.
use_variables = models.IntegerField()
register = models.CharField(max_length=3)
active = models.CharField(max_length=3)
last_modified = models.DateTimeField()
access_group = models.IntegerField()
config_id = models.IntegerField(unique=False)
class Meta:
db_table = u'tbl_hosttemplate'
#
# class TblInfo(models.Model):
# id = models.IntegerField(primary_key=True)
# key1 = models.CharField(max_length=300)
# key2 = models.CharField(max_length=300)
# version = models.CharField(max_length=150)
# language = models.CharField(max_length=150)
# infotext = models.TextField()
# class Meta:
# db_table = u'tbl_info'
#
# class TblLanguage(models.Model):
# id = models.IntegerField(primary_key=True)
# language = models.CharField(max_length=765)
# locale = models.CharField(max_length=765)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# class Meta:
# db_table = u'tbl_language'
#
# class TblLnkcontacttocommandhost(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkContactToCommandHost'
#
# class TblLnkcontacttocommandservice(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkContactToCommandService'
#
# class TblLnkcontacttocontactgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkContactToContactgroup'
#
# class TblLnkcontacttocontacttemplate(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# idsort = models.IntegerField(db_column='idSort') # Field name made lowercase.
# idtable = models.IntegerField(primary_key=True, db_column='idTable') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkContactToContacttemplate'
#
# class TblLnkcontacttovariabledefinition(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkContactToVariabledefinition'
#
# class TblLnkcontactgrouptocontact(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkContactgroupToContact'
#
# class TblLnkcontactgrouptocontactgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkContactgroupToContactgroup'
#
# class TblLnkcontacttemplatetocommandhost(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkContacttemplateToCommandHost'
#
# class TblLnkcontacttemplatetocommandservice(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkContacttemplateToCommandService'
#
# class TblLnkcontacttemplatetocontactgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkContacttemplateToContactgroup'
#
# class TblLnkcontacttemplatetocontacttemplate(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# idsort = models.IntegerField(db_column='idSort') # Field name made lowercase.
# idtable = models.IntegerField(primary_key=True, db_column='idTable') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkContacttemplateToContacttemplate'
#
# class TblLnkcontacttemplatetovariabledefinition(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkContacttemplateToVariabledefinition'
#
# class TblLnkgrouptouser(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# read = models.CharField(max_length=3)
# write = models.CharField(max_length=3)
# link = models.CharField(max_length=3)
# class Meta:
# db_table = u'tbl_lnkGroupToUser'
#
# class TblLnkhosttocontact(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostToContact'
#
# class TblLnkhosttocontactgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostToContactgroup'
#
# class TblLnkhosttohost(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostToHost'
#
# class TblLnkhosttohostgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostToHostgroup'
#
# class TblLnkhosttohosttemplate(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# idsort = models.IntegerField(db_column='idSort') # Field name made lowercase.
# idtable = models.IntegerField(primary_key=True, db_column='idTable') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkHostToHosttemplate'
#
# class TblLnkhosttovariabledefinition(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkHostToVariabledefinition'
#
# class TblLnkhostdependencytohostDh(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostdependencyToHost_DH'
#
# class TblLnkhostdependencytohostH(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostdependencyToHost_H'
#
# class TblLnkhostdependencytohostgroupDh(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostdependencyToHostgroup_DH'
#
# class TblLnkhostdependencytohostgroupH(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostdependencyToHostgroup_H'
#
# class TblLnkhostescalationtocontact(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostescalationToContact'
#
# class TblLnkhostescalationtocontactgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostescalationToContactgroup'
#
# class TblLnkhostescalationtohost(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostescalationToHost'
#
# class TblLnkhostescalationtohostgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostescalationToHostgroup'
#
# class TblLnkhostgrouptohost(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostgroupToHost'
#
# class TblLnkhostgrouptohostgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHostgroupToHostgroup'
#
# class TblLnkhosttemplatetocontact(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHosttemplateToContact'
#
# class TblLnkhosttemplatetocontactgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHosttemplateToContactgroup'
#
# class TblLnkhosttemplatetohost(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHosttemplateToHost'
#
# class TblLnkhosttemplatetohostgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkHosttemplateToHostgroup'
#
# class TblLnkhosttemplatetohosttemplate(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# idsort = models.IntegerField(db_column='idSort') # Field name made lowercase.
# idtable = models.IntegerField(primary_key=True, db_column='idTable') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkHosttemplateToHosttemplate'
#
# class TblLnkhosttemplatetovariabledefinition(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkHosttemplateToVariabledefinition'
#
# class TblLnkservicetocontact(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceToContact'
#
# class TblLnkservicetocontactgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceToContactgroup'
#
class TblLnkservicetohost(models.Model):
idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
idslave = models.IntegerField(db_column='idSlave') # Field name made lowercase.
exclude = models.IntegerField()
class Meta:
db_table = u'tbl_lnkServiceToHost'
#
# class TblLnkservicetohostgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceToHostgroup'
#
# class TblLnkservicetoservicegroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceToServicegroup'
#
class TblLnkservicetoservicetemplate(models.Model):
idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
idslave = models.IntegerField(db_column='idSlave') # Field name made lowercase.
idsort = models.IntegerField(db_column='idSort') # Field name made lowercase.
idtable = models.IntegerField(db_column='idTable') # Field name made lowercase.
class Meta:
db_table = u'tbl_lnkServiceToServicetemplate'
#
# class TblLnkservicetovariabledefinition(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkServiceToVariabledefinition'
#
# class TblLnkservicedependencytohostDh(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicedependencyToHost_DH'
#
# class TblLnkservicedependencytohostH(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicedependencyToHost_H'
#
# class TblLnkservicedependencytohostgroupDh(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicedependencyToHostgroup_DH'
#
# class TblLnkservicedependencytohostgroupH(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicedependencyToHostgroup_H'
#
# class TblLnkservicedependencytoserviceDs(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# strslave = models.CharField(max_length=765, db_column='strSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicedependencyToService_DS'
#
# class TblLnkservicedependencytoserviceS(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# strslave = models.CharField(max_length=765, db_column='strSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicedependencyToService_S'
#
# class TblLnkservicedependencytoservicegroupDs(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicedependencyToServicegroup_DS'
#
# class TblLnkservicedependencytoservicegroupS(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicedependencyToServicegroup_S'
#
# class TblLnkserviceescalationtocontact(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceescalationToContact'
#
# class TblLnkserviceescalationtocontactgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceescalationToContactgroup'
#
# class TblLnkserviceescalationtohost(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceescalationToHost'
#
# class TblLnkserviceescalationtohostgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceescalationToHostgroup'
#
# class TblLnkserviceescalationtoservice(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# strslave = models.CharField(max_length=765, db_column='strSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceescalationToService'
#
# class TblLnkserviceescalationtoservicegroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServiceescalationToServicegroup'
#
# class TblLnkservicegrouptoservice(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslaveh = models.IntegerField(primary_key=True, db_column='idSlaveH') # Field name made lowercase.
# idslavehg = models.IntegerField(primary_key=True, db_column='idSlaveHG') # Field name made lowercase.
# idslaves = models.IntegerField(primary_key=True, db_column='idSlaveS') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicegroupToService'
#
# class TblLnkservicegrouptoservicegroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicegroupToServicegroup'
#
# class TblLnkservicetemplatetocontact(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicetemplateToContact'
#
# class TblLnkservicetemplatetocontactgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicetemplateToContactgroup'
#
# class TblLnkservicetemplatetohost(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicetemplateToHost'
#
# class TblLnkservicetemplatetohostgroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicetemplateToHostgroup'
#
# class TblLnkservicetemplatetoservicegroup(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkServicetemplateToServicegroup'
#
# class TblLnkservicetemplatetoservicetemplate(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# idsort = models.IntegerField(db_column='idSort') # Field name made lowercase.
# idtable = models.IntegerField(primary_key=True, db_column='idTable') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkServicetemplateToServicetemplate'
#
# class TblLnkservicetemplatetovariabledefinition(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_lnkServicetemplateToVariabledefinition'
#
# class TblLnktimeperiodtotimeperiod(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkTimeperiodToTimeperiod'
#
# class TblLnktimeperiodtotimeperioduse(models.Model):
# idmaster = models.IntegerField(primary_key=True, db_column='idMaster') # Field name made lowercase.
# idslave = models.IntegerField(primary_key=True, db_column='idSlave') # Field name made lowercase.
# exclude = models.IntegerField()
# class Meta:
# db_table = u'tbl_lnkTimeperiodToTimeperiodUse'
#
# class TblLogbook(models.Model):
# id = models.BigIntegerField(primary_key=True)
# time = models.DateTimeField()
# user = models.CharField(max_length=765)
# ipadress = models.CharField(max_length=765)
# domain = models.CharField(max_length=765)
# entry = models.TextField()
# class Meta:
# db_table = u'tbl_logbook'
#
# class TblMenu(models.Model):
# mnuid = models.IntegerField(primary_key=True, db_column='mnuId') # Field name made lowercase.
# mnutopid = models.IntegerField(db_column='mnuTopId') # Field name made lowercase.
# mnugrpid = models.IntegerField(db_column='mnuGrpId') # Field name made lowercase.
# mnucntid = models.IntegerField(db_column='mnuCntId') # Field name made lowercase.
# mnuname = models.CharField(max_length=765, db_column='mnuName') # Field name made lowercase.
# mnulink = models.CharField(max_length=765, db_column='mnuLink') # Field name made lowercase.
# mnuactive = models.IntegerField(db_column='mnuActive') # Field name made lowercase.
# mnuorderid = models.IntegerField(db_column='mnuOrderId') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_menu'
#
# class TblRelationinformation(models.Model):
# id = models.IntegerField(primary_key=True)
# master = models.CharField(max_length=765)
# tablename1 = models.CharField(max_length=765, db_column='tableName1') # Field name made lowercase.
# tablename2 = models.CharField(max_length=765, db_column='tableName2') # Field name made lowercase.
# fieldname = models.CharField(max_length=765, db_column='fieldName') # Field name made lowercase.
# linktable = models.CharField(max_length=765, db_column='linkTable') # Field name made lowercase.
# target1 = models.CharField(max_length=765)
# target2 = models.CharField(max_length=765)
# targetkey = models.CharField(max_length=765, db_column='targetKey') # Field name made lowercase.
# fullrelation = models.IntegerField(db_column='fullRelation') # Field name made lowercase.
# flags = models.CharField(max_length=765)
# type = models.IntegerField()
# class Meta:
# db_table = u'tbl_relationinformation'
#
class TblService(models.Model):
config_name = models.CharField(max_length=765)
host_name = models.IntegerField()
host_name_tploptions = models.IntegerField()
hostgroup_name = models.IntegerField()
hostgroup_name_tploptions = models.IntegerField()
service_description = models.CharField(max_length=765)
display_name = models.CharField(max_length=765)
servicegroups = models.IntegerField()
servicegroups_tploptions = models.IntegerField()
use_template = models.IntegerField()
use_template_tploptions = models.IntegerField()
check_command = models.TextField()
is_volatile = models.IntegerField()
initial_state = models.CharField(max_length=60)
max_check_attempts = models.IntegerField(null=True, blank=True)
check_interval = models.IntegerField(null=True, blank=True)
retry_interval = models.IntegerField(null=True, blank=True)
active_checks_enabled = models.IntegerField()
passive_checks_enabled = models.IntegerField()
check_period = models.IntegerField()
parallelize_check = models.IntegerField()
obsess_over_service = models.IntegerField()
check_freshness = models.IntegerField()
freshness_threshold = models.IntegerField(null=True, blank=True)
event_handler = models.IntegerField()
event_handler_enabled = models.IntegerField()
low_flap_threshold = models.IntegerField(null=True, blank=True)
high_flap_threshold = models.IntegerField(null=True, blank=True)
flap_detection_enabled = models.IntegerField()
flap_detection_options = models.CharField(max_length=60)
process_perf_data = models.IntegerField()
retain_status_information = models.IntegerField()
retain_nonstatus_information = models.IntegerField()
notification_interval = models.IntegerField(null=True, blank=True)
first_notification_delay = models.IntegerField(null=True, blank=True)
notification_period = models.IntegerField()
notification_options = models.CharField(max_length=60)
notifications_enabled = models.IntegerField()
contacts = models.IntegerField()
contacts_tploptions = models.IntegerField()
contact_groups = models.IntegerField()
contact_groups_tploptions = models.IntegerField()
stalking_options = models.CharField(max_length=60)
notes = models.CharField(max_length=765)
notes_url = models.CharField(max_length=765)
action_url = models.CharField(max_length=765)
icon_image = models.CharField(max_length=1500)
icon_image_alt = models.CharField(max_length=765)
use_variables = models.IntegerField()
name = models.CharField(max_length=765)
register = models.CharField(max_length=3)
active = models.CharField(max_length=3)
last_modified = models.DateTimeField()
access_group = models.IntegerField()
config_id = models.IntegerField()
import_hash = models.CharField(max_length=765)
class Meta:
db_table = u'tbl_service'
# class TblServicedependency(models.Model):
# id = models.IntegerField(primary_key=True)
# config_name = models.TextField(unique=True)
# dependent_host_name = models.IntegerField()
# dependent_hostgroup_name = models.IntegerField()
# dependent_service_description = models.IntegerField()
# dependent_servicegroup_name = models.IntegerField()
# host_name = models.IntegerField()
# hostgroup_name = models.IntegerField()
# service_description = models.IntegerField()
# servicegroup_name = models.IntegerField()
# inherits_parent = models.IntegerField()
# execution_failure_criteria = models.CharField(max_length=60)
# notification_failure_criteria = models.CharField(max_length=60)
# dependency_period = models.IntegerField()
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# import_hash = models.CharField(max_length=765)
# class Meta:
# db_table = u'tbl_servicedependency'
#
# class TblServiceescalation(models.Model):
# id = models.IntegerField(primary_key=True)
# config_name = models.CharField(max_length=765)
# host_name = models.IntegerField()
# hostgroup_name = models.IntegerField()
# service_description = models.IntegerField()
# servicegroup_name = models.IntegerField()
# contacts = models.IntegerField()
# contact_groups = models.IntegerField()
# first_notification = models.IntegerField(null=True, blank=True)
# last_notification = models.IntegerField(null=True, blank=True)
# notification_interval = models.IntegerField(null=True, blank=True)
# escalation_period = models.IntegerField()
# escalation_options = models.CharField(max_length=60)
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField()
# import_hash = models.CharField(max_length=765)
# class Meta:
# db_table = u'tbl_serviceescalation'
#
# class TblServiceextinfo(models.Model):
# id = models.IntegerField(primary_key=True)
# host_name = models.IntegerField(unique=True)
# service_description = models.IntegerField(unique=True)
# notes = models.CharField(max_length=765)
# notes_url = models.CharField(max_length=765)
# action_url = models.CharField(max_length=765)
# statistic_url = models.CharField(max_length=765)
# icon_image = models.CharField(max_length=1500)
# icon_image_alt = models.CharField(max_length=765)
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# import_hash = models.CharField(max_length=765)
# class Meta:
# db_table = u'tbl_serviceextinfo'
#
# class TblServicegroup(models.Model):
# id = models.IntegerField(primary_key=True)
# servicegroup_name = models.TextField(unique=True)
# alias = models.CharField(max_length=765)
# members = models.IntegerField()
# servicegroup_members = models.IntegerField()
# notes = models.CharField(max_length=765, blank=True)
# notes_url = models.CharField(max_length=765, blank=True)
# action_url = models.CharField(max_length=765, blank=True)
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# class Meta:
# db_table = u'tbl_servicegroup'
#
# class TblServicetemplate(models.Model):
# id = models.IntegerField(primary_key=True)
# template_name = models.TextField(unique=True)
# host_name = models.IntegerField()
# host_name_tploptions = models.IntegerField()
# hostgroup_name = models.IntegerField()
# hostgroup_name_tploptions = models.IntegerField()
# service_description = models.CharField(max_length=765)
# display_name = models.CharField(max_length=765)
# servicegroups = models.IntegerField()
# servicegroups_tploptions = models.IntegerField()
# use_template = models.IntegerField()
# use_template_tploptions = models.IntegerField()
# check_command = models.TextField()
# is_volatile = models.IntegerField()
# initial_state = models.CharField(max_length=60)
# max_check_attempts = models.IntegerField(null=True, blank=True)
# check_interval = models.IntegerField(null=True, blank=True)
# retry_interval = models.IntegerField(null=True, blank=True)
# active_checks_enabled = models.IntegerField()
# passive_checks_enabled = models.IntegerField()
# check_period = models.IntegerField()
# parallelize_check = models.IntegerField()
# obsess_over_service = models.IntegerField()
# check_freshness = models.IntegerField()
# freshness_threshold = models.IntegerField(null=True, blank=True)
# event_handler = models.IntegerField()
# event_handler_enabled = models.IntegerField()
# low_flap_threshold = models.IntegerField(null=True, blank=True)
# high_flap_threshold = models.IntegerField(null=True, blank=True)
# flap_detection_enabled = models.IntegerField()
# flap_detection_options = models.CharField(max_length=60)
# process_perf_data = models.IntegerField()
# retain_status_information = models.IntegerField()
# retain_nonstatus_information = models.IntegerField()
# notification_interval = models.IntegerField(null=True, blank=True)
# first_notification_delay = models.IntegerField(null=True, blank=True)
# notification_period = models.IntegerField()
# notification_options = models.CharField(max_length=60)
# notifications_enabled = models.IntegerField()
# contacts = models.IntegerField()
# contacts_tploptions = models.IntegerField()
# contact_groups = models.IntegerField()
# contact_groups_tploptions = models.IntegerField()
# stalking_options = models.CharField(max_length=60)
# notes = models.CharField(max_length=765)
# notes_url = models.CharField(max_length=765)
# action_url = models.CharField(max_length=765)
# icon_image = models.CharField(max_length=1500)
# icon_image_alt = models.CharField(max_length=765)
# use_variables = models.IntegerField()
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# import_hash = models.CharField(max_length=765)
# class Meta:
# db_table = u'tbl_servicetemplate'
#
# class TblSettings(models.Model):
# id = models.IntegerField(primary_key=True)
# category = models.CharField(max_length=60)
# name = models.CharField(max_length=90, unique=True)
# value = models.CharField(max_length=765)
# class Meta:
# db_table = u'tbl_settings'
#
# class TblTablestatus(models.Model):
# id = models.IntegerField(primary_key=True)
# tablename = models.CharField(max_length=765, db_column='tableName') # Field name made lowercase.
# domainid = models.IntegerField(db_column='domainId') # Field name made lowercase.
# updatetime = models.DateTimeField(db_column='updateTime') # Field name made lowercase.
# class Meta:
# db_table = u'tbl_tablestatus'
#
# class TblTimedefinition(models.Model):
# id = models.IntegerField(primary_key=True)
# tipid = models.IntegerField(db_column='tipId') # Field name made lowercase.
# definition = models.CharField(max_length=765)
# range = models.TextField()
# last_modified = models.DateTimeField()
# class Meta:
# db_table = u'tbl_timedefinition'
#
# class TblTimeperiod(models.Model):
# id = models.IntegerField(primary_key=True)
# timeperiod_name = models.TextField(unique=True)
# alias = models.CharField(max_length=765)
# exclude = models.IntegerField()
# use_template = models.IntegerField()
# name = models.CharField(max_length=765)
# register = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# last_modified = models.DateTimeField()
# access_group = models.IntegerField()
# config_id = models.IntegerField(unique=True)
# class Meta:
# db_table = u'tbl_timeperiod'
#
# class TblUser(models.Model):
# id = models.IntegerField(primary_key=True)
# username = models.TextField(unique=True)
# alias = models.CharField(max_length=765)
# password = models.CharField(max_length=765)
# admin_enable = models.CharField(max_length=3)
# wsauth = models.CharField(max_length=3)
# active = models.CharField(max_length=3)
# nodelete = models.CharField(max_length=3)
# language = models.CharField(max_length=60)
# domain = models.IntegerField()
# last_login = models.DateTimeField()
# last_modified = models.DateTimeField()
# class Meta:
# db_table = u'tbl_user'
#
# class TblVariabledefinition(models.Model):
# id = models.IntegerField(primary_key=True)
# name = models.CharField(max_length=765)
# value = models.CharField(max_length=765)
# last_modified = models.DateTimeField()
# class Meta:
# db_table = u'tbl_variabledefinition'
|
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^index1', views.Dname.as_view()),
]
|
import csv
# import os
def convert_tsv_to_csv(input, out):
    # if os.path.exists(out):
    #     raise ValueError("Output file already exists")
    with open(input, newline='') as src, open(out, 'w', newline='') as dst:
        reader = csv.reader(src, dialect=csv.excel_tab)
        writer = csv.writer(dst, dialect='excel')
        for row in reader:
            writer.writerow(row)
def convert_csv_to_tsv(input, out):
    # if os.path.exists(out):
    #     raise ValueError("Output file already exists")
    with open(input, newline='') as src, open(out, 'w', newline='') as dst:
        reader = csv.reader(src, dialect='excel')
        writer = csv.writer(dst, dialect=csv.excel_tab)
        for row in reader:
            writer.writerow(row)
if __name__ == "__main__":
csv_file = './data/sample_csv-1.csv'
tsv_file = './data/csv_dmo.tsv'
convert_csv_to_tsv(csv_file, tsv_file)
print('csv to tsv, Done')
convert_tsv_to_csv(tsv_file, csv_file)
print('tsv to csv, Done')
|
# tests grabbed from:
# https://github.com/tidyverse/dplyr/blob/master/tests/testthat/test-slice.r
from pandas.testing import assert_frame_equal
from pipda.context import ContextError
import pytest
from datar.core.grouped import DataFrameRowwise
from datar.all import *
from datar.datasets import mtcars
from datar.dplyr.dslice import _n_from_prop
def test_empty_slice_returns_input():
df = tibble(x=[1,2,3])
assert slice(df).equals(df)
def test_slice_handles_numeric_input():
g = mtcars >> arrange(f.cyl) >> group_by(f.cyl)
res = g >> slice(1)
assert nrow(res) == 3
exp = g >> filter(row_number() == 1)
assert_frame_equal(res, exp)
res1 = mtcars >> slice(1)
res2 = mtcars >> filter(row_number() == 1)
assert_frame_equal(res1, res2)
def test_slice_silently_ignores_out_of_range_values():
res1 = slice(mtcars, c(2, 100))
res2 = slice(mtcars, 2)
assert_frame_equal(res1, res2)
g = group_by(mtcars, f.cyl)
res1 = slice(g, c(2, 100))
res2 = slice(g, 2)
assert_frame_equal(res1, res2)
def test_slice_works_with_negative_indices():
res = slice(mtcars, ~f[:2])
exp = tail(mtcars, -2)
assert_frame_equal(res, exp)
def test_slice_works_with_grouped_data():
g = mtcars >> arrange(f.cyl) >> group_by(f.cyl)
res = slice(g, f[:2])
exp = filter(g, row_number() < 3)
assert_frame_equal(res, exp)
res = slice(g, ~f[:2])
exp = filter(g, row_number() >= 3)
assert res.equals(exp)
g = group_by(tibble(x=c(1,1,2,2,2)), f.x)
out = group_keys(slice(g, 3, _preserve=True)) >> pull(f.x, to='list')
assert out == [1,2]
out = group_keys(slice(g, 3, _preserve=False)) >> pull(f.x, to='list')
assert out == [2]
def test_slice_gives_correct_rows():
a = tibble(value=[f"row{i}" for i in range(1,11)])
out = slice(a, c(1,2,3)) >> pull(f.value, to='list')
assert out == ['row1', 'row2', 'row3']
out = slice(a, c(4,6,9)) >> pull(f.value, to='list')
assert out == ['row4', 'row6', 'row9']
a = tibble(
value=[f"row{i}" for i in range(1,11)],
group=rep([1,2], each=5)
) >> group_by(f.group)
out = slice(a, f[1:3]) >> pull(f.value, to='list')
assert out == [f'row{i}' for i in [1,2,3, 6,7,8]]
out = slice(a, c(2,4)) >> pull(f.value, to='list')
assert out == [f'row{i}' for i in [2,4,7,9]]
def test_slice_handles_na():
df = tibble(x=[1,2,3])
assert nrow(slice(df, NA)) == 0
assert nrow(slice(df, c(1, NA))) == 1
out = df >> slice(c(~c(1), NA)) >> nrow()
assert out == 2
df = tibble(x=[1,2,3,4], g=rep([1,2], 2)) >> group_by(f.g)
assert nrow(slice(df, c(1, NA))) == 2
out = df >> slice(c(~c(1), NA)) >> nrow()
assert out == 2
def test_slice_handles_logical_NA():
df = tibble(x=[1,2,3])
assert nrow(slice(df, NA)) == 0
def test_slice_handles_empty_df():
df = tibble(x=[])
res = df >> slice(f[:3])
assert nrow(res) == 0
assert names(res) == ["x"]
def test_slice_works_fine_if_n_gt_nrow():
by_slice = mtcars >> arrange(f.cyl) >> group_by(f.cyl)
slice_res = by_slice >> slice(8)
filter_res = by_slice >> group_by(f.cyl) >> filter(row_number() == 8)
assert slice_res.equals(filter_res)
def test_slice_strips_grouped_indices():
res = mtcars >> group_by(f.cyl) >> slice(1) >> mutate(mpgplus=f.mpg+1)
assert nrow(res) == 3
assert group_rows(res) == [[0], [1], [2]]
def test_slice_works_with_0col_dfs():
out = tibble(a=[1,2,3]) >> select(~f.a) >> slice(1) >> nrow()
assert out == 1
def test_slice_correctly_computes_positive_indices_from_negative_indices():
x = tibble(y=range(1,11))
# negative in dplyr meaning exclusive
assert slice(x, ~f[10:30]).equals(tibble(y=range(1,10)))
def test_slice_accepts_star_args():
out1 = slice(mtcars, 1, 2)
out2 = slice(mtcars, [1,2])
assert out1.equals(out2)
out3 = slice(mtcars, 1, n())
out4 = slice(mtcars, c(1, nrow(mtcars)))
assert out3.equals(out4)
g = mtcars >> group_by(f.cyl)
out5 = slice(g, 1, n())
out6 = slice(g, c(1, n()))
assert out5.equals(out6)
def test_slice_does_not_evaluate_the_expression_in_empty_groups():
res = mtcars >> \
group_by(f.cyl) >> \
filter(f.cyl==6) >> \
slice(f[:2])
assert nrow(res) == 2
# sample_n is Superseded in favor of slice_sample
# res = mtcars >> \
# group_by(f.cyl) >> \
# filter(f.cyl==6) >> \
# sample_n(size=3)
# assert nrow(res) == 3
def test_slice_handles_df_columns():
df = tibble(x=[1,2], y=tibble(a=[1,2], b=[3,4]), z=tibble(A=[1,2], B=[3,4]))
out = slice(df, 1)
assert out.equals(df.iloc[[0], :])
gdf = group_by(df, f.x)
assert slice(gdf, 1).equals(gdf)
# TODO: group_by a stacked df is not supported yet
gdf = group_by(df, f['y$a'], f['y$b'])
assert slice(gdf, 1).equals(gdf)
gdf = group_by(df, f['z$A'], f['z$B'])
assert slice(gdf, 1).equals(gdf)
# # Slice variants ----------------------------------------------------------
def test_functions_silently_truncate_results():
df = tibble(x=range(1,6))
out = df >> slice_head(n=6) >> nrow()
assert out == 5
out = df >> slice_tail(n=6) >> nrow()
assert out == 5
out = df >> slice_sample(n=6) >> nrow()
assert out == 5
out = df >> slice_min(f.x, n=6) >> nrow()
assert out == 5
out = df >> slice_max(f.x, n=6) >> nrow()
assert out == 5
def test_proportion_computed_correctly():
df = tibble(x=range(1,11))
out = df >> slice_head(prop=.11) >> nrow()
assert out == 1
out = df >> slice_tail(prop=.11) >> nrow()
assert out == 1
out = df >> slice_sample(prop=.11) >> nrow()
assert out == 1
out = df >> slice_min(f.x, prop=.11) >> nrow()
assert out == 1
out = df >> slice_max(f.x, prop=.11) >> nrow()
assert out == 1
out = df >> slice_max(f.x, prop=.11, with_ties=False) >> nrow()
assert out == 1
out = df >> slice_min(f.x, prop=.11, with_ties=False) >> nrow()
assert out == 1
def test_min_and_max_return_ties_by_default():
df = tibble(x=c(1,1,1,2,2))
out = df >> slice_min(f.x) >> nrow()
assert out == 3
out = df >> slice_max(f.x) >> nrow()
assert out == 2
out = df >> slice_min(f.x, with_ties=False) >> nrow()
assert out == 1
out = df >> slice_max(f.x, with_ties=False) >> nrow()
assert out == 1
def test_min_and_max_reorder_results():
df = tibble(id=range(1,5), x=c(2,3,1,2))
out = df >> slice_min(f.x, n=2) >> pull(f.id, to='list')
assert out == [3,1,4]
out = df >> slice_min(f.x, n=2, with_ties=False) >> pull(f.id, to='list')
assert out == [3,1]
out = df >> slice_max(f.x, n=2) >> pull(f.id, to='list')
assert out == [2,1,4]
out = df >> slice_max(f.x, n=2, with_ties=False) >> pull(f.id, to='list')
assert out == [2,1]
def test_min_and_max_ignore_nas():
df = tibble(
id=range(1,5),
x=c(2,NA,1,2),
y=[NA]*4
)
out = df >> slice_min(f.x, n=2) >> pull(f.id, to='list')
assert out == [3,1,4]
out = df >> slice_min(f.y, n=2) >> nrow()
assert out == 0
out = df >> slice_max(f.x, n=2) >> pull(f.id, to='list')
assert out == [1,4]
out = df >> slice_max(f.y, n=2) >> nrow()
assert out == 0
def test_arguments_to_sample_are_passed_along():
df = tibble(x=range(1,101), wt=c(1, rep(0, 99)))
out = df >> slice_sample(n=1, weight_by=f.wt) >> pull(f.x, to='list')
assert out == [1]
out = df >> slice_sample(n=2, weight_by=f.wt, replace=True) >> pull(f.x, to='list')
assert out == [1,1]
def test_slice_any_checks_for_empty_args_kwargs():
df = tibble(x=range(1,11))
# python recognize n=5
# with pytest.raises(ValueError):
# slice_head(df, 5)
# with pytest.raises(ValueError):
# slice_tail(df, 5)
with pytest.raises(TypeError):
df >> slice_min(n=5)
with pytest.raises(TypeError):
df >> slice_max(n=5)
# with pytest.raises(ValueError):
# slice_sample(df, 5)
def test_slice_any_checks_for_constant_n_and_prop():
df = tibble(x=range(1,11))
with pytest.raises(ContextError):
slice_head(df, n=f.x) # ok with n()
with pytest.raises(ContextError):
slice_head(df, prop=f.x)
with pytest.raises(ContextError):
slice_tail(df, n=f.x)
with pytest.raises(ContextError):
slice_tail(df, prop=f.x)
with pytest.raises(ContextError):
slice_min(df, f.x, n=f.x)
with pytest.raises(ContextError):
slice_min(df, f.x, prop=f.x)
with pytest.raises(ContextError):
slice_max(df, f.x, n=f.x)
with pytest.raises(ContextError):
slice_max(df, f.x, prop=f.x)
with pytest.raises(ContextError):
slice_sample(df, n=f.x)
with pytest.raises(ContextError):
slice_sample(df, prop=f.x)
def test_slice_sample_does_not_error_on_0rows():
df = tibble(dummy=[], weight=[])
res = slice_sample(df, prop=.5, weight_by=f.weight)
assert nrow(res) == 0
# # Errors ------------------------------------------------------------------
def test_rename_errors_with_invalid_grouped_df():
df = tibble(x=[1,2,3])
# Incompatible type
with pytest.raises(TypeError):
slice(df, object())
with pytest.raises(TypeError):
slice(df, {'a': 1})
# Mix of positive and negative integers
with pytest.raises(ValueError):
mtcars >> slice(c(~c(1), 2))
with pytest.raises(ValueError):
mtcars >> slice(c(f[2:4], ~c(1)))
# n and prop are carefully validated
# with pytest.raises(ValueError):
# _n_from_prop(10, n=1, prop=1)
with pytest.raises(TypeError):
_n_from_prop(10, n="a")
with pytest.raises(TypeError):
_n_from_prop(10, prop="a")
with pytest.raises(ValueError):
_n_from_prop(10, n=-1)
with pytest.raises(ValueError):
_n_from_prop(10, prop=-1)
with pytest.raises(TypeError):
_n_from_prop(10, n=n())
with pytest.raises(TypeError):
_n_from_prop(10, prop=n())
## tests for datar
def test_mixed_rows():
df = tibble(x=range(5))
# order kept
# 0 1 2 3 4
# -3 -1
# 3 # 1-based
out = slice(df, c(-c(1,3), 4)) >> pull(f.x, to='list')
assert out == [2, 4, 3]
# 0 1 2 3 4
# -2 -1
# 4
out = slice(df, c(-f[:2], 4)) >> pull(f.x, to='list')
assert out == [3, 4]
# 0 1 2 3 4
# 1 3
# -1
out = slice(df, c(~c(1,3), ~c(-1))) >> pull(f.x, to='list')
assert out == [1, 3]
out = df >> slice(c(~f[3:], ~c(1))) >> pull(f.x, to='list')
assert out == [1]
def test_slice_sample_n_defaults_to_1():
df = tibble(
g = rep([1,2], each=3),
x = seq(1,6)
)
out = df >> slice_sample(n=None)
assert dim(out) == (1, 2)
def test_slicex_on_grouped_data():
gf = tibble(
g = rep([1,2], each=3),
x = seq(1,6)
) >> group_by(f.g)
out = gf >> slice_min(f.x)
assert out.equals(tibble(g=[1,2], x=[1,4]))
out = gf >> slice_max(f.x)
assert out.equals(tibble(g=[1,2], x=[3,6]))
out = gf >> slice_sample()
assert dim(out) == (2, 2)
def test_n_from_prop():
assert _n_from_prop(1, prop=.5) == 0
assert _n_from_prop(2, prop=.5) == 1
assert _n_from_prop(4, prop=.5) == 2
# slice_head/tail on grouped data
def test_slice_head_tail_on_grouped_data():
df = tibble(g=[1,1,1,2,2,2], x=[1,2,3,4,5,6]) >> group_by(f.g)
out = slice_head(df, 1) >> ungroup()
assert_frame_equal(out, tibble(g=[1,2], x=[1,4]))
out = slice_tail(df, 1) >> ungroup()
assert_frame_equal(out, tibble(g=[1,2], x=[3,6]))
def test_slice_family_on_rowwise_df():
df = tibble(x=f[1:6]) >> rowwise()
out = df >> slice([1,2,3])
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_head(n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_tail(n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_min(f.x, n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_max(f.x, n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_sample(n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
|
def format_out(out):
# TODO: Why do I need to do this?
maybe_newline = '\n' if out.endswith('\n') else ''
return '\n'.join(
map(lambda line: ';; {}'.format(line), out.splitlines())
) + maybe_newline
def format(message):
if 'value' in message:
return message['value']
if 'nrepl.middleware.caught/throwable' in message:
return message.get('nrepl.middleware.caught/throwable')
if 'out' in message:
return format_out(message['out'])
if 'append' in message:
return message['append']
if 'err' in message:
return format_out(message.get('err'))
if 'versions' in message:
versions = message.get('versions')
clojure_version = versions.get('clojure').get('version-string')
nrepl_version = versions.get('nrepl').get('version-string')
return format_out(
'Clojure {}\nnREPL {}'.format(clojure_version, nrepl_version)
) + '\n'
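# A minimal sanity-check sketch (hypothetical nREPL messages, not captured from a live session):
if __name__ == '__main__':
    assert format({'value': '42'}) == '42'
    assert format({'out': 'hello\nworld\n'}) == ';; hello\n;; world\n'
    assert format({'err': 'boom\n'}) == ';; boom\n'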
|
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""googledatastore client."""
import os
import threading
from googledatastore import helper
from googledatastore.connection import *
from googledatastore.datastore_v1_pb2 import *
_conn_holder = {} # thread id -> thread-local connection.
_options = {} # Global options.
# Guards all access to _options and writes to _conn_holder.
_rlock = threading.RLock()
def set_options(**kwargs):
"""Set datastore connection options.
Args:
credentials: oauth2client.Credentials to authorize the
connection.
dataset: the dataset to send RPCs to.
host: the host used to construct the datastore API, default to Google
APIs production server.
"""
with(_rlock):
_options.update(kwargs)
_conn_holder.clear()
def get_default_connection():
"""Return the default datastore connection.
dataset defaults to helper.get_dataset_from_env(), host to
os.getenv('DATASTORE_HOST'), and credentials to
helper.get_credentials_from_env().
Use set_options to override defaults.
"""
tid = id(threading.current_thread())
conn = _conn_holder.get(tid)
if not conn:
with(_rlock):
# No other thread would insert a value in our slot, so no need
# to recheck existence inside the lock.
if 'dataset' not in _options:
_options['dataset'] = os.getenv('DATASTORE_DATASET')
if 'host' not in _options:
_options['host'] = os.getenv('DATASTORE_HOST')
if 'credentials' not in _options:
_options['credentials'] = helper.get_credentials_from_env()
# We still need the lock when caching the thread local connection so we
# don't race with _conn_holder.clear() in set_options().
_conn_holder[tid] = conn = Datastore(**_options)
return conn
def lookup(request):
"""See connection.Datastore.lookup."""
return get_default_connection().lookup(request)
def run_query(request):
"""See connection.Datastore.run_query."""
return get_default_connection().run_query(request)
def begin_transaction(request):
"""See connection.Datastore.begin_transaction."""
return get_default_connection().begin_transaction(request)
def commit(request):
"""See connection.Datastore.commit."""
return get_default_connection().commit(request)
def rollback(request):
"""See connection.Datastore.rollback."""
return get_default_connection().rollback(request)
def allocate_ids(request):
"""See connection.Datastore.allocate_ids."""
return get_default_connection().allocate_ids(request)
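# A hedged usage sketch (request/response classes come from datastore_v1_pb2 via the
# wildcard import above; the dataset id and environment credentials are assumptions):
#   set_options(dataset='my-dataset-id')
#   req = BeginTransactionRequest()
#   resp = begin_transaction(req)
#   txn = resp.transaction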
|
# coding: utf-8
# # Joblib for Daniel:
#
# Trying to implement parallelism into Daniel's problem.
#
# ## Some Tests with random values
# I don't know if these quite match your data types
# In[135]:
import time
import tempfile
import shutil
import os
import numpy as np
from joblib import Parallel, delayed
from joblib import load, dump
# In[136]:
def griddata(gridpoints, tlayer, teff_logg_feh, method='linear', rescale=True):
"""Do what ever it does"""
# put a short wait.
time.sleep(0.5)
return np.sum(tlayer) * teff_logg_feh[0] + teff_logg_feh[1] + teff_logg_feh[2] # thing to test inputs
# In[137]:
def inside_loop(newatm, models, layer, column, gridpoints, teff_logg_feh):
tlayer = np.zeros(len(models))
    for indx, model in enumerate(models):
        tlayer[indx] = model[layer, column]
# print(" for layer = {0}, column = {1}".format(layer, column))
print("[Worker {0:d}] Layer {1:d} and Column {2:d} is about to griddata".format(os.getpid(), layer, column))
newatm[layer, column] = griddata(gridpoints, tlayer, teff_logg_feh, method='linear', rescale=True)
# In[138]:
layers = range(3)
columns = range(2)
gridpoints = 5
teff = 1000
logg = 1
feh = -0.01
model1 = np.array([[1, 2], [3, 4], [5, 6]])
model2 = np.array([[7, 8], [9, 10], [11, 12]])
models = [model1, model2, model1 * 2, model2 * 2] # random models
# In[139]:
# %%timeit
newatm = np.zeros([len(layers), len(columns)])
generator = (inside_loop(newatm, models, layer, column, gridpoints, (teff, logg, feh)) for layer in layers for column in columns)
for i in generator:
# print(newatm)
pass
print(newatm)
# In[ ]:
# %%timeit
# Turning parallel
newatm = np.zeros([len(layers), len(columns)])
print("newatm before parallel", newatm)
Parallel(n_jobs=-1, verbose=1) (delayed(inside_loop)(newatm, models, layer, column, gridpoints, (teff, logg, feh)) for layer in layers for column in columns)
time.sleep(0.5)
print("newatm after parallel", newatm)
# This runs in parallel but it does not return any data yet.
# Need to memmap the results
# ## Parallel over both loops with memapping
# Look here to implement the memmap to your solution:
# In[128]:
def inside_loop(newatm, models, layer, column, gridpoints, teff_logg_feh):
tlayer = np.zeros(len(models))
    for indx, model in enumerate(models):
        tlayer[indx] = model[layer, column]
newatm[layer, column] = griddata(gridpoints, tlayer, teff_logg_feh, method='linear', rescale=True)
def griddata(gridpoints, tlayer, teff_logg_feh, method='linear', rescale=True):
"""Do what ever it does"""
time.sleep(0.5)
return True # thing to test inputs
folder = tempfile.mkdtemp()
newatm_name = os.path.join(folder, 'newatm')
try:
# Pre-allocate a writeable shared memory map as a container for the
# results of the parallel computation
    newatm = np.memmap(newatm_name, dtype=float, shape=(len(layers), len(columns)), mode='w+')  # shape/dtype match the result grid
print("newatm before parallel", newatm)
Parallel(n_jobs=-1, verbose=1) (delayed(inside_loop)(newatm, models, layer, column, gridpoints, (teff, logg, feh)) for layer in layers for column in columns)
time.sleep(0.5)
print("newatm after parallel", newatm)
finally:
    # deleting temp files after testing the result in the example
try:
shutil.rmtree(folder)
except:
print("Failed to delete: " + folder)
# In[ ]:
# # Direct copy of Joblib memmaping example
# In[ ]:
def sum_row(input, output, i):
"""Compute the sum of a row in input and store it in output"""
sum_ = input[i, :].sum()
print("[Worker {0:d}] Sum for row {1:d} is {2:f}".format(os.getpid(), i, sum_))
output[i] = sum_
if __name__ == "__main__":
rng = np.random.RandomState(42)
folder = tempfile.mkdtemp()
samples_name = os.path.join(folder, 'samples')
sums_name = os.path.join(folder, 'sums')
try:
# Generate some data and an allocate an output buffer
samples = rng.normal(size=(10, int(1e6)))
# Pre-allocate a writeable shared memory map as a container for the
# results of the parallel computation
sums = np.memmap(sums_name, dtype=samples.dtype,
shape=samples.shape[0], mode='w+')
print("samples shape", samples.shape)
# Dump the input data to disk to free the memory
dump(samples, samples_name)
# Release the reference on the original in memory array and replace it
# by a reference to the memmap array so that the garbage collector can
# release the memory before forking. gc.collect() is internally called
# in Parallel just before forking.
samples = load(samples_name, mmap_mode='r')
# Fork the worker processes to perform computation concurrently
Parallel(n_jobs=4)(delayed(sum_row)(samples, sums, i)
for i in range(samples.shape[0]))
# Compare the results from the output buffer with the ground truth
print("Expected sums computed in the parent process:")
expected_result = samples.sum(axis=1)
print(expected_result)
print("Actual sums computed by the worker processes:")
print(sums)
assert np.allclose(expected_result, sums)
finally:
try:
shutil.rmtree(folder)
except:
print("Failed to delete: " + folder)
# In[ ]:
|
"""C6T Assembler First Pass
"""
from sys import stderr
import util
def pass1(parsed: list, program_counter: int = 0) -> int:
"""Handle the first pass, building the symbol table and macros.
Starts at the given program counter value. Returns the new program counter.
"""
util.macros.clear()
util.remove_local_symbols()
statements = util.Consumer(parsed)
for statement in statements:
if statement is None:
continue
elif isinstance(statement, util.Label):
if statement.name in util.symtab:
stderr.write(f"Label {statement.name} redefined\n")
else:
util.add_sym(util.Symbol(statement.name, program_counter))
elif isinstance(statement, util.Macro):
util.add_macro(statement)
elif isinstance(statement, list):
opcode = statement[0]
if len(statement) > 1:
args = statement[1:]
else:
args = []
if isinstance(opcode, str):
if opcode in util.macros:
statements.add(util.macros[opcode].expand(args))
elif opcode in util.opcodes:
program_counter += util.opcodes[opcode].byte_count(args)
else:
stderr.write(f"Illegal opcode {opcode}\n")
return program_counter
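# A hedged usage sketch (assumes `parsed` comes from the project's own parser and holds
# util.Label objects and [opcode, *args] statement lists, as handled above):
#   end_pc = pass1(parsed, program_counter=0)
#   # util.symtab now maps label names to addresses; util.macros holds macro definitions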
|
def fitting(data):
    """Least-squares fit of y = a + b*x; returns [a, b, sigma2]."""
    n = len(data)
    if n <= 2:
        raise ValueError('Error! Not enough data!')
    # Accumulate the sums needed for the normal equations
    s_x = 0
    s_y = 0
    s_xx = 0
    s_xy = 0
    for i in range(n):
        entry = data[i]
        s_x += entry[0]
        s_y += entry[1]
        s_xx += entry[0]**2
        s_xy += entry[0]*entry[1]
    den = n * s_xx - s_x**2
    if abs(den) < 0.0000001:
        raise ValueError('Error! Denominator is zero!')
    a = (s_xx * s_y - s_x * s_xy) / den
    b = (n * s_xy - s_x * s_y) / den
    # Residual variance around the fitted line
    sigma2 = 0
    for i in range(n):
        entry = data[i]
        sigma2 += (entry[1] - (a + b*entry[0]))**2
    sigma2 = sigma2 / (n - 2)
    final = [a, b, sigma2]
    return final
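# A minimal usage sketch (synthetic (x, y) pairs; the data format follows the function above):
if __name__ == '__main__':
    sample = [(x, 2.0 + 0.5 * x + 0.01 * ((-1) ** x)) for x in range(10)]
    a, b, sigma2 = fitting(sample)
    print('intercept a = %.3f, slope b = %.3f, residual variance = %.5f' % (a, b, sigma2))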
|
#! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a simple message queue built on top of ZooKeeper. In order
# to be used in production it needs better error handling but it's
# still useful as a proof-of-concept.
# Why use ZooKeeper as a queue? Highly available by design and has
# great performance.
import sys
import threading
import time
from zkrest import ZooKeeper
class Queue(object):
def __init__(self, root, zk):
self.root = root
self.zk = zk
def put(self, data):
self.zk.create("%s/el-" % self.root, str(data), sequence=True, ephemeral=True)
# creating ephemeral nodes for easy cleanup
# in a real world scenario you should create
# normal sequential znodes
def fetch(self):
""" Pull an element from the queue
This function is not blocking if the queue is empty, it will
just return None.
"""
children = sorted(self.zk.get_children(self.root), \
lambda a, b: cmp(a['path'], b['path']))
if not children:
return None
try:
first = children[0]
self.zk.delete(first['path'], version=first['version'])
if 'data64' not in first:
return ''
else:
return first['data64'].decode('base64')
except (ZooKeeper.WrongVersion, ZooKeeper.NotFound):
# someone changed the znode between the get and delete
# this should not happen
# in practice you should retry the fetch
raise
def main():
zk = ZooKeeper()
zk.start_session(expire=60)
if not zk.exists('/queue'):
zk.create('/queue')
q = Queue('/queue', zk)
print 'Pushing to queue 1 ... 5'
map(q.put, [1,2,3,4,5])
print 'Extracting ...'
while True:
el = q.fetch()
if el is None:
break
print el
zk.close_session()
zk.delete('/queue')
print 'Done.'
if __name__ == '__main__':
sys.exit(main())
|
from .backend import InteractiveBackend
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objs as go
class InteractiveBackendPlotly(InteractiveBackend):
"""
Implementation of the interactive plotting backend for Plotly.
"""
@classmethod
def make_scattergl_map(cls, xs, ys):
"""
Makes a Plotly Scattergl object for coastlines and country borders.
Parameters
----------
xs, ys : list of lists
Lists of lists contain separated X- or Y-coordinates of paths.
Returns
-------
list
List contains a Plotly Scattergl objects.
"""
traces = []
for i in range(len(xs)):
traces.append(go.Scattergl(x=xs[i], y=ys[i], mode='lines', line=go.scattergl.Line(color="black", width=0.7),
name='map', showlegend=False, opacity=0.7))
return traces
@classmethod
def make_filled_contour(cls, x, y, z, variable_name, colormap, x_colorbar):
"""
Creates filled contours using specialized Plotly function.
Parameters
----------
x, y : array
Arrays contain X- or Y-coordinates of grid cells.
z : array
2-dimensional array contain values of a variable for every grid cell.
variable_name : str
Name of a variable.
colormap : str
Name of a colormap.
x_colorbar : float
Position of a colorbar on axis X.
Returns
-------
Plotly Contour object
Filled contours with a colorbar.
"""
return go.Contour(z=z, x=x, y=y, showlegend=True, name=variable_name,
autocontour=True, ncontours=10, showscale=True, colorscale=colormap,
colorbar=dict(outlinecolor='black', outlinewidth=0.5, bordercolor='white', x=x_colorbar))
@classmethod
def make_unfilled_contour(cls, xs, ys, color, value, variable_name, showlegend=True):
"""
Creates a Scattergl object for an unfilled contour.
Parameters
----------
xs, ys : list
Lists contain X- or Y-coordinates of a contour.
color : str
Color of a contour.
value : str
Value of a contour.
variable_name : str
Name of a variable.
showlegend : bool, default=True
Definition of displaying of a contour at a legend.
Returns
-------
Plotly Scattergl object
Scattergl object contains an unfilled contour.
"""
return go.Scattergl(x=xs, y=ys, mode='lines', line=go.scattergl.Line(color=color, width=1),
showlegend=showlegend, legendgroup=variable_name, name=value)
@classmethod
def make_colorbar(cls, colorset, valueset, tick_interval, variable_name, x_colorbar):
"""
Creates a colorbar for Plotly.
Parameters
----------
colorset : list of tuples
Set of ordered unique colors. List containing tuples mapping a normalized value to a HEX or RGB.
valueset : Numpy array
Set of ordered unique double values.
tick_interval : double
Value of intervals between ticks in a colorbar.
variable_name : str
Name of a variable.
x_colorbar : float
Position of a colorbar on axis X.
Returns
-------
Plotly Scattergl object
X- and Y-coordinates of a Scattergl object are not define. The object contains only a colorbar.
"""
return go.Scattergl(x=[None], y=[None], mode='markers', showlegend=False, legendgroup=variable_name,
marker=dict(colorscale=colorset, cmin=valueset[0], cmax=valueset[-1]+tick_interval,
colorbar=dict(tickvals=valueset, outlinecolor='black', outlinewidth=0.5, x=x_colorbar)))
@classmethod
def make_scattergl_streamline(cls, streamlines_path, color, showlegend=True):
"""
Makes a Scattergl object for streamlines.
Parameters
----------
streamlines_path : array
3-dimensional array contains X- and Y-coordinates of streamlines.
color : str
Defines a color of streamlines.
showlegend : bool, optional, default=True
Definition of displaying of streamlines at a legend.
Returns
-------
Plotly Scattergl object
"""
return go.Scattergl(x=streamlines_path.T[0], y=streamlines_path.T[1],
mode='lines', line=go.scattergl.Line(color=color, width=1),
showlegend=showlegend, legendgroup='streamlines', name='streamlines')
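# A hedged usage sketch (synthetic grid; in the real pipeline xs/ys/z come from model output):
#   x = np.linspace(0, 10, 50); y = np.linspace(0, 5, 25)
#   z = np.sin(x)[None, :] * np.cos(y)[:, None]
#   traces = [InteractiveBackendPlotly.make_filled_contour(x, y, z, 'demo', 'Viridis', 1.0)]
#   traces += InteractiveBackendPlotly.make_scattergl_map([[0, 10]], [[2.5, 2.5]])
#   go.Figure(data=traces).show()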
|
'''
simple if/else
'''
def main():
    a = 'x'
    if a == 'x':
        print('ok')
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
# =========================================================================
# Program: S1Processor
#
# Copyright (c) CESBIO. All rights reserved.
#
# See LICENSE for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notices for more information.
#
# =========================================================================
#
# Authors: Thierry KOLECK (CNES)
#
# =========================================================================
""" This module contains various utility functions"""
from osgeo import ogr, osr
import xml.etree.ElementTree as ET
def get_relative_orbit(manifest):
root=ET.parse(manifest)
return int(root.find("metadataSection/metadataObject/metadataWrap/xmlData/{http://www.esa.int/safe/sentinel-1.0}orbitReference/{http://www.esa.int/safe/sentinel-1.0}relativeOrbitNumber").text)
def get_origin(manifest):
"""Parse the coordinate of the origin in the manifest file
Args:
manifest: The manifest from which to parse the coordinates of the origin
Returns:
the parsed coordinates (or throw an exception if they could not be parsed)
"""
with open(manifest, "r") as save_file:
for line in save_file:
if "<gml:coordinates>" in line:
coor = line.replace(" <gml:coordinates>", "")\
.replace("</gml:coordinates>", "").split(" ")
coord = [(float(val.replace("\n", "").split(",")[0]),\
float(val.replace("\n", "")\
.split(",")[1]))for val in coor]
return coord[0], coord[1], coord[2], coord[3]
raise Exception("Coordinates not found in "+str(manifest))
def get_tile_origin_intersect_by_s1(grid_path, image):
"""
    Retrieve the list of MGRS tiles intersected by the S1 product.
Args:
grid_path: Path to the shapefile containing the MGRS tiles
image: S1 image as instance of S1DateAcquisition class
Returns:
a list of string of MGRS tiles names
"""
manifest = image.get_manifest()
s1_footprint = get_origin(manifest)
poly = ogr.Geometry(ogr.wkbPolygon)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(s1_footprint[0][1], s1_footprint[0][0])
ring.AddPoint(s1_footprint[1][1], s1_footprint[1][0])
ring.AddPoint(s1_footprint[2][1], s1_footprint[2][0])
ring.AddPoint(s1_footprint[3][1], s1_footprint[3][0])
ring.AddPoint(s1_footprint[0][1], s1_footprint[0][0])
poly.AddGeometry(ring)
driver = ogr.GetDriverByName("ESRI Shapefile")
data_source = driver.Open(grid_path, 0)
layer = data_source.GetLayer()
intersect_tile = []
for current_tile in layer:
tile_footprint = current_tile.GetGeometryRef()
intersection = poly.Intersection(tile_footprint)
if intersection.GetArea() != 0:
intersect_tile.append(current_tile.GetField('NAME'))
return intersect_tile
def get_orbit_direction(manifest):
"""This function returns the orbit direction from a S1 manifest file.
Args:
manifest: path to the manifest file
Returns:
"ASC" for ascending orbits, "DES" for descending
orbits. Throws an exception if manifest can not be parsed.
"""
with open(manifest, "r") as save_file:
for line in save_file:
if "<s1:pass>" in line:
if "DESCENDING" in line:
return "DES"
if "ASCENDING" in line:
return "ASC"
raise Exception("Orbit Directiction not found in "+str(manifest))
def convert_coord(tuple_list, in_epsg, out_epsg):
"""
Convert a list of coordinates from one epsg code to another
Args:
tuple_list: a list of tuples representing the coordinates
in_epsg: the input epsg code
out_epsg: the output epsg code
Returns:
a list of tuples representing the converted coordinates
"""
tuple_out = []
for in_coord in tuple_list:
lon = in_coord[0]
lat = in_coord[1]
in_spatial_ref = osr.SpatialReference()
in_spatial_ref.ImportFromEPSG(in_epsg)
out_spatial_ref = osr.SpatialReference()
out_spatial_ref.ImportFromEPSG(out_epsg)
coord_trans = osr.CoordinateTransformation(in_spatial_ref,\
out_spatial_ref)
coord = coord_trans.TransformPoint(lon, lat)
tuple_out.append(coord)
return tuple_out
def get_date_from_s1_raster(path_to_raster):
"""
    Small utility function that parses an S1 raster file name to extract the date.
Args:
path_to_raster: path to the s1 raster file
Returns:
a string representing the date
"""
return path_to_raster.split("/")[-1].split("-")[4]
def get_polar_from_s1_raster(path_to_raster):
"""
    Small utility function that parses an S1 raster file name to
extract polarization.
Args:
path_to_raster: path to the s1 raster file
Returns:
a string representing the polarization
"""
return path_to_raster.split("/")[-1].split("-")[3]
def get_platform_from_s1_raster(path_to_raster):
"""
    Small utility function that parses an S1 raster file name to extract the platform
Args:
path_to_raster: path to the s1 raster file
Returns:
a string representing the platform
"""
return path_to_raster.split("/")[-1].split("-")[0]
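# A hedged usage sketch (coordinates and EPSG codes are illustrative only):
#   convert_coord([(1.44, 43.60)], 4326, 32631)
#   -> list with one (x, y, z) tuple expressed in EPSG:32631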
|
import uuid
from datetime import datetime
from app import db
from sqlalchemy.ext.hybrid import hybrid_property
class Project(db.Model):
__tablename__ = "projects"
id = db.Column(db.Integer, primary_key=True)
    public_id = db.Column(db.String, unique=True, default=lambda: str(uuid.uuid4()))  # store the UUID as text so it binds cleanly on any backend
name = db.Column(db.String(255), nullable=True)
description = db.Column(db.Text(), nullable=False)
members = db.relationship(
"Member",
secondary="members_projects",
backref=db.backref("projects"),
lazy="joined",
)
permissions = db.relationship(
"MemberProject", backref=db.backref("projects"), lazy="joined"
)
project_id = db.Column(db.Integer, db.ForeignKey("projects.id", ondelete="CASCADE"))
project = db.relationship(
"Project", remote_side=[id], backref=db.backref("comments")
)
created_by_id = db.Column(db.Integer, db.ForeignKey("members.id"), nullable=False)
last_updated_by_id = db.Column(db.Integer, db.ForeignKey("members.id"))
@hybrid_property
def owner(self):
for member in self.members:
if member.id == self.created_by_id:
return member
@hybrid_property
def last_modified_by(self):
if self.last_updated_by_id is None:
return None
for member in self.members:
if member.id == self.last_updated_by_id:
return member
created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, nullable=True, onupdate=datetime.utcnow)
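# A hedged usage sketch (assumes an application context and an existing Member row `member`):
#   project = Project(name="Demo", description="Example project", created_by_id=member.id)
#   db.session.add(project)
#   db.session.commit()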
|
import unittest
import BasePage
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
caps = DesiredCapabilities.FIREFOX
caps['wires'] = True
class LoginTestCase(unittest.TestCase):
def setUp(self):
        self.driver = webdriver.Firefox(capabilities=caps)
self.driver.get("https://www.facebook.com/")
def test_login_incorrect_facebook(self):
login_page=BasePage.LoginPage(self.driver)
login_page.login('**************','***********') #enter username and password here
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
from setuptools import setup
setup(use_scm_version=True)
|
# -*- coding: utf-8 -*-
import unittest
import mock
from docktors.core import DecWrapper, decorated
class TestDecWrapper(unittest.TestCase):
def test__check_inputs_mandatory_arg_missing(self):
# GIVEN
props = dict(str_prop=dict(argtype=[str], mandatory=True))
inputs = dict(other_prop='value')
# WHEN
with self.assertRaises(SyntaxError) as cm:
DecWrapper('Test', inputs, props)
# THEN
self.assertEqual(
str(cm.exception),
"[Test] : Mandatory option 'str_prop' is missing."
)
def test__check_inputs_prop_undefined(self):
# GIVEN
props = {'int_prop': dict(argtype=[int], default=1)}
inputs = {'undefined_prop': 'toto'}
# WHEN
with self.assertRaises(SyntaxError) as cm:
DecWrapper('Test', inputs, props)
# THEN
self.assertEqual(
str(cm.exception),
"[Test] : Option 'undefined_prop' doesn't not exist."
)
def test__check_inputs_bad_int_type(self):
# GIVEN
props = {'int_prop': dict(argtype=int, default=1)}
inputs = {'int_prop': 'toto'}
# WHEN
with self.assertRaises(TypeError) as cm:
DecWrapper('Test', inputs, props)
# THEN
self.assertEqual(
str(cm.exception),
"[Test] : Option 'int_prop' bad type. Expected 'int'. Got 'str' instead."
)
def test__check_inputs_ok_int_type(self):
# GIVEN
props = {'int_prop': dict(argtype=int, default=1)}
inputs = {'int_prop': 5}
# WHEN
wrapper = DecWrapper('Test', inputs, props)
# THEN
self.assertEqual(wrapper.p('int_prop'), 5)
def test__check_inputs_ok_int_type_alternative(self):
# GIVEN
props = {'int_prop': dict(argtype=int, default=1, alternatives=[(str, lambda i: int(i))])}
inputs = {'int_prop': '5'}
# WHEN
wrapper = DecWrapper('test', inputs, props)
# THEN
self.assertEqual(wrapper.p('int_prop'), 5)
def test__check_inputs_ok_tuple_list_type(self):
# GIVEN
props = {'tuple_list_prop': dict(argtype=[(int, str)], default=list())}
inputs = {'tuple_list_prop': [(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')]}
# WHEN
wrapper = DecWrapper('Test', inputs, props)
# THEN
self.assertEqual(wrapper.p('tuple_list_prop'), [(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')])
def test__check_inputs_bad_dict_using_alternative(self):
# GIVEN
props = {
'dict_prop': dict(argtype=dict, alternatives=[([(int, int)], lambda x: x)], default=dict())
}
inputs = {
'dict_prop': [
('10', '15'),
('20', '30'),
]
}
# WHEN
with self.assertRaises(TypeError) as cm:
DecWrapper('Test', inputs, props)
# THEN
self.assertEqual(
str(cm.exception),
"[Test] : Option 'dict_prop' bad type. Expected 'dict'. Got 'list' instead."
)
def test__check_inputs_ok_dict_using_alternative(self):
# GIVEN
props = {
'dict_prop': dict(
argtype=dict,
alternatives=[
([(int, int)], lambda v: dict(i for i in v))
],
default=dict()
)
}
inputs = {'dict_prop': [(10, 15), (20, 30)]}
# WHEN
wrapper = DecWrapper('Test', inputs, props)
# THEN
self.assertEqual(wrapper.p('dict_prop'), {10: 15, 20: 30})
def test__check_inputs_set_defaults(self):
# GIVEN
props = {'int_prop': dict(argtype=[int], default=1)}
inputs = {}
# WHEN
wrapper = DecWrapper('Test', inputs, props)
# THEN
self.assertEqual(wrapper.p('int_prop'), 1)
def dec_function(name):
return 'Hello %s' % name
def dec_function_arg(*args):
return args
def dec_function_error():
raise RuntimeError('Error for test')
class TestDecorated(unittest.TestCase):
"""Test the decorated function"""
def test_decorated_output(self):
# GIVEN
wrapping_mock = mock.Mock(spec=DecWrapper)
# WHEN
output = decorated(wrapping=wrapping_mock, func=dec_function)
# THEN
self.assertIsNotNone(output, msg='Output should not be none')
self.assertEqual(output.__name__, 'dec_function', msg='Returned function should have the same name')
def test_decorated_without_argument_injection(self):
# GIVEN
wrapping_mock = mock.Mock(spec=DecWrapper)
wrapping_mock.inject_arg = False
# WHEN
output = decorated(wrapping=wrapping_mock, func=dec_function)('World')
# THEN
self.assertEqual(output, 'Hello World', 'Function output should not change')
wrapping_mock.start.assert_called_once_with()
wrapping_mock.shutdown.assert_called_once_with()
def test_decorated_with_argument_injection(self):
# GIVEN
wrapping_mock = mock.Mock(spec=DecWrapper)
wrapping_mock.inject_arg = True
wrapping_mock.get_args.return_value = ['First arg']
# WHEN
output = decorated(wrapping=wrapping_mock, func=dec_function_arg)('Hello World')
# THEN
self.assertIsInstance(output, tuple, 'Should retrieve function arguments')
self.assertEqual(output[0], 'First arg', 'Function first output should be the wrapping args')
self.assertEqual(output[1], 'Hello World', 'Function second output should be the argument parameter')
wrapping_mock.start.assert_called_once_with()
wrapping_mock.shutdown.assert_called_once_with()
def test_decorated_exception_raised(self):
# GIVEN
wrapping_mock = mock.Mock(spec=DecWrapper)
wrapping_mock.inject_arg = False
# WHEN
f = decorated(wrapping=wrapping_mock, func=dec_function_error)
with self.assertRaises(RuntimeError) as cm:
f()
# THEN
self.assertEqual(
str(cm.exception),
'Error for test',
'A error should be raised'
)
wrapping_mock.start.assert_called_once_with()
wrapping_mock.shutdown.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
|
import boto3
import argparse, os, sys
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
from settings import settings
parser = argparse.ArgumentParser(description='Create a trigger when a file is uploaded to a specific S3 bucket.')
parser.add_argument('--bucket', type=str,
help='Bucket name.')
parser.add_argument('--lambda_name',type=str,
help='Lambda function name to launch on every event')
def create_trigger(name, bucket_name):
client = boto3.client('lambda')
    # Get the Lambda function ARN
    lambdaFunction = client.get_function(FunctionName=name)
    arnLambda = lambdaFunction["Configuration"]["FunctionArn"]
    # Add the S3 -> Lambda trigger
    # the Lambda needs an invoke permission before the notification can be added
response = client.add_permission(
Action='lambda:InvokeFunction',
FunctionName=name,
StatementId='ID-0',
Principal='s3.amazonaws.com',
# SourceAccount='123456789012',
# SourceArn='arn:aws:s3:::examplebucket/*'
)
s3 = boto3.resource('s3')
bucket_notification = s3.BucketNotification(bucket_name)
data = {}
data['LambdaFunctionConfigurations'] = [
{
'Id': bucket_name + "_" + name,
'LambdaFunctionArn': arnLambda,
'Events': ["s3:ObjectCreated:*"],
'Filter': {
"Key": {
"FilterRules": [
{
"Name": "suffix",
"Value": "gz"
}
]
}
}
}
]
bucket_notification.put(
NotificationConfiguration=data
)
if (__name__ == '__main__'):
print("Creating trigger S3 -> Lambda . . . ",end='', flush=True)
args = parser.parse_args()
bucket_name = args.bucket
lambda_name = args.lambda_name
if not bucket_name:
bucket_name = settings.bucket_name
if not lambda_name:
lambda_name = settings.lambda_func_name_trigger
if not bucket_name or not lambda_name:
print("\nLambda name or bucket name are not provided.")
else:
create_trigger(lambda_name, bucket_name)
print("Done!")
|
__author__ = 'HaoBin'
from XGraph2 import *
import copy, cProfile
def generate(m,n):
# recursively generates a list of values according to m, n parameter
# where m is the size of the alphabet set
# and n is the length of each string
# Complexity: O(M^N)
    # use 'alphabet' instead of shadowing the built-in name 'dict'
    alphabet = []
    for i in range(m):
        alphabet.append(chr(65+i))
    total_set = generate_recursive(n, alphabet, [], "")
    return total_set
def generate_recursive(n, alphabet, total, current):
    if len(current) == n:
        total.append(current)
    else:
        for c in alphabet:
            current += c
            total = generate_recursive(n, alphabet, total, current)
            current = current[0:len(current)-1]
    return total
def check_graph(graph):
# does a simple graph check to determine if Euler Circuit exist
# works for Task 1 only I suppose
# condition: All vertices has equal in and out degree
# Complexity: worst case O(|V|^2)
vs = graph.getVerticesList()
cycle = True
for v in vs:
if graph.getInDegree(v) != graph.getDegree(v):
cycle = False
break
return cycle
def e_graph_build(graph):
# Complexity: O(|E| * |V| * Worst length of outgoing)
e_path_exists = check_graph(graph)
print("Checking graph... E-path exists?", e_path_exists)
e_graph = []
if e_path_exists:
vs = graph.getVerticesList()
# get arbitrary vertex to start, but we simply choose the first in vertex list
u = vs[0]
e_graph.append(u)
while graph.getEdgeCount() > 0:
# O(|E|)
if graph.getDegree(u) == 0 and graph.getEdgeCount() > 0:
print("No E-path found!")
break
elif graph.getDegree(u) == 0 and graph.getEdgeCount() == 0:
print("E-path found!")
break
elif graph.getDegree(u) == 1:
v = graph.getConnections(u)[0][0]
e_graph.append(v)
graph.deleteEdge(u,v)
u = v
elif graph.getDegree(u) >= 2:
# go through self loops
for connection in graph.getConnections(u):
if connection[0] == u:
graph.deleteEdge(u,connection[0])
e_graph.append(connection[0])
u_outgoing = graph.getConnections(u)
continuex = True
i = 0
while continuex and i < len(u_outgoing):
# O( Number of outdegree)
v = u_outgoing[i][0]
ori_edge_cost, ori_edge_direct = graph.getEdge(u, v).getCost(), graph.getEdge(u, v).isDirected()
# tries to traverse/delete the edge, and check for validity
# ** does DFS to check if the edge is a bridge
# DFS is O(|V|)
init_c = dfs(copy.deepcopy(graph),u)
graph.deleteEdge(u, v)
continuex = False
after_c = dfs(copy.deepcopy(graph),v)
# getInDegree is O(|V|)
if graph.getInDegree(v) == 0 and graph.getDegree(v) == 0:
graph.addEdge(u, v, ori_edge_cost, ori_edge_direct)
continuex = True
else:
if init_c > after_c:
# Bridge detected
graph.addEdge(u, v, ori_edge_cost, ori_edge_direct)
continuex = True
else:
e_graph.append(v)
u = v
u_outgoing = graph.getConnections(u)
i += 1
print(len(e_graph),end="\r")
return e_graph
else:
return []
def dfs(graph, u):
# performs a depth first search and return the number of nodes traversable
# * count reachable node from u *
# Complexity: O(|V|) worst case, traverse all nodes reachable
count = 0
connections = graph.getConnections(u)
graph.deleteVertex(u)
count += dfs_helper(graph, connections,u) + 1
return count
def dfs_helper(graph, connections, u):
# helper recursive part for dfs()
count = 0
for v in connections:
if v[0] != u and graph[v[0]] is not None:
v_connection = graph.getConnections(v[0])
graph.deleteVertex(v[0])
count += dfs_helper(graph,v_connection,v[0]) + 1
return count
def rbk(pat,txt):
n = len(txt)
m = len(pat)
h_pat = rolling_hash(pat)
h_txt = rolling_hash(txt[0:m])
    for i in range(n-m+1):
        if h_pat == h_txt:
            # verify character by character; a matching hash can still be a collision
            match = True
            for j in range(m):
                if txt[i+j] != pat[j]:
                    match = False
                    break
            if match:
                return i
        if i < n-m:
            # only roll the hash while a full window remains
            h_txt = update_rolling_hash(h_txt, txt[i:i+m+1])
    return -1
def rolling_hash(string,d=131):
q = 32452843
hash = ord(string[0]) * d + ord(string[1])
for i in range(2,len(string)):
hash = hash * d + ord(string[i])
return hash % q
def update_rolling_hash(hash,txt,d=131):
q = 32452843
h = (hash - (ord(txt[0]) * (d**(len(txt)-2))) ) * d + ord(txt[len(txt)-1])
return h % q
def d_edges_build(graph,n):
# Complexity: O(|V|^2)
# build the D-graph and edges
vertices = graph.getVertices()
for vertex in vertices:
for target in vertices:
vid = vertex[1].getID()
tid = target[1].getID()
# PENDING COMPLETION: STRING MATCHING #
if rbk(vid[1:n], tid) == 0:
#if vid[1:n] == tid[0:n-1]:
graph.addEdge(vid,tid,0,True)
return graph
def d_edges_build_2(graph,n):
# Complexity: O(|V| * c)
hash_head = XHashTable()
hash_tail = XHashTable()
vertices = graph.getVerticesList()
for v in vertices:
h = hash_head[v[0:n-1]]
t = hash_tail[v[1:n]]
if h is None:
hash_head[v[0:n-1]] = [v]
if t is None:
hash_tail[v[1:n]] = [v]
if h is not None:
hash_head[v[0:n-1]] = h + [v]
if t is not None:
hash_tail[v[1:n]] = t + [v]
for v in vertices:
h_v = hash_head[v[1:n]]
for h in h_v:
if graph.getEdge(v,h) is None:
graph.addEdge(v,h,0,True)
t_v = hash_tail[v[0:n-1]]
for t in t_v:
if graph.getEdge(t,v) is None:
graph.addEdge(t,v,0,True)
return graph
def print_extend(graph):
# Complexity O(|E|)
# prints out the final E-path
if len(graph) > 0:
n = len(graph[0])
print(graph[0], end="")
i = 1
while i < len(graph):
print(graph[i][n-1:], end="")
i += 1
print()
def main():
# initialise the string set using m & n
m = 3
n = 2
total_set = generate(m,n)
# starts to build D-graph and edges
d_graph = XGraph()
for s in total_set:
d_graph.addVertex(s)
#graph = d_edges_build(d_graph,n)
graph = d_edges_build_2(d_graph,n)
graph.toStrConnection()
print("Edge Count:", graph.getEdgeCount())
# build the E-graph
e_graph = e_graph_build(graph)
print("E",e_graph)
print("E-path length:",len(e_graph)-1)
print_extend(e_graph)
print()
if __name__ == "__main__":
cProfile.run('main()')
#print(rbk("AA","AAB"))
|
import sys
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
spark = SparkSession \
.builder \
.appName("geocode") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
metadataPath = sys.argv[1]
metadataDF = spark.read.json(metadataPath, multiLine = True)
uscitiesDF = spark.read.format('csv').options(header = 'true', inferschema = 'true').load(sys.argv[2])
cityLatLongDF = uscitiesDF.select("city", "lat", "lng").dropDuplicates()
cityLatLongDF = cityLatLongDF.withColumnRenamed("lat", "latitude")
cityLatLongDF = cityLatLongDF.withColumnRenamed("lng", "longitude")
cityLatLongDF = cityLatLongDF.withColumnRenamed("city", "geocode_city")
# cityLatLongDF = cityLatLongDF.withColumn("city",f.lower(f.col("city")))
# cityLatLongDF = cityLatLongDF.withColumn("city",f.trim(f.col("city")))
cityLatLongDF.createOrReplaceTempView("cities")
# cityLatLongDF.show()
uszipsDF = spark.read.format('csv').options(header = 'true', inferschema = 'true').load(sys.argv[3])
zipLatLongDF = uszipsDF.select("zip", "lat", "lng").dropDuplicates()
zipLatLongDF = zipLatLongDF.withColumnRenamed("lat", "latitude")
zipLatLongDF = zipLatLongDF.withColumnRenamed("lng", "longitude")
zipLatLongDF = zipLatLongDF.withColumnRenamed("zip", "geocode_zip")
# zipLatLongDF = zipLatLongDF.withColumn("zip",f.lower(f.col("zip")))
# zipLatLongDF = zipLatLongDF.withColumn("zip",f.trim(f.col("zip")))
zipLatLongDF.createOrReplaceTempView("zips")
# zipLatLongDF.show()
fileNames = metadataDF.columns
for fileName in fileNames:
print("***********current file name: {}***********".format(fileName))
hdfsPath = "data/" + fileName
fileSparkDF = spark.read.format('csv').options(header = 'true', inferschema = 'true').load(hdfsPath)
# fileSparkDF.createOrReplaceTempView("dataset")
# print("current schema: {}".format(fileSparkDF.printSchema()))
fileQueryStr = "`{}`.attributes.*".format(fileName)
currMetadataDF = metadataDF.select(fileQueryStr)
# currMetadataDF.show()
currColumns = fileSparkDF.columns
hasLatLong = False
hasZipcode = False
zipcodeIndex = 0
hasBorough = False
boroughIndex = 0
hasCity = False
cityIndex= 0
for i in range(len(currColumns)):
columnQueryStr = "`{}`.*".format(currMetadataDF.columns[i])
colMetadataDF = currMetadataDF.select(columnQueryStr)
if colMetadataDF.select("is_spatial").first()[0]:
currType = colMetadataDF.select("type").first()[0]
currIndex = colMetadataDF.select("index").first()[0]
if currType == "latitude" or currType == "longitude":
hasLatLong = True
break
elif currType == "zipcode":
hasZipcode = True
zipcodeIndex = currIndex
elif currType == "borough":
hasBorough = True
boroughIndex = currIndex
elif currType == "city":
hasCity = True
cityIndex = currIndex
if hasLatLong:
continue
else:
if hasZipcode:
zipcodeColName = currColumns[zipcodeIndex]
# fileSparkDF = fileSparkDF.withColumn(zipcodeColName,f.lower(f.col(zipcodeColName)))
# fileSparkDF = fileSparkDF.withColumn(zipcodeColName,f.trim(f.col(zipcodeColName)))
fileSparkDF.createOrReplaceTempView("currfile")
fileLatLongDF = spark.sql("select * \
from currfile left outer join zips \
on trim(currfile.`{}`) = trim(zips.geocode_zip)".format(zipcodeColName))
fileLatLongDF = fileLatLongDF.drop("geocode_zip")
print("geocode has succeeded with the zipcode information.")
fileLatLongDF.write.csv("geocode/{}".format(fileName[:-4]), header=True)
continue
if hasBorough:
boroughColName = currColumns[boroughIndex]
# fileSparkDF = fileSparkDF.withColumn(boroughColName,f.lower(f.col(boroughColName)))
# fileSparkDF = fileSparkDF.withColumn(boroughColName,f.trim(f.col(boroughColName)))
fileSparkDF.createOrReplaceTempView("currfile")
fileLatLongDF = spark.sql("select * \
from currfile left outer join cities \
on trim(lower(currfile.`{}`)) = trim(lower(cities.geocode_city))".format(boroughColName))
fileLatLongDF = fileLatLongDF.drop("geocode_city")
print("geocode has succeeded with the borough information.")
fileLatLongDF.write.csv("geocode/{}".format(fileName[:-4]), header=True)
continue
if hasCity:
cityColName = currColumns[cityIndex]
# fileSparkDF = fileSparkDF.withColumn(cityColName,f.lower(f.col(cityColName)))
# fileSparkDF = fileSparkDF.withColumn(cityColName,f.trim(f.col(cityColName)))
fileSparkDF.createOrReplaceTempView("currfile")
fileLatLongDF = spark.sql("select * \
from currfile left outer join cities \
on trim(lower(currfile.`{}`)) = trim(lower(cities.geocode_city))".format(cityColName))
fileLatLongDF = fileLatLongDF.drop("geocode_city")
print("geocode has succeeded with the city information.")
fileLatLongDF.write.csv("geocode/{}".format(fileName[:-4]), header=True)
spark.stop()
|
"""Registry Script Snippets
Parsing registry hives is a common task for Windows host analysis.
The ``yarp`` library is a robust library for parsing registry
hives and this section will show examples of how to use it.
"""
|
from .wheel import main
def main_p():
main(type='pad')
if __name__ == '__main__':
main_p()
|
"""Print Dialog for Grail Browser.
This displays a dialog allowing the user to control printing of the current
document. The following printing characteristics can be controlled from the
dialog:
- the print command (which should take PostScript from stdin
or a %s can be placed on the command line where the filename
of a file containing the postscript output will be placed)
- a check box for printing to a file
- the filename (to receive the PostScript instead)
- some options for controlling the output
- an OK button
- a Cancel button
The last state (print command, check box, filename, options) is saved in
a global settings variable managed by the printing.settings module.
When OK is activated, the HTML or text file is read using urllib.urlopen()
and the PSWriter class is used to generate the PostScript.
When Cancel is activated, the dialog state is still saved.
The document to be printed is checked for its MIME type; if it isn't
text/html but is text/*, text/plain is used as the handler. If no type
is known at all (possibly a disk file without a recognized extension),
an intermediate dialog is used to inform the user that text/plain will
be assumed, giving the option to cancel.
"""
from Cursors import CURSOR_WAIT
from Tkinter import *
import grailutil
import os
import printing.paper
import printing.settings
import Reader
import string
import sys
import tktools
USER_DATA_DIR = grailutil.abspath(
os.path.join(grailutil.getgraildir(), "data"))
def get_scaling_adjustments(w):
scheight = float(w.winfo_screenheight())
scwidth = float(w.winfo_screenwidth())
scheight_mm = float(w.winfo_screenmmheight())
scwidth_mm = float(w.winfo_screenmmwidth())
vert_pixels_per_in = scheight / (scheight_mm / 25)
horiz_pixels_per_in = scwidth / (scwidth_mm / 25)
result = (72.0 / horiz_pixels_per_in), (72.0 / vert_pixels_per_in)
## print "scaling adjustments:", result
return result
def PrintDialog(context, url, title):
try:
infp = context.app.open_url_simple(url)
except IOError, msg:
context.error_dialog(IOError, msg)
return
content_encoding, transfer_encoding = Reader.get_encodings(infp.info())
try:
ctype = infp.info()['content-type']
except KeyError:
ctype, encoding = context.app.guess_type(url)
if not content_encoding:
content_encoding = encoding
if not ctype:
MaybePrintDialog(context, url, title, infp)
return
if not Reader.support_encodings(content_encoding, transfer_encoding):
# create an alert of some sort....
return
ctype, ctype_params = grailutil.conv_mimetype(ctype)
mod = context.app.find_type_extension("printing.filetypes", ctype)
if ctype != "application/postscript" and not mod.parse:
context.error_dialog(
"Unprintable document",
"No printing support is available for the %s media type." % ctype)
return
RealPrintDialog(context, url, title, infp, ctype)
class MaybePrintDialog:
UNKNOWN_TYPE_MESSAGE = \
"""No MIME type is known for this
document. It will be printed as
plain text if you elect to continue."""
def __init__(self, context, url, title, infp):
self.__context = context
self.__url = url
self.__title = title
self.__infp = infp
top = self.__top = tktools.make_toplevel(context.browser.root)
top.title("Print Action")
fr, topfr, botfr = tktools.make_double_frame(top)
Label(topfr, bitmap="warning", foreground='red'
).pack(side=LEFT, fill=Y, padx='2m')
# font used by the Tk4 dialog.tcl script:
font = "-Adobe-Times-Medium-R-Normal--*-180-*-*-*-*-*-*"
try:
label = Label(topfr, text=self.UNKNOWN_TYPE_MESSAGE,
font=font, justify=LEFT)
except TclError:
# font not found, use one we are sure exists:
font = context.browser.viewer.text.tag_cget('h2_b', '-font')
label = Label(topfr, text=self.UNKNOWN_TYPE_MESSAGE,
font=font, justify=LEFT)
label.pack(side=RIGHT, fill=BOTH, expand=1, padx='1m')
b1 = Button(botfr, text="Cancel", command=self.skipit)
b1.pack(side=RIGHT)
b2 = Button(botfr, text="Print", command=self.doit)
b2.pack(side=LEFT)
tktools.unify_button_widths(b1, b2)
tktools.set_transient(top, context.browser.root)
def doit(self, event=None):
self.__top.destroy()
RealPrintDialog(self.__context,
self.__url,
self.__title,
self.__infp,
"text/plain")
self.__context = None
self.__infp = None
def skipit(self, event=None):
self.__context = None
self.__top.destroy()
self.__infp.close()
self.__infp = None
class RealPrintDialog:
def __init__(self, context, url, title, infp, ctype):
import tktools
#
self.infp = infp
self.ctype = ctype
self.context = context
self.baseurl = context.get_baseurl()
self.prefs = context.app.prefs
self.settings = printing.settings.get_settings(context.app.prefs)
if USER_DATA_DIR not in self.settings.user_data_dirs:
self.settings.user_data_dirs.append(USER_DATA_DIR)
settings = self.settings
#
self.title = title
self.master = self.context.root
self.root = tktools.make_toplevel(self.master,
title="Print Dialog",
class_="PrintDialog")
# do this early in case we're debugging:
self.root.protocol('WM_DELETE_WINDOW', self.cancel_command)
self.root.bind("<Alt-w>", self.cancel_event)
self.root.bind("<Alt-W>", self.cancel_event)
self.cursor_widgets = [self.root]
fr, top, botframe = tktools.make_double_frame(self.root)
# Print to file controls:
generalfr = tktools.make_group_frame(
top, "general", "General options:", fill=X)
self.cmd_entry, dummyframe = tktools.make_form_entry(
generalfr, "Print command:")
self.cmd_entry.insert(END, settings.printcmd)
self.add_entry(self.cmd_entry)
self.printtofile = IntVar(self.root)
self.printtofile.set(settings.fileflag)
fr = Frame(generalfr)
fr.pack(fill=X)
self.file_check = Checkbutton(fr, text = "Print to file:",
command = self.check_command,
variable = self.printtofile)
self.file_check.pack(side=LEFT)
self.file_entry = Entry(fr)
self.file_entry.pack(side=RIGHT, fill=X)
self.file_entry.insert(END, settings.printfile)
self.add_entry(self.file_entry)
if self.ctype != "application/postscript":
# page orientation
Frame(generalfr, height=2).pack()
fr = Frame(generalfr)
fr.pack(fill=X)
self.orientation = StringVar(top)
self.orientation.set(string.capitalize(settings.orientation))
opts = printing.paper.paper_rotations.keys()
opts.sort()
opts = tuple(map(string.capitalize, opts))
Label(fr, text="Orientation: ", width=13, anchor=E).pack(side=LEFT)
Frame(fr, width=3).pack(side=LEFT)
menu = apply(OptionMenu, (fr, self.orientation) + opts)
width = reduce(max, map(len, opts), 6)
menu.config(anchor=W, highlightthickness=0, width=width)
menu.pack(expand=1, fill=NONE, anchor=W, side=LEFT)
Frame(generalfr, height=2).pack()
# font size
fr = Frame(generalfr)
fr.pack(fill=X)
Label(fr, text="Font size: ", width=13, anchor=E).pack(side=LEFT)
Frame(fr, width=3).pack(side=LEFT)
e = self.fontsize = Entry(fr, width=12)
e.insert(END, settings.get_fontspec())
e.pack(side=LEFT)
self.add_entry(e)
self.mod = self.get_type_extension()
if self.mod.add_options:
Frame(top, height=8).pack()
self.mod.add_options(self, settings, top)
# Command buttons:
ok_button = Button(botframe, text="OK",
command=self.ok_command)
ok_button.pack(side=LEFT)
cancel_button = Button(botframe, text="Cancel",
command=self.cancel_command)
cancel_button.pack(side=RIGHT)
tktools.unify_button_widths(ok_button, cancel_button)
tktools.set_transient(self.root, self.master)
self.check_command()
def get_type_extension(self):
return self.context.app.find_type_extension(
"printing.filetypes", self.ctype)
def new_checkbox(self, parent, description, value):
var = BooleanVar(parent)
var.set(value)
Checkbutton(parent, text=description, variable=var, anchor=W
).pack(anchor=W, fill=X)
return var
def add_entry(self, entry):
self.cursor_widgets.append(entry)
entry.bind("<Return>", self.return_event)
def return_event(self, event):
self.ok_command()
def check_command(self):
if self.printtofile.get():
self.file_entry['state'] = NORMAL
self.cmd_entry['state'] = DISABLED
self.file_entry.focus_set()
else:
self.file_entry['state'] = DISABLED
self.cmd_entry['state'] = NORMAL
self.cmd_entry.focus_set()
def ok_command(self):
if self.printtofile.get():
filename = self.file_entry.get()
if not filename:
self.context.error_dialog("No file",
"Please enter a filename")
return
try:
fp = open(filename, "w")
except IOError, msg:
self.context.error_dialog(IOError, str(msg))
return
else:
cmd = self.cmd_entry.get()
if not cmd:
self.context.error_dialog("No command",
"Please enter a print command")
return
try:
if string.find(cmd, '%s') != -1:
import tempfile
tempname = tempfile.mktemp()
fp = open(tempname, 'w')
else:
fp = os.popen(cmd, "w")
except IOError, msg:
self.context.error_dialog(IOError, str(msg))
return
for e in self.cursor_widgets:
e['cursor'] = CURSOR_WAIT
self.root.update_idletasks()
try:
self.print_to_fp(fp)
except:
# don't want a try/finally since we don't need this to
# execute unless we received an error
for e in self.cursor_widgets: e['cursor'] = ''
raise sys.exc_type, sys.exc_value, sys.exc_traceback
sts = fp.close()
if not sts:
try:
cmd_parts = string.splitfields(cmd, '%s')
cmd = string.joinfields(cmd_parts, tempname)
sts = os.system(cmd)
os.unlink(tempname)
except NameError: # expected on tempname except on NT
pass
if sts:
self.context.error_dialog("Exit",
"Print command exit status %s" % `sts`)
self.root.destroy()
def cancel_event(self, event):
self.cancel_command()
def cancel_command(self):
self.update_settings()
self.root.destroy()
def print_to_fp(self, fp):
# do the printing
from printing import paper
from printing import PSWriter
#
self.update_settings()
if self.ctype == "application/postscript":
parser = self.wrap_parser(FileOutputParser(fp))
parser.feed(self.infp.read())
parser.close()
self.infp.close()
return
apply(self.settings.set_scaling, get_scaling_adjustments(self.root))
paper = paper.PaperInfo(self.settings.papersize,
margins=self.settings.margins,
rotation=self.settings.orientation)
w = PSWriter.PSWriter(fp, self.title, self.baseurl, paper=paper,
settings=self.settings)
p = self.mod.parse(w, self.settings, self.context)
# need to decode the input stream a bit here:
p = self.wrap_parser(p)
p.feed(self.infp.read())
self.infp.close()
p.close()
w.close()
def wrap_parser(self, parser):
# handle the content-encoding and content-transfer-encoding headers
headers = self.infp.info()
content_encoding, transfer_encoding = Reader.get_encodings(headers)
return Reader.wrap_parser(parser, self.ctype,
content_encoding, transfer_encoding)
def update_settings(self):
settings = self.settings
settings.printcmd = self.cmd_entry.get()
settings.printfile = self.file_entry.get()
settings.fileflag = self.printtofile.get()
if self.ctype == "application/postscript":
return
settings.set_fontsize(self.fontsize.get())
settings.orientation = string.lower(self.orientation.get())
if self.mod.update_settings:
self.mod.update_settings(self, settings)
class FileOutputParser:
def __init__(self, fp):
self.__fp = fp
def feed(self, data):
self.__fp.write(data)
def close(self):
pass
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import os
import shutil
import subprocess
from pyiron.base.settings.install import command_line
import unittest
class TestInstall(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.execution_path = os.path.dirname(os.path.abspath(__file__))
cls.install_script = os.path.join(
cls.execution_path, "../../../pyiron/base/settings/install.py"
)
@classmethod
def tearDownClass(cls):
execution_path = os.path.dirname(os.path.abspath(__file__))
shutil.rmtree(os.path.join(execution_path, "resources"))
shutil.rmtree(os.path.join(execution_path, "project"))
os.remove(os.path.join(execution_path, "config"))
def test_install(self):
command_line(
[
"-c",
os.path.join(self.execution_path, "config"),
"-r",
os.path.join(self.execution_path, "resources"),
"-p",
os.path.join(self.execution_path, "project"),
]
)
with open(os.path.join(self.execution_path, "config"), "r") as f:
content = f.readlines()
self.assertEqual(content[0], "[DEFAULT]\n")
self.assertIn("PROJECT_PATHS", content[1])
self.assertIn("RESOURCE_PATHS", content[2])
self.assertTrue(os.path.exists(os.path.join(self.execution_path, "project")))
self.assertTrue(os.path.exists(os.path.join(self.execution_path, "resources")))
def test_install_help(self):
out = subprocess.check_output(
["python", self.install_script, "--error"],
cwd=self.execution_path,
shell=False,
universal_newlines=True,
)
self.assertEqual(
out,
"install.py -c <config_file> -p <project_path> -r <resource_dir> -u <url>\n",
)
out = subprocess.check_output(
["python", self.install_script, "-h"],
cwd=self.execution_path,
shell=False,
universal_newlines=True,
)
self.assertEqual(
out,
"install.py -c <config_file> -p <project_path> -r <resource_dir> -u <url>\n",
)
if __name__ == "__main__":
unittest.main()
|
def preprocess(input_file, output_file):
all_queries = []
with open(input_file, 'r', errors='ignore') as infile:
for line in infile.readlines():
e1, r, e2 = line.strip().split('\t')
r_num = len(r.split('.'))
if r_num == 1:
all_queries.append([e1, r, e2])
elif r_num == 2:
r1, r2 = r.split('.')
all_queries.append([e1, r1, e2])
all_queries.append([e1, r2, e2])
else:
print(r_num, r)
with open(output_file, 'w') as outfile:
for query in all_queries:
e1, r, e2 = query
outfile.write(e1 + '\t' + r + '\t' + e2 + '\n')
preprocess('../datasets_knowledge_embedding/FB15k-237/train.txt', '../datasets_knowledge_embedding/FB15k-237/my_train.txt')
preprocess('../datasets_knowledge_embedding/FB15k-237/valid.txt', '../datasets_knowledge_embedding/FB15k-237/my_valid.txt')
preprocess('../datasets_knowledge_embedding/FB15k-237/test.txt', '../datasets_knowledge_embedding/FB15k-237/my_test.txt')
|
from unittest import TestCase
class Test(TestCase):
"""
The test will be done in User class because it extends Credentials
"""
def test_credentials(self):
self.fail()
|
# detect cycle in directed graph
# https://github.com/mission-peace/interview/blob/master/src/com/interview/graph/CycleInDirectedGraph.java
from graph import *
GRAY = "gray"
BLACK = "black"
WHITE = "white"
def has_cycle(graph):
visited = set()
current_stack = set()
for vertex in graph.all_vertex.values():
if vertex not in visited:
result = has_cycle_util(vertex, visited, current_stack)
if result:
return True
return False
def has_cycle_util(vertex, visited, current_stack):
visited.add(vertex)
current_stack.add(vertex)
    for adjacent in vertex.adjacent_vertices:
        if adjacent in current_stack:
            return True
        if adjacent not in visited:
            # only recurse into vertices that have not been explored yet
            if has_cycle_util(adjacent, visited, current_stack):
                return True
current_stack.remove(vertex)
return False
def has_cycle_using_back_edge(graph):
visited = {}
for vertex in graph.all_vertex.values():
visited[vertex] = WHITE
for vertex in graph.all_vertex.values():
if visited[vertex] == WHITE:
result = has_cycle_using_back_edge_util(vertex, visited)
if result:
return True
return False
def has_cycle_using_back_edge_util(vertex, visited):
if visited[vertex] == GRAY:
return True
visited[vertex] = GRAY
    for adjacent in vertex.adjacent_vertices:
        if visited[adjacent] == BLACK:
            # a fully explored vertex cannot be part of a new cycle
            continue
        if has_cycle_using_back_edge_util(adjacent, visited):
            return True
visited[vertex] = BLACK
return False
if __name__ == '__main__':
graph = Graph(True)
graph.add_edge(1,2)
graph.add_edge(1,3)
graph.add_edge(3,4)
graph.add_edge(4,5)
graph.add_edge(2,5)
graph.add_edge(5,3)
result1 = has_cycle(graph)
result2 = has_cycle_using_back_edge(graph)
print(str(result1) + " " + str(result2))
|
import nomad.api.exceptions
from nomad.api.base import Requester
class Status(object):
"""
By default, the agent's local region is used
https://www.nomadproject.io/docs/http/status.html
"""
def __init__(self, **kwargs):
self.leader = Leader(**kwargs)
self.peers = Peers(**kwargs)
def __str__(self):
return "{0}".format(self.__dict__)
def __repr__(self):
return "{0}".format(self.__dict__)
def __getattr__(self, item):
raise AttributeError
class Leader(Requester):
ENDPOINT = "status/leader"
def __contains__(self, item):
try:
leader = self.get_leader()
if leader == item:
return True
else:
return False
except nomad.api.exceptions.URLNotFoundNomadException:
return False
def __len__(self):
leader = self.get_leader()
return len(leader)
def get_leader(self):
""" Returns the address of the current leader in the region.
https://www.nomadproject.io/docs/http/status.html
returns: string
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request(method="get").json()
class Peers(Requester):
ENDPOINT = "status/peers"
def __contains__(self, item):
try:
peers = self.get_peers()
for p in peers:
if p == item:
return True
else:
return False
except nomad.api.exceptions.URLNotFoundNomadException:
return False
def __len__(self):
peers = self.get_peers()
return len(peers)
def __getitem__(self, item):
try:
peers = self.get_peers()
for p in peers:
if p == item:
return p
else:
raise KeyError
except nomad.api.exceptions.URLNotFoundNomadException:
raise KeyError
def __iter__(self):
peers = self.get_peers()
return iter(peers)
def get_peers(self):
""" Returns the set of raft peers in the region.
https://www.nomadproject.io/docs/http/status.html
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request(method="get").json()
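# A hedged usage sketch, assuming this module is exposed through the
# python-nomad client as `Nomad().status` (the host and timeout below are
# placeholder values, not taken from this file):
#
#   import nomad
#   client = nomad.Nomad(host="127.0.0.1", timeout=5)
#   leader_address = client.status.leader.get_leader()
#   raft_peers = list(client.status.peers)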
|
from social_auth.backends.facebook import FacebookBackend
from social_auth.backends import google
from social_auth.signals import pre_update  # needed for the connect() call at the bottom
def social_extra_values(sender, user, response, details, **kwargs):
result = False
if "id" in response:
from apps.photo.models import Photo
from urllib2 import urlopen, HTTPError
from django.template.defaultfilters import slugify
from apps.account.utils import user_display
from django.core.files.base import ContentFile
try:
url = None
if sender == FacebookBackend:
url = "http://graph.facebook.com/%s/picture?type=large" \
% response["id"]
elif sender == google.GoogleOAuth2Backend and "picture" in response:
url = response["picture"]
if url:
avatar = urlopen(url)
photo = Photo(author = user, is_avatar = True)
photo.picture.save(slugify(user.username + " social") + '.jpg',
ContentFile(avatar.read()))
photo.save()
except HTTPError:
pass
result = True
return result
pre_update.connect(social_extra_values, sender=None)
|
# coding=utf-8
from .parsed_date import ParsedDate # noqa
|
from django import forms
from utils.fields import SlugField
from utils.forms import BootstrapMixin, SmallTextarea, StaticSelect, add_blank_choice
from .enums import PasswordAlgorithm
from .models import Platform
class PlatformForm(BootstrapMixin, forms.ModelForm):
slug = SlugField(max_length=255)
password_algorithm = forms.ChoiceField(
required=False,
choices=add_blank_choice(PasswordAlgorithm.choices),
widget=StaticSelect,
)
class Meta:
model = Platform
fields = [
"name",
"slug",
"napalm_driver",
"napalm_args",
"password_algorithm",
"description",
]
widgets = {"napalm_args": SmallTextarea()}
|
import json
import time
from typing import List, Tuple, cast, Callable
import polyline
import requests
from .joblib import memory
class _QueryTopo:
_last_query: float = 0
def __call__(self) -> Callable[[List[Tuple[float, float]]], List[float]]:
@memory.cache
def impl(data: List[Tuple[float, float]]) -> List[float]:
encoded_line = polyline.encode(data, 5)
if time.time() - self._last_query < 1:
time.sleep(1 - (time.time() - self._last_query))
result = requests.get(f'https://api.opentopodata.org/v1/srtm90m?locations={encoded_line}')
self._last_query = time.time()
if result.status_code != 200:
raise Exception(f'Error accessing opentopodata: {result.content!r}')
points = json.loads(result.content)['results']
return list(map(lambda p: cast(float, p['elevation']), points))
return cast(Callable[[List[Tuple[float, float]]], List[float]], impl)
query_topo: Callable[[List[Tuple[float, float]]], List[float]] = _QueryTopo()()
|
from pathlib import Path
import os
import sys
source_path = str(Path(os.path.abspath(__file__)).parent.parent)
if source_path not in sys.path:
sys.path.insert(0, source_path)
import pandas as pd
from sqlalchemy import create_engine
import yaml
from os.path import expanduser
from datetime import datetime
import logging
import signal, time, random
class TimeoutError(RuntimeError):
pass
def handler(signum, frame):
raise TimeoutError()
def max_run_time(seconds):
signal.signal(signal.SIGALRM, handler)
return signal.alarm(seconds)
def connect_to_database():
"""
Connects to postgre database using .pgpass
Return
------
sqlalchemy.engine.base.Engine
database engine / connection
"""
env_dict = open_yaml(Path(os.path.abspath(__file__)).parent.parent.parent / 'env.yaml')
host, port, database, user, password = env_dict['database'].replace('\n', '').split(':')
database_uri = 'postgresql://{}:{}@{}:{}/{}'.format(user, password, host, port, database)
return create_engine(database_uri)
def open_yaml(path):
"""
Load yaml file.
Parameters
----------
path: pathlib.PosixPath
Path to yaml file
Return
------
Dictionary
Dictionary with yaml file content
"""
    with open(path) as stream:
        try:
            yaml_dict = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            logging.error('Error when opening YAML file.', exc_info=1)
            yaml_dict = None  # avoid a NameError when parsing fails
    return yaml_dict
def path_to_shared(username,
directory,
filename,
filetype
):
"""
Generate path to shared folder at '/data/shared_data'.
All files will be with the following name:
`[username]_[filename]_[timestamp].[filetype]`
Timestamp has the pattern '%Y%m%d%H%M%S'
The folder is structured as:
-- imgs
-- notebooks
-- data
-- raw
-- treated
-- output
Example
-------
```
import utils
df = pd.read_csv('some_csv')
df.to_csv(utils.path_to_shared('joaocarabetta','data_output', 'filename', 'csv'))
```
Parameters
----------
username: str
your username. try not to change it
directory: str
where you want to save the file.
the available options are: imgs, data_output, data_raw, data_treated, notebooks
filename: str
the name of the file
filetype: str
the type of the file, e.g. csv, png, pdf ...
        do not use a dot at the beginning.
"""
data_path = '/data/shared_data'
dir_to_path = {
'imgs': 'imgs',
'data_output': 'data/output',
'data_raw': 'data/raw',
'data_treated': 'data/treated',
'notebooks': 'notebooks'
}
try:
dir_path = dir_to_path[directory]
except KeyError:
print(f'You entered {directory} as a directory. But, the only available directories are: {", ".join(dir_to_path.keys())}')
raise KeyError
timestamp = datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')
    file_path = str(Path(data_path) / dir_path / f'{username}_{filename}_{timestamp}.{filetype}')
open(file_path, 'w+').write(' ')
os.chmod(file_path, 0o774) # change permission to 774
return file_path
def calculate_bool_stats(df, groupby_col, bool_col='bool_of_effective_complaints', count_col='id_llamado'):
"""
Given a data frame with a group column, a boolean column and a value column,
calculates stats by group:
- Total value (bool0 + bool1)
- Rate of bool1 over total for each group
- Percentage of bool1 over complete df
- Cumulative percentage over complete df
Parameters
----------
df: pandas.DataFrame
Table schema
groupby_col: string
Column name for which to group by
bool_col: string
Boolean column
count_col: string
Numeric column over which calculate stats
Return
-------
pandas.DataFrame
Stats table
"""
groupby_df = df.groupby([groupby_col, bool_col])[count_col].nunique().to_frame().reset_index()
stats_df = groupby_df.pivot(index=groupby_col, columns=bool_col, values=count_col)\
.fillna(0).reset_index().rename(index=str, columns={0: 'n_bool_0', 1: 'n_bool_1'})
stats_df.columns.name = None
stats_df['n_total'] = stats_df['n_bool_1'] + stats_df['n_bool_0']
stats_df['rate'] = stats_df['n_bool_1'] / stats_df['n_total'] * 100
stats_df['percentage'] = stats_df['n_bool_1'] / stats_df['n_bool_1'].sum() * 100
stats_df = stats_df.sort_values('percentage', ascending=False)
stats_df['cum_percentage'] = stats_df['percentage'].cumsum()
return stats_df
def check_null_values(schema, table, con):
"""
Reads through the table to count null values
Parameters
----------
schema: string
Table schema
table: string
Table name
con: sqlalchemy.engine.base.Engine
slalchemy connection
Return
-------
pandas.DataFrame
Number of nulls per columns
"""
query = f'select * from {schema}.{table} limit 1'
columns = list(pd.read_sql_query(query, con).columns)
results = []
for column in columns:
duplicate_query = f"""
select count(*) - count({column})
from {schema}.{table}"""
results.append({
'column': column,
'nulls': pd.read_sql_query(duplicate_query, con).values[0][0]
})
return pd.DataFrame(results)
def load_pandas_to_db(df, table, schema, how='append'):
"""
Load pandas DataFrame into database.
Parameters
----------
df: pandas DataFrame
DataFrame to load.
table: string
Name of the target table to upload data.
schema: string
Name of the schema where the target table is.
how: string
In case the table already exists, what should happen: 'fail', 'replace', 'append' (default).
"""
con = connect_to_database()
try:
df.to_sql(name=table, schema=schema, con=con, if_exists=how, index=False)
logging.info("Table loaded successfully!")
except Exception as e:
logging.error('Table could not be loaded', exc_info=True)
def retrieve_column_names_postgres(schema, table):
"""
Retrieve column names of a PostgreSQL table.
Parameters
-----------
schema: string
Schema name.
table: string
Table name.
Returns
-------
List of the column names.
"""
con = connect_to_database()
query = f"select column_name " \
f" from information_schema.columns " \
f" where table_schema = '{schema}' " \
f" and table_name = '{table}'"
column_names = pd.read_sql_query(query, con)
return column_names['column_name'].tolist()
def split_yaml(yaml_path, output_folder, split_by):
"""
Split a YAML file into multiple files by some key.
Split files are saved into an output folder.
Parameters
-----------
yaml_path: string
Complete path of the YAML file.
output_folder: string
Folder name where to output the splits.
split_by: string
Key to use for splitting.
"""
if type(yaml_path) == str:
file_name = yaml_path.split("/")[-1].replace('.yaml', '')
else:
file_name = yaml_path.stem
yaml_content = open_yaml(yaml_path)
for i, approach in enumerate(yaml_content[split_by]):
output_dict = yaml_content.copy()
output_dict[split_by] = [approach]
with open(f"{output_folder}/{file_name}_{i+1}.yaml", 'w') as outfile:
yaml.dump(output_dict, outfile, default_flow_style=False)
if __name__ == '__main__':
connect_to_database()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
TIPO = 'selectable' # 'basic' or 'selectable'. 'basic': required for the program to work
                    # 'selectable': not required; adds new functionality to the program
                    # For example, authentication is 'basic', while minutes (actas) are optional
# The code_menu must be unique and will be configured as a system permission
MENU_DEFAULT = [
{'code_menu': 'acceso_informes_usuarios',
'texto_menu': 'Informes',
'href': '', 'nivel': 1,
'tipo': 'Accesible',
'pos': 1
},
{'code_menu': 'acceso_informes_seguimiento',
'texto_menu': 'Informe de seguimiento',
'href': 'informes_seguimiento',
'nivel': 2,
'tipo': 'Accesible',
'pos': 1,
'parent': 'acceso_informes_usuarios'
},
# {'code_menu': 'acceso_generar_actillas',
# 'texto_menu': 'Actillas',
# 'href': 'generar_actillas',
# 'nivel': 2,
# 'tipo': 'Accesible',
# 'pos': 2,
# 'parent': 'acceso_informes_usuarios'
# },
{'code_menu': 'acceso_informes_tareas',
'texto_menu': 'Informe con tareas',
'href': 'informes_tareas',
'nivel': 2,
'tipo': 'Accesible',
'pos': 3,
'parent': 'acceso_informes_usuarios'
}
]
# Additional permissions for the user are added here
PERMISOS = [{'code_nombre': 'solicita_informes_seguimiento',
'nombre': 'Tiene permiso para solicitar informes de seguimiento de los usuarios de la entidad',
'menu': 'acceso_informes_seguimiento'
},
{'code_nombre': 'borra_informes_seguimiento',
'nombre': 'Tiene permiso para borrar informes de seguimiento de los usuarios de la entidad',
'menu': 'acceso_informes_seguimiento'
},
{'code_nombre': 'edita_informes_seguimiento',
'nombre': 'Tiene permiso para editar informes de seguimiento de los usuarios de la entidad',
'menu': 'acceso_informes_seguimiento'
},
{'code_nombre': 've_informes_seguimiento',
'nombre': 'Tiene permiso para ver informes de seguimiento de los usuarios de la entidad',
'menu': 'acceso_informes_seguimiento'
},
{'code_nombre': 'borra_preguntas_informes_seguimiento',
'nombre': 'Tiene permiso para borrar preguntas de los informes de seguimiento de la entidad',
'menu': 'acceso_informes_seguimiento'
},
# {'code_nombre': 'genera_actillas_grupos_usuarios',
# 'nombre': 'Tiene permiso para sancionar como director',
# 'menu': 'acceso_sancionar_conductas'
# },
{'code_nombre': 'solicita_informes_tareas',
'nombre': 'Tiene permiso para solicitar informes con tareas de los usuarios de la entidad',
'menu': 'acceso_informes_tareas'
},
{'code_nombre': 'borra_informes_tareas',
'nombre': 'Tiene permiso para borrar informes con tareas de los usuarios de la entidad',
'menu': 'acceso_informes_tareas'
},
{'code_nombre': 'edita_informes_tareas',
'nombre': 'Tiene permiso para editar informes con tareas de los usuarios de la entidad',
'menu': 'acceso_informes_tareas'
},
{'code_nombre': 've_informes_tareas',
'nombre': 'Tiene permiso para ver informes con tareas de los usuarios de la entidad',
'menu': 'acceso_informes_tareas'
}
]
|
#!/usr/bin/env python3
# encoding: utf-8
# cook_datasets.py
import numpy as np
import pandas as pd
import pickle
from sklearn.preprocessing import StandardScaler
USPS_DIM = 256
LEU_DIM = 7129
def unpickle(file):
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')  # avoid shadowing the built-in dict
    return batch
def parse_data(data, d):
x = np.empty((len(data), d))
y = np.empty(len(data))
for i, line in enumerate(data):
values = line.strip().split()
y[i] = float(values[0])
for j, v in enumerate(values[1:]):
x[i, j] = float(v.split(':')[1])
return x, y
def main():
# Powerplant
data = pd.read_excel('datasets/Powerplant/CCPP/Folds5x2_pp.xlsx')
x = data.loc[:, ['AT', 'V', 'AP', 'RH']].values
y = data.PE.values
x = StandardScaler().fit_transform(x)
np.save('datasets/Powerplant/data', x)
np.save('datasets/Powerplant/labels', y)
print('Powerplant cooked')
# LETTER
with open('datasets/LETTER/data', 'r') as f:
lines = f.readlines()
data = np.empty([len(lines), 16])
labels = np.empty(len(lines))
for i,line in enumerate(lines):
line = line.strip()
line = line.split(',')
data[i, :] = np.array(line[1:])
labels[i] = ord(line[0]) - 65
np.save('datasets/LETTER/data', data)
np.save('datasets/LETTER/labels', labels)
print('LETTER cooked')
# USPS
with open('datasets/USPS/data', 'r') as f:
train = f.readlines()
with open('datasets/USPS/data_test', 'r') as f:
test = f.readlines()
xtrain, ytrain = parse_data(train, USPS_DIM)
xtest, ytest = parse_data(test, USPS_DIM)
data = np.vstack((xtrain, xtest))
labels = np.hstack((ytrain, ytest))
np.save('datasets/USPS/data', data)
np.save('datasets/USPS/labels', labels)
print('USPS cooked')
# CIFAR100
test = unpickle('datasets/CIFAR100/cifar-100-python/test')
test_data = test[b'data']
test_labels = test[b'fine_labels']
train = unpickle('datasets/CIFAR100/cifar-100-python/train')
train_data = train[b'data']
train_labels = train[b'fine_labels']
data = np.vstack([train_data, test_data])
labels = np.hstack([train_labels, test_labels])
np.save('datasets/CIFAR100/data.npy', data)
np.save('datasets/CIFAR100/labels.npy', labels)
print('CIFAR100 cooked')
# LEUKEMIA
with open('datasets/LEUKEMIA/leu', 'r') as f:
train = f.readlines()
with open('datasets/LEUKEMIA/leu.t', 'r') as f:
test = f.readlines()
xtrain, ytrain = parse_data(train, LEU_DIM)
xtest, ytest = parse_data(test, LEU_DIM)
data = np.vstack((xtrain, xtest))
labels = np.hstack((ytrain, ytest))
np.save('datasets/LEUKEMIA/data', data)
np.save('datasets/LEUKEMIA/labels', labels)
print('LEUKEMIA cooked')
print('Finished!')
|
#! /usr/bin/env python
import os
import numpy as np
import astropy.io.fits as fits
from . import noise_simulation as ng
def add_dark_current(ramp, seed, gain, darksignal):
"""
Adds dark current to the input signal
Parameters
----------
ramp: sequence
The array of ramp images
seed: int
The seed for the dark signal
gain: float
The detector gain
darksignal: sequence
A 2D map of the dark signal to project onto the ramp
Returns
-------
np.ndarray
The dark signal ramp
"""
# Get the random seed and array shape
np.random.seed(seed)
dims = ramp.shape
# Add the dark signal to the ramp
total = darksignal*0.
for n in range(dims[0]):
signal = np.random.poisson(darksignal)/gain
total = total+signal
ramp[n,:,:] = ramp[n,:,:]+total
return ramp
def make_exposure(nints, ngrps, darksignal, gain, pca0_file, noise_seed=None,
dark_seed=None, offset=500):
"""
Make a simulated exposure with no source signal
Parameters
----------
nints: int
The number of integrations
ngrps: int
The number of groups per integration
darksignal: sequence
A dark frame
gain: float
The gain on the detector
pca0_file: str
The path to the PCA-zero file
noise_seed: int
The seed for the generated noise
dark_seed: int
The seed for the generated dark
offset: int
The pedestal offset
Returns
-------
np.ndarray
A simulated ramp of darks
"""
if nints < 1 or ngrps < 1:
return None
if not noise_seed:
noise_seed = 7+int(np.random.uniform()*4000000000.)
if not dark_seed:
dark_seed = 5+int(np.random.uniform()*4000000000.)
np.random.seed(dark_seed)
# Make empty data array
nrows, ncols = darksignal.shape
simulated_data = np.zeros([nints*ngrps,nrows,ncols], dtype=np.float32)
# Define some constants
pedestal = 18.30
c_pink = 9.6
u_pink = 3.2
acn = 2.0
bias_amp = 0.
#bias_amp = 5358.87
#bias_offset = 20944.06
pca0_amp = 0.
rd_noise = 12.95
dark_current = 0.0
dc_seed = dark_seed
bias_offset = offset*gain
# Define the HXRGN instance to make a SUSBSTRIP256 array
#(in detector coordinates)
noisecube = ng.HXRGNoise(naxis1=nrows, naxis2=ncols, naxis3=ngrps,
pca0_file=pca0_file, x0=0, y0=0, det_size=2048,
verbose=False)
# iterate over integrations
for loop in range(nints):
seed1 = noise_seed+24*int(loop)
ramp = noisecube.mknoise(c_pink=c_pink, u_pink=u_pink,
bias_amp=bias_amp, bias_offset=bias_offset,
acn=acn, pca0_amp=pca0_amp, rd_noise=rd_noise,
pedestal=pedestal, dark_current=dark_current,
dc_seed=dc_seed, noise_seed=seed1, gain=gain)
if len(ramp.shape)==2:
ramp = ramp[np.newaxis,:,:]
ramp = np.transpose(ramp,(0,2,1))
ramp = ramp[::,::-1,::-1]
ramp = add_dark_current(ramp, dc_seed, gain, darksignal)
simulated_data[loop*ngrps:(loop+1)*ngrps,:,:] = np.copy(ramp)
ramp = 0
return simulated_data
def make_photon_yield(photon_yield, orders):
"""
Generates a map of the photon yield for each order.
The shape of both arrays should be [order, nrows, ncols]
Parameters
----------
photon_yield: str
The path to the file containg the calculated photon yield at each pixel
orders: sequence
An array of the median image of each order
Returns
-------
np.ndarray
The array containing the photon yield map for each order
"""
# Get the shape and create empty arrays
dims = orders.shape
sum1 = np.zeros((dims[1], dims[2]), dtype=np.float32)
sum2 = np.zeros((dims[1], dims[2]), dtype=np.float32)
# Add the photon yield for each order
for n in range(dims[0]):
sum1 = sum1+photon_yield[n, :, :]*orders[n, :, :]
sum2 = sum2+orders[n, :, :]
# Take the ratio of the photon yield to the signal
pyimage = sum1/sum2
pyimage[np.where(sum2 == 0.)] = 1.
return pyimage
def add_signal(signals, cube, pyimage, frametime, gain, zodi, zodi_scale,
photon_yield=False):
"""
Add the science signal to the generated noise
Parameters
----------
signals: sequence
The science frames
cube: sequence
The generated dark ramp
pyimage: sequence
The photon yield per order
frametime: float
The number of seconds per frame
gain: float
The detector gain
zodi: sequence
The zodiacal background image
zodi_scale: float
The scale factor for the zodi background
"""
# Get the data dimensions
dims1 = cube.shape
dims2 = signals.shape
if dims1 != dims2:
raise ValueError(dims1, "not equal to", dims2)
# Make a new ramp
newcube = cube.copy()*0.
# The background is assumed to be in electrons/second/pixel, not ADU/s/pixel.
background = zodi*zodi_scale*frametime
# Iterate over each group
for n in range(dims1[0]):
framesignal = signals[n,:,:]*gain*frametime
# Add photon yield
if photon_yield:
newvalues = np.random.poisson(framesignal)
target = pyimage-1.
            for k in range(dims1[1]):
                for l in range(dims1[2]):
                    if target[k,l] > 0.:
                        # use a separate name so the group index n is not clobbered
                        n_photons = int(newvalues[k,l])
                        values = np.random.poisson(target[k,l], size=n_photons)
                        newvalues[k,l] = newvalues[k,l]+np.sum(values)
newvalues = newvalues+np.random.poisson(background)
# Or don't
else:
vals = np.abs(framesignal*pyimage+background)
newvalues = np.random.poisson(vals)
# First ramp image
if n==0:
newcube[n,:,:] = newvalues
else:
newcube[n,:,:] = newcube[n-1,:,:]+newvalues
newcube = cube+newcube/gain
return newcube
def non_linearity(cube, nonlinearity, offset=0):
"""
Add nonlinearity to the ramp
Parameters
----------
cube: sequence
The ramp with no non-linearity
nonlinearity: sequence
The non-linearity image to add to the ramp
offset: int
The non-linearity offset
Returns
-------
np.ndarray
The ramp with the added non-linearity
"""
# Get the dimensions of the input data
dims1 = nonlinearity.shape
dims2 = cube.shape
    if (dims1[1] != dims2[1]) or (dims1[2] != dims2[2]):
raise ValueError
# Make a new array for the ramp+non-linearity
newcube = cube-offset
for k in range(dims2[0]):
frame = np.squeeze(np.copy(newcube[k,:,:]))
sum1 = frame*0.
for n in range(dims1[0]-1,-1,-1):
sum1 = sum1+nonlinearity[n,:,:]*np.power(frame,n+1)
sum1 = frame*(1.+sum1)
newcube[k,:,:] = sum1
newcube = newcube+offset
return newcube
def add_pedestal(cube, pedestal, offset=500):
"""
Add a pedestal to the ramp
Parameters
----------
cube: sequence
The ramp with no pedestal
pedestal: sequence
The pedestal image to add to the ramp
offset: int
The pedestal offset
Returns
-------
np.ndarray
The ramp with the added pedestal
"""
# Add the offset to the pedestal
ped1 = pedestal+(offset-500.)
# Make a new array for the ramp+pedestal
dims = cube.shape
newcube = np.zeros_like(cube,dtype=np.float32)
# Iterate over each integration
for n in range(dims[0]):
newcube[n,:,:] = cube[n,:,:]+ped1
newcube = newcube.astype(np.uint16)
return newcube
|
'''!
Declaration of the Depth_calculator class and startup of this node in main()
'''
import rclpy
import cv2 as cv
from rclpy.node import Node
from sensor_msgs.msg import Image
from std_msgs.msg import Float32
from cv_bridge import CvBridge
import sys
import time
import numpy as np
class Depth_calculator(Node):
    '''!
    @brief Depth_calculator node class
    @details Reads -/aligned_depth_to_color/ - the depth image aligned to the regular color image \n
    Reads -/object_detection/coords - the coordinates used to compute the average canvas depth\n
    Publishes -/depth_node/canvas_average_depth - the average canvas depth
    '''
    ## Class constructor
def __init__(self):
super().__init__("depth_node")
        ## Subscriber reads sensor_msgs.msg.Image from the camera topic /camera/aligned_depth_to_color/image_raw
self.subscriber = self.create_subscription(
Image,
"/camera/aligned_depth_to_color/image_raw",
self.callback,
10
)
        ## Subscriber reads the coordinates from the topic /object_detection/coords
self.sub_for_coords = self.create_subscription(
Image,
"/object_detection/coords",
self.get_coords,
10
)
        ## Publisher publishes the std_msgs.msg.Float32 average canvas depth to the topic /depth_node/canvas_average_depth
self.pub_canvas_average_depth = self.create_publisher(
Float32,
"/depth_node/canvas_average_depth",
1
)
        ## cvbridge for converting the image from the ROS msg format
        self.cv_bridge = CvBridge()
        ## cvbridge for converting the coordinates from the ROS msg format
        self.bridge_coords = CvBridge()
        ## Pixel coordinates of the canvas corners
self.coords = np.array([])
    def callback(self, msg):
        '''!
        Compute and publish the average canvas depth
        @return: null
        '''
        depths = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding = "16UC1")
        if self.coords.size:
            self.average = self.calc_average(depths, self.coords)
            msg2 = Float32()
            msg2.data = self.average
            self.pub_canvas_average_depth.publish(msg2)
    ## Receive the pixel coordinates of the canvas corners
    def get_coords(self, msg):
        self.coords = self.bridge_coords.imgmsg_to_cv2(msg, desired_encoding = "16UC1")
    ## Compute the average depth
    def calc_average(self, data, coords):
        # stub value; the real average over the corner coordinates still needs to be computed
        average = np.round(860.0)
        return average
def main(args = None):
    '''!
    Launch the node
    @param args: arguments
    @return: null
    '''
rclpy.init(args = args)
calculator = Depth_calculator()
rclpy.spin(calculator)
calculator.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
from unittest import TestCase
from kaggle_runner.logs import NBatchProgBarLogger
class TestLogs(TestCase):
@classmethod
def setup_class(cls):
"prepare a dummy model for logger to show"
# TODO set up a dummy model / or just use imagenette
cls.model = None
@classmethod
def teardown_class(cls):
print("teardown_class called once for the class")
def test_NBatchProgBarLogger(self):
assert self.model is not None
self.BS = 100
self.model.fit(
[X_train_aux, X_train_main],
Y_train,
            batch_size=self.BS,
verbose=0,
callbacks=[NBatchProgBarLogger()],
)
|
"""
Description
===========
A Ducci sequence is a sequence of n-tuples of integers, sometimes known as "the
Diffy game", because it is based on sequences. Given an n-tuple of integers
(a_1, a_2, ... a_n) the next n-tuple in the sequence is formed by taking the
absolute differences of neighboring integers. Ducci sequences are named after
Enrico Ducci (1864-1940), the Italian mathematician credited with their
discovery.
Some Ducci sequences descend to all zeroes or a repeating sequence. An example
is (1,2,1,2,1,0) -> (1,1,1,1,1,1) -> (0,0,0,0,0,0).
Additional information about the Ducci sequence can be found in this writeup
from Greg Brockman, a mathematics student.
It's kind of fun to play with the code once you get it working and to try and
find sequences that never collapse and repeat. One I found was (2, 4126087,
4126085), it just goes on and on.
It's also kind of fun to plot these in 3 dimensions. Here is an example of the
sequence "(129,12,155,772,63,4)" turned into 2 sets of lines (x1, y1, z1, x2,
y2, z2).
Input Description
-----------------
You'll be given an n-tuple, one per line. Example:
(0, 653, 1854, 4063)
Output Description
------------------
Your program should emit the number of steps taken to get to either an all 0
tuple or when it enters a stable repeating pattern. Example:
[0; 653; 1854; 4063]
[653; 1201; 2209; 4063]
[548; 1008; 1854; 3410]
[460; 846; 1556; 2862]
[386; 710; 1306; 2402]
[324; 596; 1096; 2016]
[272; 500; 920; 1692]
[228; 420; 772; 1420]
[192; 352; 648; 1192]
[160; 296; 544; 1000]
[136; 248; 456; 840]
[112; 208; 384; 704]
[96; 176; 320; 592]
[80; 144; 272; 496]
[64; 128; 224; 416]
[64; 96; 192; 352]
[32; 96; 160; 288]
[64; 64; 128; 256]
[0; 64; 128; 192]
[64; 64; 64; 192]
[0; 0; 128; 128]
[0; 128; 0; 128]
[128; 128; 128; 128]
[0; 0; 0; 0]
24 steps
Challenge Input
---------------
(1, 5, 7, 9, 9)
(1, 2, 1, 2, 1, 0)
(10, 12, 41, 62, 31, 50)
(10, 12, 41, 62, 31)
"""
import sys
def ducci_step(seq):
seq = tuple(abs(seq[i] - seq[i-1]) for i in range(len(seq)))
seq = seq[1:] + seq[0:1]
return seq
def count_iters(seq, verbose=False):
seen = set()
while seq not in seen and set(seq) != {0} and len(seen) < 1000:
seen.add(seq)
if verbose:
print(f"{len(seen)}: {seq}")
seq = ducci_step(seq)
return len(seen) + 1
def test(verbose=False):
assert count_iters((0, 653, 1854, 4063), verbose=verbose) == 24
return
def main(verbose=False):
while True:
line = sys.stdin.readline()
if not line:
break
seq = line.strip() # '(1, 2, 3, 4)'
seq = seq[1:-1] # '1, 2, 3, 4'
        seq = seq.split(',')       # ['1', ' 2', ' 3', ' 4']
seq = tuple(map(int, seq)) # (1, 2, 3, 4)
print("{} steps".format(count_iters(seq, verbose=verbose)))
return 1
if __name__ == "__main__":
test()
main()
|