text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""Operators that integrates with Google Cloud Build service."""
import json
import re
import warnings
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Tuple, Union
from urllib.parse import unquote, urlparse
import yaml
from google.api_core.retry import Retry
from google.cloud.devtools.cloudbuild_v1.types import Build, BuildTrigger, RepoSource
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.cloud_build import CloudBuildHook
if TYPE_CHECKING:
from airflow.utils.context import Context
REGEX_REPO_PATH = re.compile(r"^/(?P<project_id>[^/]+)/(?P<repo_name>[^/]+)[\+/]*(?P<branch_name>[^:]+)?")
class CloudBuildCancelBuildOperator(BaseOperator):
    """
    Cancels a build in progress.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildCancelBuildOperator`

    :param id_: The ID of the build.
    :type id_: str
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: Optional[str]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dict
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = ("project_id", "id_", "gcp_conn_id", "impersonation_chain")

    def __init__(
        self,
        *,
        id_: str,
        project_id: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.id_ = id_
        self.project_id = project_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """Cancel the build and return the cancelled build as a dict."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        result = hook.cancel_build(
            id_=self.id_,
            project_id=self.project_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        # Convert the proto message to a JSON-serializable dict for XCom.
        return Build.to_dict(result)
class CloudBuildCreateBuildOperator(BaseOperator):
    """
    Starts a build with the specified configuration.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildCreateBuildOperator`

    :param build: Optional, the build resource to create. If a dict is provided, it must be of
        the same form as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.Build`.
        Only either build or body should be passed.
    :type build: Optional[Union[dict, `google.cloud.devtools.cloudbuild_v1.types.Build`]]
    :param body: (Deprecated) The build resource to create.
        This parameter has been deprecated. You should pass the build parameter instead.
    :type body: Optional[dict]
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: Optional[str]
    :param wait: Optional, wait for operation to finish.
    :type wait: Optional[bool]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dict
    """

    template_fields: Sequence[str] = ("project_id", "build", "body", "gcp_conn_id", "impersonation_chain")

    def __init__(
        self,
        *,
        build: Optional[Union[Dict, Build]] = None,
        body: Optional[Dict] = None,
        project_id: Optional[str] = None,
        wait: bool = True,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.wait = wait
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain
        self.body = body
        # `body` is deprecated in favor of `build`; exactly one must be given.
        if body and build:
            raise AirflowException("You should not pass both build or body parameters. Both are set.")
        if body is not None:
            warnings.warn(
                "The body parameter has been deprecated. You should pass body using the build parameter.",
                DeprecationWarning,
                stacklevel=4,
            )
            actual_build = body
        else:
            if build is None:
                raise AirflowException("You should pass one of the build or body parameters. Both are None")
            actual_build = build
        self.build = actual_build
        # Not template fields to keep original value
        self.build_raw = actual_build

    def prepare_template(self) -> None:
        """If ``build`` was given as a file path, load its YAML/JSON content into a dict."""
        # if no file is specified, skip
        if not isinstance(self.build_raw, str):
            return
        with open(self.build_raw) as file:
            if any(self.build_raw.endswith(ext) for ext in ['.yaml', '.yml']):
                # safe_load: build files may come from user-controlled sources,
                # so never allow arbitrary-object YAML tags.
                self.build = yaml.safe_load(file.read())
            if self.build_raw.endswith('.json'):
                self.build = json.loads(file.read())

    def execute(self, context: 'Context'):
        """Create the build (optionally waiting for completion) and return it as a dict."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        # Normalize/validate the source section (URL form, XOR of source types).
        build = BuildProcessor(build=self.build).process_body()
        result = hook.create_build(
            build=build,
            project_id=self.project_id,
            wait=self.wait,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return Build.to_dict(result)
class CloudBuildCreateBuildTriggerOperator(BaseOperator):
    """
    Creates a new BuildTrigger.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildCreateBuildTriggerOperator`

    :param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
        as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
    :type trigger: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`]
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: Optional[str]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dict
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = ("project_id", "trigger", "gcp_conn_id", "impersonation_chain")

    def __init__(
        self,
        *,
        trigger: Union[dict, BuildTrigger],
        project_id: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.trigger = trigger
        self.project_id = project_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """Create the trigger and return it as a dict."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        result = hook.create_build_trigger(
            trigger=self.trigger,
            project_id=self.project_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return BuildTrigger.to_dict(result)
class CloudBuildDeleteBuildTriggerOperator(BaseOperator):
    """
    Deletes a BuildTrigger by its project ID and trigger ID.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildDeleteBuildTriggerOperator`

    :param trigger_id: The ID of the BuildTrigger to delete.
    :type trigger_id: str
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: Optional[str]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = ("project_id", "trigger_id", "gcp_conn_id", "impersonation_chain")

    def __init__(
        self,
        *,
        trigger_id: str,
        project_id: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.trigger_id = trigger_id
        self.project_id = project_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """Delete the trigger; returns nothing."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        hook.delete_build_trigger(
            trigger_id=self.trigger_id,
            project_id=self.project_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
class CloudBuildGetBuildOperator(BaseOperator):
    """
    Returns information about a previously requested build.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildGetBuildOperator`

    :param id_: The ID of the build.
    :type id_: str
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: Optional[str]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dict
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = ("project_id", "id_", "gcp_conn_id", "impersonation_chain")

    def __init__(
        self,
        *,
        id_: str,
        project_id: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.id_ = id_
        self.project_id = project_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """Fetch the build and return it as a dict."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        result = hook.get_build(
            id_=self.id_,
            project_id=self.project_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return Build.to_dict(result)
class CloudBuildGetBuildTriggerOperator(BaseOperator):
    """
    Returns information about a BuildTrigger.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildGetBuildTriggerOperator`

    :param trigger_id: The ID of the BuildTrigger to get.
    :type trigger_id: str
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: Optional[str]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dict
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = ("project_id", "trigger_id", "gcp_conn_id", "impersonation_chain")

    def __init__(
        self,
        *,
        trigger_id: str,
        project_id: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.trigger_id = trigger_id
        self.project_id = project_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """Fetch the trigger and return it as a dict."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        result = hook.get_build_trigger(
            trigger_id=self.trigger_id,
            project_id=self.project_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return BuildTrigger.to_dict(result)
class CloudBuildListBuildTriggersOperator(BaseOperator):
    """
    Lists existing BuildTriggers.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildListBuildTriggersOperator`

    :param location: The location of the project.
    :type location: string
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: Optional[str]
    :param page_size: Optional, number of results to return in the list.
    :type page_size: Optional[int]
    :param page_token: Optional, token to provide to skip to a particular spot in the list.
    :type page_token: Optional[str]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: List[dict]
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = ("location", "project_id", "gcp_conn_id", "impersonation_chain")

    def __init__(
        self,
        *,
        location: str,
        project_id: Optional[str] = None,
        page_size: Optional[int] = None,
        page_token: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.location = location
        self.project_id = project_id
        self.page_size = page_size
        self.page_token = page_token
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """List triggers and return them as a list of dicts."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        results = hook.list_build_triggers(
            project_id=self.project_id,
            location=self.location,
            page_size=self.page_size,
            page_token=self.page_token,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return [BuildTrigger.to_dict(result) for result in results]
class CloudBuildListBuildsOperator(BaseOperator):
    """
    Lists previously requested builds.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildListBuildsOperator`

    :param location: The location of the project.
    :type location: string
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: str
    :param page_size: Optional, number of results to return in the list.
    :type page_size: Optional[int]
    :param filter_: Optional, the raw filter text to constrain the results.
    :type filter_: Optional[str]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: List[dict]
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = ("location", "project_id", "gcp_conn_id", "impersonation_chain")

    def __init__(
        self,
        *,
        location: str,
        project_id: Optional[str] = None,
        page_size: Optional[int] = None,
        filter_: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.location = location
        self.project_id = project_id
        self.page_size = page_size
        self.filter_ = filter_
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """List builds and return them as a list of dicts."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        results = hook.list_builds(
            project_id=self.project_id,
            location=self.location,
            page_size=self.page_size,
            filter_=self.filter_,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return [Build.to_dict(result) for result in results]
class CloudBuildRetryBuildOperator(BaseOperator):
    """
    Creates a new build based on the specified build. This method creates a new build
    using the original build request, which may or may not result in an identical build.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildRetryBuildOperator`

    :param id_: Build ID of the original build.
    :type id_: str
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: str
    :param wait: Optional, wait for operation to finish.
    :type wait: Optional[bool]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dict
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = ("project_id", "id_", "gcp_conn_id", "impersonation_chain")

    def __init__(
        self,
        *,
        id_: str,
        project_id: Optional[str] = None,
        wait: bool = True,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.id_ = id_
        self.project_id = project_id
        self.wait = wait
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """Retry the build (optionally waiting for completion) and return the new build as a dict."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        result = hook.retry_build(
            id_=self.id_,
            project_id=self.project_id,
            wait=self.wait,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return Build.to_dict(result)
class CloudBuildRunBuildTriggerOperator(BaseOperator):
    """
    Runs a BuildTrigger at a particular source revision.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildRunBuildTriggerOperator`

    :param trigger_id: The ID of the trigger.
    :type trigger_id: str
    :param source: Source to build against this trigger. If a dict is provided, it must be of the same form
        as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.RepoSource`
    :type source: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.RepoSource`]
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: str
    :param wait: Optional, wait for operation to finish.
    :type wait: Optional[bool]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dict
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = (
        "project_id",
        "trigger_id",
        "source",
        "gcp_conn_id",
        "impersonation_chain",
    )

    def __init__(
        self,
        *,
        trigger_id: str,
        source: Union[dict, RepoSource],
        project_id: Optional[str] = None,
        wait: bool = True,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.trigger_id = trigger_id
        self.source = source
        self.project_id = project_id
        self.wait = wait
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """Run the trigger against ``source`` and return the resulting build as a dict."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        result = hook.run_build_trigger(
            trigger_id=self.trigger_id,
            source=self.source,
            project_id=self.project_id,
            wait=self.wait,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return Build.to_dict(result)
class CloudBuildUpdateBuildTriggerOperator(BaseOperator):
    """
    Updates a BuildTrigger by its project ID and trigger ID.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudBuildUpdateBuildTriggerOperator`

    :param trigger_id: The ID of the trigger.
    :type trigger_id: str
    :param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
        as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
    :type trigger: Union[dict, `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`]
    :param project_id: Optional, Google Cloud Project project_id where the function belongs.
        If set to None or missing, the default project_id from the GCP connection is used.
    :type project_id: Optional[str]
    :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
        will not be retried.
    :type retry: Optional[Retry]
    :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :type timeout: Optional[float]
    :param metadata: Optional, additional metadata that is provided to the method.
    :type metadata: Optional[Sequence[Tuple[str, str]]]
    :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: Optional[str]
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dict
    """

    # impersonation_chain is documented as "(templated)" above, so it must be
    # listed here for Jinja templating to actually apply to it.
    template_fields: Sequence[str] = (
        "project_id",
        "trigger_id",
        "trigger",
        "gcp_conn_id",
        "impersonation_chain",
    )

    def __init__(
        self,
        *,
        trigger_id: str,
        trigger: Union[dict, BuildTrigger],
        project_id: Optional[str] = None,
        retry: Optional[Retry] = None,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.trigger_id = trigger_id
        self.trigger = trigger
        self.project_id = project_id
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context'):
        """Update the trigger and return the updated trigger as a dict."""
        hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        result = hook.update_build_trigger(
            trigger_id=self.trigger_id,
            trigger=self.trigger,
            project_id=self.project_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        return BuildTrigger.to_dict(result)
class BuildProcessor:
    """
    Processes build configurations to add additional functionality to support the use of operators.

    The following improvements are made:

    * It is required to provide the source and only one type can be given,
    * It is possible to provide the source as the URL address instead dict.

    :param build: The request body of the build.
        See: https://cloud.google.com/cloud-build/docs/api/reference/rest/Shared.Types/Build
    :type build: Union[Dict, Build]
    """

    def __init__(self, build: Union[Dict, Build]) -> None:
        # Work on a private copy so the caller's build body is never mutated.
        self.build = deepcopy(build)

    def _verify_source(self) -> None:
        """Raise unless exactly one of storage_source / repo_source is present."""
        has_storage = "storage_source" in self.build["source"]
        has_repo = "repo_source" in self.build["source"]
        if has_storage == has_repo:  # both given or both missing -> ambiguous
            raise AirflowException(
                "The source could not be determined. Please choose one data source from: "
                "storage_source and repo_source."
            )

    def _reformat_source(self) -> None:
        """Normalize URL-style sources into their dict representations."""
        self._reformat_repo_source()
        self._reformat_storage_source()

    def _reformat_repo_source(self) -> None:
        # Only rewrite when the user supplied a URL string instead of a dict.
        if "repo_source" not in self.build["source"]:
            return
        candidate = self.build["source"]["repo_source"]
        if isinstance(candidate, str):
            self.build["source"]["repo_source"] = self._convert_repo_url_to_dict(candidate)

    def _reformat_storage_source(self) -> None:
        # Only rewrite when the user supplied a URL string instead of a dict.
        if "storage_source" not in self.build["source"]:
            return
        candidate = self.build["source"]["storage_source"]
        if isinstance(candidate, str):
            self.build["source"]["storage_source"] = self._convert_storage_url_to_dict(candidate)

    def process_body(self) -> Build:
        """
        Processes the body passed in the constructor

        :return: the body.
        :rtype: `google.cloud.devtools.cloudbuild_v1.types.Build`
        """
        # Source is optional; validate/normalize it only when present.
        if 'source' in self.build:
            self._verify_source()
            self._reformat_source()
        return Build(self.build)

    @staticmethod
    def _convert_repo_url_to_dict(source: str) -> Dict[str, Any]:
        """
        Convert url to repository in Google Cloud Source to a format supported by the API

        Example valid input:

        .. code-block:: none

            https://source.cloud.google.com/airflow-project/airflow-repo/+/branch-name:

        """
        parsed = urlparse(source)
        path_match = REGEX_REPO_PATH.search(parsed.path)
        is_valid = (
            parsed.scheme == "https"
            and parsed.hostname == "source.cloud.google.com"
            and path_match
        )
        if not is_valid:
            raise AirflowException(
                "Invalid URL. You must pass the URL in the format: "
                "https://source.cloud.google.com/airflow-project/airflow-repo/+/branch-name:"
            )
        branch = path_match.group("branch_name")
        return {
            "project_id": unquote(path_match.group("project_id")),
            "repo_name": unquote(path_match.group("repo_name")),
            # A missing branch segment falls back to the default branch.
            "branch_name": unquote(branch) if branch else "master",
        }

    @staticmethod
    def _convert_storage_url_to_dict(storage_url: str) -> Dict[str, Any]:
        """
        Convert url to object in Google Cloud Storage to a format supported by the API

        Example valid input:

        .. code-block:: none

            gs://bucket-name/object-name.tar.gz

        """
        parsed = urlparse(storage_url)
        if parsed.scheme != "gs" or not parsed.hostname or parsed.path in ("", "/"):
            raise AirflowException(
                "Invalid URL. You must pass the URL in the format: "
                "gs://bucket-name/object-name.tar.gz#24565443"
            )
        storage_dict: Dict[str, Any] = {
            "bucket": parsed.hostname,
            # Drop the leading slash of the path to get the object name.
            "object_": parsed.path[1:],
        }
        # An optional URL fragment encodes the object generation number.
        if parsed.fragment:
            storage_dict["generation"] = int(parsed.fragment)
        return storage_dict
|
{
"content_hash": "4af1e254bff1dbd7d7f115ca85098dcf",
"timestamp": "",
"source": "github",
"line_count": 1011,
"max_line_length": 109,
"avg_line_length": 43.018793273986155,
"alnum_prop": 0.6573162880529753,
"repo_name": "mistercrunch/airflow",
"id": "9ca36c7231c437a1f2a8e0b82ba74ca3695deb43",
"size": "44280",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/operators/cloud_build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
import project_management.api
# REST endpoints for projects: list, list-all, retrieve, edit and
# (un)tracking, keyed by the project slug.
api = project_management.api
slug_pattern = r'(?P<slug>[\w-]+)'

urlpatterns = [
    url(r'^$', api.ProjectListAPIView.as_view()),
    url(r'^all$', api.ProjectListAllAPIView.as_view()),
    url(r'^detail/' + slug_pattern + '$', api.ProjectRetrieveAPIView.as_view()),
    url(r'^edit/' + slug_pattern + '$', api.ProjectUpdateAPIView.as_view()),
    url(r'^untrack/' + slug_pattern + '$', api.ProjectUntrackAPIView.as_view()),
    url(r'^track/' + slug_pattern + '$', api.ProjectTrackAPIView.as_view()),
]
|
{
"content_hash": "012f82d8f392c9637292f84c18733818",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 95,
"avg_line_length": 44.13333333333333,
"alnum_prop": 0.6933534743202417,
"repo_name": "wbg-optronix-lab/emergence-lab",
"id": "50313dac975fc393f627cc22a442773797947706",
"size": "686",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project_management/urls/api/project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3909"
},
{
"name": "HTML",
"bytes": "239826"
},
{
"name": "JavaScript",
"bytes": "16743"
},
{
"name": "Python",
"bytes": "513490"
}
],
"symlink_target": ""
}
|
from .code import ConvolutionalCode
from .naive_ml import NaiveMLDecoder
from .stack import StackDecoder
from .node import Node
|
{
"content_hash": "f8229b7b6d728a0beb024d3015ff466a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 36,
"avg_line_length": 32,
"alnum_prop": 0.8359375,
"repo_name": "eliasrg/SURF2017",
"id": "eb04a1db26aed07dc4c01f7436dca5ddb36acfbb",
"size": "205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/separate/coding/convolutional/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "98455"
}
],
"symlink_target": ""
}
|
"""Python Challenge problem 1: decode a shift-2 Caesar cipher and apply it to the URL."""
import urllib.request
import re
import string

# Get the input data from the problem webpage
url = 'http://www.pythonchallenge.com/pc/def/map.html'
with urllib.request.urlopen(url) as response:
    html = response.read().decode('utf-8')
data = re.search(r'g.*spj\.', html)  # raw string avoids invalid-escape warnings
data = data.group(0)
print('Input data:\n', data, '\n')

# Image hint suggests a shift cipher with shift=2
alphabet = string.ascii_lowercase
cipher = alphabet[2:] + alphabet[:2]
trans_table = str.maketrans(alphabet, cipher)
decoded = data.translate(trans_table)
print('Decoded input data:\n', decoded, '\n')

# Decrypted message says to apply cipher to url. Trial and error reveals only the end of the url path is targeted
splitUrl = url.split('/')
target = splitUrl[-1]
# Strip the '.html' *suffix*.  The previous rstrip('.html') removed a trailing
# character set ({'.', 'h', 't', 'm', 'l'}) and only worked here by luck.
if target.endswith('.html'):
    target = target[:-len('.html')]
transTarget = target.translate(trans_table)
newUrl = splitUrl[:]
newUrl[-1] = transTarget + '.html'
newUrl = '/'.join(newUrl)
solution = newUrl
print('Solution url:\n', solution, '\n')
|
{
"content_hash": "2b603f1497b210a724af7b4b4abb2855",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 113,
"avg_line_length": 32.6551724137931,
"alnum_prop": 0.7159450897571278,
"repo_name": "medwig/pythonchallenge-solutions",
"id": "2e5bdeb647cfc025727c4fad63e8bc7bd3c5d430",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5466"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``sizesrc`` property of ``choroplethmapbox.hoverlabel.font``."""

    def __init__(
        self,
        plotly_name="sizesrc",
        parent_name="choroplethmapbox.hoverlabel.font",
        **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
{
"content_hash": "ab8dc8f85bb64ea19d20e2a13c0d2a93",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 29.470588235294116,
"alnum_prop": 0.5748502994011976,
"repo_name": "plotly/python-api",
"id": "666d77b1d47cce3caa4b3d531b5abc2da1d522dd",
"size": "501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choroplethmapbox/hoverlabel/font/_sizesrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from contextlib import nullcontext
from copy import deepcopy
import os
import os.path as op
from shutil import copyfile
import re
import numpy as np
from numpy.fft import fft
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal, assert_array_less)
import pytest
from scipy import sparse
from scipy.optimize import fmin_cobyla
from scipy.spatial.distance import cdist
import mne
from mne import (stats, SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, Label, read_source_spaces,
read_evokeds, MixedSourceEstimate, find_events, Epochs,
read_source_estimate, extract_label_time_course,
spatio_temporal_tris_adjacency, stc_near_sensors,
spatio_temporal_src_adjacency, read_cov, EvokedArray,
spatial_inter_hemi_adjacency, read_forward_solution,
spatial_src_adjacency, spatial_tris_adjacency, pick_info,
SourceSpaces, VolVectorSourceEstimate, read_trans, pick_types,
MixedVectorSourceEstimate, setup_volume_source_space,
convert_forward_solution, pick_types_forward,
compute_source_morph, labels_to_stc, scale_mri,
write_source_spaces)
from mne.datasets import testing
from mne.externals.h5io import write_hdf5
from mne.fixes import _get_img_fdata
from mne.io import read_info
from mne.io.constants import FIFF
from mne.morph_map import _make_morph_map_hemi
from mne.source_estimate import grade_to_tris, _get_vol_mask
from mne.source_space import _get_src_nn
from mne.transforms import apply_trans, invert_transform, transform_surface_to
from mne.minimum_norm import (read_inverse_operator, apply_inverse,
apply_inverse_epochs, make_inverse_operator)
from mne.label import read_labels_from_annot, label_sign_flip
from mne.utils import (requires_pandas, requires_sklearn, catch_logging,
requires_h5py, requires_nibabel, requires_version)
from mne.io import read_raw_fif
# Root of the mne-testing-data checkout; tests using these paths are guarded
# by @testing.requires_testing_data so a missing dataset skips rather than fails.
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
# Inverse operators (free orientation oct-6, fixed orientation oct-4).
fname_inv = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_inv_fixed = op.join(
    data_path, 'MEG', 'sample',
    'sample_audvis_trunc-meg-eeg-oct-4-meg-fixed-inv.fif')
# Forward solution and noise covariance.
fname_fwd = op.join(
    data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_cov = op.join(
    data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
# Evoked and raw recordings.
fname_evoked = op.join(data_path, 'MEG', 'sample',
                       'sample_audvis_trunc-ave.fif')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
# Anatomical MRI volumes (subject and fsaverage T1, aseg parcellation).
fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
fname_fs_t1 = op.join(data_path, 'subjects', 'fsaverage', 'mri', 'T1.mgz')
fname_aseg = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
# Source spaces: surface (via fwd files), fsaverage ico-5, oct-4, and volume.
fname_src = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_src_fs = op.join(data_path, 'subjects', 'fsaverage', 'bem',
                       'fsaverage-ico-5-src.fif')
bem_path = op.join(data_path, 'subjects', 'sample', 'bem')
fname_src_3 = op.join(bem_path, 'sample-oct-4-src.fif')
fname_src_vol = op.join(bem_path, 'sample-volume-7mm-src.fif')
# Source estimates: surface stem (reader appends -lh.stc/-rh.stc) and a
# volume w-file, plus the matching volume forward/inverse operators.
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg')
fname_vol = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
fname_vsrc = op.join(data_path, 'MEG', 'sample',
                     'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_inv_vol = op.join(data_path, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-vol-7-meg-inv.fif')
# fNIRS (NIRx NIRScout) recording directory.
fname_nirx = op.join(data_path, 'NIRx', 'nirscout', 'nirx_15_0_recording')
# Fixed seed so random test data is reproducible across runs.
rng = np.random.RandomState(0)
@testing.requires_testing_data
def test_stc_baseline_correction():
    """Test baseline correction for source estimate objects."""
    # surface and volume source estimates
    estimates = [read_source_estimate(fname_stc),
                 read_source_estimate(fname_vol, 'sample')]
    # explicit interval and full-range (None, None) baselines
    baselines = [(0., 0.1), (None, None)]
    for est in estimates:
        all_times = est.times
        for (b_start, b_stop) in baselines:
            # apply baseline correction, then check if it worked
            est = est.apply_baseline(baseline=(b_start, b_stop))
            # falsy start/stop (None or 0.) falls back to the record edges
            lo = b_start or est.times[0]
            hi = b_stop or est.times[-1]
            # index for baseline interval (include boundary latencies)
            lo_idx = np.abs(all_times - lo).argmin()
            hi_idx = np.abs(all_times - hi).argmin() + 1
            # mean over the baseline interval must be ~zero for every source
            baseline_mean = est.data[:, lo_idx:hi_idx].mean(axis=1)
            assert_array_almost_equal(baseline_mean,
                                      np.zeros(baseline_mean.shape[0]))
@testing.requires_testing_data
def test_spatial_inter_hemi_adjacency():
    """Test spatial adjacency between hemispheres."""
    # trivial cases
    # ~zero distance threshold: no inter-hemisphere connections at all
    conn = spatial_inter_hemi_adjacency(fname_src_3, 5e-6)
    assert_equal(conn.data.size, 0)
    # huge threshold: every LH-RH vertex pair is connected
    conn = spatial_inter_hemi_adjacency(fname_src_3, 5e6)
    assert_equal(conn.data.size, np.prod(conn.shape) // 2)
    # actually interesting case (1cm), should be between 2 and 10% of verts
    src = read_source_spaces(fname_src_3)
    conn = spatial_inter_hemi_adjacency(src, 10e-3)
    conn = conn.tocsr()
    n_src = conn.shape[0]
    assert (n_src * 0.02 < conn.data.size < n_src * 0.10)
    # no within-hemisphere entries in either diagonal block
    assert_equal(conn[:src[0]['nuse'], :src[0]['nuse']].data.size, 0)
    assert_equal(conn[-src[1]['nuse']:, -src[1]['nuse']:].data.size, 0)
    # the adjacency matrix must be symmetric
    c = (conn.T + conn) / 2. - conn
    c.eliminate_zeros()
    assert_equal(c.data.size, 0)
    # check locations
    upper_right = conn[:src[0]['nuse'], src[0]['nuse']:].toarray()
    assert_equal(upper_right.sum(), conn.sum() // 2)
    # vertices with cross-hemisphere neighbors should lie only in labels
    # near the midline
    good_labels = ['S_pericallosal', 'Unknown', 'G_and_S_cingul-Mid-Post',
                   'G_cuneus']
    for hi, hemi in enumerate(('lh', 'rh')):
        has_neighbors = src[hi]['vertno'][np.where(np.any(upper_right,
                                                          axis=1 - hi))[0]]
        labels = read_labels_from_annot('sample', 'aparc.a2009s', hemi,
                                        subjects_dir=subjects_dir)
        use_labels = [label.name[:-3] for label in labels
                      if np.in1d(label.vertices, has_neighbors).any()]
        assert (set(use_labels) - set(good_labels) == set())
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_h5py
def test_volume_stc(tmpdir):
    """Test volume STCs."""
    # Round-trip VolSourceEstimate (2D data -> .stc) and
    # VolVectorSourceEstimate (3D data -> .h5) through save/read.
    N = 100
    data = np.arange(N)[:, np.newaxis]
    datas = [data,
             data,
             np.arange(2)[:, np.newaxis],
             np.arange(6).reshape(2, 3, 1)]
    vertno = np.arange(N)
    vertnos = [vertno,
               vertno[:, np.newaxis],
               np.arange(2)[:, np.newaxis],
               np.arange(2)]
    # expected vertices after reading back (reader flattens them)
    vertno_reads = [vertno, vertno, np.arange(2), np.arange(2)]
    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
        if data.ndim in (1, 2):
            stc = VolSourceEstimate(data, [vertno], 0, 1)
            ext = 'stc'
            klass = VolSourceEstimate
        else:
            assert data.ndim == 3
            # vector (3D) estimates are only storable as HDF5
            stc = VolVectorSourceEstimate(data, [vertno], 0, 1)
            ext = 'h5'
            klass = VolVectorSourceEstimate
        fname_temp = tmpdir.join('temp-vl.' + ext)
        stc_new = stc
        n = 3 if ext == 'h5' else 2
        for ii in range(n):
            if ii < 2:
                stc_new.save(fname_temp)
            else:
                # Pass stc.vertices[0], an ndarray, to ensure support for
                # the way we used to write volume STCs
                write_hdf5(
                    str(fname_temp), dict(
                        vertices=stc.vertices[0], data=stc.data,
                        tmin=stc.tmin, tstep=stc.tstep,
                        subject=stc.subject, src_type=stc._src_type),
                    title='mnepython', overwrite=True)
            stc_new = read_source_estimate(fname_temp)
            assert isinstance(stc_new, klass)
            assert_array_equal(vertno_read, stc_new.vertices[0])
            assert_array_almost_equal(stc.data, stc_new.data)
    # now let's actually read a MNE-C processed file
    stc = read_source_estimate(fname_vol, 'sample')
    assert isinstance(stc, VolSourceEstimate)
    assert 'sample' in repr(stc)
    assert ' kB' in repr(stc)
    stc_new = stc
    # unknown file types must be rejected
    pytest.raises(ValueError, stc.save, fname_vol, ftype='whatever')
    # round-trip twice through each supported format
    for ftype in ['w', 'h5']:
        for _ in range(2):
            fname_temp = tmpdir.join('temp-vol.%s' % ftype)
            stc_new.save(fname_temp, ftype=ftype)
            stc_new = read_source_estimate(fname_temp)
            assert (isinstance(stc_new, VolSourceEstimate))
            assert_array_equal(stc.vertices[0], stc_new.vertices[0])
            assert_array_almost_equal(stc.data, stc_new.data)
@requires_nibabel()
@testing.requires_testing_data
def test_stc_as_volume():
    """Test previous volume source estimate morph."""
    import nibabel as nib
    vol_inv = read_inverse_operator(fname_inv_vol)
    # Apply inverse operator
    stc_vol = read_source_estimate(fname_vol, 'sample')
    # with mri_resolution the exported image must match the T1 voxel grid
    img = stc_vol.as_volume(vol_inv['src'], mri_resolution=True,
                            dest='42')
    t1_img = nib.load(fname_t1)
    # always assure nifti and dimensionality
    assert isinstance(img, nib.Nifti1Image)
    assert img.header.get_zooms()[:3] == t1_img.header.get_zooms()[:3]
    # without mri_resolution the image keeps the source-space grid
    img = stc_vol.as_volume(vol_inv['src'], mri_resolution=False)
    assert isinstance(img, nib.Nifti1Image)
    assert img.shape[:3] == vol_inv['src'][0]['shape'][:3]
    # invalid output format is rejected
    with pytest.raises(ValueError, match='Invalid value.*output.*'):
        stc_vol.as_volume(vol_inv['src'], format='42')
@testing.requires_testing_data
@requires_nibabel()
def test_save_vol_stc_as_nifti(tmpdir):
    """Save the stc as a nifti file and export."""
    import nibabel as nib
    src = read_source_spaces(fname_vsrc)
    vol_fname = tmpdir.join('stc.nii.gz')
    # now let's actually read a MNE-C processed file
    stc = read_source_estimate(fname_vol, 'sample')
    assert (isinstance(stc, VolSourceEstimate))
    # source-space resolution: image grid matches the source space shape
    stc.save_as_volume(vol_fname, src,
                       dest='surf', mri_resolution=False)
    with pytest.warns(None):  # nib<->numpy
        img = nib.load(str(vol_fname))
    assert (img.shape == src[0]['shape'] + (len(stc.times),))
    with pytest.warns(None):  # nib<->numpy
        t1_img = nib.load(fname_t1)
    # MRI resolution: grid and affine match the subject T1
    # (writes to the same tmpdir path as vol_fname, overwriting it)
    stc.save_as_volume(tmpdir.join('stc.nii.gz'), src,
                       dest='mri', mri_resolution=True)
    with pytest.warns(None):  # nib<->numpy
        img = nib.load(str(vol_fname))
    assert (img.shape == t1_img.shape + (len(stc.times),))
    assert_allclose(img.affine, t1_img.affine, atol=1e-5)
    # export without saving
    img = stc.as_volume(src, dest='mri', mri_resolution=True)
    assert (img.shape == t1_img.shape + (len(stc.times),))
    assert_allclose(img.affine, t1_img.affine, atol=1e-5)
    # a stacked two-source-space volume estimate exports as well
    src = SourceSpaces([src[0], src[0]])
    stc = VolSourceEstimate(np.r_[stc.data, stc.data],
                            [stc.vertices[0], stc.vertices[0]],
                            tmin=stc.tmin, tstep=stc.tstep, subject='sample')
    img = stc.as_volume(src, dest='mri', mri_resolution=False)
    assert (img.shape == src[0]['shape'] + (len(stc.times),))
@testing.requires_testing_data
def test_expand():
    """Test stc expansion."""
    stc_ = read_source_estimate(fname_stc, 'sample')
    # vector version with the same geometry (all-zero data)
    vec_stc_ = VectorSourceEstimate(np.zeros((stc_.data.shape[0], 3,
                                              stc_.data.shape[1])),
                                    stc_.vertices, stc_.tmin, stc_.tstep,
                                    stc_.subject)
    for stc in [stc_, vec_stc_]:
        assert ('sample' in repr(stc))
        labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
                                           subjects_dir=subjects_dir)
        new_label = labels_lh[0] + labels_lh[1]
        stc_limited = stc.in_label(new_label)
        stc_new = stc_limited.copy()
        stc_new.data.fill(0)
        # summing per-label restrictions, expanded back onto the combined
        # vertex set, must be well-defined
        for label in labels_lh[:2]:
            stc_new += stc.in_label(label).expand(stc_limited.vertices)
        # expand() requires a list of per-hemisphere vertex arrays (len 2)
        pytest.raises(TypeError, stc_new.expand, stc_limited.vertices[0])
        pytest.raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
        # make sure we can't add unless vertno agree
        pytest.raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
def _fake_stc(n_time=10, is_complex=False):
    """Create a deterministic 100-vertex SourceEstimate for testing.

    Parameters: n_time gives the number of samples; is_complex casts the
    data to complex dtype.
    """
    np.random.seed(7)
    verts = [np.arange(10), np.arange(90)]
    data = np.random.rand(100, n_time)
    if is_complex:
        # astype() returns a new array; the previous code discarded the
        # result, so is_complex=True silently produced real-valued data.
        data = data.astype(complex)
    return SourceEstimate(data, verts, 0, 1e-1, 'foo')
def _fake_vec_stc(n_time=10, is_complex=False):
    """Create a deterministic 100-vertex VectorSourceEstimate for testing.

    Parameters: n_time gives the number of samples; is_complex casts the
    data to complex dtype.
    """
    np.random.seed(7)
    verts = [np.arange(10), np.arange(90)]
    data = np.random.rand(100, 3, n_time)
    if is_complex:
        # astype() returns a new array; the previous code discarded the
        # result, so is_complex=True silently produced real-valued data.
        data = data.astype(complex)
    return VectorSourceEstimate(data, verts, 0, 1e-1,
                                'foo')
@testing.requires_testing_data
def test_stc_snr():
    """Test computing SNR from a STC."""
    inv = read_inverse_operator(fname_inv_fixed)
    fwd = read_forward_solution(fname_fwd)
    cov = read_cov(fname_cov)
    evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0].crop(0, 0.01)
    stc = apply_inverse(evoked, inv)
    assert (stc.data < 0).any()
    # dSPM output is not in nAm, so SNR estimation should warn
    with pytest.warns(RuntimeWarning, match='nAm'):
        stc.estimate_snr(evoked.info, fwd, cov)  # dSPM
    # taking abs() discards orientation, triggering the free-ori warning
    with pytest.warns(RuntimeWarning, match='free ori'):
        abs(stc).estimate_snr(evoked.info, fwd, cov)
    stc = apply_inverse(evoked, inv, method='MNE')
    snr = stc.estimate_snr(evoked.info, fwd, cov)
    assert_allclose(snr.times, evoked.times)
    snr = snr.data
    # SNR (dB) should fall in a plausible range for this data
    assert snr.max() < -10
    assert snr.min() > -120
def test_stc_attributes():
    """Test STC attributes."""
    stc = _fake_stc(n_time=10)
    vec_stc = _fake_vec_stc(n_time=10)
    # .times must be derived from tmin/tstep and the data length
    n_times = len(stc.times)
    assert_equal(stc._data.shape[-1], n_times)
    assert_array_equal(stc.times, stc.tmin + np.arange(n_times) * stc.tstep)
    assert_array_almost_equal(
        stc.times, [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    # helper: in-place mutation of the times array (should raise)
    def attempt_times_mutation(stc):
        stc.times -= 1
    # helper: attribute assignment wrapper for use with pytest.raises
    def attempt_assignment(stc, attr, val):
        setattr(stc, attr, val)
    # .times is read-only
    pytest.raises(ValueError, attempt_times_mutation, stc)
    pytest.raises(ValueError, attempt_assignment, stc, 'times', [1])
    # Changing .tmin or .tstep re-computes .times
    stc.tmin = 1
    assert (type(stc.tmin) == float)
    assert_array_almost_equal(
        stc.times, [1., 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9])
    stc.tstep = 1
    assert (type(stc.tstep) == float)
    assert_array_almost_equal(
        stc.times, [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
    # tstep <= 0 is not allowed
    pytest.raises(ValueError, attempt_assignment, stc, 'tstep', 0)
    pytest.raises(ValueError, attempt_assignment, stc, 'tstep', -1)
    # Changing .data re-computes .times
    stc.data = np.random.rand(100, 5)
    assert_array_almost_equal(
        stc.times, [1., 2., 3., 4., 5.])
    # .data must match the number of vertices
    pytest.raises(ValueError, attempt_assignment, stc, 'data', [[1]])
    pytest.raises(ValueError, attempt_assignment, stc, 'data', None)
    # .data must match number of dimensions
    pytest.raises(ValueError, attempt_assignment, stc, 'data', np.arange(100))
    pytest.raises(ValueError, attempt_assignment, vec_stc, 'data',
                  [np.arange(100)])
    pytest.raises(ValueError, attempt_assignment, vec_stc, 'data',
                  [[[np.arange(100)]]])
    # .shape attribute must also work when ._data is None
    # (kernel + sensor data representation, data computed lazily)
    stc._kernel = np.zeros((2, 2))
    stc._sens_data = np.zeros((2, 3))
    stc._data = None
    assert_equal(stc.shape, (2, 3))
    # bad size of data
    stc = _fake_stc()
    data = stc.data[:, np.newaxis, :]
    with pytest.raises(ValueError, match='2 dimensions for SourceEstimate'):
        SourceEstimate(data, stc.vertices, 0, 1)
    # 1D data is promoted to a single-time-point 2D array
    stc = SourceEstimate(data[:, 0, 0], stc.vertices, 0, 1)
    assert stc.data.shape == (len(data), 1)
def test_io_stc(tmpdir):
    """Test IO for STC files."""
    stc_orig = _fake_stc()
    out_path = tmpdir.join("tmp.stc")
    stc_orig.save(out_path)
    stc_read = read_source_estimate(out_path)
    # data, timing, and vertices must survive the round trip
    assert_array_almost_equal(stc_orig.data, stc_read.data)
    assert_array_almost_equal(stc_orig.tmin, stc_read.tmin)
    assert_equal(len(stc_orig.vertices), len(stc_read.vertices))
    for verts_orig, verts_read in zip(stc_orig.vertices, stc_read.vertices):
        assert_array_almost_equal(verts_orig, verts_read)
    assert_array_almost_equal(stc_orig.tstep, stc_read.tstep)
    # test warning for complex data
    stc_read.data = stc_read.data.astype(np.complex128)
    with pytest.raises(ValueError, match='Cannot save complex-valued STC'):
        stc_read.save(tmpdir.join('complex.stc'))
@requires_h5py
@pytest.mark.parametrize('is_complex', (True, False))
@pytest.mark.parametrize('vector', (True, False))
def test_io_stc_h5(tmpdir, is_complex, vector):
    """Test IO for STC files using HDF5."""
    if vector:
        stc = _fake_vec_stc(is_complex=is_complex)
    else:
        stc = _fake_stc(is_complex=is_complex)
    # only 'h5' is a valid ftype here
    pytest.raises(ValueError, stc.save, tmpdir.join('tmp'),
                  ftype='foo')
    out_name = tmpdir.join('tmp')
    stc.save(out_name, ftype='h5')
    stc.save(out_name, ftype='h5')  # test overwrite
    # the reader should resolve the file under several name variants
    stc3 = read_source_estimate(out_name)
    stc4 = read_source_estimate(out_name + '-stc')
    stc5 = read_source_estimate(out_name + '-stc.h5')
    # subject mismatch with the stored file must raise
    pytest.raises(RuntimeError, read_source_estimate, out_name,
                  subject='bar')
    for stc_new in stc3, stc4, stc5:
        assert_equal(stc_new.subject, stc.subject)
        assert_array_equal(stc_new.data, stc.data)
        assert_array_equal(stc_new.tmin, stc.tmin)
        assert_array_equal(stc_new.tstep, stc.tstep)
        assert_equal(len(stc_new.vertices), len(stc.vertices))
        for v1, v2 in zip(stc_new.vertices, stc.vertices):
            assert_array_equal(v1, v2)
def test_io_w(tmpdir):
    """Test IO for w files."""
    # w files hold a single time point
    stc = _fake_stc(n_time=1)
    first_fname = tmpdir.join('fake')
    stc.save(first_fname, ftype='w')
    first = read_source_estimate(first_fname)
    # write what we just read, then read it back again
    first.save(tmpdir.join('tmp'), ftype='w')
    second = read_source_estimate(tmpdir.join('tmp-lh.w'))
    assert_array_almost_equal(first.data, second.data)
    assert_array_almost_equal(first.lh_vertno, second.lh_vertno)
    assert_array_almost_equal(first.rh_vertno, second.rh_vertno)
def test_stc_arithmetic():
    """Test arithmetic for STC files."""
    # run the same chain of operations on plain ndarrays and on
    # (Vector)SourceEstimates; results must agree elementwise
    stc = _fake_stc()
    data = stc.data.copy()
    vec_stc = _fake_vec_stc()
    vec_data = vec_stc.data.copy()
    out = list()
    for a in [data, stc, vec_data, vec_stc]:
        a = a + a * 3 + 3 * a - a ** 2 / 2
        a += a
        a -= a
        # a / a produces NaN where a == 0; silence the invalid warning
        with np.errstate(invalid='ignore'):
            a /= 2 * a
        a *= -a
        a += 2
        a -= 1
        a *= -1
        a /= 2
        b = 2 + a
        b = 2 - a
        b = +a
        assert_array_equal(b.data, a.data)
        with np.errstate(invalid='ignore'):
            a **= 3
        out.append(a)
    # ndarray result must equal the corresponding STC's data
    assert_array_equal(out[0], out[1].data)
    assert_array_equal(out[2], out[3].data)
    # sqrt/abs mirror the numpy equivalents
    assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
    assert_array_equal(vec_stc.sqrt().data, np.sqrt(vec_stc.data))
    assert_array_equal(abs(stc).data, abs(stc.data))
    assert_array_equal(abs(vec_stc).data, abs(vec_stc.data))
    # sum/mean collapse the time axis, keeping a singleton dimension
    stc_sum = stc.sum()
    assert_array_equal(stc_sum.data, stc.data.sum(1, keepdims=True))
    stc_mean = stc.mean()
    assert_array_equal(stc_mean.data, stc.data.mean(1, keepdims=True))
    vec_stc_mean = vec_stc.mean()
    assert_array_equal(vec_stc_mean.data, vec_stc.data.mean(2, keepdims=True))
@pytest.mark.slowtest
@testing.requires_testing_data
def test_stc_methods():
    """Test stc methods lh_data, rh_data, bin(), resample()."""
    stc_ = read_source_estimate(fname_stc)
    # Make a vector version of the above source estimate
    # (original data on the X axis, zeros on Y and Z)
    x = stc_.data[:, np.newaxis, :]
    yz = np.zeros((x.shape[0], 2, x.shape[2]))
    vec_stc_ = VectorSourceEstimate(
        np.concatenate((x, yz), 1),
        stc_.vertices, stc_.tmin, stc_.tstep, stc_.subject
    )
    for stc in [stc_, vec_stc_]:
        # lh_data / rh_data
        assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
        assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])
        # bin
        binned = stc.bin(.12)
        a = np.mean(stc.data[..., :np.searchsorted(stc.times, .12)], axis=-1)
        assert_array_equal(a, binned.data[..., 0])
    stc = read_source_estimate(fname_stc)
    stc.subject = 'sample'
    label_lh = read_labels_from_annot('sample', 'aparc', 'lh',
                                      subjects_dir=subjects_dir)[0]
    label_rh = read_labels_from_annot('sample', 'aparc', 'rh',
                                      subjects_dir=subjects_dir)[0]
    label_both = label_lh + label_rh
    # restricting to a label keeps exactly the label's used vertices
    for label in (label_lh, label_rh, label_both):
        assert (isinstance(stc.shape, tuple) and len(stc.shape) == 2)
        stc_label = stc.in_label(label)
        if label.hemi != 'both':
            if label.hemi == 'lh':
                verts = stc_label.vertices[0]
            else:  # label.hemi == 'rh':
                verts = stc_label.vertices[1]
            n_vertices_used = len(label.get_vertices_used(verts))
            assert_equal(len(stc_label.data), n_vertices_used)
    # restricting to one hemisphere then the other must fail
    stc_lh = stc.in_label(label_lh)
    pytest.raises(ValueError, stc_lh.in_label, label_rh)
    # subject mismatch between stc and label must fail
    label_lh.subject = 'foo'
    pytest.raises(RuntimeError, stc.in_label, label_lh)
    # resampling up then back down should approximately round-trip
    stc_new = deepcopy(stc)
    o_sfreq = 1.0 / stc.tstep
    # note that using no padding for this STC reduces edge ringing...
    stc_new.resample(2 * o_sfreq, npad=0)
    assert (stc_new.data.shape[1] == 2 * stc.data.shape[1])
    assert (stc_new.tstep == stc.tstep / 2)
    stc_new.resample(o_sfreq, npad=0)
    assert (stc_new.data.shape[1] == stc.data.shape[1])
    assert (stc_new.tstep == stc.tstep)
    assert_array_almost_equal(stc_new.data, stc.data, 5)
@testing.requires_testing_data
def test_center_of_mass():
    """Test computing the center of mass on an stc."""
    stc = read_source_estimate(fname_stc)
    # a subject name alone is not enough
    with pytest.raises(ValueError):
        stc.center_of_mass('sample')
    # zero out the left hemisphere so the mass must land on the right
    stc.lh_data[:] = 0
    vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
    assert (hemi == 1)
    # XXX Should design a fool-proof test case, but here were the
    # results:
    assert_equal(vertex, 124791)
    assert_equal(np.round(t, 2), 0.12)
@testing.requires_testing_data
@pytest.mark.parametrize('kind', ('surface', 'mixed'))
@pytest.mark.parametrize('vector', (False, True))
def test_extract_label_time_course(kind, vector):
    """Test extraction of label time courses from (Mixed)SourceEstimate."""
    n_stcs = 3
    n_times = 50
    src = read_inverse_operator(fname_inv)['src']
    if kind == 'mixed':
        pytest.importorskip('nibabel')
        label_names = ('Left-Cerebellum-Cortex',
                       'Right-Cerebellum-Cortex')
        src += setup_volume_source_space(
            'sample', pos=20., volume_label=label_names,
            subjects_dir=subjects_dir, add_interpolator=False)
        klass = MixedVectorSourceEstimate
    else:
        klass = VectorSourceEstimate
    if not vector:
        klass = klass._scalar_class
    vertices = [s['vertno'] for s in src]
    n_verts = np.array([len(v) for v in vertices])
    # one constant negative value per volume source space: -1, -2, ...
    vol_means = np.arange(-1, 1 - len(src), -1)
    vol_means_t = np.repeat(vol_means[:, np.newaxis], n_times, axis=1)
    # get some labels
    labels_lh = read_labels_from_annot('sample', hemi='lh',
                                       subjects_dir=subjects_dir)
    labels_rh = read_labels_from_annot('sample', hemi='rh',
                                       subjects_dir=subjects_dir)
    labels = list()
    labels.extend(labels_lh[:5])
    labels.extend(labels_rh[:4])
    n_labels = len(labels)
    # expected time courses: label i has constant value i
    label_tcs = dict(
        mean=np.arange(n_labels)[:, None] * np.ones((n_labels, n_times)))
    label_tcs['max'] = label_tcs['mean']
    # compute the mean with sign flip
    label_tcs['mean_flip'] = np.zeros_like(label_tcs['mean'])
    for i, label in enumerate(labels):
        label_tcs['mean_flip'][i] = i * np.mean(
            label_sign_flip(label, src[:2]))
    # generate some stc's with known data
    stcs = list()
    pad = (((0, 0), (2, 0), (0, 0)), 'constant')
    for i in range(n_stcs):
        data = np.zeros((n_verts.sum(), n_times))
        # set the value of the stc within each label
        for j, label in enumerate(labels):
            if label.hemi == 'lh':
                idx = np.intersect1d(vertices[0], label.vertices)
                idx = np.searchsorted(vertices[0], idx)
            elif label.hemi == 'rh':
                idx = np.intersect1d(vertices[1], label.vertices)
                idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
            data[idx] = label_tcs['mean'][j]
        for j in range(len(vol_means)):
            # volume source space j comes after the two surface source
            # spaces, so its block spans n_verts[2 + j] rows at this offset
            # (the previous ``n_verts[j]`` indexed the *surface* vertex
            # counts and only yielded the right result via slice clipping)
            offset = n_verts[:2 + j].sum()
            data[offset:offset + n_verts[2 + j]] = vol_means[j]
        if vector:
            # set the values on the Z axis
            data = np.pad(data[:, np.newaxis], *pad)
        this_stc = klass(data, vertices, 0, 1)
        stcs.append(this_stc)
    if vector:
        for key in label_tcs:
            label_tcs[key] = np.pad(label_tcs[key][:, np.newaxis], *pad)
        vol_means_t = np.pad(vol_means_t[:, np.newaxis], *pad)
    # test some invalid inputs
    with pytest.raises(ValueError, match="Invalid value for the 'mode'"):
        extract_label_time_course(stcs, labels, src, mode='notamode')
    # have an empty label
    empty_label = labels[0].copy()
    empty_label.vertices += 1000000
    with pytest.raises(ValueError, match='does not contain any vertices'):
        extract_label_time_course(stcs, empty_label, src)
    # but this works:
    with pytest.warns(RuntimeWarning, match='does not contain any vertices'):
        tc = extract_label_time_course(stcs, empty_label, src,
                                       allow_empty=True)
    end_shape = (3, n_times) if vector else (n_times,)
    for arr in tc:
        assert arr.shape == (1 + len(vol_means),) + end_shape
        # the empty label contributes zeros, then the volume means
        assert_array_equal(arr[:1], np.zeros((1,) + end_shape))
        if len(vol_means):
            assert_array_equal(arr[1:], vol_means_t)
    # test the different modes
    modes = ['mean', 'mean_flip', 'pca_flip', 'max', 'auto']
    for mode in modes:
        if vector and mode not in ('mean', 'max', 'auto'):
            with pytest.raises(ValueError, match='when using a vector'):
                extract_label_time_course(stcs, labels, src, mode=mode)
            continue
        label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
        # the function and the method must agree
        label_tc_method = [stc.extract_label_time_course(labels, src,
                                                         mode=mode)
                           for stc in stcs]
        assert (len(label_tc) == n_stcs)
        assert (len(label_tc_method) == n_stcs)
        for tc1, tc2 in zip(label_tc, label_tc_method):
            assert tc1.shape == (n_labels + len(vol_means),) + end_shape
            assert tc2.shape == (n_labels + len(vol_means),) + end_shape
            assert_allclose(tc1, tc2, rtol=1e-8, atol=1e-16)
            if mode == 'auto':
                use_mode = 'mean' if vector else 'mean_flip'
            else:
                use_mode = mode
            # XXX we don't check pca_flip, probably should someday...
            if use_mode in ('mean', 'max', 'mean_flip'):
                assert_array_almost_equal(tc1[:n_labels],
                                          label_tcs[use_mode])
            assert_array_almost_equal(tc1[n_labels:], vol_means_t)
    # test label with very few vertices (check SVD conditionals)
    label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
    x = label_sign_flip(label, src[:2])
    assert (len(x) == 2)
    label = Label(vertices=[], hemi='lh')
    x = label_sign_flip(label, src[:2])
    assert (x.size == 0)
@testing.requires_testing_data
@pytest.mark.parametrize('label_type, mri_res, vector, test_label, cf, call', [
    (str, False, False, False, 'head', 'meth'),  # head frame
    (str, False, False, str, 'mri', 'func'),  # fastest, default for testing
    (str, False, True, int, 'mri', 'func'),  # vector
    (str, True, False, False, 'mri', 'func'),  # mri_resolution
    (list, True, False, False, 'mri', 'func'),  # volume label as list
    (dict, True, False, False, 'mri', 'func'),  # volume label as dict
])
def test_extract_label_time_course_volume(
        src_volume_labels, label_type, mri_res, vector, test_label, cf, call):
    """Test extraction of label time courses from Vol(Vector)SourceEstimate.

    Builds a volume STC whose per-vertex values are known (1..n_verts) so the
    per-label mean/max can be computed independently and compared against
    extract_label_time_course, in both MRI and head coordinate frames, with
    and without mri_resolution, and for scalar and vector estimates.
    """
    src_labels, volume_labels, lut = src_volume_labels
    n_tot = 46
    assert n_tot == len(src_labels)
    inv = read_inverse_operator(fname_inv_vol)
    if cf == 'head':
        src = inv['src']
        assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
        # bring the head-frame positions back to MRI for comparison below
        rr = apply_trans(invert_transform(inv['mri_head_t']), src[0]['rr'])
    else:
        assert cf == 'mri'
        src = read_source_spaces(fname_src_vol)
        assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI
        rr = src[0]['rr']
    for s in src_labels:
        assert_allclose(s['rr'], rr, atol=1e-7)
    assert len(src) == 1 and src.kind == 'volume'
    klass = VolVectorSourceEstimate
    if not vector:
        klass = klass._scalar_class
    vertices = [src[0]['vertno']]
    n_verts = len(src[0]['vertno'])
    n_times = 50
    # unique value per vertex so label aggregates are predictable
    data = vertex_values = np.arange(1, n_verts + 1)
    end_shape = (n_times,)
    if vector:
        end_shape = (3,) + end_shape
        # put the values in the 3rd (z) component; x/y stay zero
        data = np.pad(data[:, np.newaxis], ((0, 0), (2, 0)), 'constant')
    data = np.repeat(data[..., np.newaxis], n_times, -1)
    stcs = [klass(data.astype(float), vertices, 0, 1)]
    # dispatch to either the free function or the method, per parametrization
    def eltc(*args, **kwargs):
        if call == 'func':
            return extract_label_time_course(stcs, *args, **kwargs)
        else:
            assert call == 'meth'
            return [stcs[0].extract_label_time_course(*args, **kwargs)]
    with pytest.raises(RuntimeError, match='atlas vox_mri_t does not match'):
        eltc(fname_fs_t1, src, mri_resolution=mri_res)
    assert len(src_labels) == 46  # includes unknown
    assert_array_equal(
        src[0]['vertno'],  # src includes some in "unknown" space
        np.sort(np.concatenate([s['vertno'] for s in src_labels])))
    # spot check
    assert src_labels[-1]['seg_name'] == 'CC_Anterior'
    assert src[0]['nuse'] == 4157
    assert len(src[0]['vertno']) == 4157
    assert sum(s['nuse'] for s in src_labels) == 4157
    assert_array_equal(src_labels[-1]['vertno'], [8011, 8032, 8557])
    assert_array_equal(
        np.where(np.in1d(src[0]['vertno'], [8011, 8032, 8557]))[0],
        [2672, 2688, 2995])
    # triage "labels" argument
    if mri_res:
        # All should be there
        missing = []
    else:
        # Nearest misses these
        missing = ['Left-vessel', 'Right-vessel', '5th-Ventricle',
                   'non-WM-hypointensities']
    n_want = len(src_labels)
    if label_type is str:
        labels = fname_aseg
    elif label_type is list:
        labels = (fname_aseg, volume_labels)
    else:
        assert label_type is dict
        labels = (fname_aseg, {k: lut[k] for k in volume_labels})
        assert mri_res
        assert len(missing) == 0
        # we're going to add one that won't exist
        missing = ['intentionally_bad']
        labels[1][missing[0]] = 10000
        n_want += 1
        n_tot += 1
    n_want -= len(missing)
    # actually do the testing
    if cf == 'head' and not mri_res:  # some missing
        with pytest.warns(RuntimeWarning, match='any vertices'):
            eltc(labels, src, allow_empty=True, mri_resolution=mri_res)
    for mode in ('mean', 'max'):
        with catch_logging() as log:
            label_tc = eltc(labels, src, mode=mode, allow_empty='ignore',
                            mri_resolution=mri_res, verbose=True)
        log = log.getvalue()
        assert re.search('^Reading atlas.*aseg\\.mgz\n', log) is not None
        if len(missing):
            # assert that the missing ones get logged
            assert 'does not contain' in log
            assert repr(missing) in log
        else:
            assert 'does not contain' not in log
        assert '\n%d/%d atlas regions had at least' % (n_want, n_tot) in log
        assert len(label_tc) == 1
        label_tc = label_tc[0]
        assert label_tc.shape == (n_tot,) + end_shape
        if vector:
            # only the z component was populated above
            assert_array_equal(label_tc[:, :2], 0.)
            label_tc = label_tc[:, 2]
        assert label_tc.shape == (n_tot, n_times)
        # let's test some actual values by trusting the masks provided by
        # setup_volume_source_space. mri_resolution=True does some
        # interpolation so we should not expect equivalence, False does
        # nearest so we should.
        if mri_res:
            rtol = 0.2 if mode == 'mean' else 0.8  # max much more sensitive
        else:
            rtol = 0.
        for si, s in enumerate(src_labels):
            func = dict(mean=np.mean, max=np.max)[mode]
            these = vertex_values[np.in1d(src[0]['vertno'], s['vertno'])]
            assert len(these) == s['nuse']
            if si == 0 and s['seg_name'] == 'Unknown':
                continue  # unknown is crappy
            if s['nuse'] == 0:
                want = 0.
                if mri_res:
                    # this one is totally due to interpolation, so no easy
                    # test here
                    continue
            else:
                want = func(these)
            assert_allclose(label_tc[si], want, atol=1e-6, rtol=rtol)
            # compare with in_label, only on every fourth for speed
            if test_label is not False and si % 4 == 0:
                label = s['seg_name']
                if test_label is int:
                    label = lut[label]
                in_label = stcs[0].in_label(
                    label, fname_aseg, src).data
                assert in_label.shape == (s['nuse'],) + end_shape
                if vector:
                    assert_array_equal(in_label[:, :2], 0.)
                    in_label = in_label[:, 2]
                if want == 0:
                    assert in_label.shape[0] == 0
                else:
                    in_label = func(in_label)
                    assert_allclose(in_label, want, atol=1e-6, rtol=rtol)
        if mode == 'mean' and not vector:  # check the reverse
            if label_type is dict:
                ctx = pytest.warns(RuntimeWarning, match='does not contain')
            else:
                ctx = nullcontext()
            with ctx:
                stc_back = labels_to_stc(labels, label_tc, src=src)
            assert stc_back.data.shape == stcs[0].data.shape
            corr = np.corrcoef(stc_back.data.ravel(),
                               stcs[0].data.ravel())[0, 1]
            assert 0.6 < corr < 0.63
            assert_allclose(_varexp(label_tc, label_tc), 1.)
            ve = _varexp(stc_back.data, stcs[0].data)
            assert 0.83 < ve < 0.85
            with pytest.warns(None):  # ignore warnings about no output
                label_tc_rt = extract_label_time_course(
                    stc_back, labels, src=src, mri_resolution=mri_res,
                    allow_empty=True)
            assert label_tc_rt.shape == label_tc.shape
            corr = np.corrcoef(label_tc.ravel(), label_tc_rt.ravel())[0, 1]
            lower, upper = (0.99, 0.999) if mri_res else (0.95, 0.97)
            assert lower < corr < upper
            ve = _varexp(label_tc_rt, label_tc)
            lower, upper = (0.99, 0.999) if mri_res else (0.97, 0.99)
            assert lower < ve < upper
def _varexp(got, want):
return max(
1 - np.linalg.norm(got.ravel() - want.ravel()) ** 2 /
np.linalg.norm(want) ** 2, 0.)
@testing.requires_testing_data
def test_extract_label_time_course_equiv():
    """Test extraction of label time courses from stc equivalences."""
    labels = read_labels_from_annot('sample', 'aparc', 'lh', regexp='transv',
                                    subjects_dir=subjects_dir)
    assert len(labels) == 1
    this_label = labels[0]
    inv = read_inverse_operator(fname_inv)
    evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0].crop(0, 0.01)
    # restricting during apply_inverse should be equivalent to restricting
    # the full estimate afterward with in_label
    stc_restricted = apply_inverse(evoked, inv, pick_ori='normal',
                                   label=this_label)
    stc_full = apply_inverse(evoked, inv, pick_ori='normal')
    stc_in_label = stc_full.in_label(this_label)
    tc_a = stc_restricted.extract_label_time_course(this_label, inv['src'])
    tc_b = stc_in_label.extract_label_time_course(this_label, inv['src'])
    assert_allclose(tc_a, tc_b)
    # emptying the lh source space should make extraction fail loudly
    inv['src'][0]['vertno'] = np.array([], int)
    assert len(stc_in_label.vertices[0]) == 22
    with pytest.raises(ValueError, match='22/22 left hemisphere.*missing'):
        stc_in_label.extract_label_time_course(this_label, inv['src'])
def _my_trans(data):
"""FFT that adds an additional dimension by repeating result."""
data_t = fft(data)
data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
return data_t, None
def test_transform_data():
    """Test applying linear (time) transform to data."""
    # make up some data
    n_sensors, n_vertices, n_times = 10, 20, 4
    kernel = rng.randn(n_vertices, n_sensors)
    sens_data = rng.randn(n_sensors, n_times)
    vertices = [np.arange(n_vertices)]
    data = np.dot(kernel, sens_data)
    # once with no restriction, once restricted in space and time
    cases = zip([None, np.arange(n_vertices // 2, n_vertices)],
                [None, 1], [None, 3])
    for idx, tmin_idx, tmax_idx in cases:
        idx_use = slice(None) if idx is None else idx
        data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])
        # a dense array and a (kernel, sens_data) pair must transform alike
        for stc_data in (data, (kernel, sens_data)):
            stc = VolSourceEstimate(stc_data, vertices=vertices,
                                    tmin=0., tstep=1.)
            stc_data_t = stc.transform_data(_my_trans, idx=idx,
                                            tmin_idx=tmin_idx,
                                            tmax_idx=tmax_idx)
            assert_allclose(data_f, stc_data_t)
    # bad sens_data
    sens_data = sens_data[..., np.newaxis]
    with pytest.raises(ValueError, match='sensor data must have 2'):
        VolSourceEstimate((kernel, sens_data), vertices, 0, 1)
def test_transform():
    """Test applying linear (time) transform to data."""
    # make up some data
    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
    data = rng.randn(n_verts_lh + n_verts_rh, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)
    # data_t.ndim > 2 & copy is True
    stcs_t = stc.transform(_my_trans, copy=True)
    assert (isinstance(stcs_t, list))
    assert_array_equal(stc.times, stcs_t[0].times)
    assert_equal(stc.vertices, stcs_t[0].vertices)
    # stack the per-output STCs back into the 3D array transform_data returns
    data = np.concatenate((stcs_t[0].data[:, :, None],
                           stcs_t[1].data[:, :, None]), axis=2)
    data_t = stc.transform_data(_my_trans)
    assert_array_equal(data, data_t)  # check against stc.transform_data()
    # data_t.ndim > 2 & copy is False
    pytest.raises(ValueError, stc.transform, _my_trans, copy=False)
    # data_t.ndim = 2 & copy is True
    tmp = deepcopy(stc)
    stc_t = stc.transform(np.abs, copy=True)
    assert (isinstance(stc_t, SourceEstimate))
    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?
    # data_t.ndim = 2 & copy is False
    times = np.round(1000 * stc.times)
    # restrict to the right-hemisphere vertices only
    verts = np.arange(len(stc.lh_vertno),
                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
    verts_rh = stc.rh_vertno
    tmin_idx = np.searchsorted(times, 0)
    tmax_idx = np.searchsorted(times, 501)  # Include 500ms in the range
    data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=tmin_idx,
                                tmax_idx=tmax_idx)
    # in-place transform: drops the lh vertices and crops the time axis
    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
    assert (isinstance(stc, SourceEstimate))
    assert_equal(stc.tmin, 0.)
    assert_equal(stc.times[-1], 0.5)
    assert_equal(len(stc.vertices[0]), 0)
    assert_equal(stc.vertices[1], verts_rh)
    assert_array_equal(stc.data, data_t)
    # second in-place crop on the already-modified stc
    times = np.round(1000 * stc.times)
    tmin_idx, tmax_idx = np.searchsorted(times, 0), np.searchsorted(times, 250)
    data_t = stc.transform_data(np.abs, tmin_idx=tmin_idx, tmax_idx=tmax_idx)
    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
    assert_equal(stc.tmin, 0.)
    assert_equal(stc.times[-1], 0.2)
    assert_array_equal(stc.data, data_t)
@requires_sklearn
def test_spatio_temporal_tris_adjacency():
    """Test spatio-temporal adjacency from triangles."""
    tris = np.array([[0, 1, 2], [3, 4, 5]])
    adjacency = spatio_temporal_tris_adjacency(tris, 2)
    x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
    components = stats.cluster_level._get_components(np.array(x), adjacency)
    # _get_components works differently now...
    # translate the legacy labels (negative values = not in any component)
    # into the new list-of-index-arrays format
    old_fmt = np.array([0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1])
    expected = []
    for label in np.unique(old_fmt[old_fmt >= 0]):
        expected.append(np.nonzero(old_fmt == label)[0])
    assert len(expected) == len(components)
    for got, want in zip(components, expected):
        assert_array_equal(got, want)
@testing.requires_testing_data
def test_spatio_temporal_src_adjacency():
    """Test spatio-temporal adjacency from source spaces."""
    tris = np.array([[0, 1, 2], [3, 4, 5]])
    src = [dict(), dict()]
    adjacency = spatio_temporal_tris_adjacency(tris, 2).todense()
    assert_allclose(np.diag(adjacency), 1.)
    # a two-hemisphere surface src built from the same triangles should
    # produce the same adjacency
    for s in src:
        s['use_tris'] = np.array([[0, 1, 2]])
        s['vertno'] = np.array([0, 1, 2])
        s['type'] = 'surf'
    adjacency2 = spatio_temporal_src_adjacency(src, 2)
    assert_array_equal(adjacency2.todense(), adjacency)
    # add test for dist adjacency
    for s in src:
        s['dist'] = np.ones((3, 3)) - np.eye(3)
        s['vertno'] = [0, 1, 2]
        s['type'] = 'surf'
    adjacency3 = spatio_temporal_src_adjacency(src, 2, dist=2)
    assert_array_equal(adjacency3.todense(), adjacency)
    # add test for source space adjacency with omitted vertices
    inverse_operator = read_inverse_operator(fname_inv)
    src_ = inverse_operator['src']
    with pytest.warns(RuntimeWarning, match='will have holes'):
        adjacency = spatio_temporal_src_adjacency(src_, n_times=2)
    n_per_time = adjacency.shape[0] / 2
    n_use = sum(s['nuse'] for s in inverse_operator['src'])
    assert n_per_time == n_use
    assert_equal(grade_to_tris(5).shape, [40960, 3])
@requires_pandas
def test_to_data_frame():
    """Test stc Pandas exporter."""
    n_vert, n_times = 10, 5
    vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)]
    data = rng.randn(n_vert, n_times)
    stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
                              subject='sample')
    stc_vol = VolSourceEstimate(data, vertices=vertices[:1], tmin=0, tstep=1,
                                subject='sample')
    for this_stc in (stc_surf, stc_vol):
        df = this_stc.to_data_frame()
        # test data preservation (first 2 dataframe elements are subj & time)
        assert_array_equal(df.values.T[2:], this_stc.data)
        # test long format
        df_long = this_stc.to_data_frame(long_format=True)
        assert len(df_long) == this_stc.data.size
        expected = ('subject', 'time', 'source', 'value')
        assert set(df_long.columns) == set(expected)
@requires_pandas
@pytest.mark.parametrize('index', ('time', ['time', 'subject'], None))
def test_to_data_frame_index(index):
    """Test index creation in stc Pandas exporter."""
    n_vert, n_times = 10, 5
    vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)]
    data = rng.randn(n_vert, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
                         subject='sample')
    df = stc.to_data_frame(index=index)
    # test index setting
    index = index if isinstance(index, list) else [index]
    assert df.index.names == index
    # test that non-indexed data were present as columns
    for col in set(['time', 'subject']) - set(index):
        assert col in df.columns
@pytest.mark.parametrize('kind', ('surface', 'mixed', 'volume'))
@pytest.mark.parametrize('vector', (False, True))
@pytest.mark.parametrize('n_times', (5, 1))
def test_get_peak(kind, vector, n_times):
    """Test peak getter."""
    n_vert = 10
    vertices = [np.arange(n_vert)]
    if kind == 'surface':
        klass = VectorSourceEstimate
        vertices += [np.empty(0, int)]
    elif kind == 'mixed':
        klass = MixedVectorSourceEstimate
        vertices += [np.empty(0, int), np.empty(0, int)]
    else:
        assert kind == 'volume'
        klass = VolVectorSourceEstimate
    # single nonzero entry so the peak location is known: vertex 1, last time
    data = np.zeros((n_vert, n_times))
    data[1, -1] = 1
    if vector:
        data = np.repeat(data[:, np.newaxis], 3, 1)
    else:
        klass = klass._scalar_class
    stc = klass(data, vertices, 0, 1)
    with pytest.raises(ValueError, match='out of bounds'):
        stc.get_peak(tmin=-100)
    with pytest.raises(ValueError, match='out of bounds'):
        stc.get_peak(tmax=90)
    # with a single time point tmin/tmax both fall out of bounds first
    with pytest.raises(ValueError,
                       match='smaller or equal' if n_times > 1 else 'out of'):
        stc.get_peak(tmin=0.002, tmax=0.001)
    vert_idx, time_idx = stc.get_peak()
    vertno = np.concatenate(stc.vertices)
    assert vert_idx in vertno
    assert time_idx in stc.times
    data_idx, time_idx = stc.get_peak(vert_as_index=True, time_as_index=True)
    if vector:
        use_data = stc.magnitude().data
    else:
        use_data = stc.data
    assert data_idx == 1
    assert time_idx == n_times - 1
    assert data_idx == np.argmax(np.abs(use_data[:, time_idx]))
    assert time_idx == np.argmax(np.abs(use_data[data_idx, :]))
    if kind == 'surface':
        # restricting to the hemisphere holding the peak changes nothing
        data_idx_2, time_idx_2 = stc.get_peak(
            vert_as_index=True, time_as_index=True, hemi='lh')
        assert data_idx_2 == data_idx
        assert time_idx_2 == time_idx
        with pytest.raises(RuntimeError, match='no vertices'):
            stc.get_peak(hemi='rh')
@requires_h5py
@testing.requires_testing_data
def test_mixed_stc(tmpdir):
    """Test source estimate from mixed source space."""
    N = 90  # number of sources
    T = 2  # number of time points
    S = 3  # number of source spaces
    data = rng.randn(N, T)
    vertno = S * [np.arange(N // S)]
    # make sure error is raised if vertices are not a list of length >= 2
    with pytest.raises(ValueError):
        MixedSourceEstimate(data=data, vertices=[np.arange(N)])
    stc = MixedSourceEstimate(data, vertno, 0, 1)
    # round-trip through HDF5 I/O
    fname = tmpdir.join('mixed-stc.h5')
    stc.save(fname)
    stc_out = read_source_estimate(fname)
    assert_array_equal(stc_out.vertices, vertno)
    assert_array_equal(stc_out.data, data)
    assert stc_out.tmin == 0
    assert stc_out.tstep == 1
    assert isinstance(stc_out, MixedSourceEstimate)
@requires_h5py
@pytest.mark.parametrize('klass, kind', [
    (VectorSourceEstimate, 'surf'),
    (VolVectorSourceEstimate, 'vol'),
    (VolVectorSourceEstimate, 'discrete'),
    (MixedVectorSourceEstimate, 'mixed'),
])
@pytest.mark.parametrize('dtype', [
    np.float32, np.float64, np.complex64, np.complex128])
def test_vec_stc_basic(tmpdir, klass, kind, dtype):
    """Test (vol)vector source estimate.

    Constructs 4 sources with hand-picked normals and vector data so that
    magnitudes, normal projections, and PCA projections all have known
    closed-form values, then checks those plus HDF5 round-trip and error
    handling for each estimate class and dtype.
    """
    # one normal per source: x, y, 45 deg in xz, and the (1,1,1) diagonal
    nn = np.array([
        [1, 0, 0],
        [0, 1, 0],
        [np.sqrt(1. / 2.), 0, np.sqrt(1. / 2.)],
        [np.sqrt(1 / 3.)] * 3
    ], np.float32)
    data = np.array([
        [1, 0, 0],
        [0, 2, 0],
        [-3, 0, 0],
        [1, 1, 1],
    ], dtype)[:, :, np.newaxis]
    # expected results computed by hand from data/nn above
    amplitudes = np.array([1, 2, 3, np.sqrt(3)], dtype)
    magnitudes = amplitudes.copy()
    normals = np.array([1, 2, -3. / np.sqrt(2), np.sqrt(3)], dtype)
    if dtype in (np.complex64, np.complex128):
        data *= 1j
        amplitudes *= 1j
        normals *= 1j
    directions = np.array(
        [[1, 0, 0], [0, 1, 0], [-1, 0, 0], [1. / np.sqrt(3)] * 3])
    vol_kind = kind if kind in ('discrete', 'vol') else 'vol'
    vol_src = SourceSpaces([dict(nn=nn, type=vol_kind)])
    assert vol_src.kind == dict(vol='volume').get(vol_kind, vol_kind)
    vol_verts = [np.arange(4)]
    surf_src = SourceSpaces([dict(nn=nn[:2], type='surf'),
                             dict(nn=nn[2:], type='surf')])
    assert surf_src.kind == 'surface'
    surf_verts = [np.array([0, 1]), np.array([0, 1])]
    if klass is VolVectorSourceEstimate:
        src = vol_src
        verts = vol_verts
    elif klass is VectorSourceEstimate:
        src = surf_src
        verts = surf_verts
    if klass is MixedVectorSourceEstimate:
        # mixed = surface + volume, so duplicate all the expected values
        src = surf_src + vol_src
        verts = surf_verts + vol_verts
        assert src.kind == 'mixed'
        data = np.tile(data, (2, 1, 1))
        amplitudes = np.tile(amplitudes, 2)
        magnitudes = np.tile(magnitudes, 2)
        normals = np.tile(normals, 2)
        directions = np.tile(directions, (2, 1))
    stc = klass(data, verts, 0, 1, 'foo')
    amplitudes = amplitudes[:, np.newaxis]
    magnitudes = magnitudes[:, np.newaxis]
    # Magnitude of the vectors
    assert_array_equal(stc.magnitude().data, magnitudes)
    # Vector components projected onto the vertex normals
    if kind in ('vol', 'mixed'):
        with pytest.raises(RuntimeError, match='surface or discrete'):
            stc.project('normal', src)[0]
    else:
        normal = stc.project('normal', src)[0]
        assert_allclose(normal.data[:, 0], normals)
    # Maximal-variance component, either to keep amps pos or to align to src-nn
    projected, got_directions = stc.project('pca')
    assert_allclose(got_directions, directions)
    assert_allclose(projected.data, amplitudes)
    projected, got_directions = stc.project('pca', src)
    # with src the sign is aligned to the source normals -> source 3 flips
    flips = np.array([[1], [1], [-1.], [1]])
    if klass is MixedVectorSourceEstimate:
        flips = np.tile(flips, (2, 1))
    assert_allclose(got_directions, directions * flips)
    assert_allclose(projected.data, amplitudes * flips)
    # HDF5 round-trip preserves data and vertices
    out_name = tmpdir.join('temp.h5')
    stc.save(out_name)
    stc_read = read_source_estimate(out_name)
    assert_allclose(stc.data, stc_read.data)
    assert len(stc.vertices) == len(stc_read.vertices)
    for v1, v2 in zip(stc.vertices, stc_read.vertices):
        assert_array_equal(v1, v2)
    stc = klass(data[:, :, 0], verts, 0, 1)  # upbroadcast
    assert stc.data.shape == (len(data), 3, 1)
    # Bad data
    with pytest.raises(ValueError, match='must have shape.*3'):
        klass(data[:, :2], verts, 0, 1)
    data = data[:, :, np.newaxis]
    with pytest.raises(ValueError, match='3 dimensions for .*VectorSource'):
        klass(data, verts, 0, 1)
@pytest.mark.parametrize('real', (True, False))
def test_source_estime_project(real):
    """Test projecting a source estimate onto direction of max power."""
    n_src, n_times = 4, 100
    rng = np.random.RandomState(0)
    data = rng.randn(n_src, 3, n_times)
    if real:
        assert data.dtype == np.float64
    else:
        data = data + 1j * rng.randn(n_src, 3, n_times)
        assert data.dtype == np.complex128
    # Make sure that the normal we get maximizes the power
    # (i.e., minimizes the negative power)
    want_nn = np.empty((n_src, 3))
    for ii in range(n_src):

        def _neg_power(x, this_data=data[ii]):
            unit = x / np.linalg.norm(x)
            return -np.linalg.norm(np.dot(unit, this_data))

        want_nn[ii] = fmin_cobyla(_neg_power, np.ones(3), (),
                                  rhobeg=0.1, rhoend=1e-6)
    want_nn /= np.linalg.norm(want_nn, axis=1, keepdims=True)
    stc = VolVectorSourceEstimate(data, [np.arange(n_src)], 0, 1)
    stc_max, directions = stc.project('pca')
    # the PCA direction is sign-ambiguous, so align signs before comparing
    flips = np.sign(np.sum(directions * want_nn, axis=1, keepdims=True))
    assert_allclose(directions * flips, want_nn, atol=2e-6)
@testing.requires_testing_data
def test_source_estime_project_label():
    """Test projecting a source estimate onto direction of max power."""
    fwd = pick_types_forward(
        read_forward_solution(fname_fwd), meg=True, eeg=False)
    evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
    cov = read_cov(fname_cov)
    inv_free = make_inverse_operator(evoked.info, fwd, cov, loose=1.)
    stc_vec = apply_inverse(evoked, inv_free, pick_ori='vector')
    # projecting then restricting to a label must equal restricting then
    # projecting
    stc_pca = stc_vec.project('pca', fwd['src'])[0]
    labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
                                       subjects_dir=subjects_dir)
    merged_label = labels_lh[0] + labels_lh[1]
    project_then_restrict = stc_pca.in_label(merged_label)
    restrict_then_project = stc_vec.in_label(merged_label).project(
        'pca', fwd['src'])[0]
    assert_array_equal(project_then_restrict.data, restrict_then_project.data)
@pytest.fixture(scope='module', params=[testing._pytest_param()])
def invs():
    """Inverses of various amounts of loose.

    Returns a tuple ``(evoked, free, free_surf, freeish, fixed, fixedish)``
    where the inverse operators differ only in ``loose`` (1., 1. with
    surf_ori forward, 0.9999, 0., 0.0001) so tests can compare nearly
    equivalent orientation constraints.
    """
    fwd = read_forward_solution(fname_fwd)
    fwd = pick_types_forward(fwd, meg=True, eeg=False)
    fwd_surf = convert_forward_solution(fwd, surf_ori=True)
    evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
    noise_cov = read_cov(fname_cov)
    free = make_inverse_operator(
        evoked.info, fwd, noise_cov, loose=1.)
    free_surf = make_inverse_operator(
        evoked.info, fwd_surf, noise_cov, loose=1.)
    freeish = make_inverse_operator(
        evoked.info, fwd, noise_cov, loose=0.9999)
    fixed = make_inverse_operator(
        evoked.info, fwd, noise_cov, loose=0.)
    fixedish = make_inverse_operator(
        evoked.info, fwd, noise_cov, loose=0.0001)
    # fully free (non-surf-ori) inverse uses the identity basis per source
    assert_allclose(free['source_nn'],
                    np.kron(np.ones(fwd['nsource']), np.eye(3)).T,
                    atol=1e-7)
    # This is the one exception:
    assert not np.allclose(free['source_nn'], free_surf['source_nn'])
    assert_allclose(free['source_nn'],
                    np.tile(np.eye(3), (free['nsource'], 1)), atol=1e-7)
    # All others are similar:
    for other in (freeish, fixedish):
        assert_allclose(free_surf['source_nn'], other['source_nn'], atol=1e-7)
    # every 3rd row of the surf-ori basis is the normal used by fixed
    assert_allclose(
        free_surf['source_nn'][2::3], fixed['source_nn'], atol=1e-7)
    expected_nn = np.concatenate([_get_src_nn(s) for s in fwd['src']])
    assert_allclose(fixed['source_nn'], expected_nn, atol=1e-7)
    return evoked, free, free_surf, freeish, fixed, fixedish
# pytest param for pick_ori='normal' marked as an expected failure
# (presumably for parametrized tests that cannot pass with 'normal';
# NOTE(review): appears unused within this portion of the file — confirm)
bad_normal = pytest.param(
    'normal', marks=pytest.mark.xfail(raises=AssertionError))
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'vector'])
def test_vec_stc_inv_free(invs, pick_ori):
    """Test vector STC behavior with two free-orientation inverses."""
    evoked, free, free_surf = invs[:3]
    # surf-ori and non-surf-ori free inverses should give the same estimate
    stc_plain = apply_inverse(evoked, free, pick_ori=pick_ori)
    stc_surf_ori = apply_inverse(evoked, free_surf, pick_ori=pick_ori)
    assert_allclose(stc_plain.data, stc_surf_ori.data, atol=1e-5)
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'vector'])
def test_vec_stc_inv_free_surf(invs, pick_ori):
    """Test vector STC behavior with free and free-ish orientation invs."""
    evoked = invs[0]
    free_surf, freeish = invs[2], invs[3]
    # loose=0.9999 should be nearly indistinguishable from loose=1.
    stc_surf_ori = apply_inverse(evoked, free_surf, pick_ori=pick_ori)
    stc_nearly_free = apply_inverse(evoked, freeish, pick_ori=pick_ori)
    assert_allclose(stc_surf_ori.data, stc_nearly_free.data, atol=1e-3)
@pytest.mark.parametrize('pick_ori', (None, 'normal', 'vector'))
def test_vec_stc_inv_fixed(invs, pick_ori):
    """Test vector STC behavior with fixed-orientation inverses."""
    evoked, _, _, _, fixed, fixedish = invs
    stc_fixed = apply_inverse(evoked, fixed)
    stc_fixed_vector = apply_inverse(evoked, fixed, pick_ori='vector')
    # projecting the vector estimate on the normals recovers the fixed one
    assert_allclose(stc_fixed.data,
                    stc_fixed_vector.project('normal', fixed['src'])[0].data)
    # loose=0.0001 should be almost identical to loose=0.
    stc_fixedish = apply_inverse(evoked, fixedish, pick_ori=pick_ori)
    if pick_ori == 'vector':
        assert_allclose(stc_fixed_vector.data, stc_fixedish.data, atol=1e-2)
        # two ways here: with magnitude...
        assert_allclose(
            abs(stc_fixed).data, stc_fixedish.magnitude().data, atol=1e-2)
        # ... and when picking the normal (signed)
        stc_fixedish = stc_fixedish.project('normal', fixedish['src'])[0]
    elif pick_ori is None:
        # fixed estimates are signed; default loose output is magnitude-like
        stc_fixed = abs(stc_fixed)
    else:
        assert pick_ori == 'normal'  # no need to modify
    assert_allclose(stc_fixed.data, stc_fixedish.data, atol=1e-2)
@testing.requires_testing_data
def test_epochs_vector_inverse():
    """Test vector inverse consistency between evoked and epochs."""
    raw = read_raw_fif(fname_raw)
    events = find_events(raw, stim_channel='STI 014')[:2]
    reject = dict(grad=2000e-13, mag=4e-12, eog=150e-6)
    epochs = Epochs(raw, events, None, 0, 0.01, baseline=None,
                    reject=reject, preload=True)
    assert_equal(len(epochs), 2)
    evoked = epochs.average(picks=range(len(epochs.ch_names)))
    inv = read_inverse_operator(fname_inv)
    snr = 3.
    lambda2 = 1. / snr ** 2
    # averaging per-epoch vector STCs must equal the evoked vector STC
    per_epoch = apply_inverse_epochs(epochs, inv, lambda2, method="MNE",
                                     pick_ori='vector',
                                     return_generator=False)
    stc_from_epochs = np.mean(per_epoch)
    stc_from_evoked = apply_inverse(evoked, inv, lambda2, method="MNE",
                                    pick_ori='vector')
    assert_allclose(stc_from_epochs.data, stc_from_evoked.data,
                    rtol=1e-9, atol=0)
@requires_sklearn
@testing.requires_testing_data
def test_vol_adjacency():
    """Test volume adjacency."""
    vol = read_source_spaces(fname_vsrc)
    # dist-based adjacency is not supported for volume source spaces
    with pytest.raises(ValueError):
        spatial_src_adjacency(vol, dist=1.)
    adjacency = spatial_src_adjacency(vol)
    n_vertices = vol[0]['inuse'].sum()
    assert_equal(adjacency.shape, (n_vertices, n_vertices))
    assert np.all(adjacency.data == 1)
    assert isinstance(adjacency, sparse.coo_matrix)
    # spatio-temporal version stacks the spatial graph over time points
    adjacency2 = spatio_temporal_src_adjacency(vol, n_times=2)
    assert_equal(adjacency2.shape, (2 * n_vertices, 2 * n_vertices))
    assert np.all(adjacency2.data == 1)
@testing.requires_testing_data
def test_spatial_src_adjacency():
    """Test spatial adjacency functionality."""

    def _assert_csr_equal(a, b):
        # compare two CSR matrices field by field
        assert_array_equal(a.data, b.data)
        assert_array_equal(a.indptr, b.indptr)
        assert_array_equal(a.indices, b.indices)

    # oct
    src = read_source_spaces(fname_src)
    assert src[0]['dist'] is not None  # distance info
    with pytest.warns(RuntimeWarning, match='will have holes'):
        con = spatial_src_adjacency(src).toarray()
    con_dist = spatial_src_adjacency(src, dist=0.01).toarray()
    assert (con == con_dist).mean() > 0.75
    # ico
    src = read_source_spaces(fname_src_fs)
    con = spatial_src_adjacency(src).tocsr()
    con_tris = spatial_tris_adjacency(grade_to_tris(5)).tocsr()
    assert con.shape == con_tris.shape
    _assert_csr_equal(con, con_tris)
    # one hemi
    con_lh = spatial_src_adjacency(src[:1]).tocsr()
    con_lh_tris = spatial_tris_adjacency(grade_to_tris(5)).tocsr()
    con_lh_tris = con_lh_tris[:10242, :10242].tocsr()
    _assert_csr_equal(con_lh, con_lh_tris)
@requires_sklearn
@requires_nibabel()
@testing.requires_testing_data
def test_vol_mask():
    """Test extraction of volume mask."""
    src = read_source_spaces(fname_vsrc)
    mask = _get_vol_mask(src)
    # Let's use an alternative way that should be equivalent
    vertices = [src[0]['vertno']]
    n_vertices = len(vertices[0])
    # unique nonzero value per vertex so the rendered volume is invertible
    vert_vals = np.arange(1, n_vertices + 1)[:, np.newaxis]
    stc_tmp = VolSourceEstimate(vert_vals, vertices, tmin=0., tstep=1.)
    img = stc_tmp.as_volume(src, mri_resolution=False)
    img_data = _get_img_fdata(img)[:, :, :, 0].T
    mask_nib = img_data != 0
    assert_array_equal(img_data[mask_nib], vert_vals[:, 0])
    assert_array_equal(np.where(mask_nib.ravel())[0], src[0]['vertno'])
    assert_array_equal(mask, mask_nib)
    assert_array_equal(img_data.shape, mask.shape)
@testing.requires_testing_data
def test_stc_near_sensors(tmpdir):
    """Test stc_near_sensors.

    Uses identity data (np.eye) so each channel carries a unique pattern;
    a vertex that exactly picks up one channel then has zero distance to
    that channel's row in evoked.data, which the cdist checks exploit.
    """
    info = read_info(fname_evoked)
    # pick the left EEG sensors
    picks = pick_types(info, meg=False, eeg=True, exclude=())
    picks = [pick for pick in picks if info['chs'][pick]['loc'][0] < 0]
    pick_info(info, picks, copy=False)
    info['projs'] = []
    info['bads'] = []
    assert info['nchan'] == 33
    evoked = EvokedArray(np.eye(info['nchan']), info)
    trans = read_trans(fname_fwd)
    assert trans['to'] == FIFF.FIFFV_COORD_HEAD
    this_dir = str(tmpdir)
    # testing does not have pial, so fake it
    os.makedirs(op.join(this_dir, 'sample', 'surf'))
    for hemi in ('lh', 'rh'):
        copyfile(op.join(subjects_dir, 'sample', 'surf', f'{hemi}.white'),
                 op.join(this_dir, 'sample', 'surf', f'{hemi}.pial'))
    # here we use a distance smaller than the inter-sensor distance
    kwargs = dict(subject='sample', trans=trans, subjects_dir=this_dir,
                  verbose=True, distance=0.005)
    # EEG channels are rejected until retyped as ecog below
    with pytest.raises(ValueError, match='No appropriate channels'):
        stc_near_sensors(evoked, **kwargs)
    evoked.set_channel_types({ch_name: 'ecog' for ch_name in evoked.ch_names})
    with catch_logging() as log:
        stc = stc_near_sensors(evoked, **kwargs)
    log = log.getvalue()
    assert 'Minimum projected intra-sensor distance: 7.' in log  # 7.4
    # this should be left-hemisphere dominant
    assert 5000 > len(stc.vertices[0]) > 4000
    assert 200 > len(stc.vertices[1]) > 100
    # and at least one vertex should have the channel values
    dists = cdist(stc.data, evoked.data)
    assert np.isclose(dists, 0., atol=1e-6).any(0).all()
    src = read_source_spaces(fname_src)  # uses "white" but should be okay
    for s in src:
        transform_surface_to(s, 'head', trans, copy=False)
    assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
    stc_src = stc_near_sensors(evoked, src=src, **kwargs)
    assert len(stc_src.data) == 7928
    # morph the src-restricted estimate back to the full surface and compare
    # with the surface-based estimate on the shared vertices
    with pytest.warns(RuntimeWarning, match='not included'):  # some removed
        stc_src_full = compute_source_morph(
            stc_src, 'sample', 'sample', smooth=5, spacing=None,
            subjects_dir=subjects_dir).apply(stc_src)
    lh_idx = np.searchsorted(stc_src_full.vertices[0], stc.vertices[0])
    rh_idx = np.searchsorted(stc_src_full.vertices[1], stc.vertices[1])
    rh_idx += len(stc_src_full.vertices[0])
    sub_data = stc_src_full.data[np.concatenate([lh_idx, rh_idx])]
    assert sub_data.shape == stc.data.shape
    corr = np.corrcoef(stc.data.ravel(), sub_data.ravel())[0, 1]
    assert 0.6 < corr < 0.7
    # now single-weighting mode
    stc_w = stc_near_sensors(evoked, mode='single', **kwargs)
    assert_array_less(stc_w.data, stc.data + 1e-3)  # some tol
    assert len(stc_w.data) == len(stc.data)
    # at least one for each sensor should have projected right on it
    dists = cdist(stc_w.data, evoked.data)
    assert np.isclose(dists, 0., atol=1e-6).any(0).all()
    # finally, nearest mode: all should match
    stc_n = stc_near_sensors(evoked, mode='nearest', **kwargs)
    assert len(stc_n.data) == len(stc.data)
    # at least one for each sensor should have projected right on it
    dists = cdist(stc_n.data, evoked.data)
    assert np.isclose(dists, 0., atol=1e-6).any(1).all()  # all vert eq some ch
    # these are EEG electrodes, so the distance 0.01 is too small for the
    # scalp+skull. Even at a distance of 33 mm EEG 060 is too far:
    with pytest.warns(RuntimeWarning, match='Channel missing in STC: EEG 060'):
        stc = stc_near_sensors(evoked, trans, 'sample', subjects_dir=this_dir,
                               project=False, distance=0.033)
    assert stc.data.any(0).sum() == len(evoked.ch_names) - 1
    # and now with volumetric projection
    src = read_source_spaces(fname_vsrc)
    with catch_logging() as log:
        stc_vol = stc_near_sensors(evoked, trans, 'sample', src=src,
                                   subjects_dir=subjects_dir, verbose=True,
                                   distance=0.033)
    assert isinstance(stc_vol, VolSourceEstimate)
    log = log.getvalue()
    assert '4157 volume vertices' in log
@testing.requires_testing_data
def test_stc_near_sensors_picks():
    """Test using picks with stc_near_sensors."""
    info = mne.io.read_raw_nirx(fname_nirx).info
    n_chan = len(info['ch_names'])
    evoked = mne.EvokedArray(np.ones((n_chan, 1)), info)
    src = mne.read_source_spaces(fname_src_fs)
    kwargs = dict(
        evoked=evoked, subject='fsaverage', trans='fsaverage',
        subjects_dir=subjects_dir, src=src, project=True)
    # fNIRS channels are only used when explicitly picked
    with pytest.raises(ValueError, match='No appropriate channels'):
        stc_near_sensors(**kwargs)
    picks = np.arange(n_chan)
    vals = stc_near_sensors(picks=picks, **kwargs).data
    assert len(vals) == 20484
    assert (vals >= 0).all()
    nonzero = vals[vals > 0]
    n_pts = len(nonzero)
    assert 500 < n_pts < 600
    lo, hi = np.percentile(nonzero, (5, 95))
    assert 0.01 < lo < 0.1
    assert 1.3 < hi < 1.7  # > 1
    # weighted mode hits the same vertices but preserves the input values
    vals = stc_near_sensors(picks=picks, mode='weighted', **kwargs).data
    assert (vals >= 0).all()
    nonzero = vals[vals > 0]
    assert len(nonzero) == n_pts
    assert_array_equal(nonzero, 1.)  # values preserved
def _make_morph_map_hemi_same(subject_from, subject_to, subjects_dir,
                              reg_from, reg_to):
    """Monkeypatch stand-in that morphs a hemisphere onto itself.

    ``subject_to`` and ``reg_to`` are deliberately ignored (the signature
    must match the patched function); the morph map is computed from
    ``subject_from`` to itself, which is valid because the test scales a
    subject into an identical copy.
    """
    del subject_to, reg_to  # unused on purpose
    return _make_morph_map_hemi(subject_from, subject_from, subjects_dir,
                                reg_from, reg_from)
@requires_nibabel()
@testing.requires_testing_data
@pytest.mark.parametrize('kind', (
    pytest.param('volume', marks=[requires_version('dipy')]),
    'surface',
))
@pytest.mark.parametrize('scale', ((1.0, 0.8, 1.2), 1., 0.9))
def test_scale_morph_labels(kind, scale, monkeypatch, tmpdir):
    """Test label extraction, morphing, and MRI scaling relationships."""
    tempdir = str(tmpdir)
    subject_from = 'sample'
    subject_to = 'small'
    testing_dir = op.join(subjects_dir, subject_from)
    from_dir = op.join(tempdir, subject_from)
    # Build a minimal FreeSurfer subject dir by copying only the surface,
    # annotation and MRI files that scaling/annotation actually need.
    for root in ('mri', 'surf', 'label', 'bem'):
        os.makedirs(op.join(from_dir, root), exist_ok=True)
    for hemi in ('lh', 'rh'):
        for root, fname in (('surf', 'sphere'), ('surf', 'white'),
                            ('surf', 'sphere.reg'),
                            ('label', 'aparc.annot')):
            use_fname = op.join(root, f'{hemi}.{fname}')
            copyfile(op.join(testing_dir, use_fname),
                     op.join(from_dir, use_fname))
    for root, fname in (('mri', 'aseg.mgz'), ('mri', 'brain.mgz')):
        use_fname = op.join(root, fname)
        copyfile(op.join(testing_dir, use_fname),
                 op.join(from_dir, use_fname))
    del testing_dir
    if kind == 'surface':
        src_from = read_source_spaces(fname_src_3)
        assert src_from[0]['dist'] is None
        assert src_from[0]['nearest'] is not None
        # avoid patch calc
        src_from[0]['nearest'] = src_from[1]['nearest'] = None
        assert len(src_from) == 2
        assert src_from[0]['nuse'] == src_from[1]['nuse'] == 258
        klass = SourceEstimate
        labels_from = read_labels_from_annot(
            subject_from, subjects_dir=tempdir)
        n_labels = len(labels_from)
        write_source_spaces(op.join(tempdir, subject_from, 'bem',
                                    f'{subject_from}-oct-4-src.fif'), src_from)
    else:
        assert kind == 'volume'
        pytest.importorskip('dipy')
        src_from = read_source_spaces(fname_src_vol)
        src_from[0]['subject_his_id'] = subject_from
        # For volumes the "labels" are an aseg atlas file, not Label objects.
        labels_from = op.join(
            tempdir, subject_from, 'mri', 'aseg.mgz')
        n_labels = 46
        assert op.isfile(labels_from)
        klass = VolSourceEstimate
        assert len(src_from) == 1
        assert src_from[0]['nuse'] == 4157
        write_source_spaces(
            op.join(from_dir, 'bem', 'sample-vol20-src.fif'), src_from)
    # Create the scaled surrogate subject.
    scale_mri(subject_from, subject_to, scale, subjects_dir=tempdir,
              annot=True, skip_fiducials=True, verbose=True,
              overwrite=True)
    if kind == 'surface':
        src_to = read_source_spaces(
            op.join(tempdir, subject_to, 'bem',
                    f'{subject_to}-oct-4-src.fif'))
        labels_to = read_labels_from_annot(
            subject_to, subjects_dir=tempdir)
        # Save time since we know these subjects are identical
        monkeypatch.setattr(mne.morph_map, '_make_morph_map_hemi',
                            _make_morph_map_hemi_same)
    else:
        src_to = read_source_spaces(
            op.join(tempdir, subject_to, 'bem',
                    f'{subject_to}-vol20-src.fif'))
        labels_to = op.join(
            tempdir, subject_to, 'mri', 'aseg.mgz')
    # 1. Label->STC->Label for the given subject should be identity
    # (for surfaces at least; for volumes it's not as clean as this
    # due to interpolation)
    n_times = 50
    rng = np.random.RandomState(0)
    label_tc = rng.randn(n_labels, n_times)
    # check that a random permutation of our labels yields a terrible
    # correlation
    corr = np.corrcoef(label_tc.ravel(),
                       rng.permutation(label_tc).ravel())[0, 1]
    assert -0.06 < corr < 0.06
    # project label activations to full source space
    with pytest.raises(ValueError, match='subject'):
        labels_to_stc(labels_from, label_tc, src=src_from, subject='foo')
    stc = labels_to_stc(labels_from, label_tc, src=src_from)
    assert stc.subject == 'sample'
    assert isinstance(stc, klass)
    label_tc_from = extract_label_time_course(
        stc, labels_from, src_from, mode='mean')
    if kind == 'surface':
        assert_allclose(label_tc, label_tc_from, rtol=1e-12, atol=1e-12)
    else:
        corr = np.corrcoef(label_tc.ravel(), label_tc_from.ravel())[0, 1]
        assert 0.93 < corr < 0.95
    #
    # 2. Changing STC subject to the surrogate and then extracting
    #
    stc.subject = subject_to
    label_tc_to = extract_label_time_course(
        stc, labels_to, src_to, mode='mean')
    assert_allclose(label_tc_from, label_tc_to, rtol=1e-12, atol=1e-12)
    stc.subject = subject_from
    #
    # 3. Morphing STC to new subject then extracting
    #
    # NOTE(review): test_morph is True on every branch; the branching only
    # selects which warning context (if any) wraps compute_source_morph.
    if isinstance(scale, tuple) and kind == 'volume':
        ctx = nullcontext()
        test_morph = True
    elif kind == 'surface':
        ctx = pytest.warns(RuntimeWarning, match='not included')
        test_morph = True
    else:
        ctx = nullcontext()
        test_morph = True
    with ctx:  # vertices not included
        morph = compute_source_morph(
            src_from, subject_to=subject_to, src_to=src_to,
            subjects_dir=tempdir, niter_sdr=(), smooth=1,
            zooms=14., verbose=True)  # speed up with higher zooms
    if kind == 'volume':
        # The affine recovered by the morph should be (approximately) the
        # pure scaling we applied, with little translation.
        got_affine = morph.pre_affine.affine
        want_affine = np.eye(4)
        want_affine.ravel()[::5][:3] = 1. / np.array(scale, float)
        # just a scaling (to within 1% if zooms=None, 20% with zooms=10)
        assert_allclose(want_affine[:, :3], got_affine[:, :3], atol=2e-1)
        assert got_affine[3, 3] == 1.
        # little translation (to within `limit` mm)
        move = np.linalg.norm(got_affine[:3, 3])
        limit = 2. if scale == 1. else 12
        assert move < limit, scale
    if test_morph:
        stc_to = morph.apply(stc)
        label_tc_to_morph = extract_label_time_course(
            stc_to, labels_to, src_to, mode='mean')
        if kind == 'volume':
            corr = np.corrcoef(
                label_tc.ravel(), label_tc_to_morph.ravel())[0, 1]
            if isinstance(scale, tuple):
                # some other fixed constant
                # min_, max_ = 0.84, 0.855  # zooms='auto' values
                min_, max_ = 0.57, 0.67
            elif scale == 1:
                # min_, max_ = 0.85, 0.875  # zooms='auto' values
                min_, max_ = 0.72, 0.75
            else:
                # min_, max_ = 0.84, 0.855  # zooms='auto' values
                min_, max_ = 0.61, 0.62
            assert min_ < corr <= max_, scale
        else:
            assert_allclose(
                label_tc, label_tc_to_morph, atol=1e-12, rtol=1e-12)
    #
    # 4. The same round trip from (1) but in the warped space
    #
    stc = labels_to_stc(labels_to, label_tc, src=src_to)
    assert isinstance(stc, klass)
    label_tc_to = extract_label_time_course(
        stc, labels_to, src_to, mode='mean')
    if kind == 'surface':
        assert_allclose(label_tc, label_tc_to, rtol=1e-12, atol=1e-12)
    else:
        corr = np.corrcoef(label_tc.ravel(), label_tc_to.ravel())[0, 1]
        assert 0.93 < corr < 0.96, scale
@testing.requires_testing_data
@pytest.mark.parametrize('kind', [
    'surface',
    pytest.param('volume', marks=[pytest.mark.slowtest,
                                  requires_version('nibabel')]),
])
def test_label_extraction_subject(kind):
    """Test that label extraction subject is treated properly."""
    if kind == 'surface':
        inv = read_inverse_operator(fname_inv)
        labels = read_labels_from_annot(
            'sample', subjects_dir=subjects_dir)
        labels_fs = read_labels_from_annot(
            'fsaverage', subjects_dir=subjects_dir)
        # drop "unknown" labels so both parcellations have matching counts
        labels_fs = [label for label in labels_fs
                     if not label.name.startswith('unknown')]
        assert all(label.subject == 'sample' for label in labels)
        assert all(label.subject == 'fsaverage' for label in labels_fs)
        assert len(labels) == len(labels_fs) == 68
        n_labels = 68
    else:
        assert kind == 'volume'
        inv = read_inverse_operator(fname_inv_vol)
        inv['src'][0]['subject_his_id'] = 'sample'  # modernize
        # volumetric "labels" are aseg atlas files rather than Label objects
        labels = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
        labels_fs = op.join(subjects_dir, 'fsaverage', 'mri', 'aseg.mgz')
        n_labels = 46
    src = inv['src']
    assert src.kind == kind
    assert src._subject == 'sample'
    ave = read_evokeds(fname_evoked)[0].apply_baseline((None, 0)).crop(0, 0.01)
    assert len(ave.times) == 4
    stc = apply_inverse(ave, inv)
    assert stc.subject == 'sample'
    ltc = extract_label_time_course(stc, labels, src)
    # an stc.subject that disagrees with the source space must raise
    stc.subject = 'fsaverage'
    with pytest.raises(ValueError, match=r'source spac.*not match.* stc\.sub'):
        extract_label_time_course(stc, labels, src)
    stc.subject = 'sample'
    assert ltc.shape == (n_labels, 4)
    # labels whose subject disagrees with stc/source space must raise too
    if kind == 'volume':
        with pytest.raises(RuntimeError, match='atlas.*not match.*source spa'):
            extract_label_time_course(stc, labels_fs, src)
    else:
        with pytest.raises(ValueError, match=r'label\.sub.*not match.* stc\.'):
            extract_label_time_course(stc, labels_fs, src)
        stc.subject = None
        with pytest.raises(ValueError, match=r'label\.sub.*not match.* sourc'):
            extract_label_time_course(stc, labels_fs, src)
|
{
"content_hash": "be0ddd468eaf877982e0ce4ecfde97a5",
"timestamp": "",
"source": "github",
"line_count": 1870,
"max_line_length": 79,
"avg_line_length": 40.87967914438503,
"alnum_prop": 0.5975407155471254,
"repo_name": "rkmaddox/mne-python",
"id": "23a2c3b7f608d4b5813000a77df813ad0c9acb33",
"size": "76498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/tests/test_source_estimate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3114"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4400215"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform_v1beta1
def sample_delete_custom_job():
    """Delete a Vertex AI custom job and block until the operation finishes."""
    # Build the job-service client with default credentials/endpoint.
    client = aiplatform_v1beta1.JobServiceClient()
    # Name identifies which custom job to delete.
    request = aiplatform_v1beta1.DeleteCustomJobRequest(name="name_value")
    # delete_custom_job returns a long-running operation handle.
    operation = client.delete_custom_job(request=request)
    print("Waiting for operation to complete...")
    # result() blocks until the server finishes (or raises on failure).
    response = operation.result()
    print(response)
# [END aiplatform_v1beta1_generated_JobService_DeleteCustomJob_sync]
|
{
"content_hash": "efe5772cc3b6a88df426068574713c15",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 24.52173913043478,
"alnum_prop": 0.7127659574468085,
"repo_name": "googleapis/python-aiplatform",
"id": "ec6f56faf55d2f0b7899c68c7e91be455b52dd18",
"size": "1961",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
from django.conf import settings as django_settings
from django.test.signals import setting_changed
# Default agent configuration; any key here may be overridden via the
# NEWRELIC_PLUGIN_AGENT dict in the project's Django settings.
DEFAULTS = {
    # wait one second between retries to obtain lock on metric timeslice
    'TIMESLICE_LOCK_RETRY_DELAY_MS': 1000,
    # base url for newrelic plugin api
    'NEWRELIC_BASE_URL': 'https://platform-api.newrelic.com',
    # license key for newrelic account
    'NEWRELIC_LICENSE_KEY': 'keyboardcat',  # placeholder; override in real deployments
}
class Settings(object):
    """Lazy, cached access to the NEWRELIC_PLUGIN_AGENT settings.

    Each setting name declared in ``DEFAULTS`` is looked up first in the
    user-supplied settings (or Django's ``NEWRELIC_PLUGIN_AGENT`` dict) and
    falls back to ``DEFAULTS``; the resolved value is then cached on the
    instance so ``__getattr__`` only runs once per name.
    """

    def __init__(self, user_settings=None):
        # Only pre-populate when settings were explicitly supplied;
        # otherwise the user_settings property reads Django settings lazily.
        if user_settings:
            self._user_settings = user_settings
        self.defaults = DEFAULTS

    @property
    def user_settings(self):
        # cache settings
        if not hasattr(self, '_user_settings'):
            self._user_settings = getattr(django_settings, 'NEWRELIC_PLUGIN_AGENT', {})
        return self._user_settings

    def __getattr__(self, attr):
        # Only names declared in DEFAULTS are valid settings
        # (membership test directly on the dict; no need for .keys()).
        if attr not in self.defaults:
            raise AttributeError("Invalid newrelic agent setting: '%s'" % attr)
        try:
            # Check if present in user settings
            val = self.user_settings[attr]
        except KeyError:
            # Fall back to defaults
            val = self.defaults[attr]
        # Cache the result on the instance so __getattr__ is bypassed next time
        setattr(self, attr, val)
        return val
# Module-level singleton used by the rest of the agent.
settings = Settings()
def reload_api_settings(*args, **kwargs):
    """Signal handler: rebuild the singleton when Django settings change.

    Connected to ``setting_changed`` so tests using ``override_settings``
    pick up a fresh NEWRELIC_PLUGIN_AGENT configuration.
    """
    global settings
    setting, value = kwargs['setting'], kwargs['value']
    if setting == 'NEWRELIC_PLUGIN_AGENT':
        settings = Settings(value)
# Re-read settings whenever Django reports a settings change.
setting_changed.connect(reload_api_settings)
|
{
"content_hash": "5d59a1b9a6f941c5ea83bde5d8541f8c",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 87,
"avg_line_length": 29.132075471698112,
"alnum_prop": 0.6366580310880829,
"repo_name": "ambitioninc/django-newrelic-plugin-agent",
"id": "84395254d3f53127ea27a22737092f2a820332b1",
"size": "1544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newrelic_plugin_agent/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30107"
}
],
"symlink_target": ""
}
|
import os
from os.path import abspath, dirname
import tornado.web
import tornado.httpserver
import tornado.ioloop
from tornado.log import app_log
from tornado.options import define, options
from mongoengine import connect
from tt.handle import MainHandler, MongoBackboneHandler, LoginHandler, RegisterHandler, LogoutHandler
# Directory layout: this module lives one level below the project root.
PROJECT_DIR = dirname(dirname(abspath(__file__)))
TEMPLATE_DIR = os.path.join(PROJECT_DIR, 'templates')
STATIC_DIR = os.path.join(PROJECT_DIR, 'static')
CONF_DIR = os.path.join(PROJECT_DIR, 'conf')
CONF_FILE = CONF_DIR+os.path.sep+"application.conf"
# Options readable from the command line or from CONF_FILE (tornado.options).
define("debug", default=True, type=bool)
define("port", default=8181, type=int)
class Application(tornado.web.Application):
    """Tornado application wiring URL routes, templates and MongoDB."""

    def __init__(self):
        # BUG FIX: this was a set literal ({...}); Tornado matches routes in
        # the order they are supplied, so an unordered set makes routing
        # order nondeterministic. Use a list to keep the declared order.
        handlers = [
            (r'/', MainHandler),
            (r'/sigin', LoginHandler),
            (r'/sigup', RegisterHandler),
            (r'/sigout', LogoutHandler),
            (r'/rest/([a-z]+)', MongoBackboneHandler),
            (r'/rest/([a-z]+)/(.+)', MongoBackboneHandler),
        ]
        settings = dict(
            template_path=TEMPLATE_DIR,
            static_path=STATIC_DIR,
            login_url="/sigin",
            register_url='/sigup',
            logout_url='/sigout',
            debug=options.debug,
            cookie_secret="123456"  # NOTE(review): hard-coded secret; move to config
        )
        # Open the default MongoDB connection before any handler runs.
        connect('test', host="mongodb://localhost:27017")
        tornado.web.Application.__init__(self, handlers, **settings)
def run():
    """Parse options, start the HTTP server and enter the IO loop."""
    tornado.options.parse_command_line()
    tornado.options.parse_config_file(CONF_FILE)
    # A PORT environment variable (e.g. from a PaaS) wins over the option.
    listen_port = os.environ.get("PORT", options.port)
    server = tornado.httpserver.HTTPServer(Application())
    server.listen(listen_port)
    app_log.info("application run on {0}".format(listen_port))
    tornado.ioloop.IOLoop.instance().start()
|
{
"content_hash": "08dc40355f7cf2f0c1988bb2ddb50483",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 101,
"avg_line_length": 32.228070175438596,
"alnum_prop": 0.6260206859009254,
"repo_name": "yunlzheng/tomatodo",
"id": "31d974a4346ace40a3ea581968d1172705439585",
"size": "1854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tt/application.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5671"
},
{
"name": "JavaScript",
"bytes": "123035"
},
{
"name": "Python",
"bytes": "9886"
}
],
"symlink_target": ""
}
|
import winsound
# Sweep the beep frequency from 100 Hz up to 1900 Hz in 100 Hz steps,
# sounding each tone for 75 ms (winsound is Windows-only; Python 2 syntax).
for i in range(100, 2000, 100):
    winsound.Beep(i, 75)
print "Hopefully you heard some sounds increasing in frequency!"
|
{
"content_hash": "b43c26e8c3405e4e94bf0b75d186fb6a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 64,
"avg_line_length": 27.8,
"alnum_prop": 0.7410071942446043,
"repo_name": "MalloyPower/parsing-python",
"id": "7b4fa15c0a68acfdef6db29db70bd9b40e6bed4c",
"size": "203",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/test/test_winsound.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
import os, os.path, popen2, re, string, sys
def textfile(file):
    """svn properties for a plain text file: native EOL translation."""
    props = {}
    props["svn:eol-style"] = "native"
    return props
def script(file):
    """svn properties for an executable text script: native EOLs + exec bit."""
    props = {"svn:executable": "*"}
    props["svn:eol-style"] = "native"
    return props
def executable(file):
    """svn properties for a binary executable (exec bit, opaque MIME type)."""
    props = {}
    props["svn:executable"] = "*"
    props["svn:mime-type"] = "application/octet-stream"
    return props
def binary(file):
    """svn properties for an opaque binary file."""
    return {"svn:mime-type": "application/octet-stream"}
def is_binary(file):
    """Return True if *file* contains any character outside string.printable.

    Used to decide whether a file gets text or binary svn properties.
    """
    f = open(file)
    try:
        data = f.read()
    finally:
        # Close the handle even if read() raises, so we never leak it.
        f.close()
    # Hoist the printable set: O(1) membership per character instead of a
    # linear scan of string.printable for every byte.
    printable = set(string.printable)
    for c in data:
        if c not in printable:
            return True
    return False
def binary_or_text(file):
    """Choose binary() or textfile() properties based on file content."""
    if not is_binary(file):
        return textfile(file)
    return binary(file)
# Maps a lower-cased file extension to the function that computes the svn
# properties files of that type should carry. Extensions not listed here
# trigger the warning in update() below when the file is versioned.
property_map = {
    ".asm" : textfile,
    ".bat" : script,
    ".cfg" : textfile,
    ".cg" : textfile,
    ".compositor" : textfile,
    ".config" : textfile,
    ".controls" : textfile,
    ".cs" : textfile,
    ".csproj" : textfile,
    ".dat" : binary_or_text,
    ".dds" : binary,
    ".dll" : binary,
    ".dylib" : binary,
    ".example" : textfile,
    ".exe" : executable,
    ".font" : textfile,
    ".fontdef" : textfile,
    ".frag" : textfile,
    ".fxcop" : textfile,
    ".glsl" : textfile,
    ".hlsl" : textfile,
    ".html" : textfile,
    ".ico" : binary,
    ".imageset" : textfile,
    ".inc" : textfile,
    ".include" : textfile,
    ".ini" : textfile,
    ".j2c" : binary,
    ".jp2" : binary,
    ".jpg" : binary,
    ".layout" : textfile,
    ".looknfeel" : textfile,
    ".lsl" : textfile,
    ".material" : textfile,
    ".mdp" : textfile,
    ".mds" : textfile,
    ".nsi" : textfile,
    ".overlay" : textfile,
    ".particle" : textfile,
    ".php" : script,
    ".pidb" : binary,
    ".pl" : script,
    ".png" : binary,
    ".program" : textfile,
    ".py" : script,
    ".rb" : script,
    ".resx" : textfile,
    ".scheme" : textfile,
    ".settings" : textfile,
    ".sh" : script,
    ".snk" : binary,
    ".so" : binary,
    ".sql" : textfile,
    ".stetic" : textfile,
    ".template" : textfile,
    ".tga" : binary,
    ".ttf" : binary,
    ".txt" : textfile,
    ".userprefs" : textfile,
    ".usertasks" : textfile,
    ".vert" : textfile,
    ".xml" : textfile,
    ".xsd" : textfile,
    ".zip" : binary
}
def propset(file, property, value):
    """Set an svn *property* to *value* on *file* via the svn CLI."""
    # NOTE(review): arguments are interpolated into a shell string; a value
    # containing double quotes would break the command.
    command = 'svn propset %s "%s" "%s"' % (property, value, file)
    os.system(command)
def propdel(file, property):
    """Delete an svn *property* from *file* via the svn CLI."""
    command = 'svn propdel %s "%s"' % (property, file)
    os.system(command)
def propget(file, property):
    """Return the value of an svn *property* on *file*.

    Returns the stripped `svn propget` output, or "" when svn writes to
    stderr (e.g. the file is not under version control).
    """
    output, input, error = popen2.popen3('svn propget %s "%s"' % (property, file))
    err = error.read()
    if err != "":
        # svn reported an error: close all three pipes and report "no value".
        output.close()
        error.close()
        input.close()
        return ""
    result = output.read()
    output.close()
    error.close()
    input.close()
    return result.strip()
def proplist(file):
    """Return the names of the svn properties set on *file*.

    Returns None when svn reports an error (typically an unversioned file),
    a list of property names on success, and "" when the output lacks the
    expected "Properties on ...:" header.
    NOTE(review): the ""-vs-None asymmetry looks accidental, but callers
    only distinguish None; preserved as-is.
    """
    output, input, error = popen2.popen3('svn proplist "%s"' % file)
    err = error.read()
    if err != "":
        output.close()
        error.close()
        input.close()
        return None
    result = output.readlines()
    output.close()
    error.close()
    input.close()
    # First line is "Properties on '<file>':"; the remaining lines are names.
    if len(result) > 0 and re.match("^Properties on .*:$", result[0]) is not None:
        return [r.strip() for r in result[1:]]
    else:
        return ""
def update_file(file, properties):
    """Make the svn properties of *file* exactly match *properties*."""
    current_props = proplist(file)
    if current_props is None:
        # svn error occurred -- probably an unversioned file
        return
    # Remove properties that should no longer be present.
    for p in current_props:
        if not properties.has_key(p):
            propdel(file, p)
    # Add missing properties and correct wrong values.
    for p in properties:
        if p not in current_props or propget(file, p) != properties[p]:
            propset(file, p, properties[p])
def update(dir):
    """Recursively apply the property map to every file under *dir*."""
    for f in os.listdir(dir):
        fullpath = os.path.join(dir, f)
        if os.path.isdir(fullpath):
            # Recurse into real directories only; never follow symlinks.
            if not os.path.islink(fullpath):
                update(fullpath)
        else:
            extension = os.path.splitext(fullpath)[1].lower()
            if property_map.has_key(extension):
                update_file(fullpath, property_map[extension](fullpath))
            elif extension != "" and proplist(fullpath) is not None:
                # Versioned file with an extension we have no rule for.
                print "Warning: No properties defined for %s files (%s)" % (extension, fullpath)
def main(argv = None):
    """Entry point: normalize svn properties under the current directory."""
    if argv is None:
        argv = sys.argv
    update(".")
if __name__ == "__main__":
    sys.exit(main())
|
{
"content_hash": "464a7f4280686545e148401445b13741",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 96,
"avg_line_length": 24.90810810810811,
"alnum_prop": 0.5184461805555556,
"repo_name": "jimmygkr/openviewer",
"id": "3fb1e466ba39164a0f88c559bb3ecb5f985f8f9c",
"size": "4631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "update-svn-properties.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "307"
},
{
"name": "C#",
"bytes": "30067"
},
{
"name": "Python",
"bytes": "4631"
},
{
"name": "Shell",
"bytes": "3390"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, get_object_or_404
from decisions.news.models import Entry
def entry(request, year, object_id, slug):
    """Render a single news entry, returning 404 when no entry matches."""
    matched = get_object_or_404(
        Entry, pub_date__year=year, pk=object_id, slug=slug)
    context = {"entry": matched}
    return render(request, "news/entry.html", context)
|
{
"content_hash": "c906f0f7d6e82fabb5ad080d7ff066b9",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 78,
"avg_line_length": 31.22222222222222,
"alnum_prop": 0.7153024911032029,
"repo_name": "okffi/decisions",
"id": "9bd3bc7cb305918d21693044b7172027cdacea85",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/decisions/news/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90"
},
{
"name": "HTML",
"bytes": "11708"
},
{
"name": "JavaScript",
"bytes": "3885"
},
{
"name": "Python",
"bytes": "49063"
}
],
"symlink_target": ""
}
|
"""Labels requests according to the type of content they represent."""
import collections
import logging
import operator
import os
import urlparse
import loading_trace
import request_track
class ContentClassificationLens(object):
  """Associates requests and frames with the type of content they represent."""
  def __init__(self, trace, ad_rules, tracking_rules):
    """Initializes an instance of ContentClassificationLens.

    Args:
      trace: (LoadingTrace) loading trace.
      ad_rules: ([str]) List of Adblock+ compatible rules used to classify ads.
      tracking_rules: ([str]) List of Adblock+ compatible rules used to
                      classify tracking and analytics.
    """
    self._trace = trace
    self._requests = trace.request_track.GetEvents()
    self._requests_by_id = {r.request_id: r for r in self._requests}
    self._main_frame_id = trace.page_track.GetEvents()[0]['frame_id']
    self._frame_to_requests = collections.defaultdict(list)
    self._ad_requests = set()
    self._tracking_requests = set()
    # Whitelisting (@@) rules are ignored for both matchers.
    self._ad_matcher = _RulesMatcher(ad_rules, True)
    self._tracking_matcher = _RulesMatcher(tracking_rules, True)
    self._document_url = self._GetDocumentUrl()
    self._GroupRequestsByFrameId()
    self._LabelRequests()
  def IsAdRequest(self, request):
    """Returns True iff the request matches one of the ad_rules."""
    return request.request_id in self._ad_requests
  def IsTrackingRequest(self, request):
    """Returns True iff the request matches one of the tracking_rules."""
    return request.request_id in self._tracking_requests
  def IsAdOrTrackingFrame(self, frame_id):
    """A Frame is an Ad frame if it's not the main frame and its main resource
    is ad or tracking-related.
    """
    if (frame_id not in self._frame_to_requests
        or frame_id == self._main_frame_id):
      return False
    frame_requests = [self._requests_by_id[request_id]
                      for request_id in self._frame_to_requests[frame_id]]
    # The frame's main resource is its earliest request.
    sorted_frame_resources = sorted(
        frame_requests, key=operator.attrgetter('start_msec'))
    frame_main_resource = sorted_frame_resources[0]
    return (frame_main_resource.request_id in self._ad_requests
            or frame_main_resource.request_id in self._tracking_requests)
  def AdAndTrackingRequests(self):
    """Returns a list of requests linked to ads and tracking.

    Returns the union of:
    - Requests tagged as ad or tracking.
    - Requests originating from an ad frame.
    """
    frame_ids = {r.frame_id for r in self._requests}
    # (Python 2 module -- urlparse import above -- so filter returns a list.)
    ad_frame_ids = filter(self.IsAdOrTrackingFrame, frame_ids)
    return filter(lambda r: self.IsAdRequest(r) or self.IsTrackingRequest(r)
                  or r.frame_id in ad_frame_ids, self._requests)
  @classmethod
  def WithRulesFiles(cls, trace, ad_rules_filename, tracking_rules_filename):
    """Returns an instance of ContentClassificationLens with the rules read
    from files.
    """
    ad_rules = []
    tracking_rules = []
    # Missing rule files are treated as empty rule sets.
    if os.path.exists(ad_rules_filename):
      ad_rules = open(ad_rules_filename, 'r').readlines()
    if os.path.exists(tracking_rules_filename):
      tracking_rules = open(tracking_rules_filename, 'r').readlines()
    return ContentClassificationLens(trace, ad_rules, tracking_rules)
  def _GroupRequestsByFrameId(self):
    """Fills _frame_to_requests: frame_id -> [request_id, ...]."""
    for request in self._requests:
      frame_id = request.frame_id
      self._frame_to_requests[frame_id].append(request.request_id)
  def _LabelRequests(self):
    """Tags each request id as ad and/or tracking per the rule matchers."""
    for request in self._requests:
      request_id = request.request_id
      if self._ad_matcher.Matches(request, self._document_url):
        self._ad_requests.add(request_id)
      if self._tracking_matcher.Matches(request, self._document_url):
        self._tracking_requests.add(request_id)
  def _GetDocumentUrl(self):
    """Returns the main frame's last successfully loaded document URL."""
    main_frame_id = self._trace.page_track.GetMainFrameId()
    # Take the last one as JS redirects can change the document URL.
    document_url = None
    for r in self._requests:
      # 304: not modified.
      if r.frame_id == main_frame_id and r.status in (200, 304):
        document_url = r.document_url
    return document_url
class _RulesMatcher(object):
"""Matches requests with rules in Adblock+ format."""
_WHITELIST_PREFIX = '@@'
_RESOURCE_TYPE_TO_OPTIONS_KEY = {
'Script': 'script', 'Stylesheet': 'stylesheet', 'Image': 'image',
'XHR': 'xmlhttprequest'}
def __init__(self, rules, no_whitelist):
"""Initializes an instance of _RulesMatcher.
Args:
rules: ([str]) list of rules.
no_whitelist: (bool) Whether the whitelisting rules should be ignored.
"""
self._rules = self._FilterRules(rules, no_whitelist)
if self._rules:
try:
import adblockparser
self._matcher = adblockparser.AdblockRules(self._rules)
except ImportError:
logging.critical('Likely you need to install adblockparser. Try:\n'
' pip install --user adblockparser\n'
'For 10-100x better performance, also try:\n'
" pip install --user 're2 >= 0.2.21'")
raise
else:
self._matcher = None
def Matches(self, request, document_url):
"""Returns whether a request matches one of the rules."""
if self._matcher is None:
return False
url = request.url
return self._matcher.should_block(
url, self._GetOptions(request, document_url))
@classmethod
def _GetOptions(cls, request, document_url):
options = {}
resource_type = request.resource_type
option = cls._RESOURCE_TYPE_TO_OPTIONS_KEY.get(resource_type)
if option:
options[option] = True
if cls._IsThirdParty(request.url, document_url):
options['third-party'] = True
return options
@classmethod
def _FilterRules(cls, rules, no_whitelist):
if not no_whitelist:
return rules
else:
return [rule for rule in rules
if not rule.startswith(cls._WHITELIST_PREFIX)]
@classmethod
def _IsThirdParty(cls, url, document_url):
# Common definition of "third-party" is "not from the same TLD+1".
# Unfortunately, knowing what is a TLD is not trivial. To do it without a
# database, we use the following simple (and incorrect) rules:
# - co.{in,uk,jp,hk} is a TLD
# - com.{au,hk} is a TLD
# Otherwise, this is the part after the last dot.
return cls._GetTldPlusOne(url) != cls._GetTldPlusOne(document_url)
@classmethod
def _GetTldPlusOne(cls, url):
hostname = urlparse.urlparse(url).hostname
if not hostname:
return hostname
parts = hostname.split('.')
if len(parts) <= 2:
return hostname
tld_parts_count = 1
may_be_tld = parts[-2:]
if may_be_tld[0] == 'co' and may_be_tld[1] in ('in', 'uk', 'jp'):
tld_parts_count = 2
elif may_be_tld[0] == 'com' and may_be_tld[1] in ('au', 'hk'):
tld_parts_count = 2
tld_plus_one = '.'.join(parts[-(tld_parts_count + 1):])
return tld_plus_one
|
{
"content_hash": "a3c21d062bf76696ca388ac98e158a3b",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 79,
"avg_line_length": 37.20744680851064,
"alnum_prop": 0.6607576840600429,
"repo_name": "Samsung/ChromiumGStreamerBackend",
"id": "89f40e2fd2fab174a574c7722b541c91f0bc3366",
"size": "7158",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tools/android/loading/content_classification_lens.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from ledge.core import Ledge
from ledge import backends
from ledge import serializers
|
{
"content_hash": "c66980f0a3a17ce961f3420d36808e3c",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 29,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.8488372093023255,
"repo_name": "j4mie/ledge",
"id": "40da89f53fe8a3c8990c0c6e07a8e153d08f5fde",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ledge/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3039"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python2
# -*- Mode: Python; py-indent-offset: 8 -*-
# (C) Copyright Zack Rusin 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Zack Rusin <zack@kde.org>
import license
import gl_XML
import sys, getopt
class PrintGlEnums(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "gl_enums.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2005 Brian Paul All Rights Reserved.""", "BRIAN PAUL")
self.enum_table = {}
def printRealHeader(self):
print '#include "main/glheader.h"'
print '#include "main/enums.h"'
print '#include "main/imports.h"'
print '#include "main/mtypes.h"'
print ''
print 'typedef struct PACKED {'
print ' uint16_t offset;'
print ' int n;'
print '} enum_elt;'
print ''
return
def print_code(self):
print """
typedef int (*cfunc)(const void *, const void *);
/**
* Compare a key enum value to an element in the \c enum_string_table_offsets array.
*
* \c bsearch always passes the key as the first parameter and the pointer
* to the array element as the second parameter. We can elimiate some
* extra work by taking advantage of that fact.
*
* \param a Pointer to the desired enum name.
* \param b Pointer into the \c enum_string_table_offsets array.
*/
static int compar_nr( const int *a, enum_elt *b )
{
return a[0] - b->n;
}
static char token_tmp[20];
const char *_mesa_lookup_enum_by_nr( int nr )
{
enum_elt *elt;
STATIC_ASSERT(sizeof(enum_string_table) < (1 << 16));
elt = _mesa_bsearch(& nr, enum_string_table_offsets,
Elements(enum_string_table_offsets),
sizeof(enum_string_table_offsets[0]),
(cfunc) compar_nr);
if (elt != NULL) {
return &enum_string_table[elt->offset];
}
else {
/* this is not re-entrant safe, no big deal here */
_mesa_snprintf(token_tmp, sizeof(token_tmp) - 1, "0x%x", nr);
token_tmp[sizeof(token_tmp) - 1] = '\\0';
return token_tmp;
}
}
/**
* Primitive names
*/
static const char *prim_names[PRIM_MAX+3] = {
"GL_POINTS",
"GL_LINES",
"GL_LINE_LOOP",
"GL_LINE_STRIP",
"GL_TRIANGLES",
"GL_TRIANGLE_STRIP",
"GL_TRIANGLE_FAN",
"GL_QUADS",
"GL_QUAD_STRIP",
"GL_POLYGON",
"GL_LINES_ADJACENCY",
"GL_LINE_STRIP_ADJACENCY",
"GL_TRIANGLES_ADJACENCY",
"GL_TRIANGLE_STRIP_ADJACENCY",
"outside begin/end",
"unknown state"
};
/* Get the name of an enum given that it is a primitive type. Avoids
* GL_FALSE/GL_POINTS ambiguity and others.
*/
const char *
_mesa_lookup_prim_by_nr(GLuint nr)
{
if (nr < Elements(prim_names))
return prim_names[nr];
else
return "invalid mode";
}
"""
return
def printBody(self, api_list):
self.enum_table = {}
for api in api_list:
self.process_enums( api )
enum_table = []
for enum in sorted(self.enum_table.keys()):
low_pri = 9
best_name = ''
for [name, pri] in self.enum_table[ enum ]:
if pri < low_pri:
low_pri = pri
best_name = name
enum_table.append((enum, best_name))
string_offsets = {}
i = 0;
print 'LONGSTRING static const char enum_string_table[] = '
for enum, name in enum_table:
print ' "%s\\0"' % (name)
string_offsets[ enum ] = i
i += len(name) + 1
print ' ;'
print ''
print 'static const enum_elt enum_string_table_offsets[%u] =' % (len(enum_table))
print '{'
for enum, name in enum_table:
print ' { %5u, 0x%08X }, /* %s */' % (string_offsets[enum], enum, name)
print '};'
print ''
self.print_code()
return
def process_enums(self, api):
    """Collect candidate (name, priority) pairs for every enum in api.

    Accumulates into self.enum_table, mapping each numeric enum value to
    a list of [name, priority] pairs.  A given name is recorded at most
    once per value; the first occurrence wins.

    Args:
        api: API description object exposing enumIterateByName(), whose
            items carry .name, .value and a priority() accessor.
    """
    for obj in api.enumIterateByName():
        # setdefault replaces the explicit "not in -> create" dance.
        candidates = self.enum_table.setdefault(obj.value, [])
        name = "GL_" + obj.name
        # Skip duplicates instead of tracking a manual boolean flag; also
        # avoids calling priority() for names that will be discarded.
        if not any(existing == name for existing, _ in candidates):
            candidates.append([name, obj.priority()])
def show_usage():
    """Print usage text and exit with a non-zero status."""
    print "Usage: %s [-f input_file_name]" % sys.argv[0]
    sys.exit(1)
if __name__ == '__main__':
    # Parse repeated -f options; any getopt failure prints usage and exits.
    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "f:")
    except Exception,e:
        show_usage()
    api_list = []
    for (arg,val) in args:
        if arg == "-f":
            # Each -f names an XML API description file to load.
            api = gl_XML.parse_GL_API( val )
            api_list.append(api);
    printer = PrintGlEnums()
    printer.Print( api_list )
|
{
"content_hash": "97ec9ad0f4cd0fd2c3a1772a47b1cb7c",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 89,
"avg_line_length": 27.911627906976744,
"alnum_prop": 0.5880686552241293,
"repo_name": "execunix/vinos",
"id": "0214932b63a16140cc96e2ecdf14a617c2b008a0",
"size": "6001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xsrc/external/mit/MesaLib/dist/src/mapi/glapi/gen/gl_enums.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""Command-line interface to inspect and execute a graph in a SavedModel.
If TensorFlow is installed on your system through pip, the 'saved_model_cli'
binary can be invoked directly from command line.
At a high level, SavedModel CLI allows users to both inspect and execute
computations on a MetaGraphDef in a SavedModel. These are done through `show`
and `run` commands. Following is the usage of the two commands. SavedModel
CLI will also display this information with the -h option.
'show' command usage: saved_model_cli show [-h] --dir DIR [--tag_set TAG_SET]
[--signature_def SIGNATURE_DEF_KEY]
Examples:
To show all available tag-sets in the SavedModel:
$saved_model_cli show --dir /tmp/saved_model
To show all available SignatureDef keys in a MetaGraphDef specified by its
tag-set:
$saved_model_cli show --dir /tmp/saved_model --tag_set serve
For a MetaGraphDef with multiple tags in the tag-set, all tags must be passed
in, separated by ',':
$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu
To show all inputs and outputs TensorInfo for a specific SignatureDef specified
by the SignatureDef key in a MetaGraphDef:
$saved_model_cli show --dir /tmp/saved_model --tag_set serve
--signature_def serving_default
Example output:
The given SavedModel SignatureDef contains the following input(s):
inputs['input0'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
inputs['input1'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
The given SavedModel SignatureDef contains the following output(s):
outputs['output'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
Method name is: tensorflow/serving/regress
To show all available information in the SavedModel:
$saved_model_cli show --dir /tmp/saved_model --all
'run' command usage: saved_model_cli run [-h] --dir DIR --tag_set TAG_SET
--signature_def SIGNATURE_DEF_KEY --inputs INPUTS
[--outdir OUTDIR] [--overwrite]
Examples:
To run input tensors from files through a MetaGraphDef and save the output
tensors to files:
$saved_model_cli run --dir /tmp/saved_model --tag_set serve
--signature_def serving_default --inputs x:0=/tmp/124.npz,x2=/tmp/123.npy
--outdir /tmp/out
To observe the intermediate Tensor values in the runtime graph, use the
--tf_debug flag, e.g.:
$saved_model_cli run --dir /tmp/saved_model --tag_set serve
--signature_def serving_default --inputs x:0=/tmp/124.npz,x2=/tmp/123.npy
--outdir /tmp/out --tf_debug
To build this tool from source, run:
$bazel build tensorflow/python/tools:saved_model_cli
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import warnings
import numpy as np
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import app
from tensorflow.python.saved_model import loader
def _show_tag_sets(saved_model_dir):
  """Print every tag-set stored in the SavedModel directory.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
  """
  tag_set_list = reader.get_saved_model_tag_sets(saved_model_dir)
  print('The given SavedModel contains the following tag-sets:')
  for tags in sorted(tag_set_list):
    print(', '.join(sorted(tags)))
def _show_signature_def_map_keys(saved_model_dir, tag_set):
  """Print the SignatureDef keys available under the given tag-set.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
    tag_set: Comma-separated tags identifying one MetaGraphDef; for a
      tag-set with multiple tags, every tag must be supplied.
  """
  sorted_keys = sorted(get_signature_def_map(saved_model_dir, tag_set).keys())
  print('The given SavedModel MetaGraphDef contains SignatureDefs with the '
        'following keys:')
  for key in sorted_keys:
    print('SignatureDef key: \"%s\"' % key)
def _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                signature_def_key):
  """Map each input key of the chosen SignatureDef to its TensorInfo.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer holding the SignatureDef
      map to search.
    signature_def_key: Key of the SignatureDef to look up.

  Returns:
    A dictionary that maps input tensor keys to TensorInfos.
  """
  signature_def = signature_def_utils.get_signature_def_by_key(
      meta_graph_def, signature_def_key)
  return signature_def.inputs
def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                 signature_def_key):
  """Map each output key of the chosen SignatureDef to its TensorInfo.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer holding the SignatureDef
      map to search.
    signature_def_key: Key of the SignatureDef to look up.

  Returns:
    A dictionary that maps output tensor keys to TensorInfos.
  """
  signature_def = signature_def_utils.get_signature_def_by_key(
      meta_graph_def, signature_def_key)
  return signature_def.outputs
def _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key):
  """Print input/output TensorInfo details for one SignatureDef.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
    tag_set: Comma-separated tags identifying one MetaGraphDef; for a
      tag-set with multiple tags, every tag must be supplied.
    signature_def_key: A SignatureDef key string.
  """
  meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
  input_infos = _get_inputs_tensor_info_from_meta_graph_def(
      meta_graph, signature_def_key)
  output_infos = _get_outputs_tensor_info_from_meta_graph_def(
      meta_graph, signature_def_key)
  print('The given SavedModel SignatureDef contains the following input(s):')
  for key, tensor_info in sorted(input_infos.items()):
    print('inputs[\'%s\'] tensor_info:' % key)
    _print_tensor_info(tensor_info)
  print('The given SavedModel SignatureDef contains the following output(s):')
  for key, tensor_info in sorted(output_infos.items()):
    print('outputs[\'%s\'] tensor_info:' % key)
    _print_tensor_info(tensor_info)
  print('Method name is: %s' %
        meta_graph.signature_def[signature_def_key].method_name)
def _print_tensor_info(tensor_info):
  """Print the dtype and shape of a single TensorInfo.

  Args:
    tensor_info: TensorInfo object to be printed.
  """
  dtype_name = types_pb2.DataType.keys()[tensor_info.dtype]
  print(' dtype: ' + dtype_name)
  # Render the shape as a tuple-like string, or flag an unknown rank.
  if tensor_info.tensor_shape.unknown_rank:
    rendered_shape = 'unknown_rank'
  else:
    rendered_shape = '(%s)' % ', '.join(
        str(dim.size) for dim in tensor_info.tensor_shape.dim)
  print(' shape: ' + rendered_shape)
def _show_all(saved_model_dir):
  """Print everything: tag-sets, SignatureDef keys and their tensors.

  Walks every MetaGraphDef in the SavedModel and, for each tag-set,
  prints all SignatureDef keys together with their input and output
  TensorInfos.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect.
  """
  for raw_tags in sorted(reader.get_saved_model_tag_sets(saved_model_dir)):
    tag_set = ', '.join(raw_tags)
    print('\nMetaGraphDef with tag-set: \'' + tag_set +
          '\' contains the following SignatureDefs:')
    signature_map = get_signature_def_map(saved_model_dir, tag_set)
    for key in sorted(signature_map.keys()):
      print('\nsignature_def[\'' + key + '\']:')
      _show_inputs_outputs(saved_model_dir, tag_set, key)
def get_meta_graph_def(saved_model_dir, tag_set):
  """Return the MetaGraphDef whose tags exactly match tag_set.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect or
      execute.
    tag_set: Comma-separated tags; must match the MetaGraphDef's tag-set
      exactly (every tag supplied, none extra).

  Raises:
    RuntimeError: An error when the given tag-set does not exist in the
      SavedModel.

  Returns:
    A MetaGraphDef corresponding to the tag-set.
  """
  wanted_tags = set(tag_set.split(','))
  saved_model = reader.read_saved_model(saved_model_dir)
  for candidate in saved_model.meta_graphs:
    if set(candidate.meta_info_def.tags) == wanted_tags:
      return candidate
  raise RuntimeError('MetaGraphDef associated with tag-set ' + tag_set +
                     ' could not be found in SavedModel')
def get_signature_def_map(saved_model_dir, tag_set):
  """Return the SignatureDef map of the MetaGraphDef for tag_set.

  Args:
    saved_model_dir: Directory containing the SavedModel to inspect or
      execute.
    tag_set: Comma-separated tags identifying one MetaGraphDef; for a
      tag-set with multiple tags, every tag must be supplied.

  Returns:
    A SignatureDef map that maps from string keys to SignatureDefs.
  """
  return get_meta_graph_def(saved_model_dir, tag_set).signature_def
def run_saved_model_with_feed_dict(saved_model_dir, tag_set, signature_def_key,
                                   input_tensor_key_feed_dict, outdir,
                                   overwrite_flag, tf_debug=False):
  """Runs SavedModel and fetches all outputs.

  Runs the input dictionary through the MetaGraphDef within a SavedModel
  specified by the given tag_set and SignatureDef. Also saves the outputs to
  file if outdir is not None.

  Args:
    saved_model_dir: Directory containing the SavedModel to execute.
    tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in
      string format, separated by ','. For a tag-set containing multiple tags,
      all tags must be passed in.
    signature_def_key: A SignatureDef key string.
    input_tensor_key_feed_dict: A dictionary that maps input keys to numpy
      ndarrays.
    outdir: A directory to save the outputs to. If the directory doesn't
      exist, it will be created.
    overwrite_flag: A boolean flag to allow overwriting the output file if a
      file with the same name already exists.
    tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe
      intermediate Tensor values and runtime GraphDefs while running the
      SavedModel.

  Raises:
    RuntimeError: An error when an output file already exists and overwrite is
      not enabled.
  """
  # Get a list of output tensor names.
  meta_graph_def = get_meta_graph_def(saved_model_dir, tag_set)
  # Re-create feed_dict based on input tensor name instead of key as session.run
  # uses tensor name.
  inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  inputs_feed_dict = {
      inputs_tensor_info[key].name: tensor
      for key, tensor in input_tensor_key_feed_dict.items()
  }
  # Get outputs
  outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(
      meta_graph_def, signature_def_key)
  # Sort to preserve order because we need to go from value to key later.
  output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())
  output_tensor_names_sorted = [
      outputs_tensor_info[tensor_key].name
      for tensor_key in output_tensor_keys_sorted
  ]
  # Run in a fresh graph so nothing from the caller's default graph leaks in.
  with session.Session(graph=ops_lib.Graph()) as sess:
    loader.load(sess, tag_set.split(','), saved_model_dir)
    if tf_debug:
      # Wrap the session so TFDBG can observe intermediate tensors.
      sess = local_cli_wrapper.LocalCLIDebugWrapperSession(sess)
    outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict)
    for i, output in enumerate(outputs):
      # Names and values share an index because both lists were built from
      # output_tensor_keys_sorted.
      output_tensor_key = output_tensor_keys_sorted[i]
      print('Result for output key %s:\n%s' % (output_tensor_key, output))
      # Only save if outdir is specified.
      if outdir:
        # Create directory if outdir does not exist
        if not os.path.isdir(outdir):
          os.makedirs(outdir)
        output_full_path = os.path.join(outdir, output_tensor_key + '.npy')
        # If overwrite not enabled and file already exist, error out
        if not overwrite_flag and os.path.exists(output_full_path):
          raise RuntimeError(
              'Output file %s already exists. Add \"--overwrite\" to overwrite'
              ' the existing output files.' % output_full_path)
        np.save(output_full_path, output)
        print('Output %s is saved to %s' % (output_tensor_key,
                                            output_full_path))
def preprocess_input_arg_string(inputs_str):
  """Parses the --inputs arg into {input_key: (filename, variable_name)}.

  Parses an input string in the format of, for example,
  "input1=filename1[variable_name1],input2=filename2" into a dictionary
  that looks like
  {'input_key1': (filename1, variable_name1),
   'input_key2': (file2, None)}
  , which maps input keys to a tuple of file name and variable name (None
  if empty).

  Args:
    inputs_str: A string that specifies where to load inputs. Each input is
      separated by a comma.
      * If the command line arg for inputs is quoted and contains
        whitespace(s), all whitespaces will be ignored.
      * For each input key:
        'input=filename<[variable_name]>'
      * The "[variable_name]" key is optional. Will be set to None if not
        specified.

  Returns:
    A dictionary that maps input keys to a tuple of file name and variable
    name.

  Raises:
    RuntimeError: An error when the given input is in a bad format.
  """
  # One compiled pattern with an optional "[variable_name]" group replaces
  # the previous pair of sequential matches per entry.
  entry_pattern = re.compile(r'^([\w\-]+)=([\w\-.\/]+)(?:\[([\w\-]+)\])?$')
  input_dict = {}
  for input_raw in filter(bool, inputs_str.split(',')):  # skip empty strings
    # Remove quotes and whitespaces.
    input_raw = input_raw.replace('"', '').replace('\'', '').replace(' ', '')
    match = entry_pattern.match(input_raw)
    if not match:
      raise RuntimeError(
          'Input \"%s\" format is incorrect. Please follow \"--inputs '
          'input_key=file_name[variable_name]\" or input_key=file_name' %
          input_raw)
    # group(3) is None when the "[variable_name]" suffix is absent.
    input_dict[match.group(1)] = (match.group(2), match.group(3))
  return input_dict
def load_inputs_from_input_arg_string(inputs_str):
  """Parses the input arg string and loads inputs into a dictionary.

  Parses an input string in the format of, for example,
  "input1=filename1[variable_name1],input2=filename2" into a
  dictionary that looks like
  {'input1:0': ndarray_saved_as_variable_name1_in_filename1 ,
   'input2:0': ndarray_saved_in_filename2}
  , which maps input keys to a numpy ndarray loaded from file. See the Args
  section for more details on the inputs format.

  Args:
    inputs_str: A string that specifies where to load inputs. Each input is
      separated by a comma.
      * If the command line arg for inputs is quoted and contains
        whitespace(s), all whitespaces will be ignored.
      * For each input key:
        'input=filename[variable_name]'
      * The file specified by 'filename' will be loaded using numpy.load.
        Inputs can be loaded from only .npy, .npz or pickle files.
      * The "[variable_name]" key is optional depending on the input file
        type, as described in more detail below.
    When loading from a npy file, which always contains a numpy ndarray, the
    content will be directly assigned to the specified input tensor. If a
    variable_name is specified, it will be ignored and a warning will be
    issued.
    When loading from a npz zip file, the user can specify which variable
    within the zip file to load for the input tensor inside the square
    brackets. If nothing is specified, this function will check that only one
    file is included in the zip and load it for the specified input tensor.
    When loading from a pickle file, if no variable_name is specified in the
    square brackets, whatever that is inside the pickle file will be passed
    to the specified input tensor, else SavedModel CLI will assume a
    dictionary is stored in the pickle file and the value corresponding to
    the variable_name will be used.

  Returns:
    A dictionary that maps input tensor keys to a numpy ndarray loaded from
    file.

  Raises:
    RuntimeError: An error when a key is specified, but the input file
      contains multiple numpy ndarrays, none of which matches the given key.
    RuntimeError: An error when no key is specified, but the input file
      contains more than one numpy ndarrays.
  """
  tensor_key_feed_dict = {}
  for input_tensor_key, (
      filename,
      variable_name) in preprocess_input_arg_string(inputs_str).items():
    # When a variable_name key is specified for the input file
    if variable_name:
      data = np.load(filename)
      # if file contains a single ndarray, ignore the input name
      if isinstance(data, np.ndarray):
        warnings.warn(
            'Input file %s contains a single ndarray. Name key \"%s\" ignored.'
            % (filename, variable_name))
        tensor_key_feed_dict[input_tensor_key] = data
      else:
        # NpzFile/dict-like payload: look the requested variable up by name.
        if variable_name in data:
          tensor_key_feed_dict[input_tensor_key] = data[variable_name]
        else:
          raise RuntimeError(
              'Input file %s does not contain variable with name \"%s\".' %
              (filename, variable_name))
    # When no key is specified for the input file.
    else:
      data = np.load(filename)
      # Check if npz file only contains a single numpy ndarray.
      if isinstance(data, np.lib.npyio.NpzFile):
        variable_name_list = data.files
        if len(variable_name_list) != 1:
          raise RuntimeError(
              'Input file %s contains more than one ndarrays. Please specify '
              'the name of ndarray to use.' % filename)
        tensor_key_feed_dict[input_tensor_key] = data[variable_name_list[0]]
      else:
        tensor_key_feed_dict[input_tensor_key] = data
  return tensor_key_feed_dict
def show(args):
  """Handler for the `show` subcommand.

  Args:
    args: A namespace parsed from command line.
  """
  if args.all:
    # --all wins: dump every tag-set, SignatureDef and tensor info.
    _show_all(args.dir)
    return
  # Otherwise narrow progressively: no tag-set -> list tag-sets; a tag-set
  # without a SignatureDef key -> list keys; else show tensor details.
  if args.tag_set is None:
    _show_tag_sets(args.dir)
  elif args.signature_def is None:
    _show_signature_def_map_keys(args.dir, args.tag_set)
  else:
    _show_inputs_outputs(args.dir, args.tag_set, args.signature_def)
def run(args):
  """Function triggered by run command.

  Args:
    args: A namespace parsed from command line.
  """
  # Load numpy feeds from the --inputs spec, then execute the SavedModel.
  tensor_key_feed_dict = load_inputs_from_input_arg_string(args.inputs)
  run_saved_model_with_feed_dict(args.dir, args.tag_set, args.signature_def,
                                 tensor_key_feed_dict, args.outdir,
                                 args.overwrite, tf_debug=args.tf_debug)
def create_parser():
  """Creates a parser for the saved_model_cli command line arguments.

  Returns:
    An argparse.ArgumentParser with `show` and `run` subcommands whose
    `func` defaults dispatch to show() and run() respectively.
  """
  parser = argparse.ArgumentParser(
      description='saved_model_cli: Command-line interface for SavedModel')
  parser.add_argument('-v', '--version', action='version', version='0.1.0')
  subparsers = parser.add_subparsers(
      title='commands', description='valid commands', help='additional help')
  # show command
  # Fixed: the multi-line example previously omitted the space before
  # --signature_def and misplaced the ':' after the final '\n', producing
  # an unrunnable/garbled example in the help output.
  show_msg = (
      'Usage examples:\n'
      'To show all tag-sets in a SavedModel:\n'
      '$saved_model_cli show --dir /tmp/saved_model\n'
      'To show all available SignatureDef keys in a '
      'MetaGraphDef specified by its tag-set:\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve\n'
      'For a MetaGraphDef with multiple tags in the tag-set, all tags must be '
      'passed in, separated by \',\':\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu\n\n'
      'To show all inputs and outputs TensorInfo for a specific'
      ' SignatureDef specified by the SignatureDef key in a'
      ' MetaGraph.\n'
      '$saved_model_cli show --dir /tmp/saved_model --tag_set serve'
      ' --signature_def serving_default\n\n'
      'To show all available information in the SavedModel:\n'
      '$saved_model_cli show --dir /tmp/saved_model --all')
  parser_show = subparsers.add_parser(
      'show',
      description=show_msg,
      formatter_class=argparse.RawTextHelpFormatter)
  parser_show.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to inspect')
  parser_show.add_argument(
      '--all',
      action='store_true',
      help='if set, will output all information in given SavedModel')
  parser_show.add_argument(
      '--tag_set',
      type=str,
      default=None,
      help='tag-set of graph in SavedModel to show, separated by \',\'')
  parser_show.add_argument(
      '--signature_def',
      type=str,
      default=None,
      metavar='SIGNATURE_DEF_KEY',
      help='key of SignatureDef to display input(s) and output(s) for')
  parser_show.set_defaults(func=show)
  # run command
  # Fixed: the example previously said `saved_model_cli show` for the run
  # command and omitted the spaces between flags.
  run_msg = ('Usage example:\n'
             'To run input tensors from files through a MetaGraphDef and save'
             ' the output tensors to files:\n'
             '$saved_model_cli run --dir /tmp/saved_model --tag_set serve'
             ' --signature_def serving_default'
             ' --inputs x1=/tmp/124.npz[x],x2=/tmp/123.npy'
             ' --outdir=/out\n\n'
             'For more information about input file format, please see:\n')
  parser_run = subparsers.add_parser(
      'run', description=run_msg, formatter_class=argparse.RawTextHelpFormatter)
  parser_run.add_argument(
      '--dir',
      type=str,
      required=True,
      help='directory containing the SavedModel to execute')
  parser_run.add_argument(
      '--tag_set',
      type=str,
      required=True,
      help='tag-set of graph in SavedModel to load, separated by \',\'')
  parser_run.add_argument(
      '--signature_def',
      type=str,
      required=True,
      metavar='SIGNATURE_DEF_KEY',
      help='key of SignatureDef to run')
  msg = ('inputs in the format of \'input_key=filename[variable_name]\', '
         'separated by \',\'. Inputs can only be loaded from .npy, .npz or '
         'pickle files.')
  parser_run.add_argument('--inputs', type=str, required=True, help=msg)
  parser_run.add_argument(
      '--outdir',
      type=str,
      default=None,
      help='if specified, output tensor(s) will be saved to given directory')
  parser_run.add_argument(
      '--overwrite',
      action='store_true',
      help='if set, output file will be overwritten if it already exists.')
  parser_run.add_argument(
      '--tf_debug',
      action='store_true',
      help='if set, will use TensorFlow Debugger (tfdbg) to watch the '
           'intermediate Tensors and runtime GraphDefs while running the '
           'SavedModel.')
  parser_run.set_defaults(func=run)
  return parser
def main():
  # Build the CLI, parse argv, and dispatch to the selected subcommand's
  # handler (show/run) via the `func` default set on each subparser.
  parser = create_parser()
  args = parser.parse_args()
  args.func(args)


if __name__ == '__main__':
  sys.exit(main())
|
{
"content_hash": "36993e02063fb72071c1a9791b054dca",
"timestamp": "",
"source": "github",
"line_count": 640,
"max_line_length": 81,
"avg_line_length": 38.634375,
"alnum_prop": 0.6742295559330259,
"repo_name": "whn09/tensorflow",
"id": "17ef8ef9c23a6febc6fd265bc7b1ab8cb6b7fce9",
"size": "25415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/tools/saved_model_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "182478"
},
{
"name": "C++",
"bytes": "23440224"
},
{
"name": "CMake",
"bytes": "158302"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "804382"
},
{
"name": "HTML",
"bytes": "654838"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "14005"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37302"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "213841"
},
{
"name": "Python",
"bytes": "20372706"
},
{
"name": "Shell",
"bytes": "335987"
},
{
"name": "TypeScript",
"bytes": "1108203"
}
],
"symlink_target": ""
}
|
"""Scratchpad/chain of thought, demonstrated on gsm8k.
Scratchpad: https://arxiv.org/abs/2112.00114
Chain of thought: https://arxiv.org/abs/2201.11903
Self consistency (https://arxiv.org/abs/2203.11171) is done by marginalizing out
the `thought` in the `infer` operator.
"""
import dataclasses
import re
from typing import Dict, Iterable, Tuple, Optional, Text, List
import cascades as cc
from cascades.examples.tasks import gsm8k
# Standard chain of thought arithmetic prompts.
# From https://arxiv.org/abs/2112.00114
# Each exemplar is a "Q:"/"A:" pair whose answer sentence ends with
# "The answer is <n>.", the format _process_part relies on to split the
# reasoning from the final answer.
PROMPTS = """Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?
A: We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted. So, they must have planted 21 - 15 = 6 trees. The answer is 6.
Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?
A: There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.
Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?
A: Leah had 32 chocolates and Leah’s sister had 42. That means there were originally 32 + 42 = 74 chocolates. 35 have been eaten. So in total they still have 74 - 35 = 39 chocolates. The answer is 39.
Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?
A: Jason had 20 lollipops. Since he only has 12 now, he must have given the rest to Denny. The number of lollipops he has given to Denny must have been 20 - 12 = 8 lollipops. The answer is 8.
Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?
A: He has 5 toys. He got 2 from mom, so after that he has 5 + 2 = 7 toys. Then he got 2 more from dad, so in total he has 7 + 2 = 9 toys. The answer is 9.
Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?
A: There are 4 days from monday to thursday. 5 computers were added each day. That means in total 4 * 5 = 20 computers were added. There were 9 computers in the beginning, so now there are 9 + 20 = 29 computers. The answer is 29.
Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?
A: Michael initially had 58 balls. He lost 23 on Tuesday, so after that he has 58 - 23 = 35 balls. On Wednesday he lost 2 more so now he has 35 - 2 = 33 balls. The answer is 33.
Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?
A: She bought 5 bagels for $3 each. This means she spent 5 * $3 = $15 on the bagels. She had $23 in beginning, so now she has $23 - $15 = $8. The answer is 8.
"""
@dataclasses.dataclass
class ReasonIO:
  """A question plus optional reasoning text, answer, and example id."""
  question: Text
  reason: Optional[Text] = None
  answer: Optional[Text] = None
  id: Optional[Text] = None
# From https://arxiv.org/abs/2203.11171
def _process_part(qa: Text) -> ReasonIO:
  """Split one 'Q: ... A: ... The answer is N.' chunk into a ReasonIO."""
  question_part, answer_part = qa.split('A:')
  reason_text, answer_text = answer_part.split('The answer is')
  return ReasonIO(
      question=question_part.replace('Q:', '').strip(),
      reason=reason_text.strip(),
      answer=answer_text.replace('.', '').strip())
def load_chain_examples() -> Tuple[ReasonIO, ...]:
  """Load the standard chain of thought prompts used in the paper.

  Returns:
    A tuple of ReasonIO parsed from PROMPTS, one per Q/A exemplar.
  """
  # The first split element is the text before the first 'Q:' marker
  # (empty here); drop it.  Annotation fixed: a variable-length tuple is
  # Tuple[ReasonIO, ...], not Tuple[ReasonIO].
  parts = PROMPTS.split('Q:')[1:]
  chain_prompts = tuple(_process_part(x) for x in parts)
  return chain_prompts
def load_gsm8k(base_dir=gsm8k.GSM8K_PATH):
  """Load and process the gsm8k dataset."""
  raw_splits = gsm8k.load_dataset(base_dir=base_dir)
  return {
      'train': tuple(process_gsm8k_example(x) for x in raw_splits['train']),
      'test': tuple(process_gsm8k_example(x) for x in raw_splits['test']),
  }
def process_gsm8k_example(x: Dict[Text, Text]) -> ReasonIO:
  """Convert one raw gsm8k dict into a ReasonIO record."""
  # The raw 'answer' field holds reasoning, then '####', then the answer;
  # only the reasoning part is kept here (final_answer is separate).
  reason_text = x['answer'].split('####')[0].strip()
  return ReasonIO(
      question=x['question'],
      reason=reason_text,
      answer=x['final_answer'],
      id=x['id'])
def fewshot_prompt(examples: Iterable[ReasonIO],
                   target: Optional[ReasonIO] = None):
  """Construct a few shot prompt.

  Each example renders as Question/Reason/Answer lines; sections are
  separated by '==='.  When target is given, its question is appended
  with a dangling 'Reason' cue for the model to complete; otherwise a
  trailing separator is emitted.
  """
  parts = []
  for example in examples:
    if example.reason is None:
      raise ValueError('Must provide reason for few shot.')
    if example.answer is None:
      raise ValueError('Must provide answer for few shot.')
    parts.append(
        f'Question: {example.question}\nReason: {example.reason}\nAnswer: {example.answer}')
  if target:
    parts.append(f'Question: {target.question}\nReason')
  else:
    parts.append('')
  return '\n===\n'.join(parts).strip()
@cc.model
def sample_with_prompts(target: ReasonIO,
                        examples: List[ReasonIO],
                        n_prompts: Optional[int] = None,
                        lm=None,
                        max_calls=4):
  """Sample an answer for target given prompts.

  Args:
    target: Target task.
    examples: Prompting tasks.
    n_prompts: If None, use all examples in prompt. Otherwise, select this many
      examples to place in prompt.
    lm: Language model to use.
    max_calls: Max # of times to iterate the LM sampling.

  Yields:
    Cascade distributions (all LM sample nodes in this case).

  Returns:
    predicted answer string
  """
  # Select a random subset and ordering of the prompts.
  yield cc.log(value=target.id, name='problem_id')
  if n_prompts:
    chosen_examples = yield cc.sample(
        cc.Choose(k=n_prompts, options=examples), name='choose_prompts')
  else:
    chosen_examples = examples
  # Create few shot prompt
  prompt = fewshot_prompt(examples=chosen_examples, target=target)
  # Sample until we hit the end of example marker (`===`), then extract
  # the answer as rightmost digits.
  prediction = yield cc.sample(
      cc.String(prompt=prompt, lm=lm, k=max_calls, until='==='), name='thought')
  # Find right most number.
  nums = re.findall(r'\d+', prediction)
  if nums:
    answer = nums[-1]
  else:
    # NOTE(review): assumes cc.reject halts execution of this coroutine;
    # otherwise `answer` below would be unbound — confirm cascades semantics.
    yield cc.reject(reason=f'No answer found in `{prediction}`')
  return answer  # pytype: disable=name-error  # py310-upgrade
|
{
"content_hash": "fabbacbc4b46be271cace43428fe2c74",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 229,
"avg_line_length": 44.93055555555556,
"alnum_prop": 0.6935085007727976,
"repo_name": "google-research/cascades",
"id": "9a056ef282807488891504fb3d1076823e2ac48c",
"size": "7058",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cascades/examples/scratchpad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "28435"
},
{
"name": "Python",
"bytes": "111004"
}
],
"symlink_target": ""
}
|
"""
This module implements more advanced transformations.
"""
from __future__ import annotations
import logging
import math
import warnings
from fractions import Fraction
from itertools import groupby, product
from math import gcd
from string import ascii_lowercase
import numpy as np
from monty.dev import requires
from monty.fractions import lcm
from monty.json import MSONable
from pymatgen.analysis.adsorption import AdsorbateSiteFinder
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.energy_models import SymmetryModel
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.analysis.gb.grain import GrainBoundaryGenerator
from pymatgen.analysis.local_env import MinimumDistanceNN
from pymatgen.analysis.structure_matcher import SpinComparator, StructureMatcher
from pymatgen.analysis.structure_prediction.substitution_probability import (
SubstitutionPredictor,
)
from pymatgen.command_line.enumlib_caller import EnumError, EnumlibAdaptor
from pymatgen.command_line.mcsqs_caller import run_mcsqs
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.core.surface import SlabGenerator
from pymatgen.electronic_structure.core import Spin
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.standard_transformations import (
OrderDisorderedStructureTransformation,
SubstitutionTransformation,
SupercellTransformation,
)
from pymatgen.transformations.transformation_abc import AbstractTransformation
try:
import hiphive
except ImportError:
hiphive = None
# Public attribution string for this module.
__author__ = "Shyue Ping Ong, Stephen Dacek, Anubhav Jain, Matthew Horton, Alex Ganose"
# Module-level logger named after the module, per the stdlib logging convention.
logger = logging.getLogger(__name__)
class ChargeBalanceTransformation(AbstractTransformation):
    """
    Disorders a structure to make it charge balanced, given an oxidation
    state-decorated structure. Balancing is achieved by fractionally removing
    a user-chosen species.
    """

    def __init__(self, charge_balance_sp):
        """
        Args:
            charge_balance_sp: specie to add or remove. Currently only removal
                is supported
        """
        self.charge_balance_sp = str(charge_balance_sp)

    def apply_transformation(self, structure):
        """
        Apply the transformation.

        Args:
            structure: Input Structure

        Returns:
            Charge balanced structure.
        """
        specie = get_el_sp(self.charge_balance_sp)
        # Fraction of the balancing species that must be removed to cancel the
        # structure's net charge.
        ions_to_remove = structure.charge / specie.oxi_state
        fraction = ions_to_remove / structure.composition[specie]
        if fraction < 0:
            raise ValueError("addition of specie not yet supported by ChargeBalanceTransformation")
        substitution = SubstitutionTransformation(
            {self.charge_balance_sp: {self.charge_balance_sp: 1 - fraction}}
        )
        return substitution.apply_transformation(structure)

    def __str__(self):
        return f"Charge Balance Transformation : Species to remove = {self.charge_balance_sp}"

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class SuperTransformation(AbstractTransformation):
    """
    An inherently one-to-many transformation built from a list of
    transformations: one output structure is produced per transformation.
    The primary use for this class is extending a transmuter object.
    """

    def __init__(self, transformations, nstructures_per_trans=1):
        """
        Args:
            transformations ([transformations]): List of transformations to apply
                to a structure. One transformation is applied to each output
                structure.
            nstructures_per_trans (int): If the transformations are one-to-many and,
                nstructures_per_trans structures from each transformation are
                added to the full list. Defaults to 1, i.e., only best structure.
        """
        self._transformations = transformations
        self.nstructures_per_trans = nstructures_per_trans

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Apply the transformation.

        Args:
            structure: Input Structure
            return_ranked_list: Number of structures to return.

        Returns:
            Structures with all transformations applied.
        """
        if not return_ranked_list:
            raise ValueError("SuperTransformation has no single best structure output. Must use return_ranked_list")
        results = []
        for trans in self._transformations:
            if trans.is_one_to_many:
                # One-to-many sub-transformations already yield ranked dicts;
                # tag each entry with the transformation that produced it.
                ranked = trans.apply_transformation(structure, return_ranked_list=self.nstructures_per_trans)
                for entry in ranked:
                    entry["transformation"] = trans
                    results.append(entry)
            else:
                results.append(
                    {
                        "transformation": trans,
                        "structure": trans.apply_transformation(structure),
                    }
                )
        return results

    def __str__(self):
        return f"Super Transformation : Transformations = {' '.join(map(str, self._transformations))}"

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: True"""
        return True
class MultipleSubstitutionTransformation:
    """
    Performs multiple substitutions on a structure. For example, can do a
    fractional replacement of Ge in LiGePS with a list of species, creating one
    structure for each substitution. Ordering is done using a dummy element so
    only one ordering must be done per substitution oxidation state. Charge
    balancing of the structure is optionally performed.

    .. note::
        There are no checks to make sure that removal fractions are possible
        and rounding may occur. Currently charge balancing only works for
        removal of species.
    """

    def __init__(
        self,
        sp_to_replace,
        r_fraction,
        substitution_dict,
        charge_balance_species=None,
        order=True,
    ):
        """
        Performs multiple fractional substitutions on a transmuter.

        Args:
            sp_to_replace: species to be replaced
            r_fraction: fraction of that specie to replace
            substitution_dict: dictionary of the format
                {2: ["Mg", "Ti", "V", "As", "Cr", "Ta", "N", "Nb"],
                3: ["Ru", "Fe", "Co", "Ce", "As", "Cr", "Ta", "N", "Nb"],
                4: ["Ru", "V", "Cr", "Ta", "N", "Nb"],
                5: ["Ru", "W", "Mn"]
                }
                The number is the charge used for each of the list of elements
                (an element can be present in multiple lists)
            charge_balance_species: If specified, will balance the charge on
                the structure using that specie.
            order: Whether to order the resulting dummy-substituted structure
                with OrderDisorderedStructureTransformation. Defaults to True.
        """
        self.sp_to_replace = sp_to_replace
        self.r_fraction = r_fraction
        self.substitution_dict = substitution_dict
        self.charge_balance_species = charge_balance_species
        self.order = order

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Apply the transformation.

        Args:
            structure: Input Structure
            return_ranked_list: Number of structures to return.

        Returns:
            Structures with all substitutions applied.

        Raises:
            ValueError: if return_ranked_list is falsy, since this
                transformation always produces multiple structures.
        """
        if not return_ranked_list:
            raise ValueError(
                "MultipleSubstitutionTransformation has no single"
                " best structure output. Must use"
                " return_ranked_list."
            )
        outputs = []
        for charge, el_list in self.substitution_dict.items():
            sign = "+" if charge > 0 else "-"
            # Build the dummy-species label once ("X2+", "X2-", ...) and reuse
            # it below. The previous implementation created "X{charge}{sign}"
            # but substituted the key "X{charge}+", which never matched for
            # negative charges; using abs(charge) + sign consistently fixes
            # that while leaving positive-charge behavior unchanged.
            dummy_sp = f"X{abs(charge)}{sign}"
            mapping = {
                self.sp_to_replace: {
                    self.sp_to_replace: 1 - self.r_fraction,
                    dummy_sp: self.r_fraction,
                }
            }
            trans = SubstitutionTransformation(mapping)
            dummy_structure = trans.apply_transformation(structure)
            if self.charge_balance_species is not None:
                cbt = ChargeBalanceTransformation(self.charge_balance_species)
                dummy_structure = cbt.apply_transformation(dummy_structure)
            if self.order:
                trans = OrderDisorderedStructureTransformation()
                dummy_structure = trans.apply_transformation(dummy_structure)
            for el in el_list:
                # Swap the dummy species for the real element at the same
                # oxidation state magnitude and sign (e.g. "Mg2+", "N3-").
                st = SubstitutionTransformation({dummy_sp: f"{el}{abs(charge)}{sign}"})
                new_structure = st.apply_transformation(dummy_structure)
                outputs.append({"structure": new_structure})
        return outputs

    def __str__(self):
        return "Multiple Substitution Transformation : Substitution on " + f"{self.sp_to_replace}"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: True"""
        return True
class EnumerateStructureTransformation(AbstractTransformation):
    """
    Order a disordered structure using enumlib. For complete orderings, this
    generally produces fewer structures that the OrderDisorderedStructure
    transformation, and at a much faster speed.
    """

    def __init__(
        self,
        min_cell_size=1,
        max_cell_size=1,
        symm_prec=0.1,
        refine_structure=False,
        enum_precision_parameter=0.001,
        check_ordered_symmetry=True,
        max_disordered_sites=None,
        sort_criteria="ewald",
        timeout=None,
    ):
        """
        Args:
            min_cell_size:
                The minimum cell size wanted. Must be an int. Defaults to 1.
            max_cell_size:
                The maximum cell size wanted. Must be an int. Defaults to 1.
            symm_prec:
                Tolerance to use for symmetry.
            refine_structure:
                This parameter has the same meaning as in enumlib_caller.
                If you are starting from a structure that has been relaxed via
                some electronic structure code, it is usually much better to
                start with symmetry determination and then obtain a refined
                structure. The refined structure have cell parameters and
                atomic positions shifted to the expected symmetry positions,
                which makes it much less sensitive precision issues in enumlib.
                If you are already starting from an experimental cif, refinement
                should have already been done and it is not necessary. Defaults
                to False.
            enum_precision_parameter (float): Finite precision parameter for
                enumlib. Default of 0.001 is usually ok, but you might need to
                tweak it for certain cells.
            check_ordered_symmetry (bool): Whether to check the symmetry of
                the ordered sites. If the symmetry of the ordered sites is
                lower, the lowest symmetry ordered sites is included in the
                enumeration. This is important if the ordered sites break
                symmetry in a way that is important getting possible
                structures. But sometimes including ordered sites
                slows down enumeration to the point that it cannot be
                completed. Switch to False in those cases. Defaults to True.
            max_disordered_sites (int):
                An alternate parameter to max_cell size. Will sequentially try
                larger and larger cell sizes until (i) getting a result or (ii)
                the number of disordered sites in the cell exceeds
                max_disordered_sites. Must set max_cell_size to None when using
                this parameter.
            sort_criteria (str): Sort by Ewald energy ("ewald", must have oxidation
                states and slow) or by number of sites ("nsites", much faster).
            timeout (float): timeout in minutes to pass to EnumlibAdaptor
        """
        self.symm_prec = symm_prec
        self.min_cell_size = min_cell_size
        self.max_cell_size = max_cell_size
        self.refine_structure = refine_structure
        self.enum_precision_parameter = enum_precision_parameter
        self.check_ordered_symmetry = check_ordered_symmetry
        self.max_disordered_sites = max_disordered_sites
        self.sort_criteria = sort_criteria
        self.timeout = timeout
        if max_cell_size and max_disordered_sites:
            raise ValueError("Cannot set both max_cell_size and max_disordered_sites!")

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Returns either a single ordered structure or a sequence of all ordered
        structures.

        Args:
            structure: Structure to order.
            return_ranked_list (bool): Whether or not multiple structures are
                returned. If return_ranked_list is a number, that number of
                structures is returned.

        Returns:
            Depending on returned_ranked list, either a transformed structure
            or a list of dictionaries, where each dictionary is of the form
            {"structure" = .... , "other_arguments"}
            The list of ordered structures is ranked by ewald energy / atom, if
            the input structure is an oxidation state decorated structure.
            Otherwise, it is ranked by number of sites, with smallest number of
            sites first.

        Raises:
            ValueError: if enumeration fails for every attempted cell size.
        """
        try:
            num_to_return = int(return_ranked_list)
        except ValueError:
            num_to_return = 1
        if self.refine_structure:
            finder = SpacegroupAnalyzer(structure, self.symm_prec)
            structure = finder.get_refined_structure()
        contains_oxidation_state = all(
            hasattr(sp, "oxi_state") and sp.oxi_state != 0 for sp in structure.composition.elements
        )
        structures = None
        if structure.is_ordered:
            # Nothing to enumerate: an ordered structure is its own single
            # ordering. BUG FIX: previously this warning was issued but
            # execution fell through to the enumeration loop anyway
            # (overwriting `structures`), and with max_disordered_sites set it
            # raised ZeroDivisionError since ndisordered == 0. Enumeration is
            # now confined to the else-branch.
            warnings.warn(
                f"Enumeration skipped for structure with composition {structure.composition} because it is ordered"
            )
            structures = [structure.copy()]
        else:
            if self.max_disordered_sites:
                ndisordered = sum(1 for site in structure if not site.is_ordered)
                if ndisordered > self.max_disordered_sites:
                    raise ValueError(f"Too many disordered sites! ({ndisordered} > {self.max_disordered_sites})")
                # Try successively larger cells until enumeration succeeds or
                # the disordered-site budget would be exceeded.
                max_cell_sizes = range(
                    self.min_cell_size,
                    int(math.floor(self.max_disordered_sites / ndisordered)) + 1,
                )
            else:
                max_cell_sizes = [self.max_cell_size]
            for max_cell_size in max_cell_sizes:
                adaptor = EnumlibAdaptor(
                    structure,
                    min_cell_size=self.min_cell_size,
                    max_cell_size=max_cell_size,
                    symm_prec=self.symm_prec,
                    refine_structure=False,
                    enum_precision_parameter=self.enum_precision_parameter,
                    check_ordered_symmetry=self.check_ordered_symmetry,
                    timeout=self.timeout,
                )
                try:
                    adaptor.run()
                    structures = adaptor.structures
                    if structures:
                        break
                except EnumError:
                    warnings.warn(f"Unable to enumerate for max_cell_size = {max_cell_size}")
        if structures is None:
            raise ValueError("Unable to enumerate")
        original_latt = structure.lattice
        inv_latt = np.linalg.inv(original_latt.matrix)
        ewald_matrices = {}
        all_structures = []
        for s in structures:
            new_latt = s.lattice
            # Integer supercell matrix relating the enumerated cell to the
            # original lattice; used as a cache key so the (expensive) Ewald
            # summation is computed once per distinct supercell.
            transformation = np.dot(new_latt.matrix, inv_latt)
            transformation = tuple(tuple(int(round(cell)) for cell in row) for row in transformation)
            if contains_oxidation_state and self.sort_criteria == "ewald":
                if transformation not in ewald_matrices:
                    s_supercell = structure * transformation
                    ewald = EwaldSummation(s_supercell)
                    ewald_matrices[transformation] = ewald
                else:
                    ewald = ewald_matrices[transformation]
                energy = ewald.compute_sub_structure(s)
                all_structures.append({"num_sites": len(s), "energy": energy, "structure": s})
            else:
                all_structures.append({"num_sites": len(s), "structure": s})

        def sort_func(s):
            # Rank by Ewald energy per site when oxidation states are present,
            # otherwise by smallest cell.
            return (
                s["energy"] / s["num_sites"]
                if contains_oxidation_state and self.sort_criteria == "ewald"
                else s["num_sites"]
            )

        self._all_structures = sorted(all_structures, key=sort_func)
        if return_ranked_list:
            return self._all_structures[0:num_to_return]
        return self._all_structures[0]["structure"]

    def __str__(self):
        return "EnumerateStructureTransformation"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: True"""
        return True
class SubstitutionPredictorTransformation(AbstractTransformation):
    """
    Uses the structure prediction module to find likely site substitutions
    for a given input structure.
    """

    def __init__(self, threshold=1e-2, scale_volumes=True, **kwargs):
        """
        Args:
            threshold: Threshold for substitution.
            scale_volumes: Whether to scale volumes after substitution.
            **kwargs: Args for SubstitutionProbability class lambda_table, alpha
        """
        self.kwargs = kwargs
        self.threshold = threshold
        self.scale_volumes = scale_volumes
        self._substitutor = SubstitutionPredictor(threshold=threshold, **kwargs)

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Apply the transformation.

        Args:
            structure: Input Structure
            return_ranked_list: Number of structures to return.

        Returns:
            Predicted Structures.
        """
        if not return_ranked_list:
            raise ValueError("SubstitutionPredictorTransformation doesn't support returning 1 structure")
        predictions = self._substitutor.composition_prediction(structure.composition, to_this_composition=False)
        predictions.sort(key=lambda p: p["probability"], reverse=True)
        results = []
        for pred in predictions:
            trans = SubstitutionTransformation(pred["substitutions"])
            results.append(
                {
                    "structure": trans.apply_transformation(structure),
                    "probability": pred["probability"],
                    "threshold": self.threshold,
                    # dictionary keys have to be converted to strings for JSON
                    "substitutions": {str(k): str(v) for k, v in pred["substitutions"].items()},
                }
            )
        return results

    def __str__(self):
        return "SubstitutionPredictorTransformation"

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Returns: None"""
        return None

    @property
    def is_one_to_many(self):
        """Returns: True"""
        return True
class MagOrderParameterConstraint(MSONable):
    """
    Restricts a MagOrderingTransformation to just the subset of species or
    sites that satisfy the supplied constraints. This is useful for setting
    order parameters for, e.g., ferrimagnetic structures which might order on
    certain motifs, with the global order parameter dependent on how many
    sites satisfy that motif.
    """

    def __init__(
        self,
        order_parameter,
        species_constraints=None,
        site_constraint_name=None,
        site_constraints=None,
    ):
        """
        :param order_parameter (float): any number from 0.0 to 1.0,
            typically 0.5 (antiferromagnetic) or 1.0 (ferromagnetic)
        :param species_constraints (list): str or list of strings
            of Species symbols that the constraint should apply to
        :param site_constraint_name (str): name of the site property
            that the constraint should apply to, e.g. "coordination_no"
        :param site_constraints (list): list of values of the site
            property that the constraints should apply to
        """
        # Validate before normalizing inputs to lists, so the error messages
        # reflect what the caller actually passed.
        if site_constraints and site_constraints != [None] and not site_constraint_name:
            raise ValueError("Specify the name of the site constraint.")
        if not site_constraints and site_constraint_name:
            raise ValueError("Please specify some site constraints.")
        if not isinstance(species_constraints, list):
            species_constraints = [species_constraints]
        if not isinstance(site_constraints, list):
            site_constraints = [site_constraints]
        if order_parameter > 1 or order_parameter < 0:
            raise ValueError("Order parameter must lie between 0 and 1")
        if order_parameter != 0.5:
            warnings.warn(
                "Use care when using a non-standard order parameter, "
                "though it can be useful in some cases it can also "
                "lead to unintended behavior. Consult documentation."
            )
        self.order_parameter = order_parameter
        self.species_constraints = species_constraints
        self.site_constraint_name = site_constraint_name
        self.site_constraints = site_constraints

    def satisfies_constraint(self, site):
        """
        Checks if a periodic site satisfies the constraint.
        """
        if not site.is_ordered:
            return False
        # Species test first; a matching site-property test overrides it.
        matches = self.species_constraints and str(site.specie) in self.species_constraints
        name = self.site_constraint_name
        if name and name in site.properties:
            matches = site.properties[name] in self.site_constraints
        return matches
class MagOrderingTransformation(AbstractTransformation):
    """
    This transformation takes a structure and returns a list of collinear
    magnetic orderings. For disordered structures, make an ordered
    approximation first.
    """
    # NOTE(review): the default ``energy_model=SymmetryModel()`` is evaluated
    # once at import time, so all instances share that default object —
    # harmless only if SymmetryModel is stateless; confirm before relying on it.
    def __init__(self, mag_species_spin, order_parameter=0.5, energy_model=SymmetryModel(), **kwargs):
        """
        :param mag_species_spin: A mapping of elements/species to their
            spin magnitudes, e.g. {"Fe3+": 5, "Mn3+": 4}
        :param order_parameter (float or list): if float, a specifies a
            global order parameter and can take values from 0.0 to 1.0
            (e.g. 0.5 for antiferromagnetic or 1.0 for ferromagnetic), if
            list has to be a list of
            :class: `pymatgen.transformations.advanced_transformations.MagOrderParameterConstraint`
            to specify more complicated orderings, see documentation for
            MagOrderParameterConstraint more details on usage
        :param energy_model: Energy model to rank the returned structures,
            see :mod: `pymatgen.analysis.energy_models` for more information (note
            that this is not necessarily a physical energy). By default, returned
            structures use SymmetryModel() which ranks structures from most
            symmetric to least.
        :param kwargs: Additional kwargs that are passed to
            :class:`EnumerateStructureTransformation` such as min_cell_size etc.
        """
        # checking for sensible order_parameter values
        if isinstance(order_parameter, float):
            # convert to constraint format
            order_parameter = [
                MagOrderParameterConstraint(
                    order_parameter=order_parameter,
                    species_constraints=list(mag_species_spin.keys()),
                )
            ]
        elif isinstance(order_parameter, list):
            # accepted as long as at least one entry is a proper constraint
            ops = [isinstance(item, MagOrderParameterConstraint) for item in order_parameter]
            if not any(ops):
                raise ValueError("Order parameter not correctly defined.")
        else:
            raise ValueError("Order parameter not correctly defined.")
        self.mag_species_spin = mag_species_spin
        # store order parameter constraints as dicts to save implementing
        # to/from dict methods for MSONable compatibility
        self.order_parameter = [op.as_dict() for op in order_parameter]
        self.energy_model = energy_model
        self.enum_kwargs = kwargs
    @staticmethod
    def determine_min_cell(disordered_structure):
        """
        Determine the smallest supercell that is able to enumerate
        the provided structure with the given order parameter
        """
        # NOTE: this local lcm intentionally shadows the module-level
        # `monty.fractions.lcm` import; it returns a float (true division).
        def lcm(n1, n2):
            """
            Find least common multiple of two numbers
            """
            return n1 * n2 / gcd(n1, n2)
        # assumes all order parameters for a given species are the same
        mag_species_order_parameter = {}
        mag_species_occurrences = {}
        for idx, site in enumerate(disordered_structure):
            if not site.is_ordered:
                # the order parameter is the dominant occupancy on the site
                op = max(site.species.values())
                # this very hacky bit of code only works because we know
                # that on disordered sites in this class, all species are the same
                # but have different spins, and this is comma-delimited
                sp = str(list(site.species.keys())[0]).split(",", maxsplit=1)[0]
                if sp in mag_species_order_parameter:
                    mag_species_occurrences[sp] += 1
                else:
                    mag_species_order_parameter[sp] = op
                    mag_species_occurrences[sp] = 1
        smallest_n = []
        for sp, order_parameter in mag_species_order_parameter.items():
            # smallest cell multiple that makes the order parameter's
            # rational fraction representable over the sites of this species
            denom = Fraction(order_parameter).limit_denominator(100).denominator
            num_atom_per_specie = mag_species_occurrences[sp]
            n_gcd = gcd(denom, num_atom_per_specie)
            smallest_n.append(lcm(int(n_gcd), denom) / n_gcd)
        # NOTE(review): this returns a float (because of the float-returning
        # lcm above); callers cast with int() — confirm intended.
        return max(smallest_n)
    @staticmethod
    def _add_dummy_species(structure, order_parameters):
        """
        :param structure: ordered Structure
        :param order_parameters: list of MagOrderParameterConstraints
        :return: A structure decorated with disordered
            DummySpecies on which to perform the enumeration.
        Note that the DummySpecies are super-imposed on
        to the original sites, to make it easier to
        retrieve the original site after enumeration is
        performed (this approach is preferred over a simple
        mapping since multiple species may have the same
        DummySpecies, depending on the constraints specified).
        This approach can also preserve site properties even after
        enumeration.
        """
        dummy_struct = structure.copy()
        def generate_dummy_specie():
            """
            Generator which returns DummySpecies symbols Mma, Mmb, etc.
            """
            subscript_length = 1
            while True:
                for subscript in product(ascii_lowercase, repeat=subscript_length):
                    yield "Mm" + "".join(subscript)
                subscript_length += 1
        dummy_species_gen = generate_dummy_specie()
        # one dummy species for each order parameter constraint
        dummy_species_symbols = [next(dummy_species_gen) for i in range(len(order_parameters))]
        # each dummy is a disordered up/down pair whose occupancies encode
        # the constraint's order parameter
        dummy_species = [
            {
                DummySpecies(symbol, properties={"spin": Spin.up}): constraint.order_parameter,
                DummySpecies(symbol, properties={"spin": Spin.down}): 1 - constraint.order_parameter,
            }
            for symbol, constraint in zip(dummy_species_symbols, order_parameters)
        ]
        for idx, site in enumerate(dummy_struct):
            satisfies_constraints = [c.satisfies_constraint(site) for c in order_parameters]
            if satisfies_constraints.count(True) > 1:
                # site should either not satisfy any constraints, or satisfy
                # one constraint
                raise ValueError(f"Order parameter constraints conflict for site: {site.specie}, {site.properties}")
            if any(satisfies_constraints):
                dummy_specie_idx = satisfies_constraints.index(True)
                # NOTE(review): site.lattice is passed positionally into what
                # is presumably Structure.append's coords_are_cartesian slot;
                # it is truthy, so site.coords (cartesian) are interpreted as
                # cartesian — confirm against the Structure.append signature.
                dummy_struct.append(dummy_species[dummy_specie_idx], site.coords, site.lattice)
        return dummy_struct
    @staticmethod
    def _remove_dummy_species(structure):
        """
        :return: Structure with dummy species removed, but
            their corresponding spin properties merged with the
            original sites. Used after performing enumeration.

        Note: mutates ``structure`` in place (replace/remove_sites) and
        returns the same object.
        """
        if not structure.is_ordered:
            raise Exception("Something went wrong with enumeration.")
        sites_to_remove = []
        logger.debug(f"Dummy species structure:\n{structure}")
        for idx, site in enumerate(structure):
            if isinstance(site.specie, DummySpecies):
                sites_to_remove.append(idx)
                spin = site.specie._properties.get("spin", None)
                # the dummy was superimposed on the original site, so its only
                # very-near neighbor is the original site it decorates
                neighbors = structure.get_neighbors(
                    site,
                    0.05,  # arbitrary threshold, needs to be << any bond length
                    # but >> floating point precision issues
                    include_index=True,
                )
                if len(neighbors) != 1:
                    raise Exception(f"This shouldn't happen, found neighbors: {neighbors}")
                orig_site_idx = neighbors[0][2]
                orig_specie = structure[orig_site_idx].specie
                # transfer the dummy's spin onto the original species
                new_specie = Species(
                    orig_specie.symbol,
                    getattr(orig_specie, "oxi_state", None),
                    properties={"spin": spin},
                )
                structure.replace(
                    orig_site_idx,
                    new_specie,
                    properties=structure[orig_site_idx].properties,
                )
        structure.remove_sites(sites_to_remove)
        logger.debug(f"Structure with dummy species removed:\n{structure}")
        return structure
    def _add_spin_magnitudes(self, structure):
        """
        Replaces Spin.up/Spin.down with spin magnitudes specified
        by mag_species_spin.
        :param structure: structure to decorate (mutated in place)
        :return: the same structure, with spin magnitudes applied
        """
        for idx, site in enumerate(structure):
            if getattr(site.specie, "_properties", None):
                spin = site.specie._properties.get("spin", None)
                # Spin.up/Spin.down cast to +1/-1; 0 when no spin set
                sign = int(spin) if spin else 0
                if spin:
                    new_properties = site.specie._properties.copy()
                    # this very hacky bit of code only works because we know
                    # that on disordered sites in this class, all species are the same
                    # but have different spins, and this is comma-delimited
                    sp = str(site.specie).split(",", maxsplit=1)[0]
                    new_properties.update({"spin": sign * self.mag_species_spin.get(sp, 0)})
                    new_specie = Species(
                        site.specie.symbol,
                        getattr(site.specie, "oxi_state", None),
                        new_properties,
                    )
                    structure.replace(idx, new_specie, properties=site.properties)
        logger.debug(f"Structure with spin magnitudes:\n{structure}")
        return structure
    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Apply MagOrderTransformation to an input structure.
        :param structure: Any ordered structure.
        :param return_ranked_list: As in other Transformations.
        :return:
        """
        if not structure.is_ordered:
            raise ValueError("Create an ordered approximation of your input structure first.")
        # retrieve order parameters
        order_parameters = [MagOrderParameterConstraint.from_dict(op_dict) for op_dict in self.order_parameter]
        # add dummy species on which to perform enumeration
        structure = self._add_dummy_species(structure, order_parameters)
        # trivial case
        if structure.is_ordered:
            structure = self._remove_dummy_species(structure)
            # NOTE(review): relies on bool/int comparison — True > 1 is False,
            # so a plain return_ranked_list=True still returns the bare
            # structure; only an int > 1 yields a one-element list.
            return [structure] if return_ranked_list > 1 else structure
        enum_kwargs = self.enum_kwargs.copy()
        # enumeration cell must be large enough to represent the requested
        # order parameter as site occupancies
        enum_kwargs["min_cell_size"] = max(int(self.determine_min_cell(structure)), enum_kwargs.get("min_cell_size", 1))
        if enum_kwargs.get("max_cell_size", None):
            if enum_kwargs["min_cell_size"] > enum_kwargs["max_cell_size"]:
                warnings.warn(
                    f"Specified max cell size ({enum_kwargs['max_cell_size']}) is "
                    "smaller than the minimum enumerable cell size "
                    f"({enum_kwargs['min_cell_size']}), changing max cell size to "
                    f"{enum_kwargs['min_cell_size']}"
                )
                enum_kwargs["max_cell_size"] = enum_kwargs["min_cell_size"]
        else:
            enum_kwargs["max_cell_size"] = enum_kwargs["min_cell_size"]
        t = EnumerateStructureTransformation(**enum_kwargs)
        alls = t.apply_transformation(structure, return_ranked_list=return_ranked_list)
        # handle the fact that EnumerateStructureTransformation can either
        # return a single Structure or a list
        if isinstance(alls, Structure):
            # remove dummy species and replace Spin.up or Spin.down
            # with spin magnitudes given in mag_species_spin arg
            alls = self._remove_dummy_species(alls)
            alls = self._add_spin_magnitudes(alls)
        else:
            for idx, _ in enumerate(alls):
                alls[idx]["structure"] = self._remove_dummy_species(alls[idx]["structure"])
                alls[idx]["structure"] = self._add_spin_magnitudes(alls[idx]["structure"])
        try:
            num_to_return = int(return_ranked_list)
        except ValueError:
            num_to_return = 1
        # num_to_return == 0 implies return_ranked_list was falsy, in which
        # case `alls` is a bare Structure and is returned as-is below
        if num_to_return == 1 or not return_ranked_list:
            return alls[0]["structure"] if num_to_return else alls
        # remove duplicate structures and group according to energy model
        m = StructureMatcher(comparator=SpinComparator())
        # group candidates by space group first so StructureMatcher only
        # compares plausibly-equivalent structures
        def key(x):
            return SpacegroupAnalyzer(x, 0.1).get_space_group_number()
        out = []
        for _, g in groupby(sorted((d["structure"] for d in alls), key=key), key):
            g = list(g)
            grouped = m.group_structures(g)
            out.extend([{"structure": g[0], "energy": self.energy_model.get_energy(g[0])} for g in grouped])
        self._all_structures = sorted(out, key=lambda d: d["energy"])
        return self._all_structures[0:num_to_return]
    def __str__(self):
        return "MagOrderingTransformation"
    def __repr__(self):
        return self.__str__()
    @property
    def inverse(self):
        """Returns: None"""
        return None
    @property
    def is_one_to_many(self):
        """Returns: True"""
        return True
def _find_codopant(target, oxidation_state, allowed_elements=None):
    """
    Finds the element from "allowed elements" that (i) possesses the desired
    "oxidation state" and (ii) is closest in ionic radius to the target specie

    Args:
        target: (Species) provides target ionic radius.
        oxidation_state: (float) codopant oxidation state.
        allowed_elements: ([str]) List of allowed elements. If None,
            all elements are tried.

    Returns:
        (Species) with oxidation_state that has ionic radius closest to
        target.
    """
    target_radius = target.ionic_radius
    matches = []
    for symbol in allowed_elements or [el.symbol for el in Element]:
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                candidate = Species(symbol, oxidation_state)
                radius = candidate.ionic_radius
                if radius is not None:
                    matches.append((radius, candidate))
        except Exception:
            # symbols with no tabulated radius for this oxidation state are skipped
            pass
    # closest relative radius mismatch wins
    return min(matches, key=lambda pair: abs(pair[0] / target_radius - 1))[1]
class DopingTransformation(AbstractTransformation):
    """
    A transformation that performs doping of a structure.
    Candidate host sites are selected by matching oxidation state and ionic
    radius against the dopant; each partially substituted supercell is then
    enumerated with :class:`EnumerateStructureTransformation`.
    """
    def __init__(
        self,
        dopant,
        ionic_radius_tol=float("inf"),
        min_length=10,
        alio_tol=0,
        codopant=False,
        max_structures_per_enum=100,
        allowed_doping_species=None,
        **kwargs,
    ):
        """
        Args:
            dopant (Species-like): E.g., Al3+. Must have oxidation state.
            ionic_radius_tol (float): E.g., Fractional allowable ionic radii
                mismatch for dopant to fit into a site. Default of inf means
                that any dopant with the right oxidation state is allowed.
            min_length (float): Min. lattice parameter between periodic
                images of dopant. Defaults to 10A for now.
            alio_tol (int): If this is not 0, attempt will be made to dope
                sites with oxidation_states +- alio_tol of the dopant. E.g.,
                1 means that the ions like Ca2+ and Ti4+ are considered as
                potential doping sites for Al3+.
            codopant (bool): If True, doping will be carried out with a
                codopant to maintain charge neutrality. Otherwise, vacancies
                will be used.
            max_structures_per_enum (float): Maximum number of structures to
                return per enumeration. Note that there can be more than one
                candidate doping site, and each site enumeration will return at
                max max_structures_per_enum structures. Defaults to 100.
            allowed_doping_species (list): Species that are allowed to be
                doping sites. This is an inclusionary list. If specified,
                any sites which are not in this list are excluded as doping
                sites.
            **kwargs:
                Same keyword args as :class:`EnumerateStructureTransformation`,
                i.e., min_cell_size, etc.
        """
        self.dopant = get_el_sp(dopant)
        self.ionic_radius_tol = ionic_radius_tol
        self.min_length = min_length
        self.alio_tol = alio_tol
        self.codopant = codopant
        self.max_structures_per_enum = max_structures_per_enum
        self.allowed_doping_species = allowed_doping_species
        self.kwargs = kwargs
    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Args:
            structure (Structure): Input structure to dope
            return_ranked_list (bool/int): If an int, up to that many
                enumerated doped structures are returned; otherwise the
                single best structure is returned.
        Returns:
            [{"structure": Structure, "energy": float}]
        """
        comp = structure.composition
        logger.info(f"Composition: {comp}")
        # If any species lacks an oxidation state, decorate the whole
        # structure with BVAnalyzer-estimated oxidation states once.
        for sp in comp:
            try:
                sp.oxi_state
            except AttributeError:
                analyzer = BVAnalyzer()
                structure = analyzer.get_oxi_state_decorated_structure(structure)
                comp = structure.composition
                break
        ox = self.dopant.oxi_state
        radius = self.dopant.ionic_radius
        # Isovalent candidates: same oxidation state, radius within tolerance.
        compatible_species = [
            sp for sp in comp if sp.oxi_state == ox and abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol
        ]
        if (not compatible_species) and self.alio_tol:
            # We only consider aliovalent doping if there are no compatible
            # isovalent species.
            compatible_species = [
                sp
                for sp in comp
                if abs(sp.oxi_state - ox) <= self.alio_tol
                and abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol
                and sp.oxi_state * ox >= 0
            ]
        if self.allowed_doping_species is not None:
            # Only keep allowed doping species.
            compatible_species = [
                sp for sp in compatible_species if sp in [get_el_sp(s) for s in self.allowed_doping_species]
            ]
        logger.info(f"Compatible species: {compatible_species}")
        lengths = structure.lattice.abc
        # Scale each lattice direction up until it is at least min_length.
        scaling = [max(1, int(round(math.ceil(self.min_length / x)))) for x in lengths]
        logger.info(f"Lengths are {str(lengths)}")
        logger.info(f"Scaling = {str(scaling)}")
        all_structures = []
        t = EnumerateStructureTransformation(**self.kwargs)
        for sp in compatible_species:
            supercell = structure * scaling
            nsp = supercell.composition[sp]
            if sp.oxi_state == ox:
                # Isovalent: straightforward one-for-one substitution.
                supercell.replace_species({sp: {sp: (nsp - 1) / nsp, self.dopant: 1 / nsp}})
                logger.info(f"Doping {sp} for {self.dopant} at level {1 / nsp:.3f}")
            elif self.codopant:
                # Aliovalent with codopant: 2 * sp.oxi_state - ox gives the
                # complementary charge so dopant + codopant replace 2 sp
                # with no net charge change.
                codopant = _find_codopant(sp, 2 * sp.oxi_state - ox)
                supercell.replace_species({sp: {sp: (nsp - 2) / nsp, self.dopant: 1 / nsp, codopant: 1 / nsp}})
                logger.info(f"Doping {sp} for {self.dopant} + {codopant} at level {1 / nsp:.3f}")
            elif abs(sp.oxi_state) < abs(ox):
                # Strategy: replace the target species with a
                # combination of dopant and vacancy.
                # We will choose the lowest oxidation state species as a
                # vacancy compensation species as it is likely to be lower in
                # energy
                sp_to_remove = min(
                    (s for s in comp if s.oxi_state * ox > 0),
                    key=lambda ss: abs(ss.oxi_state),
                )
                if sp_to_remove == sp:
                    # Vacancies form on the doped sublattice itself; use the
                    # least common multiple of the charges to balance.
                    common_charge = lcm(int(abs(sp.oxi_state)), int(abs(ox)))
                    ndopant = common_charge / abs(ox)
                    nsp_to_remove = common_charge / abs(sp.oxi_state)
                    logger.info(f"Doping {nsp_to_remove} {sp} with {ndopant} {self.dopant}.")
                    supercell.replace_species(
                        {
                            sp: {
                                sp: (nsp - nsp_to_remove) / nsp,
                                self.dopant: ndopant / nsp,
                            }
                        }
                    )
                else:
                    # Vacancies form on a different same-charge-sign
                    # sublattice; remove just enough of it to offset the
                    # extra charge introduced by the dopant.
                    ox_diff = int(abs(round(sp.oxi_state - ox)))
                    vac_ox = int(abs(sp_to_remove.oxi_state))
                    common_charge = lcm(vac_ox, ox_diff)
                    ndopant = common_charge / ox_diff
                    nx_to_remove = common_charge / vac_ox
                    nx = supercell.composition[sp_to_remove]
                    logger.info(
                        "Doping %d %s with %s and removing %d %s."
                        % (ndopant, sp, self.dopant, nx_to_remove, sp_to_remove)
                    )
                    supercell.replace_species(
                        {
                            sp: {sp: (nsp - ndopant) / nsp, self.dopant: ndopant / nsp},
                            sp_to_remove: {sp_to_remove: (nx - nx_to_remove) / nx},
                        }
                    )
            elif abs(sp.oxi_state) > abs(ox):
                # Strategy: replace the target species with dopant and also
                # remove some opposite charged species for charge neutrality
                if ox > 0:
                    sp_to_remove = max(supercell.composition.keys(), key=lambda el: el.X)
                else:
                    sp_to_remove = min(supercell.composition.keys(), key=lambda el: el.X)
                # Confirm species are of opposite oxidation states.
                assert sp_to_remove.oxi_state * sp.oxi_state < 0
                ox_diff = int(abs(round(sp.oxi_state - ox)))
                anion_ox = int(abs(sp_to_remove.oxi_state))
                nx = supercell.composition[sp_to_remove]
                common_charge = lcm(anion_ox, ox_diff)
                ndopant = common_charge / ox_diff
                nx_to_remove = common_charge / anion_ox
                logger.info(f"Doping {ndopant} {sp} with {self.dopant} and removing {nx_to_remove} {sp_to_remove}.")
                supercell.replace_species(
                    {
                        sp: {sp: (nsp - ndopant) / nsp, self.dopant: ndopant / nsp},
                        sp_to_remove: {sp_to_remove: (nx - nx_to_remove) / nx},
                    }
                )
            # Enumerate distinct orderings of the partially substituted cell.
            ss = t.apply_transformation(supercell, return_ranked_list=self.max_structures_per_enum)
            logger.info(f"{len(ss)} distinct structures")
            all_structures.extend(ss)
        logger.info(f"Total {len(all_structures)} doped structures")
        if return_ranked_list:
            return all_structures[:return_ranked_list]
        return all_structures[0]["structure"]
    @property
    def inverse(self):
        """Returns: None"""
        return None
    @property
    def is_one_to_many(self):
        """Returns: True"""
        return True
class SlabTransformation(AbstractTransformation):
    """
    A transformation that generates a slab from a bulk structure via
    SlabGenerator.
    """
    def __init__(
        self,
        miller_index,
        min_slab_size,
        min_vacuum_size,
        lll_reduce=False,
        center_slab=False,
        in_unit_planes=False,
        primitive=True,
        max_normal_search=None,
        shift=0,
        tol=0.1,
    ):
        """
        Args:
            miller_index (3-tuple or list): miller index of the slab
            min_slab_size (float): minimum slab size in angstroms
            min_vacuum_size (float): minimum size of the vacuum region
            lll_reduce (bool): whether to apply LLL lattice reduction
            center_slab (bool): whether to center the slab in the cell
            in_unit_planes (bool): forwarded to SlabGenerator; presumably
                switches size units to hkl planes — confirm against
                SlabGenerator's own documentation
            primitive (bool): whether to reduce slabs to the most primitive cell
            max_normal_search (int): maximum index to include in linear
                combinations of indices to find a c lattice vector orthogonal
                to the slab surface
            shift (float): shift used to select the termination
            tol (float): tolerance for primitive cell finding
        """
        self.miller_index = miller_index
        self.min_slab_size = min_slab_size
        self.min_vacuum_size = min_vacuum_size
        self.lll_reduce = lll_reduce
        self.center_slab = center_slab
        self.in_unit_planes = in_unit_planes
        self.primitive = primitive
        self.max_normal_search = max_normal_search
        self.shift = shift
        self.tol = tol
    def apply_transformation(self, structure):
        """
        Generate the slab.
        Args:
            structure: Input Structure
        Returns:
            Slab Structure.
        """
        generator = SlabGenerator(
            structure,
            self.miller_index,
            self.min_slab_size,
            self.min_vacuum_size,
            self.lll_reduce,
            self.center_slab,
            self.in_unit_planes,
            self.primitive,
            self.max_normal_search,
        )
        return generator.get_slab(self.shift, self.tol)
    @property
    def inverse(self):
        """Returns: None (no inverse is defined for this transformation)."""
        return None
    @property
    def is_one_to_many(self):
        """Returns: False (a single slab is produced)."""
        return False
class DisorderOrderedTransformation(AbstractTransformation):
    """
    Not to be confused with OrderDisorderedTransformation,
    this transformation attempts to obtain a
    *disordered* structure from an input ordered structure.
    This may or may not be physically plausible, further
    inspection of the returned structures is advised.
    The main purpose for this transformation is for structure
    matching to crystal prototypes for structures that have
    been derived from a parent prototype structure by
    substitutions or alloying additions.
    """
    def __init__(self, max_sites_to_merge=2):
        """
        Args:
            max_sites_to_merge: only merge this number of sites together
        """
        self.max_sites_to_merge = max_sites_to_merge
    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Args:
            structure: ordered structure
            return_ranked_list: as in other pymatgen Transformations
        Returns:
            Transformed disordered structure(s)
        """
        if not structure.is_ordered:
            # This transformation merges ordered sites into disordered ones,
            # so the input must be fully ordered. (Message fixed: it
            # previously claimed the opposite.)
            raise ValueError("This transformation is for ordered structures only.")
        # All ways of merging up to max_sites_to_merge species together,
        # sorted so the least-disordered candidates come first.
        partitions = self._partition_species(structure.composition, max_components=self.max_sites_to_merge)
        disorder_mappings = self._get_disorder_mappings(structure.composition, partitions)
        disordered_structures = []
        for mapping in disorder_mappings:
            disordered_structure = structure.copy()
            disordered_structure.replace_species(mapping)
            disordered_structures.append({"structure": disordered_structure, "mapping": mapping})
        if len(disordered_structures) == 0:
            return None
        if not return_ranked_list:
            return disordered_structures[0]["structure"]
        if len(disordered_structures) > return_ranked_list:
            disordered_structures = disordered_structures[0:return_ranked_list]
        return disordered_structures
    @property
    def inverse(self):
        """Returns: None"""
        return None
    @property
    def is_one_to_many(self):
        """Returns: True"""
        return True
    @staticmethod
    def _partition_species(composition, max_components=2):
        """
        Private method to split a list of species into
        various partitions (candidate groupings of species
        to merge onto shared disordered sites).
        """
        def _partition(collection):
            # Generates all set partitions of `collection`.
            # thanks https://stackoverflow.com/a/30134039
            if len(collection) == 1:
                yield [collection]
                return
            first = collection[0]
            for smaller in _partition(collection[1:]):
                # insert `first` in each of the subpartition's subsets
                for n, subset in enumerate(smaller):
                    yield smaller[:n] + [[first] + subset] + smaller[n + 1 :]
                # put `first` in its own subset
                yield [[first]] + smaller
        def _sort_partitions(partitions_to_sort):
            """
            Sort partitions by those we want to check first
            (typically, merging two sites into one is the
            one to try first).
            """
            partition_indices = [(idx, [len(p) for p in partition]) for idx, partition in enumerate(partitions_to_sort)]
            # sort by maximum length of partition first (try smallest maximums first)
            # and secondarily by number of partitions (most partitions first, i.e.
            # create the 'least disordered' structures first)
            partition_indices = sorted(partition_indices, key=lambda x: (max(x[1]), -len(x[1])))
            # merge at most max_component sites,
            # e.g. merge at most 2 species into 1 disordered site
            partition_indices = [x for x in partition_indices if max(x[1]) <= max_components]
            partition_indices.pop(0)  # this is just the input structure
            sorted_partitions = [partitions_to_sort[x[0]] for x in partition_indices]
            return sorted_partitions
        collection = list(composition.keys())
        partitions = list(_partition(collection))
        partitions = _sort_partitions(partitions)
        return partitions
    @staticmethod
    def _get_disorder_mappings(composition, partitions):
        """
        Private method to obtain the mapping to create
        a disordered structure from a given partition.
        """
        def _get_replacement_dict_from_partition(partition):
            d = {}  # to be passed to Structure.replace_species()
            for sp_list in partition:
                if len(sp_list) > 1:
                    # Merge these species into one disordered site with
                    # occupancies proportional to their amounts.
                    total_occ = sum(composition[sp] for sp in sp_list)
                    merged_comp = {sp: composition[sp] / total_occ for sp in sp_list}
                    for sp in sp_list:
                        d[sp] = merged_comp
            return d
        disorder_mapping = [_get_replacement_dict_from_partition(p) for p in partitions]
        return disorder_mapping
class GrainBoundaryTransformation(AbstractTransformation):
    """
    A transformation that creates a gb (grain boundary) from a bulk
    structure using GrainBoundaryGenerator.
    """
    def __init__(
        self,
        rotation_axis,
        rotation_angle,
        expand_times=4,
        vacuum_thickness=0.0,
        ab_shift=None,
        normal=False,
        ratio=True,
        plane=None,
        max_search=20,
        tol_coi=1.0e-8,
        rm_ratio=0.7,
        quick_gen=False,
    ):
        """
        Args:
            rotation_axis (list): Rotation axis of GB in the form of a list of integer
                e.g.: [1, 1, 0]
            rotation_angle (float, in unit of degree): rotation angle used to generate GB.
                Make sure the angle is accurate enough. You can use the enum* functions
                in this class to extract the accurate angle.
                e.g.: The rotation angle of sigma 3 twist GB with the rotation axis
                [1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degree.
                If you do not know the rotation angle, but know the sigma value, we
                provide the function get_rotation_angle_from_sigma which is able to return
                all the rotation angles of sigma value you provided.
            expand_times (int): The multiple times used to expand one unit grain to larger grain.
                This is used to tune the grain length of GB to warrant that the two GBs in one
                cell do not interact with each other. Default set to 4.
            vacuum_thickness (float): The thickness of vacuum that you want to insert between
                two grains of the GB. Default to 0.
            ab_shift (list of float, in unit of a, b vectors of Gb): in plane shift of two grains.
                Defaults to [0, 0] (no shift).
            normal (bool):
                determine if need to require the c axis of top grain (first transformation matrix)
                perpendicular to the surface or not.
                default to false.
            ratio (list of integers): lattice axial ratio.
                If True, will try to determine automatically from structure.
                For cubic system, ratio is not needed and can be set to None.
                For tetragonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to None.
                For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
                that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
                e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
                For rhombohedral system, ratio = [mu, mv], list of two integers,
                that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
                If irrational, set it to None.
                For hexagonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to none.
            plane (list): Grain boundary plane in the form of a list of integers
                e.g.: [1, 2, 3]. If none, we set it as twist GB. The plane will be perpendicular
                to the rotation axis.
            max_search (int): max search for the GB lattice vectors that give the smallest GB
                lattice. If normal is true, also max search the GB c vector that perpendicular
                to the plane. For complex GB, if you want to speed up, you can reduce this value.
                But too small of this value may lead to error.
            tol_coi (float): tolerance to find the coincidence sites. When making approximations to
                the ratio needed to generate the GB, you probably need to increase this tolerance to
                obtain the correct number of coincidence sites. To check the number of coincidence
                sites are correct or not, you can compare the generated Gb object's sigma with enum*
                sigma values (what user expected by input).
            rm_ratio (float): the criteria to remove the atoms which are too close with each other.
                rm_ratio * bond_length of bulk system is the criteria of bond length, below which the atom
                will be removed. Default to 0.7.
            quick_gen (bool): whether to quickly generate a supercell, if set to true, no need to
                find the smallest cell.
        Returns:
            Grain boundary structure (gb (Structure) object).
        """
        self.rotation_axis = rotation_axis
        self.rotation_angle = rotation_angle
        self.expand_times = expand_times
        self.vacuum_thickness = vacuum_thickness
        # Default to no in-plane shift when ab_shift is not given.
        self.ab_shift = ab_shift or [0, 0]
        self.normal = normal
        self.ratio = ratio
        self.plane = plane
        self.max_search = max_search
        self.tol_coi = tol_coi
        self.rm_ratio = rm_ratio
        self.quick_gen = quick_gen
    def apply_transformation(self, structure):
        """
        Applies the transformation.
        Args:
            structure: Input Structure
        Returns:
            Grain boundary Structure.
        """
        gbg = GrainBoundaryGenerator(structure)
        gb_struct = gbg.gb_from_parameters(
            self.rotation_axis,
            self.rotation_angle,
            expand_times=self.expand_times,
            vacuum_thickness=self.vacuum_thickness,
            ab_shift=self.ab_shift,
            normal=self.normal,
            # ratio=True means "determine automatically from the structure".
            ratio=gbg.get_ratio() if self.ratio is True else self.ratio,
            plane=self.plane,
            max_search=self.max_search,
            tol_coi=self.tol_coi,
            rm_ratio=self.rm_ratio,
            quick_gen=self.quick_gen,
        )
        return gb_struct
    @property
    def inverse(self):
        """Returns: None"""
        return None
    @property
    def is_one_to_many(self):
        """Returns: False"""
        return False
class CubicSupercellTransformation(AbstractTransformation):
    """
    A transformation that aims to generate a nearly cubic supercell structure
    from a structure.
    The algorithm solves for a transformation matrix that makes the supercell
    cubic. The matrix must have integer entries, so entries are rounded (in such
    a way that forces the matrix to be nonsingular). From the supercell
    resulting from this transformation matrix, vector projections are used to
    determine the side length of the largest cube that can fit inside the
    supercell. The algorithm will iteratively increase the size of the supercell
    until the largest inscribed cube's side length is at least 'min_length'
    and the number of atoms in the supercell falls in the range
    ``min_atoms < n < max_atoms``.
    """
    def __init__(
        self,
        min_atoms: int | None = None,
        max_atoms: int | None = None,
        min_length: float = 15.0,
        force_diagonal: bool = False,
    ):
        """
        Args:
            min_atoms: Minimum number of atoms allowed in the supercell.
            max_atoms: Maximum number of atoms allowed in the supercell.
            min_length: Minimum length of the smallest supercell lattice vector.
            force_diagonal: If True, return a transformation with a diagonal
                transformation matrix.
        """
        # Use np.inf: the np.Inf alias was removed in NumPy 2.0.
        self.min_atoms = min_atoms if min_atoms else -np.inf
        self.max_atoms = max_atoms if max_atoms else np.inf
        self.min_length = min_length
        self.force_diagonal = force_diagonal
        # Set by apply_transformation: the integer matrix actually applied.
        self.transformation_matrix = None
    def apply_transformation(self, structure: Structure) -> Structure:
        """
        The algorithm solves for a transformation matrix that makes the
        supercell cubic. The matrix must have integer entries, so entries are
        rounded (in such a way that forces the matrix to be nonsingular). From
        the supercell resulting from this transformation matrix, vector
        projections are used to determine the side length of the largest cube
        that can fit inside the supercell. The algorithm will iteratively
        increase the size of the supercell until the largest inscribed cube's
        side length is at least ``min_length`` and the number of atoms in the
        supercell falls in the range defined by min_atoms and max_atoms.
        Returns:
            supercell: Transformed supercell.
        Raises:
            AttributeError: If max_atoms is exceeded before the length
                constraint is satisfied.
        """
        lat_vecs = structure.lattice.matrix
        if self.force_diagonal:
            # Simple path: independently scale each lattice vector up to
            # min_length and use the diagonal integer matrix.
            scale = self.min_length / np.array(structure.lattice.abc)
            self.transformation_matrix = np.diag(np.ceil(scale).astype(int))
            st = SupercellTransformation(self.transformation_matrix)
            return st.apply_transformation(structure)
        # target_sc_size is used as the desired cubic side length; it grows
        # until the constraints are met (loop exits via return or raise).
        target_sc_size = self.min_length
        while True:
            target_sc_lat_vecs = np.eye(3, 3) * target_sc_size
            self.transformation_matrix = target_sc_lat_vecs @ np.linalg.inv(lat_vecs)
            # round the entries of T and force T to be nonsingular
            self.transformation_matrix = _round_and_make_arr_singular(self.transformation_matrix)  # type: ignore
            proposed_sc_lat_vecs = self.transformation_matrix @ lat_vecs
            a = proposed_sc_lat_vecs[0]
            b = proposed_sc_lat_vecs[1]
            c = proposed_sc_lat_vecs[2]
            # Perpendicular components of each lattice vector w.r.t. the
            # others; their minimum norm bounds the inscribed cube's side.
            length1_vec = c - _proj(c, a)  # a-c plane
            length2_vec = a - _proj(a, c)
            length3_vec = b - _proj(b, a)  # b-a plane
            length4_vec = a - _proj(a, b)
            length5_vec = b - _proj(b, c)  # b-c plane
            length6_vec = c - _proj(c, b)
            length_vecs = np.array(
                [
                    length1_vec,
                    length2_vec,
                    length3_vec,
                    length4_vec,
                    length5_vec,
                    length6_vec,
                ]
            )
            # Get number of atoms
            st = SupercellTransformation(self.transformation_matrix)
            superstructure = st.apply_transformation(structure)
            num_at = superstructure.num_sites
            # Check if constraints are satisfied
            if (
                np.min(np.linalg.norm(length_vecs, axis=1)) >= self.min_length
                and self.min_atoms <= num_at <= self.max_atoms
            ):
                return superstructure
            # Increase threshold until proposed supercell meets requirements
            target_sc_size += 0.1
            if num_at > self.max_atoms:
                raise AttributeError(
                    "While trying to solve for the supercell, the max "
                    "number of atoms was exceeded. Try lowering the number "
                    "of nearest neighbor distances."
                )
    @property
    def inverse(self):
        """
        Returns:
            None
        """
        return None
    @property
    def is_one_to_many(self):
        """
        Returns:
            False
        """
        return False
class AddAdsorbateTransformation(AbstractTransformation):
    """
    Create adsorbate structures by placing a molecule onto a slab surface.
    """
    def __init__(
        self,
        adsorbate,
        selective_dynamics=False,
        height=0.9,
        mi_vec=None,
        repeat=None,
        min_lw=5.0,
        translate=True,
        reorient=True,
        find_args=None,
    ):
        """
        Use AdsorbateSiteFinder to add an adsorbate to a slab.
        Args:
            adsorbate (Molecule): molecule to place as the adsorbate
            selective_dynamics (bool): whether to mark non-surface sites as
                fixed for selective dynamics
            height (float): height criterion for selecting surface sites
            mi_vec: vector concurrent with the miller index; allows use with
                slabs that have been reoriented, but must then be supplied
                manually
            repeat (3-tuple or list): repeat argument for supercell generation
            min_lw (float): minimum length and width of the slab, only used
                when repeat is None
            translate (bool): whether to translate the molecule so its CoM is
                at the origin before adding it to the surface
            reorient (bool): whether to reorient the adsorbate along the
                miller index
            find_args (dict): extra arguments forwarded to
                find_adsorption_sites, e.g. {"distance": 2.0}
        """
        self.adsorbate = adsorbate
        self.selective_dynamics = selective_dynamics
        self.height = height
        self.mi_vec = mi_vec
        self.repeat = repeat
        self.min_lw = min_lw
        self.translate = translate
        self.reorient = reorient
        self.find_args = find_args
    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Args:
            structure: Must be a Slab structure
            return_ranked_list: Whether multiple structures are returned.
                If a number, up to that many structures is returned.
        Returns: Slab with adsorbate
        """
        finder = AdsorbateSiteFinder(
            structure,
            selective_dynamics=self.selective_dynamics,
            height=self.height,
            mi_vec=self.mi_vec,
        )
        candidates = finder.generate_adsorption_structures(
            self.adsorbate,
            repeat=self.repeat,
            min_lw=self.min_lw,
            translate=self.translate,
            reorient=self.reorient,
            find_args=self.find_args,
        )
        if return_ranked_list:
            return [{"structure": s} for s in candidates[:return_ranked_list]]
        return candidates[0]
    @property
    def inverse(self):
        """Returns: None (no inverse is defined)."""
        return None
    @property
    def is_one_to_many(self):
        """Returns: True (many adsorption sites yield many structures)."""
        return True
def _round_and_make_arr_singular(arr: np.ndarray) -> np.ndarray:
"""
This function rounds all elements of a matrix to the nearest integer,
unless the rounding scheme causes the matrix to be singular, in which
case elements of zero rows or columns in the rounded matrix with the
largest absolute valued magnitude in the unrounded matrix will be
rounded to the next integer away from zero rather than to the
nearest integer.
The transformation is as follows. First, all entries in 'arr' will be
rounded to the nearest integer to yield 'arr_rounded'. If 'arr_rounded'
has any zero rows, then one element in each zero row of 'arr_rounded'
corresponding to the element in 'arr' of that row with the largest
absolute valued magnitude will be rounded to the next integer away from
zero (see the '_round_away_from_zero(x)' function) rather than the
nearest integer. This process is then repeated for zero columns. Also
note that if 'arr' already has zero rows or columns, then this function
will not change those rows/columns.
Args:
arr: Input matrix
Returns:
Transformed matrix.
"""
def round_away_from_zero(x):
"""
Returns 'x' rounded to the next integer away from 0.
If 'x' is zero, then returns zero.
E.g. -1.2 rounds to -2.0. 1.2 rounds to 2.0.
"""
abs_x = abs(x)
return math.ceil(abs_x) * (abs_x / x) if x != 0 else 0
arr_rounded = np.around(arr)
# Zero rows in 'arr_rounded' make the array singular, so force zero rows to
# be nonzero
if (~arr_rounded.any(axis=1)).any():
# Check for zero rows in T_rounded
# indices of zero rows
zero_row_idxs = np.where(~arr_rounded.any(axis=1))[0]
for zero_row_idx in zero_row_idxs: # loop over zero rows
zero_row = arr[zero_row_idx, :]
# Find the element of the zero row with the largest absolute
# magnitude in the original (non-rounded) array (i.e. 'arr')
matches = np.absolute(zero_row) == np.amax(np.absolute(zero_row))
col_idx_to_fix = np.where(matches)[0]
# Break ties for the largest absolute magnitude
r_idx = np.random.randint(len(col_idx_to_fix))
col_idx_to_fix = col_idx_to_fix[r_idx]
# Round the chosen element away from zero
arr_rounded[zero_row_idx, col_idx_to_fix] = round_away_from_zero(arr[zero_row_idx, col_idx_to_fix])
# Repeat process for zero columns
if (~arr_rounded.any(axis=0)).any():
# Check for zero columns in T_rounded
zero_col_idxs = np.where(~arr_rounded.any(axis=0))[0]
for zero_col_idx in zero_col_idxs:
zero_col = arr[:, zero_col_idx]
matches = np.absolute(zero_col) == np.amax(np.absolute(zero_col))
row_idx_to_fix = np.where(matches)[0]
for i in row_idx_to_fix:
arr_rounded[i, zero_col_idx] = round_away_from_zero(arr[i, zero_col_idx])
return arr_rounded.astype(int)
class SubstituteSurfaceSiteTransformation(AbstractTransformation):
    """
    Use AdsorbateSiteFinder to perform substitution-type doping on a slab
    surface, returning all configurations where one dopant is substituted per
    surface. Can substitute one surface or both.
    """
    def __init__(
        self,
        atom,
        selective_dynamics=False,
        height=0.9,
        mi_vec=None,
        target_species=None,
        sub_both_sides=False,
        range_tol=1e-2,
        dist_from_surf=0,
    ):
        """
        Args:
            atom (str): atom corresponding to the substitutional dopant
            selective_dynamics (bool): whether to mark non-surface sites as
                fixed for selective dynamics
            height (float): height criterion for selecting surface sites
            mi_vec: vector concurrent with the miller index; allows use with
                slabs that have been reoriented, but must then be supplied
                manually
            target_species: list of specific species to substitute
            sub_both_sides (bool): if True, also substitute an equivalent
                site on the opposite surface
            range_tol (float): tolerance around dist_from_surf within which
                viable substitution sites are accepted
            dist_from_surf (float): distance from the surface at which to
                look for substitution sites; defaults to 0 (at the surface)
        """
        self.atom = atom
        self.selective_dynamics = selective_dynamics
        self.height = height
        self.mi_vec = mi_vec
        self.target_species = target_species
        self.sub_both_sides = sub_both_sides
        self.range_tol = range_tol
        self.dist_from_surf = dist_from_surf
    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Args:
            structure: Must be a Slab structure
            return_ranked_list: Whether multiple structures are returned.
                If a number, up to that many structures is returned.
        Returns: Slab with sites substituted
        """
        finder = AdsorbateSiteFinder(
            structure,
            selective_dynamics=self.selective_dynamics,
            height=self.height,
            mi_vec=self.mi_vec,
        )
        candidates = finder.generate_substitution_structures(
            self.atom,
            target_species=self.target_species,
            sub_both_sides=self.sub_both_sides,
            range_tol=self.range_tol,
            dist_from_surf=self.dist_from_surf,
        )
        if return_ranked_list:
            return [{"structure": s} for s in candidates[:return_ranked_list]]
        return candidates[0]
    @property
    def inverse(self):
        """Returns: None (no inverse is defined)."""
        return None
    @property
    def is_one_to_many(self):
        """Returns: True (one structure per viable substitution site)."""
        return True
def _proj(b, a):
"""
Returns vector projection (np.ndarray) of vector b (np.ndarray)
onto vector a (np.ndarray)
"""
return (b.T @ (a / np.linalg.norm(a))) * (a / np.linalg.norm(a))
class SQSTransformation(AbstractTransformation):
"""
A transformation that creates a special quasirandom structure (SQS) from a structure with partial occupancies.
"""
    def __init__(
        self,
        scaling,
        cluster_size_and_shell=None,
        search_time=60,
        directory=None,
        instances=None,
        temperature=1,
        wr=1,
        wn=1,
        wd=0.5,
        tol=1e-3,
        best_only=True,
        remove_duplicate_structures=True,
        reduction_algo="LLL",
    ):
        """
        Args:
            scaling (int or list): Scaling factor to determine supercell. Two options are possible:
                a. (preferred) Scales number of atoms, e.g., for a structure with 8 atoms,
                    scaling=4 would lead to a 32 atom supercell
                b. A sequence of three scaling factors, e.g., [2, 1, 1], which
                    specifies that the supercell should have dimensions 2a x b x c
            cluster_size_and_shell (Optional[Dict[int, int]]): Dictionary of cluster interactions with entries in
                the form number of atoms: nearest neighbor shell
        Keyword Args:
            search_time (float): Time spent looking for the ideal SQS in minutes (default: 60)
            directory (str): Directory to run mcsqs calculation and store files (default: None
                runs calculations in a temp directory)
            instances (int): Specifies the number of parallel instances of mcsqs to run
                (default: number of cpu cores detected by Python)
            temperature (int or float): Monte Carlo temperature (default: 1), "T" in atat code
            wr (int or float): Weight assigned to range of perfect correlation match in objective
                function (default: 1)
            wn (int or float): Multiplicative decrease in weight per additional point in cluster (default: 1)
            wd (int or float): Exponent of decay in weight as function of cluster diameter (default: 0.5)
            tol (int or float): Tolerance for matching correlations (default: 1e-3)
            best_only (bool): only return structures with lowest objective function
            remove_duplicate_structures (bool): only return unique structures
            reduction_algo (str): The lattice reduction algorithm to use.
                Currently supported options are "niggli" or "LLL".
                "False" does not reduce structure.
        """
        self.scaling = scaling
        self.search_time = search_time
        self.cluster_size_and_shell = cluster_size_and_shell
        self.directory = directory
        self.instances = instances
        self.temperature = temperature
        self.wr = wr
        self.wn = wn
        self.wd = wd
        self.tol = tol
        self.best_only = best_only
        self.remove_duplicate_structures = remove_duplicate_structures
        self.reduction_algo = reduction_algo
@staticmethod
def _get_max_neighbor_distance(struc, shell):
"""
Calculate maximum nearest neighbor distance
Args:
struc: pymatgen Structure object
shell: nearest neighbor shell, such that shell=1 is the first nearest
neighbor, etc.
Returns:
maximum nearest neighbor distance, in angstroms
"""
mdnn = MinimumDistanceNN()
distances = []
for site_num, site in enumerate(struc):
shell_info = mdnn.get_nn_shell_info(struc, site_num, shell)
for entry in shell_info:
image = entry["image"]
distance = site.distance(struc[entry["site_index"]], jimage=image)
distances.append(distance)
return max(distances)
@staticmethod
def _get_disordered_substructure(struc_disordered):
"""
Converts disordered structure into a substructure consisting of only disordered sites
Args:
struc_disordered: pymatgen disordered Structure object
Returns:
pymatgen Structure object representing a substructure of disordered sites
"""
disordered_substructure = struc_disordered.copy()
idx_to_remove = []
for idx, site in enumerate(disordered_substructure.sites):
if site.is_ordered:
idx_to_remove.append(idx)
disordered_substructure.remove_sites(idx_to_remove)
return disordered_substructure
@staticmethod
def _sqs_cluster_estimate(struc_disordered, cluster_size_and_shell: dict[int, int] | None = None):
"""
Set up an ATAT cluster.out file for a given structure and set of constraints
Args:
struc_disordered: disordered pymatgen Structure object
cluster_size_and_shell: dict of integers {cluster: shell}
Returns:
dict of {cluster size: distance in angstroms} for mcsqs calculation
"""
cluster_size_and_shell = cluster_size_and_shell or {2: 3, 3: 2, 4: 1}
disordered_substructure = SQSTransformation._get_disordered_substructure(struc_disordered)
clusters = {}
for cluster_size, shell in cluster_size_and_shell.items():
max_distance = SQSTransformation._get_max_neighbor_distance(disordered_substructure, shell)
clusters[cluster_size] = max_distance + 0.01 # add small tolerance
return clusters
def apply_transformation(self, structure, return_ranked_list=False):
"""
Applies SQS transformation
Args:
structure (pymatgen Structure): pymatgen Structure with partial occupancies
return_ranked_list (bool): number of structures to return
Returns:
pymatgen Structure which is an SQS of the input structure
"""
if return_ranked_list and self.instances is None:
raise ValueError("mcsqs has no instances, so cannot return a ranked list")
if (
isinstance(return_ranked_list, int)
and isinstance(self.instances, int)
and return_ranked_list > self.instances
):
raise ValueError("return_ranked_list cannot be less that number of instances")
clusters = self._sqs_cluster_estimate(structure, self.cluster_size_and_shell)
# useful for debugging and understanding
self._last_used_clusters = clusters
sqs = run_mcsqs(
structure=structure,
clusters=clusters,
scaling=self.scaling,
search_time=self.search_time,
directory=self.directory,
instances=self.instances,
temperature=self.temperature,
wr=self.wr,
wn=self.wn,
wd=self.wd,
tol=self.tol,
)
return self._get_unique_bestsqs_strucs(
sqs,
best_only=self.best_only,
return_ranked_list=return_ranked_list,
remove_duplicate_structures=self.remove_duplicate_structures,
reduction_algo=self.reduction_algo,
)
@staticmethod
def _get_unique_bestsqs_strucs(sqs, best_only, return_ranked_list, remove_duplicate_structures, reduction_algo):
    """
    Gets unique sqs structures with lowest objective function. Requires an mcsqs output that has been run
    in parallel, otherwise returns Sqs.bestsqs.

    Args:
        sqs (Sqs): Sqs class object.
        best_only (bool): only return structures with lowest objective function.
        return_ranked_list (bool | int): number of structures to return.
        remove_duplicate_structures (bool): only return unique structures.
        reduction_algo (str): lattice reduction algorithm to use,
            currently "niggli" or "LLL"; a falsy value skips reduction.
    Returns:
        list of dicts of the form {'structure': Structure, 'objective_function': ...}, unless run in serial
        (returns a single structure Sqs.bestsqs)
    """
    if not return_ranked_list:
        # serial path: just the single best structure
        return_struc = sqs.bestsqs
        # reduce structure
        if reduction_algo:
            return_struc = return_struc.get_reduced_structure(reduction_algo=reduction_algo)
        # return just the structure
        return return_struc
    strucs = []
    for d in sqs.allsqs:
        # filter for best structures only if enabled, else use full sqs.allsqs list
        if (not best_only) or (best_only and d["objective_function"] == sqs.objective_function):
            struc = d["structure"]
            # add temporary objective_function attribute to access objective_function after grouping
            struc.objective_function = d["objective_function"]
            strucs.append(struc)
    if remove_duplicate_structures:
        matcher = StructureMatcher()
        # group by structural equivalence ... can take a while for a long list of strucs
        unique_strucs_grouped = matcher.group_structures(strucs)
        # keep one representative per group
        strucs = [group[0] for group in unique_strucs_grouped]
    # sort structures by objective function; non-float values (e.g. a
    # "Perfect_match" string) sort first via the -inf sentinel
    strucs.sort(key=lambda x: x.objective_function if isinstance(x.objective_function, float) else -np.inf)
    to_return = [{"structure": struc, "objective_function": struc.objective_function} for struc in strucs]
    for d in to_return:
        # delete temporary objective_function attribute
        del d["structure"].objective_function
        # reduce structure
        if reduction_algo:
            d["structure"] = d["structure"].get_reduced_structure(reduction_algo=reduction_algo)
    if isinstance(return_ranked_list, int):
        return to_return[:return_ranked_list]
    return to_return
@property
def inverse(self):
    """An SQS transformation cannot be inverted; always None."""
    return None
@property
def is_one_to_many(self):
    """This transformation can produce multiple output structures; always True."""
    return True
class MonteCarloRattleTransformation(AbstractTransformation):
    r"""
    Uses a Monte Carlo rattle procedure to randomly perturb the sites in a
    structure.

    This class requires the hiPhive package to be installed.

    Rattling atom `i` is carried out as a Monte Carlo move that is accepted with
    a probability determined from the minimum interatomic distance
    :math:`d_{ij}`. If :math:`\\min(d_{ij})` is smaller than :math:`d_{min}`
    the move is only accepted with a low probability.

    This process is repeated for each atom a number of times meaning
    the magnitude of the final displacements is not *directly*
    connected to `rattle_std`.
    """

    @requires(hiphive, "hiphive is required for MonteCarloRattleTransformation")
    def __init__(self, rattle_std: float, min_distance: float, seed: int | None = None, **kwargs):
        """
        Args:
            rattle_std: Rattle amplitude (standard deviation in normal
                distribution). Note: this value is not *directly* connected to the
                final average displacement for the structures
            min_distance: Interatomic distance used for computing the probability
                for each rattle move.
            seed: Seed for setting up NumPy random state from which random numbers
                are generated. If ``None``, a random seed will be generated
                (default). This option allows the output of this transformation
                to be deterministic.
            **kwargs: Additional keyword arguments to be passed to the hiPhive
                mc_rattle function.
        """
        self.rattle_std = rattle_std
        self.min_distance = min_distance
        self.seed = seed
        # Fix: the original used `if not seed:`, which silently discarded an
        # explicit seed of 0 (a perfectly valid RandomState seed) and broke
        # the documented determinism guarantee. Only replace a missing seed.
        if seed is None:
            # seed was not supplied: draw a random one, but keep
            # self.seed = None to record that no explicit seed was given
            seed = np.random.randint(1, 1000000000)
        self.random_state = np.random.RandomState(seed)  # pylint: disable=E1101
        self.kwargs = kwargs

    def apply_transformation(self, structure: Structure) -> Structure:
        """
        Apply the transformation.

        Args:
            structure: Input Structure

        Returns:
            Structure with sites perturbed.
        """
        from hiphive.structure_generation.rattle import mc_rattle

        atoms = AseAtomsAdaptor.get_atoms(structure)
        # draw a per-call seed from our own random state so repeated calls
        # differ, yet the whole sequence is reproducible for a fixed seed
        seed = self.random_state.randint(1, 1000000000)
        displacements = mc_rattle(atoms, self.rattle_std, self.min_distance, seed=seed, **self.kwargs)
        transformed_structure = Structure(
            structure.lattice,
            structure.species,
            structure.cart_coords + displacements,
            coords_are_cartesian=True,
        )
        return transformed_structure

    def __str__(self):
        return f"{__name__} : rattle_std = {self.rattle_std}"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """
        Returns: None
        """
        return None

    @property
    def is_one_to_many(self):
        """
        Returns: False
        """
        return False
|
{
"content_hash": "bfa5f93069ceb61068235a654e7a46f2",
"timestamp": "",
"source": "github",
"line_count": 2227,
"max_line_length": 120,
"avg_line_length": 39.47283340817243,
"alnum_prop": 0.5972402338861966,
"repo_name": "davidwaroquiers/pymatgen",
"id": "b17e93a1ad75f8395c8c152b46d5618ddf4a9b19",
"size": "88000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/transformations/advanced_transformations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "38793"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "OpenEdge ABL",
"bytes": "312"
},
{
"name": "Python",
"bytes": "9213466"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12027"
}
],
"symlink_target": ""
}
|
"""Perform MCMC follow-up fitting."""
import os
from os.path import join,exists,basename,splitext
import shutil
from collections import OrderedDict as odict
from multiprocessing import Pool
import matplotlib
matplotlib.use('Agg')
#try: os.environ['DISPLAY']
#except KeyError: matplotlib.use('Agg')
import numpy
import numpy as np
import yaml
import fitsio
from ugali.analysis.pipeline import Pipeline
from ugali.analysis.scan import Scan
import ugali.analysis.source
import ugali.analysis.loglike
import ugali.analysis.results
import ugali.utils.config
from ugali.utils.logger import logger
from ugali.utils.shell import mkdir
# Full set of pipeline components this script knows about; the second
# assignment deliberately overrides it to narrow the default run set
# (leaving 'collect' and 'scan' as opt-in only).
components = ['mcmc','membership','results','plot','collect','scan']
components = ['mcmc','membership','results','plot']
def make_filenames(config, label):
    """Build the per-target output filenames from the config.

    Returns a dict with keys 'outfile'/'samfile' (the .npy samples file),
    'srcfile' (.yaml source model) and 'memfile' (.fits membership).
    """
    cfg = ugali.utils.config.Config(config)
    mcmc_dir = cfg['output']['mcmcdir']
    sample_file = join(mcmc_dir, cfg['output']['mcmcfile'] % label)
    source_file = sample_file.replace('.npy', '.yaml')
    member_file = sample_file.replace('.npy', '.fits')
    return dict(outfile=sample_file, samfile=sample_file,
                srcfile=source_file, memfile=member_file)
def do_results(args):
    """Write the results output file for one target.

    args: (config, name, label, coord) tuple as assembled by `run`.
    Missing input files are logged and the target is skipped.
    """
    config, name, label, coord = args
    filenames = make_filenames(config, label)
    srcfile = filenames['srcfile']
    samples = filenames['samfile']
    for required in (srcfile, samples):
        if not exists(required):
            logger.warning("Couldn't find %s; skipping..."%required)
            return
    logger.info("Writing %s..."%srcfile)
    from ugali.analysis.results import write_results
    write_results(srcfile, config, srcfile, samples)
def do_membership(args):
    """Write the membership output file for one target.

    args: (config, name, label, coord) tuple as assembled by `run`.
    """
    from ugali.analysis.loglike import write_membership
    config, name, label, coord = args
    filenames = make_filenames(config, label)
    memfile = filenames['memfile']
    logger.info("Writing %s..."%memfile)
    write_membership(memfile, config, filenames['srcfile'], section='source')
def do_plot(args):
    """Create plots of mcmc output (triangle, membership, multi-panel).

    args: (config, name, label, coord) tuple as assembled by `run`.
    Missing input files are logged and the target is skipped.
    """
    import ugali.utils.plotting
    import pylab as plt
    config, name, label, coord = args
    filenames = make_filenames(config, label)
    srcfile = filenames['srcfile']
    samfile = filenames['samfile']
    memfile = filenames['memfile']
    if not exists(srcfile):
        logger.warning("Couldn't find %s; skipping..."%srcfile)
        return
    if not exists(samfile):
        logger.warning("Couldn't find %s; skipping..."%samfile)
        return
    config = ugali.utils.config.Config(config)
    # total number of burn-in samples across all walkers
    burn = config['mcmc']['nburn']*config['mcmc']['nwalkers']
    source = ugali.analysis.source.Source()
    source.load(srcfile,section='source')
    outfile = samfile.replace('.npy','.png')
    ugali.utils.plotting.plotTriangle(srcfile,samfile,burn=burn)
    logger.info(" Writing %s..."%outfile)
    plt.savefig(outfile,bbox_inches='tight',dpi=60)
    plt.close()
    plotter = ugali.utils.plotting.SourcePlotter(source,config,radius=0.5)
    # membership file is optional; membership-based plots only run when present
    data = fitsio.read(memfile,trim_strings=True) if exists(memfile) else None
    if data is not None:
        plt.figure()
        kernel,isochrone = source.kernel,source.isochrone
        ugali.utils.plotting.plotMembership(config,data,kernel,isochrone)
        outfile = samfile.replace('.npy','_mem.png')
        logger.info(" Writing %s..."%outfile)
        plt.savefig(outfile,bbox_inches='tight',dpi=60)
        plt.close()
        plotter.plot6(data)
        outfile = samfile.replace('.npy','_6panel.png')
        logger.info(" Writing %s..."%outfile)
        plt.savefig(outfile,bbox_inches='tight',dpi=60)
        outfile = samfile.replace('.npy','_6panel.pdf')
        logger.info(" Writing %s..."%outfile)
        plt.savefig(outfile,bbox_inches='tight',dpi=60)
        plt.close()
    try:
        title = name
        plotter.plot4()
        outfile = samfile.replace('.npy','_4panel.png')
        logger.info(" Writing %s..."%outfile)
        plt.suptitle(title)
        plt.savefig(outfile,bbox_inches='tight',dpi=60)
        plt.close()
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. plot4 remains best-effort: log and continue.
        logger.warning(" Failed to create plotter.plot4()")
def run(self):
    """Dispatch the requested pipeline components for each candidate target.

    Targets come either from explicit --coords options or from the
    candidate file; each is processed under a sanitized label.
    Fixes applied: Py3-incompatible ``e.message`` (AttributeError) replaced
    with ``str(e)``; ``yaml.load`` without a loader replaced by a single
    ``yaml.safe_load`` (the file was also being parsed three times);
    the 'scan' log filename left a literal '%s' uninterpolated; worker
    pools are now closed after use.
    """
    if self.opts.coords is not None:
        coords = self.opts.coords
        names = vars(self.opts).get('names',len(coords)*[''])
    else:
        names,coords = self.parser.parse_targets(self.config.candfile)
    # filesystem-safe labels derived from the target names
    labels=[n.lower().replace(' ','_').replace('(','').replace(')','') for n in names]
    self.outdir=mkdir(self.config['output']['mcmcdir'])
    self.logdir=mkdir(join(self.outdir,'log'))
    args = list(zip(len(names)*[self.opts.config],names,labels,coords))
    if 'mcmc' in self.opts.run:
        logger.info("Running 'mcmc'...")
        try:
            shutil.copy(self.opts.config,self.outdir)
        except Exception as e:
            # best-effort copy of the config next to the outputs
            logger.warning(str(e))
        for config,name,label,coord in args:
            glon,glat,radius = coord
            outfile = make_filenames(self.config,label)['samfile']
            base = splitext(basename(outfile))[0]
            logfile=join(self.logdir,base+'.log')
            jobname=base
            script = self.config['mcmc']['script']
            nthreads = self.config['mcmc']['nthreads']
            srcmdl = self.config['mcmc'].get('srcmdl')
            if srcmdl is not None:
                try:
                    shutil.copy(srcmdl,self.outdir)
                except Exception as e:
                    logger.warning(str(e))
                logger.info('%s (%s)'%(name,srcmdl))
                cmd='%s %s --name %s --srcmdl %s %s' % (
                    script,self.opts.config,name,srcmdl,outfile)
            else:
                logger.info('%s (%.4f,%.4f)'%(name,glon,glat))
                cmd='%s %s --name %s --gal %.4f %.4f --grid %s'% (
                    script,self.opts.config,name,glon,glat,outfile)
            logger.info(cmd)
            self.batch.submit(cmd,jobname,logfile,n=nthreads,a='mpirun')
    if 'results' in self.opts.run:
        logger.info("Running 'results'...")
        if len(args) > 1:
            pool = Pool(maxtasksperchild=1)
            pool.map(do_results,args)
            pool.close()
        else:
            do_results(*args)
    if 'membership' in self.opts.run:
        logger.info("Running 'membership'...")
        if len(args) > 1:
            pool = Pool(maxtasksperchild=1)
            pool.map(do_membership,args)
            pool.close()
        else:
            do_membership(*args)
    if 'plot' in self.opts.run:
        logger.info("Running 'plot'...")
        if len(args) > 1:
            pool = Pool(maxtasksperchild=1)
            pool.map(do_plot,args)
            pool.close()
        else:
            do_plot(*args)
    if 'collect' in self.opts.run:
        logger.info("Running 'collect'...")
        results = odict()
        srcmdl = odict()
        params = odict()
        for config,name,label,coord in args:
            # NOTE(review): builds the filename from `name`, not the
            # sanitized `label` used elsewhere — confirm against how the
            # source files were actually written.
            srcfile = make_filenames(self.config,name)['srcfile']
            # parse the YAML once (was parsed three times) and use
            # safe_load: the file is plain data, not Python objects
            with open(srcfile) as src:
                data = yaml.safe_load(src)
            results[name] = data['results']
            srcmdl[name] = data['source']
            params[name] = data['params']
        for base,output in [('results.yaml',results),('srcmdl.yaml',srcmdl),('params.yaml',params)]:
            outfile = join(self.outdir,base)
            with open(outfile,'w') as out:
                out.write(yaml.dump(output))
    if 'scan' in self.opts.run:
        logger.info("Running 'scan'...")
        for config,name,label,coord in args:
            logdir = mkdir('plots/log')
            # interpolate the label so each target gets its own log file
            # (the original submitted a literal '%s_lnlscan.log')
            logfile=join(logdir,'%s_lnlscan.log'%label)
            for xpar,ypar in [('age','metallicity'),
                              ('metallicity','distance_modulus'),
                              ('age','distance_modulus')]:
                cmd = 'python lnlscan.py %s --name %s --xpar %s --xbins 45 --ypar %s --ybins 45'%(self.opts.config,name,xpar,ypar)
                self.batch.submit(cmd,logfile=logfile)
# Monkey-patch the `run` implementation defined above onto the shared
# Pipeline class, then build and execute the pipeline from the CLI.
Pipeline.run = run
pipeline = Pipeline(__doc__,components)
pipeline.parser.add_coords(radius=True,targets=True)
pipeline.parser.add_ncores()
pipeline.parse_args()
pipeline.execute()
|
{
"content_hash": "afc5e7c0e656f8a78f4f8d1f14a85d7d",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 150,
"avg_line_length": 35.00829875518672,
"alnum_prop": 0.6158587175536329,
"repo_name": "DarkEnergySurvey/ugali",
"id": "59e7e949a191620260f33817c892bbf516fcc825",
"size": "8459",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ugali/pipeline/run_05.0_followup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "355304"
},
{
"name": "Python",
"bytes": "949638"
}
],
"symlink_target": ""
}
|
class StaticMethodExample:
    """Minimal demonstration of a @staticmethod alongside a no-op initializer."""

    def __init__(self):
        """No instance state is set up."""

    @staticmethod
    def foo():
        """Placeholder static method; does nothing and returns None."""
|
{
"content_hash": "dd69c9bf2e83c9b7304a704ff0efe95e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 26,
"avg_line_length": 14,
"alnum_prop": 0.5446428571428571,
"repo_name": "bruckhaus/challenges",
"id": "705f2bbe7c18d42676da6b74e4a420b6ddfabc79",
"size": "112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_challenges/static_method_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3364"
},
{
"name": "HTML",
"bytes": "12040"
},
{
"name": "Java",
"bytes": "255589"
},
{
"name": "Python",
"bytes": "121414"
},
{
"name": "Ruby",
"bytes": "16650"
},
{
"name": "Scala",
"bytes": "25224"
}
],
"symlink_target": ""
}
|
from mzc.group import Group
class CompareTeams(object):
    """Class that uses the rest of objects in MZ Compare to compare teams."""

    def __init__(self, usernames):
        self.usernames = usernames
        self.g = Group(self.usernames)
        self.g.get_details()
        self.g.convert_currency()
        numeric_fields = ['age', 'birthDay', 'birthSeason', 'salary', 'value']
        # max-player stats are only meaningful for a head-to-head comparison
        head_to_head = len(self.usernames) == 2
        for team in self.g.teams_list:
            team.to_int(numeric_fields)
            team.get_top_11_data()
            team.separate_juniors()
            team.count_players()
            team.get_sum_avg('age', True)
            team.get_sum_avg('value', True)
            team.get_sum_avg('salary', False)
            if head_to_head:
                team.get_max('age', True)
                team.get_max('salary', True)
                team.get_max('value', True)

    def get_teams(self):
        """Collect each team's data (seniors + juniors merged under 'players').

        The requesting user's own team is flagged with 'highlight'.
        Returns (list of team-data dicts, teamId of the first team).
        """
        self.teams = []
        own_team_id = self.g.team_data['teamId']
        for team in self.g.teams_list:
            data = team.team_data
            data['players'] = []
            data['players'].extend(team.seniors)
            data['players'].extend(team.juniors)
            self.teams.append(data)
            if data['teamId'] == own_team_id:
                data['highlight'] = True
        return self.teams, self.g.teams_list[0].team_data['teamId']
|
{
"content_hash": "61c41c3d2512ebf8ef8042824be1e1f1",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 36.02857142857143,
"alnum_prop": 0.5313243457573354,
"repo_name": "jreyes33/mzcompare",
"id": "70301aaec1baeb87535263fa821101f1c12ceebe",
"size": "1261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mzc/compareteams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "7786"
},
{
"name": "Python",
"bytes": "31807"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen ``PathParameter.value`` to a 200-character CharField."""

    dependencies = [
        ('plugininstances', '0009_auto_20190715_1706'),
    ]

    operations = [
        # Alter only the field definition; no data migration is needed.
        migrations.AlterField(
            model_name='pathparameter',
            name='value',
            field=models.CharField(max_length=200),
        ),
    ]
|
{
"content_hash": "e2dd60c3f1399efea74917644c7af6c7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 55,
"avg_line_length": 22,
"alnum_prop": 0.5880681818181818,
"repo_name": "FNNDSC/ChRIS_ultron_backEnd",
"id": "63ecda0f7000681ea8c888e4dd7ef959d0c7e89b",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chris_backend/plugininstances/migrations/0010_auto_20200131_1619.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3051"
},
{
"name": "HTML",
"bytes": "2839"
},
{
"name": "JavaScript",
"bytes": "262"
},
{
"name": "Python",
"bytes": "978019"
},
{
"name": "Shell",
"bytes": "74679"
}
],
"symlink_target": ""
}
|
from sys import exc_info, argv
import pandas as pd
import urllib
import xml.etree.ElementTree
import textwrap
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import PCA,TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.preprocessing import Normalizer, normalize
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans, MiniBatchKMeans
# Fetch up to 2000 arXiv entries matching "cloud AND robotics" via the
# arXiv Atom API.
query="http://export.arxiv.org/api/query?search_query=all:cloud+AND+robotics&start=0&max_results=2000"
# NOTE(review): `urllib.urlopen` is the Python 2 API; under Python 3 this
# would need `urllib.request.urlopen` — confirm the intended interpreter.
response = urllib.urlopen(query).read()
e = xml.etree.ElementTree.fromstring(response)
# For each Atom <entry>: [wrapped title, abstract, publication year,
# primary category term, entry id/URL].
d=[[" <br> ".join(textwrap.wrap(entry.find('{http://www.w3.org/2005/Atom}title').text.replace("\n",' '),60)),entry.find('{http://www.w3.org/2005/Atom}summary').text.replace("\n",' '),entry.find('{http://www.w3.org/2005/Atom}published').text.split("-")[0],entry.findall('{http://arxiv.org/schemas/atom}primary_category')[0].attrib['term'],entry.find('{http://www.w3.org/2005/Atom}id').text.replace("\n",' ')] for entry in e.findall('{http://www.w3.org/2005/Atom}entry')]
mydata = pd.DataFrame(d, columns = ["title", "abstract", "year","document identifier","uri"])
# Project-local helpers: embedding network and plotting functions.
from synnet import *
from plotfcns import *
# --- earlier vectorization experiments kept for reference ---
#X = transform(mydata['abstract'])
#vectorizer = CountVectorizer(min_df=1)
# or
#vectorizer = TfidfVectorizer(max_df=0.5, max_features=10000,
# min_df=2, stop_words='english',
# use_idf=True)
#X = vectorizer.fit_transform(mydata['abstract'])
#embeddings,dictionary,reverse_dictionary,count = transform(mydata['abstract'])
#pca=PCA(n_components=1, random_state=1);
#Xemb = pca.fit_transform(embeddings)
# Current pipeline: TF-IDF on the abstracts, then PCA down to 3 components.
vectorizer = TfidfVectorizer(max_features=100000, stop_words=None, use_idf=True)
#vectorizer = CountVectorizer(vocabulary=dictionary)
X = vectorizer.fit_transform(mydata['abstract'])
pca=PCA(n_components=3, random_state=1);
#X1 = pca.fit_transform((X>0)*(Xemb.T*np.ones(Xemb.shape)))
# NOTE: .toarray() densifies the sparse TF-IDF matrix for PCA.
X1 = pca.fit_transform(X.toarray())
#svd = TruncatedSVD(3)
#normalizer = Normalizer(copy=False)
#lsa = make_pipeline(svd, normalizer)
#X1 = lsa.fit_transform(X)
# Drop entries whose titles are front-matter rather than papers.
badtitle = ["In the News", "Table of", "Title page", "Front co", "Copyright not", "Content list", "Proceedings", "Contents"]
plot_n_save(mydata[~mydata['title'].str.contains('|'.join(badtitle),case=False)], X1, mydata['document identifier'], "type")
#plot2d_n_save(mydata[~mydata['title'].str.contains('|'.join(badtitle),case=False)], X1, mydata['document identifier'], "type")
|
{
"content_hash": "8ed0503777bbaa05c5baded2ac60103b",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 469,
"avg_line_length": 44.175438596491226,
"alnum_prop": 0.7188244638602065,
"repo_name": "slremy/cloudroboticsreferences",
"id": "c5dfd2997bfefb788d9d02b8696e21cd9969e944",
"size": "2518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processarxiv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29412"
},
{
"name": "TeX",
"bytes": "61416078"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Uri(A10BaseClass):
    """URI match criterion for an AAA rule.

    This class does not support CRUD operations; manage it through the parent.

    :param match_type: one of 'contains', 'ends-with', 'equals',
        'starts-with' -- how the request URI is compared against ``uri_str``.
    :param uri_str: URI string to match (1-128 characters).
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "uri"
        self.DeviceProxy = ""
        self.match_type = ""
        self.uri_str = ""
        # any attribute may be overridden via keyword arguments
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class SamplingEnable(A10BaseClass):
    """Counter-sampling selector for an AAA rule.

    This class does not support CRUD operations; manage it through the parent.

    :param counters1: one of 'all', 'total_count', 'hit_count' -- which
        counters to sample.
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "sampling-enable"
        self.DeviceProxy = ""
        self.counters1 = ""
        # any attribute may be overridden via keyword arguments
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class AccessList(A10BaseClass):
    """Access-list binding for an AAA rule.

    This class does not support CRUD operations; manage it through the parent.

    :param acl_name: 'ip-name' or 'ipv6-name' -- apply an IP or IPv6 named
        access list (mutually exclusive with ``acl_id``).
    :param acl_id: numeric ACL id, 1-199 (mutually exclusive with ``acl_name``).
    :param name: named access list to apply (1-16 characters).
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "access-list"
        self.DeviceProxy = ""
        self.acl_name = ""
        self.acl_id = ""
        self.name = ""
        # any attribute may be overridden via keyword arguments
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class AaaRule(A10BaseClass):
    """Rules of AAA policy.

    Class aaa-rule supports CRUD operations and inherits from
    `common/A10BaseClass`. This is the `"PARENT"` class for this module.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/aam/aaa-policy/{name}/aaa-rule/{index}`

    Key parameters (see the aXAPI v3 schema for full constraints):

    :param index: AAA rule index, 1-256 (required).
    :param match_encoded_uri: flag; enable URL decoding for URI matching.
    :param uuid: uuid of the object (not modifiable after creation).
    :param authorize_policy: authorization policy to bind to the AAA rule.
    :param uri: list of URI match objects (see :class:`Uri`).
    :param action: 'allow' or 'deny' traffic that matches this rule.
    :param sampling_enable: list of counter-sampling objects
        (see :class:`SamplingEnable`).
    :param domain_name: domain name to bind to the AAA rule
        (ex: a10networks.com, www.a10networks.com).
    :param authentication_template: authentication template name to bind
        to the AAA rule.
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        # `index` must be supplied for CRUD operations
        self.required = [ "index"]
        self.b_key = "aaa-rule"
        self.a10_url="/axapi/v3/aam/aaa-policy/{name}/aaa-rule/{index}"
        self.DeviceProxy = ""
        self.index = ""
        self.match_encoded_uri = ""
        self.uuid = ""
        self.authorize_policy = ""
        self.uri = []
        self.action = ""
        self.sampling_enable = []
        self.domain_name = ""
        self.authentication_template = ""
        self.access_list = {}
        # any attribute may be overridden via keyword arguments
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
|
{
"content_hash": "caac5803c54b22ecdadf15d51232535f",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 650,
"avg_line_length": 50.544,
"alnum_prop": 0.6215574548907882,
"repo_name": "a10networks/a10sdk-python",
"id": "8fc788204c9bb25bdc966acae7069cbd702edcca",
"size": "6318",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/aam/aam_aaa_policy_aaa_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
}
|
import pdb
import os
import numpy as np
from tempfile import mkdtemp
def get_digi_train_period(abr_header):
    """Return the epoch pulse period when any digital train value equals 4.

    Returns None (implicitly) when no such digital train is configured.
    """
    train_ixs = np.where(
        abr_header['ext_epoch_waveform_pulses']['nDigitalTrainValue'][0] == 4)[0]
    if train_ixs.size != 0:
        return abr_header['ext_multi-chan_inf']['lEpochPulsePeriod'][0]
def dac_step(level, leng):
    """Return a constant (step) command waveform of `leng` samples at `level`.

    Fixes a NameError: the original body referenced `lvl`, but the
    parameter is named `level`.
    """
    from numpy import tile
    return tile(level, leng)
def dac_ramp(rampto, startat, leng):
    """Return a linear-ramp command waveform as an interpolation function.

    The returned callable maps sample index in [0, leng] linearly from
    `startat` to `rampto`.

    Removed an unused `from numpy import r_` local import.
    """
    from scipy.interpolate import interp1d
    # interp1d over the two endpoints yields the linear ramp
    return interp1d([0, leng], [startat, rampto])
def are_epochs_ramps(abr_header, DAC_num):
    """Boolean mask over epochs of one DAC: True where epoch type code is 2 (ramp)."""
    types = abr_header['ext_epoch_waveform_pulses']['nEpochType'][0][DAC_num]
    return types == 2
def find_actv_epchs(abr_header, DAC_num):
    """Indices of the active epochs of one DAC channel.

    An epoch is active when its type code is non-zero AND it has a
    non-zero duration (initial duration or per-episode increment).

    Removed an unused `num_epsds` local and its dead `get_num_episodes`
    call from the original.
    """
    eewp = abr_header['ext_epoch_waveform_pulses']
    epch_types = eewp['nEpochType'][0, DAC_num]
    actv_epchs = np.nonzero(epch_types)[0]
    dur_inits = eewp['lEpochInitDuration'][0][DAC_num][actv_epchs]
    dur_incrms = eewp['lEpochDurationInc'][0][DAC_num][actv_epchs]
    # drop active epochs whose total possible duration is zero
    nonzero_dur = np.where(np.abs(dur_incrms) + np.abs(dur_inits) != 0)
    return actv_epchs[nonzero_dur]
def get_num_episodes(abf_header):
    """Number of episodes actually acquired, from the file-info section."""
    fid_info = abf_header['fid_size_info']
    return fid_info['lActualEpisodes'][0]
def make_range_array(abf_header, DAC_num):
    """(num_episodes x num_active_epochs) array whose rows are 0..num_episodes-1.

    Used to scale per-episode level/duration increments.
    """
    n_epsds = get_num_episodes(abf_header)
    n_epchs = len(find_actv_epchs(abf_header, DAC_num))
    return np.tile(np.c_[0:n_epsds], (1, n_epchs))
def get_dp_pad(abf_header):
    """Datapoints of pre/post 'holding' pad on each side of an episode.

    pClamp determines the pad as episode length (in rows per channel)
    floor-divided by 64 (per the pClamp LTP guide).
    """
    smpls_per_epsd = abf_header['trial_hierarchy']['lNumSamplesPerEpisode'][0]
    rows_per_epsd = smpls_per_epsd / get_num_chans(abf_header)
    return int(rows_per_epsd) // 64
def make_epch_levels(abf_header, DAC_num):
    """(num_episodes x num_active_epochs) array of DAC command levels.

    Level for episode i, epoch j = fEpochInitLevel[j] + i * fEpochLevelInc[j].
    """
    eewp = abf_header['ext_epoch_waveform_pulses']
    num_epsds = get_num_episodes(abf_header)
    actv_epchs = find_actv_epchs(abf_header, DAC_num)
    num_epchs = len(actv_epchs)  # NOTE(review): unused in this function
    # construct the levels array
    level_inits = eewp['fEpochInitLevel'][0]\
        [DAC_num][actv_epchs]
    level_incrms = eewp['fEpochLevelInc'][0]\
        [DAC_num][actv_epchs]
    range_array = make_range_array(abf_header, DAC_num)
    # per-episode additions: increment * episode index
    tmp_lvl_incrms = np.tile(level_incrms, (num_epsds,1)) * range_array
    levels = np.tile(level_inits,(num_epsds,1))+tmp_lvl_incrms
    return levels
def make_epch_durs(abf_header, DAC_num):
    """(num_episodes x num_active_epochs) array of epoch durations in datapoints.

    Duration for episode i, epoch j =
    lEpochInitDuration[j] + i * lEpochDurationInc[j].
    """
    eewp = abf_header['ext_epoch_waveform_pulses']
    num_epsds = get_num_episodes(abf_header)
    actv_epchs = find_actv_epchs(abf_header, DAC_num)
    dur_inits = eewp['lEpochInitDuration'][0]\
        [DAC_num][actv_epchs]
    dur_incrms = eewp['lEpochDurationInc'][0]\
        [DAC_num][actv_epchs]
    range_array = make_range_array(abf_header, DAC_num)
    # per-episode additions: increment * episode index
    tmp_dur_incrms = np.tile(dur_incrms, (num_epsds,1)) * range_array
    durs = np.tile(dur_inits,(num_epsds,1))+tmp_dur_incrms
    return (durs)
def get_epch_types(abf_header, DAC_num):
    """Type codes (e.g. step/ramp) of the active epochs for one DAC channel."""
    actv = find_actv_epchs(abf_header, DAC_num)
    eewp = abf_header['ext_epoch_waveform_pulses']
    return eewp['nEpochType'][0][DAC_num][actv]
def get_num_chans(abf_header):
    """Number of ADC channels in use (sampling-sequence entries != -1)."""
    sampling_seq = abf_header['multi-chan_inf']['nADCSamplingSeq'][0]
    return np.sum(sampling_seq != -1)
def get_total_aquired(abf_header):
    """Total number of samples actually acquired (all channels interleaved)."""
    fid_info = abf_header['fid_size_info']
    return fid_info['lActualAcqLength'][0]
def sample_rate(header):
    """Per-channel sampling rate in Hz.

    fADCSampleInterval is the multiplexed sample interval in microseconds,
    so Hz = 1e6 / interval / num_channels.
    """
    trial = header['trial_hierarchy']
    rate = 1 * 1000000 \
        / trial['fADCSampleInterval'] \
        / trial['nADCNumChannels']
    return rate[0]
def get_DAC_len(abf_header):
    """Samples per channel: total acquired // number of channels."""
    return get_total_aquired(abf_header) // get_num_chans(abf_header)
def get_sweep_len(abf_header):
    """Samples per sweep (one episode incl. pads): channel length // episodes."""
    return get_DAC_len(abf_header) // get_num_episodes(abf_header)
def get_epsd_len(abf_header):
    """Episode length in datapoints, excluding the pre/post holding pads.

    Fixes a NameError: the original called ``sweep_len(...)``, which is not
    defined anywhere in this module; the intended helper is
    ``get_sweep_len``.
    """
    swp_ln = get_sweep_len(abf_header)
    pad = get_dp_pad(abf_header)
    return swp_ln - 2 * pad
def make_swp_indxs(abf_header):
    """(num_sweeps x 2) [start, stop) indices of each sweep in the channel record."""
    swp_len = get_sweep_len(abf_header)
    dac_len = get_DAC_len(abf_header)
    edges = np.r_[0:dac_len + 1:swp_len]
    # pair consecutive edges into [start, stop) rows
    return np.c_[edges[:-1], edges[1:]]
def make_epsd_indxs(abf_header):
    """(num_episodes x 2) [start, stop) indices of each episode's data.

    These are the sweep indices shrunk by the holding pad on each side,
    so stop - start == sweep_len - 2 * pad.
    """
    sweep_len = get_sweep_len(abf_header)
    sweep_indxs = make_swp_indxs(abf_header)
    epsd_indxs = np.copy(sweep_indxs)
    pad = get_dp_pad(abf_header)
    # shift the start right past the leading pad ...
    epsd_indxs[:,0] = sweep_indxs[:,0] + pad
    # ... and set the stop to start + padded-out episode length
    epsd_len = sweep_len - 2*pad
    epsd_indxs[:,1] = epsd_indxs[:,0]+epsd_len
    return epsd_indxs
def make_epch_indxs(abf_header, DAC_num, nrmd = False):
    """(num_episodes x num_active_epochs x 2) array of epoch [start, end] indices.

    Args:
        abf_header: parsed ABF header record.
        DAC_num: DAC channel number (0 or 1).
        nrmd: index normalization mode: 'episode' leaves starts relative to
            the episode start; 'sweep' offsets by the first episode's start
            only; any other value (default) offsets by each episode's own
            start, giving absolute indices into the channel record.
    """
    #### epoch starts (rel to episode start) ####
    # Assume the left edge of each epoch is fixed even when durations grow
    # per episode: the maximum duration of an epoch over the whole trial
    # must elapse before the next epoch starts.
    durs = make_epch_durs(abf_header, DAC_num)
    max_dur = np.max(durs,0)
    num_epsds = get_num_episodes(abf_header)
    max_durs = np.tile(max_dur, (num_epsds,1))
    # accumulate durations (add a zero column) to get starts
    strts = np.c_[np.repeat(0,num_epsds), max_durs[:,0:-1]]
    epch_strt_indxs = np.add.accumulate(strts,1)
    #### epoch starts (rel to 0) ####
    actv_epchs = find_actv_epchs(abf_header, DAC_num)
    num_epchs = len(actv_epchs)
    epsd_indxs = make_epsd_indxs(abf_header)
    for i_epch in range(num_epchs):
        if nrmd=='sweep':
            # offset every episode by the first episode's start only
            epch_strt_indxs[:,i_epch] += (epsd_indxs[0,0])
        elif nrmd=='episode':
            # keep starts relative to each episode's own origin
            pass
        else:
            # absolute indices: offset by each episode's start
            epch_strt_indxs[:,i_epch]+=epsd_indxs[:,0]
    epch_end_indxs = epch_strt_indxs+durs
    # assemble into a (episodes x epochs x 2) nested list, then an array
    epch_indx_list = []
    for i_epsd in range(num_epsds):
        tmp_epsd_list=[]
        for i_epch in range(num_epchs):
            t = [epch_strt_indxs[i_epsd,i_epch],
                 epch_end_indxs[i_epsd,i_epch]]
            tmp_epsd_list.append(t)
        epch_indx_list.append(tmp_epsd_list)
    epch_indxs = np.array(epch_indx_list)
    return epch_indxs
class DAC(object):
    """View of one DAC output channel's epoch structure in an ABF file.

    Precomputes episode/epoch index arrays and command levels, and exposes
    per-episode epoch slices; iterating the object yields the slice for a
    chosen epoch across every episode.
    """
    def __init__(self, abf_header, DAC_num, cap_pad_ms = 1.5, **kwds):
        """
        Args:
            abf_header: parsed ABF header (must have abf_header_dtype).
            DAC_num: DAC channel number; only 0 and 1 are supported.
            cap_pad_ms: extra padding (ms) applied to epoch starts when
                pad_cap(True) is used, e.g. to skip capacitive transients.
            **kwds: forwarded to cooperative superclass __init__.
        """
        from abf_header_dtype import abf_header_dtype
        assert 0 <= DAC_num < 2, "DAC_num must be 0 or 1"
        assert abf_header.dtype is abf_header_dtype, "header has wrong dtype"
        self._num_episodes = get_num_episodes(abf_header)
        # whether waveform output is enabled on this DAC channel
        self._actv = abf_header['ext_epoch_waveform_pulses']['nWaveformEnable'][0][DAC_num]
        self._epsd_ixs = make_epsd_indxs(abf_header)
        self._epch_ixs = make_epch_indxs(abf_header, DAC_num)
        self._epch_lvls = make_epch_levels(abf_header, DAC_num)
        self._DAC_num = DAC_num
        self._cap_pad_ms = cap_pad_ms
        # NOTE(review): `cap_pad_ms//1000.` floor-divides, so any pad below
        # 1000 ms (incl. the 1.5 default) yields 0 datapoints; `/1000.`
        # looks intended — confirm before relying on pad_cap().
        self._cap_pad_dp = int(cap_pad_ms//1000.*sample_rate(abf_header))
        super(DAC, self).__init__(**kwds)
        self._actv_epchs = find_actv_epchs(abf_header, DAC_num)
        self._num_epchs = len(self._actv_epchs)
        # tracks whether epoch starts currently include the capacitive pad
        self._cap_pad = False
        self._make_epoch_slices()
    def _make_epoch_slices(self):
        # build per-episode lists of slice objects from the index array
        self._epch_slices = []
        for epsd_i in range(self._num_episodes):
            _tmp = []
            for epch_i in range(self._num_epchs):
                _tmp.append(slice(self._epch_ixs[epsd_i, epch_i, 0],
                                  self._epch_ixs[epsd_i, epch_i, 1]))
            self._epch_slices.append(_tmp)
    def pad_cap(self, pad=True):
        # Toggle the capacitive pad: shift every epoch start right (pad=True)
        # or back left (pad=False), rebuilding the cached slices. Idempotent.
        if pad is True and self._cap_pad is not True:
            self._epch_ixs[:,:,0]+=self._cap_pad_dp
            self._make_epoch_slices()
            self._cap_pad = True
        elif pad is False and self._cap_pad is True:
            self._epch_ixs[:,:,0]-=self._cap_pad_dp
            self._make_epoch_slices()
            self._cap_pad = False
    def __iter__(self, epoch = 1):
        # yield the slice of `epoch` for each episode in acquisition order
        for epsd_num in range(self._num_episodes):
            yield self._epch_slices[epsd_num][epoch]
class abf_reader(object):
    """Reader for Axon Binary Format (ABF) v1.8 files.

    Parses the structured header (project abf_header_dtype), exposes
    channel names and units, scaled data access via numpy memmap, timing
    helpers, and — for episodic recordings — the two DAC stimulus channels.
    """
    from mhp_re import yyyy_mm_dd_nnnn
    fnum_re = yyyy_mm_dd_nnnn
    def __init__(self, fname, **kwds):
        """Open *fname*, read and verify its header, and precompute
        convenience attributes. Keyword bad_tele=True patches files with
        broken telegraph info in place (see _fix_bad_tele)."""
        from abf_header_dtype import abf_header_dtype
        self._headr_struct = abf_header_dtype
        self.fname = os.path.basename(fname)
        # pull the trailing file number out of names like 2013_12_10_0023
        if abf_reader.fnum_re.search(self.fname):
            m = abf_reader.fnum_re.search(self.fname)
            self.fnum = m.groups()[-1]
        else:
            self.fnum = 'NA'
        if os.path.isabs(fname):
            self.path = os.path.dirname(fname)
        else:
            self.path = os.path.dirname(os.path.abspath(fname))
        self.path_file = self.path + os.sep + self.fname
        self.fid = open(self.path_file, 'rb')
        self.read_header()
        # make sure that I have a compatible abf version
        self.verify_version()
        self.addGain()
        self._chan_holder = -1
        self._num_episodes = \
            get_num_episodes(self.header)
        # rewrite the ADC units into a convenience variable, trunc to 2 chars
        self._ADC_Units = \
            np.array(self.header['multi-chan_inf']['sADCUnits'][0],
                     dtype = '|S2')
        # rewrite the DAC units into a convenience variable, trunc to 2 chars
        self._DAC_Units = \
            np.array(self.header['multi-chan_inf']['sDACChannelUnits'][0],
                     dtype = '|S2')
        # make an atomic size, so that data can be broken up with out
        # segmenting cols(channels)
        if self.header['f_structure']['nDataFormat'][0]==1: #float data
            self.base_size = 4 * self.num_chans() # 4byte size per float
        elif self.header['f_structure']['nDataFormat'][0]==0: #integer data
            self.base_size = 2 * self.num_chans() # 2byte size per int
        # check operation type, continous is 3 episodic stimulation is 5
        if self.header['fid_size_info']['nOperationMode'][0]==5:
            self.DAC_0 = DAC(self.header, 0)
            self.DAC_1 = DAC(self.header, 1)
        # for convenience, make a protocol variable
        self.protocol = self.header['ext_environment_inf']['sProtocolPath'][0].rstrip()
        # to deal with bad telegraph info in some files
        self.bad_tele = False
        if 'bad_tele' in kwds.keys():
            assert type(kwds['bad_tele']) is bool, 'must be bool'
            self.bad_tele = kwds.pop('bad_tele')
        if self.bad_tele:
            self._fix_bad_tele()
    def verify_version(self):
        """Assert the file is ABF version 1.8.x; other versions unsupported."""
        FVerNum = self.header['fid_size_info']['fFileVersionNumber']
        ErrMsg = "%s is version %f, this 'prog' only reads abf 1.8" % (self.fname, FVerNum)
        assert (FVerNum>=1.8) & (FVerNum<2.0), ErrMsg
    def hdr_offset(self):
        """Byte offset of the data section in the file."""
        from abf_header_defs import ABF_BLOCKSIZE
        return ((self.header['f_structure']['lDataSectionPtr'] * ABF_BLOCKSIZE)[0])
    def total_aq(self):
        """Total number of acquired samples (all channels, multiplexed)."""
        return (self.header['fid_size_info']['lActualAcqLength'][0])
    def actv_dacs(self):
        """Indexes of the DAC channels with waveform output enabled."""
        return (np.nonzero(self.header['ext_epoch_waveform_pulses']\
                               ['nWaveformEnable'][0])[0])
    # custom get and set state allow pickle to handle the pickling of
    # object with out choking on the open file handle
    def __getstate__(self):
        odict = self.__dict__.copy() # copy the dict since we change it
        del odict['fid']             # remove filehandle entry
        if 'mm' in odict.keys(): del odict['mm'] # drop memmap when serializing
        return odict
    def __setstate__(self, dict):
        # BUG FIX: restore ALL pickled attributes — the original only
        # reopened the file handle and silently discarded the rest of the
        # object's state on unpickling.
        self.__dict__.update(dict)
        self.fid = open(self.path_file, 'rb')
    def read_header(self):
        """(Re)read the structured header from the start of the file."""
        self.fid.seek(0)
        self.header = np.fromfile(self.fid, self._headr_struct, 1)
    def _read_seq(self):
        # ADC sampling sequence; -1 entries mark unused slots
        return [read for read in self.header['multi-chan_inf']['nADCSamplingSeq'][0] if read != -1]
    def next_chan(self):
        """Advance the internal channel cursor; raise StopIteration past the end."""
        self._chan_holder += 1
        if self._chan_holder > self.num_chans()-1:
            raise StopIteration
        return self._chan_holder
    def num_chans(self):
        """Number of sampled ADC channels."""
        return len(self._read_seq())
    def get_chan_name(self, grep_str):
        """Return indexes of sampled channels whose name matches *grep_str*
        (case-insensitive regex search)."""
        import re
        from re import compile as recomp
        prog = recomp(grep_str, flags = re.I)
        has_name = []
        self._chan_holder = -1
        try:
            while True:
                chan_indx = self.next_chan()
                chan_name = self.chan_name(chan_indx)
                result = prog.search(chan_name)
                if (result):
                    has_name.append(chan_indx)
        except StopIteration:
            self._chan_holder = -1
            return has_name
    def chan_names(self):
        """Print a table of sampled channel numbers, adc slots and names."""
        adc_l = ['adc_' + str(read) for read in np.r_[0:16]]
        chans = self.header['multi-chan_inf']['sADCChannelName'][0]
        sampled_chans = self._read_seq()
        #these list of sampled chans is in the order it was sampled
        for num, sampled_chan in enumerate(sampled_chans):
            print('%-3s' '%-3s' '%-8s' '%-10s' '%-10s' %(num, '-'*3,
                                                         adc_l[sampled_chan],
                                                         '-'*8, chans[sampled_chan]))
    def chan_name(self, chan_no=0):
        """Name of sampled channel *chan_no*; trailing whitespace stripped."""
        chans = self.header['multi-chan_inf']['sADCChannelName'][0]
        sampled_chans = self._read_seq()
        #rstrip removes the trailing white space
        return chans[sampled_chans[chan_no]].rstrip()
    def read_data(self, **kwds):
        '''reads multiplexed data from abfs into an array'''
        ## the times that are asso with discontinuous recording are wonky
        from numpy import float32, int16, memmap
        # prep to establish order of array in memory
        from abf_header_defs import ABF_BLOCKSIZE
        offset = self.hdr_offset()
        total_aq = self.total_aq()
        numchans = self.num_chans()
        ncols = numchans
        nrows = total_aq//numchans
        # handle optional kwds, for subsetting data
        # start_row needs to be translated into a byte offset
        # other kwds, num_rows or stop_row do not
        if 'start_time' in kwds.keys():
            start_time = kwds.pop('start_time')
            kwds['start_row'] = int(start_time * self.sample_rate())
        if 'stop_time' in kwds.keys():
            stop_time = kwds.pop('stop_time')
            kwds['stop_row'] = int(stop_time * self.sample_rate())
        if 'r_slice' in kwds.keys():
            row_slice = kwds.pop('r_slice')
            start_row = row_slice.start
            offset += (start_row * self.base_size)
            stop_row = row_slice.stop
            # check if start_row is beginning.
            # BUG FIX: call hdr_offset() — the original compared the int
            # offset against the bound method object, which is never equal.
            if offset != self.hdr_offset():
                nrows = stop_row - start_row
            elif offset == self.hdr_offset():
                nrows = stop_row
        if 'start_row' in kwds.keys():
            start_row = kwds.pop('start_row')
            offset += (start_row * self.base_size)
        if 'num_rows' in kwds.keys():
            nrows = kwds.pop('num_rows')
        if 'stop_row' in kwds.keys():
            # check if start_row is beginning.
            # BUG FIX: same method-object comparison as above; with the
            # original code a stop_row given without start_row crashed on
            # an undefined start_row because the first branch always ran.
            stop_row = kwds.pop('stop_row')
            if offset != self.hdr_offset():
                nrows = stop_row - start_row
            elif offset == self.hdr_offset():
                nrows = stop_row
        #see if is float data
        if self.header['f_structure']['nDataFormat'][0]==1:
            data = memmap(self.fid, dtype = float32,
                          shape = (nrows,ncols), offset = offset)
            data = np.copy(data)
            return data
        #see if is integer data
        elif self.header['f_structure']['nDataFormat'][0]==0:
            unscl_data = memmap(self.fid, dtype = int16,
                                shape = (nrows,ncols),
                                mode = 'r',offset = offset)
            # now scale data and return
            unscl_data = unscl_data.astype(float32)
            return (self.scale_int_data(unscl_data))
    def scale_int_data(self, data):
        """Scale raw int16 samples to physical units, channel by channel,
        using the per-channel gains/offsets from the header."""
        for indx, chan in enumerate(self._read_seq()):
            divis = (self.header['multi-chan_inf']['fInstrumentScaleFactor'][0][chan] * \
                     self.header['multi-chan_inf']['fSignalGain'][0][chan] * \
                     self.header['multi-chan_inf']['fADCProgrammableGain'][0][chan] * \
                     self.addGain[chan])
            mult = self.header['hardware_inf']['fADCRange'][0] \
                   / self.header['hardware_inf']['lADCResolution'][0]
            offs = self.header['multi-chan_inf']['fInstrumentOffset'][0][chan] - \
                   self.header['multi-chan_inf']['fSignalOffset'][0][chan]
            data[:,indx] = data[:,indx] / divis * mult + offs
        return data
    def addGain(self):
        '''method helps with scaling'''
        # NOTE: deliberately rebinds self.addGain from a method to the gain
        # array; it is called exactly once from __init__.
        self.addGain = self.header['ext_environment_inf']['nTelegraphEnable'][0] * \
                       self.header['ext_environment_inf']['fTelegraphAdditGain'][0]
        self.addGain = np.where(self.addGain==0, 1, self.addGain)
    def _fix_bad_tele(self):
        '''hack to work around abf 1.8s created with clampfit10'''
        # rewrites the header IN the file on disk
        self.fid_w = open(self.path_file, 'r+b')
        self.fid_w.seek(0)
        # just disable the telegraph gains by setting to zero
        self.header['ext_environment_inf']['nTelegraphEnable'][0]*=0
        self.fid_w.write(self.header)
        self.fid_w.close()
        del self.fid_w
    def get_synch_array(self):
        """Read the synch array — one [start, length] row per entry — from
        the file's synch section."""
        from abf_header_defs import ABF_BLOCKSIZE
        self.fid.seek(self.header['f_structure']['lSynchArrayPtr'][0] * ABF_BLOCKSIZE)
        synch_array_dtype = [('start', np.int32), ('length', np.int32)]
        synch_array = np.fromfile(self.fid,
                                  synch_array_dtype,
                                  self.header['f_structure']['lSynchArraySize'])
        sl = []
        for strt, length in synch_array:
            sl.append([strt, length])
        sa = np.array(sl)
        return sa
    def sample_rate(self):
        """Per-channel sampling rate in Hz; computed once, then cached."""
        try:
            self._sample_rate
            return self._sample_rate
        except AttributeError:
            # sample interval is in microseconds per (multiplexed) sample,
            # so divide 1e6 by it and by the channel count
            self._sample_rate = \
                1 * 1000000\
                /self.header['trial_hierarchy']['fADCSampleInterval']\
                /self.header['trial_hierarchy']['nADCNumChannels']
            self._sample_rate = self._sample_rate[0]
            return self._sample_rate
    def xstep(self):
        """Time between consecutive samples of one channel, in seconds."""
        try:
            self._xstep
            return self._xstep
        except AttributeError:
            return 1/self.sample_rate()
    def start_time(self, new_time = None):
        """Get the recording start time as a datetime (cached), or — when
        *new_time* is a datetime — write it back into the file header."""
        if new_time is None:
            try:
                return self._file_start_time
            except AttributeError:
                from datetime import datetime
                self._File_Time = {}
                yyyymmdd = str(self.header['fid_size_info']['lFileStartDate'][0])
                self._File_Time['year'] = int(yyyymmdd[0:4])
                self._File_Time['month'] = int(yyyymmdd[4:6])
                self._File_Time['day'] = int(yyyymmdd[-2:])
                # 'lFileStartTime is in seconds. do some division for time
                seconds_time = self.header['fid_size_info']['lFileStartTime'][0]
                self._File_Time['hour'] = seconds_time//(60*60)
                self._File_Time['minute'] = (seconds_time%(60*60))//60
                self._File_Time['second'] = (seconds_time%(60*60))%60
                self._File_Time['microsecond'] = \
                    int(self.header['environment_inf']['nFileStartMillisecs'][0]\
                        * 1000)
                #for reading self._File_Time = t_d
                t_d = self._File_Time
                self._file_start_time = datetime(t_d['year'],\
                                                 t_d['month'] , t_d['day'],\
                                                 t_d['hour'],t_d['minute'],\
                                                 t_d['second'],t_d['microsecond'])
                return self._file_start_time
        else:
            from datetime import datetime
            from math import floor
            assert type(new_time) is datetime
            # split the datetime into the header's date / whole-seconds /
            # milliseconds representation
            NewTimeDayStart = datetime(new_time.year,
                                       new_time.month,
                                       new_time.day)
            FileStartDate = np.zeros(1, dtype = np.int32)
            FileStartDate[:] = new_time.year * 10**4 + \
                               new_time.month * 10**2 +\
                               new_time.day
            FileStartTime = np.zeros(1,np.int32)
            Seconds = (new_time - NewTimeDayStart).total_seconds()
            FileStartTime[:] = floor(Seconds)
            FileStartMilli = np.zeros(1,np.int32)
            FileStartMilli[:] = int((Seconds - floor(Seconds))*1000)
            self.header['fid_size_info']['lFileStartDate'][0] = FileStartDate
            self.header['fid_size_info']['lFileStartTime'][0] = FileStartTime
            self.header['environment_inf']['nFileStartMillisecs'][0] = FileStartMilli
            # persist the modified header to disk
            self.fid_w = open(self.path_file, 'r+b')
            self.fid_w.seek(0)
            self.fid_w.write(self.header)
            self.fid_w.close()
            del self.fid_w
            # invalidate the cached value so the next read reflects the write
            try:
                del self._file_start_time
            except AttributeError:
                pass
    def stop_watch_time(self):
        """Header stopwatch time as a plain int (seconds)."""
        return int(self.header['fid_size_info']['lStopwatchTime'][0])
if __name__=='__main__':
    # Ad-hoc smoke test: load one known recording from the lab data tree.
    # Requires the LABDIR environment variable to point at the data root;
    # os.environ.get returns None otherwise and os.path.join will raise.
    import os
    pth = os.path.join(os.environ.get("LABDIR"),
                       'B44_B48_experiments','cng_current',
                       'cbi2_current_time_course','expts',
                       '2013_12_10','clmpx_bin')
    abr = abf_reader(os.path.join(pth,'2013_12_10_0023.abf'))
    d = abr.DAC_0
|
{
"content_hash": "a23d4618411ad01cfda56183628235b5",
"timestamp": "",
"source": "github",
"line_count": 567,
"max_line_length": 99,
"avg_line_length": 39.467372134038804,
"alnum_prop": 0.5721690946465279,
"repo_name": "matthewperkins/abf_reader",
"id": "7eae64c9a8aff58631198838548d569f14f52cf1",
"size": "22378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abf_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82260"
}
],
"symlink_target": ""
}
|
__author__ = 'Dmitry Golubkov'
__email__ = 'dmitry.v.golubkov@cern.ch'
from django.core.management.base import BaseCommand
from optparse import make_option
from taskengine.atlas.taskdef import TaskDefinition
from taskengine.atlas.datamgmt import AMIWrapper
from deftcore.log import Logger
logger = Logger().get()
class Command(BaseCommand):
    """Management command that runs one DEFT background worker, selected
    with the -n/--name option. Unknown or missing names do nothing."""

    can_import_settings = True

    option_list = BaseCommand.option_list + (
        make_option('-n', '--name',
                    type='choice',
                    action='store',
                    dest='worker_name',
                    choices=['process_requests',
                             'sync_ami_projects',
                             'sync_ami_types',
                             'sync_ami_phys_containers',
                             'sync_ami_tags',
                             'create_task_input',
                             'define_task_simulation_type',
                             'search_task_duplicates'],
                    default=None,
                    help=''),
    )

    def handle(self, *args, **options):
        """Dispatch to the selected worker."""
        worker_name = options['worker_name']
        # the four AMI sync workers map 1:1 onto AMIWrapper method names
        ami_workers = ('sync_ami_projects',
                       'sync_ami_types',
                       'sync_ami_phys_containers',
                       'sync_ami_tags')
        if worker_name == 'process_requests':
            TaskDefinition(debug_mode=False).process_requests(restart=False, no_wait=False)
        elif worker_name in ami_workers:
            getattr(AMIWrapper(), worker_name)()
        elif worker_name == 'create_task_input':
            # debug helpers are imported lazily, only when requested
            from debug import create_task_input
            create_task_input()
        elif worker_name == 'define_task_simulation_type':
            from debug import define_task_simulation_type
            define_task_simulation_type()
        elif worker_name == 'search_task_duplicates':
            from debug import search_task_duplicates
            search_task_duplicates()
|
{
"content_hash": "b37ac282905b87bbb5a1ade9c488acb2",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 69,
"avg_line_length": 41.30357142857143,
"alnum_prop": 0.5512321660181583,
"repo_name": "retmas-dv/panda-deft",
"id": "c97140cb27a8b890f24cd53868149f7e79cb9140",
"size": "2313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deftcore/server/management/commands/runworker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "371317"
}
],
"symlink_target": ""
}
|
from typing import Tuple
from amino.state.maybe import MaybeState
from amino import Maybe, Just, Left, Right, Either, Id, List
from amino.state.either import EitherState
from amino.state.eval import EvalState
from amino.state.id import IdState
from amino.test.spec_spec import Spec
class State3Spec(Spec):
    """Behavioral specs for amino's State monad variants
    (MaybeState, EitherState, EvalState, IdState)."""
    # type-checking smoke test only; the final bare `x` just silences
    # the unused-variable warning
    def types(self) -> None:
        s: MaybeState[int, int] = MaybeState.pure(5)
        x: Maybe[Tuple[int, int]] = s.run(2)
        x
    # pure wraps a value; run returns (state, value) inside the carrier
    def pure(self) -> None:
        assert(MaybeState.s(str).pure(1).run('state') == Just(('state', 1)))
    # flat_map chains a state-inspecting computation
    def flat_map(self) -> None:
        s = MaybeState.s(str).pure(1)
        def f(a: int) -> MaybeState[str, int]:
            return MaybeState.s(str).inspect(lambda s: len(s) + a)
        s1 = s.flat_map(f)
        assert(s1.run('str') == Just(('str', 4)))
    # modify transforms the state with a plain function
    def modify(self) -> None:
        MaybeState.s(str).modify((lambda s: s + ' updated')).run_s('state').should.equal(Just('state updated'))
    # modify_f transforms the state with an effectful function
    def modify_f(self) -> None:
        MaybeState.s(str).modify_f((lambda s: Just(s + ' updated'))).run_s('state').should.equal(Just('state updated'))
    # flat_map_f short-circuits on a Left result
    def flat_map_f(self) -> None:
        l = Left('boing')
        EitherState.pure(1).flat_map_f(lambda a: l).run('start').should.equal(l)
    def zip(self) -> None:
        EvalState.pure(1).zip(EvalState.pure(2)).run_a(None)._value().should.equal(List(1, 2))
    # eff lifts the inner Either so flat_map applies inside it
    def eff(self) -> None:
        def f(a: int) -> EvalState[int, Either[str, int]]:
            return EvalState.pure(Right(2))
        s0: EvalState[int, Either[str, int]] = EvalState.s(int).pure(Right(1))
        s0.eff(Either).flat_map(f).value.run(1)._value().should.equal((1, Right(2)))
        assert ((s0 // EvalState.s(int).modify(lambda a: a).replace).eff(Either).flat_map(f).value.run(1)._value() ==
                (1, Right(2)))
    def id(self) -> None:
        s = IdState.s(int).inspect(lambda s0: s0 * 2).flat_map(lambda a: IdState.pure(a + 4))
        s.run(5).should.equal(Id((5, 14)))
    # transform_s maps the state type through from/to converters
    def transform_s(self) -> None:
        def trans_from(r: str) -> int:
            return int(r)
        def trans_to(r: str, s: int) -> str:
            return str(s)
        s1 = IdState(Id(lambda s: Id((s + 1, None)))).transform_s(trans_from, trans_to)
        s1.run_s('2').value.should.equal('3')
    # transform_f changes the carrier monad (Maybe -> Either)
    def transform_f(self) -> None:
        MaybeState.pure(7).transform_f(EitherState, lambda m: m.to_either('none')).run_a(None).should.equal(Right(7))
    def lift_left(self) -> None:
        EitherState.lift(Left(1)).run_a(None).should.equal(Left(1))
__all__ = ('State3Spec',)
|
{
"content_hash": "fa9b52ac7708f706da423de09fa1fed6",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 119,
"avg_line_length": 37.85294117647059,
"alnum_prop": 0.5936285936285937,
"repo_name": "tek/amino",
"id": "614109c3e829932d5081151968e29440c3e53bd8",
"size": "2574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit/state_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "341735"
}
],
"symlink_target": ""
}
|
class Evaluator:
    """Evaluate a single input line using an injected lexer.

    The lexer must expose tokenize(line) yielding tokens with a raw_value
    attribute; only the first token is consulted.
    """

    def __init__(self, lexer):
        self._lexer = lexer

    def evaluate(self, line):
        """Return the integer value of the first token on *line*."""
        first_token = next(self._lexer.tokenize(line))
        return int(first_token.raw_value)
class REPL:
    """A read-eval-print loop assembled from injected callables."""

    def __init__(self, read, print, evaluate):
        # store the injected collaborators under internal names
        self._read_line = read
        self._show = print
        self._evaluate = evaluate

    def loop(self):
        """Prompt with 'mm-i> ', evaluate, print; stop on Ctrl-C."""
        try:
            while True:
                self._show(self._evaluate(self._read_line('mm-i> ')))
        except KeyboardInterrupt:
            pass
if __name__ == '__main__':
    # Wire the REPL to real stdin/stdout using the project lexer.
    from lexer import Lexer
    REPL(input, print, Evaluator(Lexer()).evaluate).loop()
|
{
"content_hash": "ea658f34696ac50cc6fda8ab724a39ed",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 63,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.522189349112426,
"repo_name": "PolyglotSymposium/mm-i",
"id": "f700f33dffc00e1f53d3e1c3b1d4f0189b0c7b82",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/repl.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21478"
},
{
"name": "Shell",
"bytes": "119"
},
{
"name": "VimL",
"bytes": "652"
}
],
"symlink_target": ""
}
|
from google.cloud import edgecontainer_v1
async def sample_create_vpn_connection():
    """Create a VPN connection with the EdgeContainer async client and
    wait for the long-running operation to finish."""
    # Create a client
    client = edgecontainer_v1.EdgeContainerAsyncClient()

    # Initialize request argument(s)
    vpn_connection = edgecontainer_v1.VpnConnection()
    vpn_connection.name = "name_value"

    request = edgecontainer_v1.CreateVpnConnectionRequest(
        parent="parent_value",
        vpn_connection_id="vpn_connection_id_value",
        vpn_connection=vpn_connection,
    )

    # Make the request.
    # BUG FIX: the async client's method is a coroutine and must itself be
    # awaited; without the await, `operation` is a bare coroutine object
    # and operation.result() raises AttributeError.
    operation = await client.create_vpn_connection(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()

    # Handle the response
    print(response)

# [END edgecontainer_v1_generated_EdgeContainer_CreateVpnConnection_async]
|
{
"content_hash": "2c3d8ee832308c5f67afc2eae41845ab",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 28.071428571428573,
"alnum_prop": 0.7175572519083969,
"repo_name": "googleapis/python-edgecontainer",
"id": "842695613379f460f4a67e0ba48121ad2bd57292",
"size": "2196",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/edgecontainer_v1_generated_edge_container_create_vpn_connection_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "607901"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
}
|
import shutil
from unittest import TestCase
from pathlib import Path
from src.dokuwiki_to_hugo import DokuWikiToHugo
class TestDokuWikiToHugo(TestCase):
    """End-to-end conversion tests against the fixtures in test/subdir."""

    def tearDown(self):
        # each test writes into ./output; wipe it between tests
        shutil.rmtree('output')

    def test_doku_to_hugo_converts_home_to_index_markdown_files(self):
        DokuWikiToHugo().doku_to_hugo('test/subdir')
        content = Path("output/test/subdir/_index.md").read_text()
        self.assertIn('subdir index', content)

    def test_convert_whole_dir(self):
        DokuWikiToHugo().doku_to_hugo('test/subdir')
        content = Path("output/test/subdir/moar/dokuwiki_header_in_subdir.md").read_text()
        # front-matter delimiter present means the header was generated
        self.assertIn('+++', content)
        # a converted heading shows the body was processed too
        self.assertIn('##### some header', content)
|
{
"content_hash": "371c549d3fe5ffb0518e62f335b62792",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 91,
"avg_line_length": 32.08,
"alnum_prop": 0.6783042394014963,
"repo_name": "wgroeneveld/dokuwiki-to-hugo",
"id": "8a13c8e032764754754b032fe0f0a562b668bdac",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_dokuwiki_to_hugo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "324"
},
{
"name": "Python",
"bytes": "35879"
}
],
"symlink_target": ""
}
|
import functools
from fabric.api import run, env
def dir_exists(dir):
    # NOTE(review): the remote existence check is commented out, so this
    # always returns None (falsy) and with_defaults never populates
    # env.releases — confirm whether that is intentional before
    # re-enabling the line below.
    # return run('[ -d %s ] && echo 1 || echo 0' % dir) == '1'
    pass
def with_defaults(func):
    """A decorator that sets all defaults for a task.

    At call time it fills missing fabric ``env`` settings (paths derived
    from env.base_dir / env.app_name) and, when the remote releases
    directory exists, caches the release history in env.releases along
    with the current/previous release paths.
    """
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        env.setdefault('use_sudo', True)
        env.setdefault('git_branch', 'master')
        env.setdefault('python_bin', 'python')
        env.setdefault('remote_owner', 'www-data')
        env.setdefault('remote_group', 'www-data')
        # derived paths; setdefault keeps any values the fabfile set
        env.setdefault('domain_path', "%(base_dir)s/%(app_name)s" % \
            { 'base_dir':env.base_dir,
              'app_name':env.app_name })
        env.setdefault('current_path', "%(domain_path)s/current" % \
            { 'domain_path':env.domain_path })
        env.setdefault('releases_path', "%(domain_path)s/releases" % \
            { 'domain_path':env.domain_path })
        env.setdefault('shared_path', "%(domain_path)s/shared" % \
            { 'domain_path':env.domain_path })
        # IDIOM FIX: has_key() is Python-2-only; membership test works on
        # any dict-like env
        if 'releases' not in env:
            if dir_exists(env.releases_path):
                # releases are directory names sorted lexically; the last
                # one is the currently deployed revision
                env.releases = sorted(run('ls -x %(releases_path)s' % { 'releases_path':env.releases_path }).split())
                if len(env.releases) >= 1:
                    env.current_revision = env.releases[-1]
                    env.current_release = "%(releases_path)s/%(current_revision)s" % \
                        { 'releases_path':env.releases_path,
                          'current_revision':env.current_revision }
                if len(env.releases) > 1:
                    env.previous_revision = env.releases[-2]
                    env.previous_release = "%(releases_path)s/%(previous_revision)s" % \
                        { 'releases_path':env.releases_path,
                          'previous_revision':env.previous_revision }
        return func(*args, **kwargs)
    return decorated
|
{
"content_hash": "96e231a8b0f4acfe292d1c1d632ad461",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 117,
"avg_line_length": 47.91111111111111,
"alnum_prop": 0.48794063079777367,
"repo_name": "peroshi/fabistrano",
"id": "5f05fd53ab764acc4a7d4c2dfcb277fe15059d4f",
"size": "2156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabistrano/helpers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "7428"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import importlib
from django.utils.translation import get_language_info
import pytz
from appconf import AppConf
def load_path_attr(path):
    """Resolve a dotted path like "pkg.module.attr" to the named attribute.

    Raises ImproperlyConfigured when the module cannot be imported or the
    attribute is missing.
    """
    split_at = path.rfind(".")
    module_path, attr_name = path[:split_at], path[split_at+1:]
    try:
        module = importlib.import_module(module_path)
    except ImportError as e:
        raise ImproperlyConfigured("Error importing {0}: '{1}'".format(module_path, e))
    try:
        return getattr(module, attr_name)
    except AttributeError:
        raise ImproperlyConfigured("Module '{0}' does not define a '{1}'".format(module_path, attr_name))
class AccountAppConf(AppConf):
    """Default settings for django-user-accounts; each value can be
    overridden with an ACCOUNT_-prefixed Django setting (AppConf)."""
    OPEN_SIGNUP = True
    LOGIN_URL = "account_login"
    SIGNUP_REDIRECT_URL = "/"
    LOGIN_REDIRECT_URL = "/"
    LOGOUT_REDIRECT_URL = "/"
    PASSWORD_CHANGE_REDIRECT_URL = "account_password"
    PASSWORD_RESET_REDIRECT_URL = "account_login"
    # seconds in ten years
    REMEMBER_ME_EXPIRY = 60*60*24*365*10
    USER_DISPLAY = lambda user: user.username
    CREATE_ON_SAVE = True
    EMAIL_UNIQUE = True
    EMAIL_CONFIRMATION_REQUIRED = False
    EMAIL_CONFIRMATION_EMAIL = True
    EMAIL_CONFIRMATION_EXPIRE_DAYS = 3
    EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = "account_login"
    EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = None
    SETTINGS_REDIRECT_URL = "account_settings"
    NOTIFY_ON_PASSWORD_CHANGE = True
    # dotted paths; resolved to callables by the configure_* hooks below
    DELETION_MARK_CALLBACK = "account.callbacks.account_delete_mark"
    DELETION_EXPUNGE_CALLBACK = "account.callbacks.account_delete_expunge"
    DELETION_EXPUNGE_HOURS = 48
    HOOKSET = "account.hooks.AccountDefaultHookSet"
    # (value, label) choice pairs; both evaluated at import time
    TIMEZONES = list(zip(pytz.all_timezones, pytz.all_timezones))
    LANGUAGES = [
        (code, get_language_info(code).get("name_local"))
        for code, lang in settings.LANGUAGES
    ]
    def configure_deletion_mark_callback(self, value):
        return load_path_attr(value)
    def configure_deletion_expunge_callback(self, value):
        return load_path_attr(value)
    def configure_hookset(self, value):
        # HOOKSET is instantiated, not just resolved
        return load_path_attr(value)()
|
{
"content_hash": "02cb92c8e1f75baa094c77c98c023d3f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 95,
"avg_line_length": 33.3125,
"alnum_prop": 0.6960600375234521,
"repo_name": "Amechi101/concepteur-market-app",
"id": "be18bf2b0f026f9cb5caee62e03588a7929d6bcb",
"size": "2132",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/account/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "270302"
},
{
"name": "HTML",
"bytes": "162957"
},
{
"name": "JavaScript",
"bytes": "170921"
},
{
"name": "Python",
"bytes": "7897172"
},
{
"name": "Shell",
"bytes": "3761"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the tangible template object for this story-loot item."""
    tangible = Tangible()
    tangible.template = "object/tangible/space/story_loot/shared_loot_all_freighter_2.iff"
    tangible.attribute_template_id = -1
    tangible.stfName("space/story_loot_n","loot_all_freighter_2")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return tangible
|
{
"content_hash": "c52e463c5415abc87d77f04f1f10a687",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 25.76923076923077,
"alnum_prop": 0.7014925373134329,
"repo_name": "anhstudios/swganh",
"id": "96b1b17afda2008fe8bb8af6e76e5b6023078306",
"size": "480",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/space/story_loot/shared_loot_all_freighter_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class LocationmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the scattergeo ``locationmode`` property."""

    def __init__(self, plotly_name="locationmode", parent_name="scattergeo", **kwargs):
        # callers may override the allowed values or the edit type via kwargs
        allowed = kwargs.pop(
            "values", ["ISO-3", "USA-states", "country names", "geojson-id"]
        )
        super(LocationmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=allowed,
            **kwargs,
        )
|
{
"content_hash": "2af461b5acf384e42d8ca592474221ae",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 87,
"avg_line_length": 39,
"alnum_prop": 0.5934065934065934,
"repo_name": "plotly/plotly.py",
"id": "fce0b83675a5e33cee92d711cb9b859000fa1751",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergeo/_locationmode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from sklearn_explain.tests.skl_datasets import skl_datasets_test as skltest
# Run the shared classification test harness for the "SVC_sigmoid_10"
# model configuration on the iris dataset.
skltest.test_class_dataset_and_model("iris" , "SVC_sigmoid_10")
|
{
"content_hash": "778ad79fc9ba7ae73c2a5624a51910f0",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 75,
"avg_line_length": 35.5,
"alnum_prop": 0.7816901408450704,
"repo_name": "antoinecarme/sklearn_explain",
"id": "bf916c55bb809050d42914f23089b67b380f50bf",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/skl_datasets/iris/skl_dataset_iris_SVC_sigmoid_10_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "110343"
}
],
"symlink_target": ""
}
|
# Fail early with a pointer to the install instructions when setuptools
# is unavailable.
try:
    from setuptools import setup
except ImportError :
    raise ImportError("setuptools module required, please go to https://pypi.python.org/pypi/setuptools and follow the instructions for installing setuptools")
# Package metadata for datesplitter; runtime deps are python-crfsuite and lxml.
setup(
    version='0.1',
    url='',
    description='',
    name='datesplitter',
    packages=['datesplitter'],
    license='The MIT License: http://www.opensource.org/licenses/mit-license.php',
    install_requires=['python-crfsuite>=0.7',
                      'lxml'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 2 :: Only',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Information Analysis']
)
|
{
"content_hash": "bba54f93b3da231929cc0bf3f9b58ece",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 159,
"avg_line_length": 40.03448275862069,
"alnum_prop": 0.6253229974160207,
"repo_name": "jernsthausen/datesplitter",
"id": "00f21d2b1e83c40f954cdd5c4a1ed7d884da4245",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "38306"
},
{
"name": "Python",
"bytes": "2897353"
},
{
"name": "Shell",
"bytes": "3733"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
app = FastAPI()
# Serve files from the local "static" directory under the /static URL prefix.
app.mount("/static", StaticFiles(directory="static"), name="static")
# Jinja2 templates are loaded from the local "templates" directory.
templates = Jinja2Templates(directory="templates")
@app.get("/items/{id}", response_class=HTMLResponse)
async def read_item(request: Request, id: str):
    """Render the item page for the given *id* with the Jinja2 template."""
    context = {"request": request, "id": id}
    return templates.TemplateResponse("item.html", context)
|
{
"content_hash": "5f4b02334c9240e6676b2047ed517979",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 31.0625,
"alnum_prop": 0.7686116700201208,
"repo_name": "tiangolo/fastapi",
"id": "245e7110b195d257b8a93b063124465627f1f765",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs_src/templates/tutorial001.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
}
|
"""distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
__revision__ = "$Id: bdist_wininst.py 86223 2010-11-05 23:51:56Z eric.araujo $"
import sys, os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_wininst(Command):
    """Implement the distutils 'bdist_wininst' command.

    Performs a pseudo-installation into a temporary tree, zips that
    tree, and appends the zip plus an INI-style configuration block to
    a prebuilt wininst-x.y.exe stub shipped next to this module,
    producing a self-extracting Windows installer.
    """
    description = "create an executable installer for MS Windows"
    # (long-option, short-option, help-text) triples for the distutils
    # option parser.
    user_options = [('bdist-dir=', None,
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('target-version=', None,
                     "require a specific python version" +
                     " on the target system"),
                    ('no-target-compile', 'c',
                     "do not compile .py to .pyc on the target system"),
                    ('no-target-optimize', 'o',
                     "do not compile .py to .pyo (optimized)"
                     "on the target system"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('bitmap=', 'b',
                     "bitmap to use for the installer instead of python-powered logo"),
                    ('title=', 't',
                     "title to display on the installer background instead of default"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('install-script=', None,
                     "basename of installation script to be run after"
                     "installation or before deinstallation"),
                    ('pre-install-script=', None,
                     "Fully qualified filename of a script to be run before "
                     "any files are installed. This script need not be in the "
                     "distribution"),
                    ('user-access-control=', None,
                     "specify Vista's UAC handling - 'none'/default=no "
                     "handling, 'auto'=use UAC if target Python installed for "
                     "all users, 'force'=always use UAC"),
                   ]
    boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
                       'skip-build']
    def initialize_options(self):
        """Set every command option to its pre-parse default."""
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.no_target_compile = 0
        self.no_target_optimize = 0
        self.target_version = None
        self.dist_dir = None
        self.bitmap = None
        self.title = None
        self.skip_build = 0
        self.install_script = None
        self.pre_install_script = None
        self.user_access_control = None
    def finalize_options(self):
        """Derive unset options from the 'bdist' command and validate
        target_version / install_script."""
        if self.bdist_dir is None:
            if self.skip_build and self.plat_name:
                # If build is skipped and plat_name is overridden, bdist will
                # not see the correct 'plat_name' - so set that up manually.
                bdist = self.distribution.get_command_obj('bdist')
                bdist.plat_name = self.plat_name
                # next the command will be initialized using that name
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'wininst')
        if not self.target_version:
            self.target_version = ""
        # With extension modules in the build, the installer is tied to
        # the running interpreter's version unless the build is skipped.
        if not self.skip_build and self.distribution.has_ext_modules():
            short_version = get_python_version()
            if self.target_version and self.target_version != short_version:
                raise DistutilsOptionError(
                      "target version can only be %s, or the '--skip-build'" \
                      " option must be specified" % (short_version,))
            self.target_version = short_version
        self.set_undefined_options('bdist',
                                   ('dist_dir', 'dist_dir'),
                                   ('plat_name', 'plat_name'),
                                  )
        if self.install_script:
            # install_script must name one of the distribution's scripts.
            for script in self.distribution.scripts:
                if self.install_script == os.path.basename(script):
                    break
            else:
                raise DistutilsOptionError(
                      "install_script '%s' not found in scripts"
                      % self.install_script)
    def run(self):
        """Build (unless skipped), pseudo-install into bdist_dir, zip
        the tree, and wrap the zip in the wininst stub exe."""
        if (sys.platform != "win32" and
            (self.distribution.has_ext_modules() or
             self.distribution.has_c_libraries())):
            raise DistutilsPlatformError \
                  ("distribution contains extensions and/or C libraries; "
                   "must be compiled on a Windows 32 platform")
        if not self.skip_build:
            self.run_command('build')
        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.root = self.bdist_dir
        install.skip_build = self.skip_build
        install.warn_dir = 0
        install.plat_name = self.plat_name
        install_lib = self.reinitialize_command('install_lib')
        # we do not want to include pyc or pyo files
        install_lib.compile = 0
        install_lib.optimize = 0
        if self.distribution.has_ext_modules():
            # If we are building an installer for a Python version other
            # than the one we are currently running, then we need to ensure
            # our build_lib reflects the other Python version rather than ours.
            # Note that for target_version!=sys.version, we must have skipped the
            # build step, so there is no issue with enforcing the build of this
            # version.
            target_version = self.target_version
            if not target_version:
                assert self.skip_build, "Should have already checked this"
                target_version = sys.version[0:3]
            plat_specifier = ".%s-%s" % (self.plat_name, target_version)
            build = self.get_finalized_command('build')
            build.build_lib = os.path.join(build.build_base,
                                           'lib' + plat_specifier)
        # Use a custom scheme for the zip-file, because we have to decide
        # at installation time which scheme to use.
        for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
            value = key.upper()
            if key == 'headers':
                value = value + '/Include/$dist_name'
            setattr(install,
                    'install_' + key,
                    value)
        log.info("installing to %s", self.bdist_dir)
        install.ensure_finalized()
        # avoid warning of 'install_lib' about installing
        # into a directory not in sys.path
        sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
        install.run()
        del sys.path[0]
        # And make an archive relative to the root of the
        # pseudo-installation tree.
        from tempfile import mktemp
        # NOTE(review): mktemp() only reserves a *name* and is
        # race-prone/insecure; make_archive writes the zip to it below.
        archive_basename = mktemp()
        fullname = self.distribution.get_fullname()
        arcname = self.make_archive(archive_basename, "zip",
                                    root_dir=self.bdist_dir)
        # create an exe containing the zip-file
        self.create_exe(arcname, fullname, self.bitmap)
        if self.distribution.has_ext_modules():
            pyversion = get_python_version()
        else:
            pyversion = 'any'
        self.distribution.dist_files.append(('bdist_wininst', pyversion,
                                             self.get_installer_filename(fullname)))
        # remove the zip-file again
        log.debug("removing temporary file '%s'", arcname)
        os.remove(arcname)
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
    def get_inidata(self):
        """Return the INI-format text (metadata + [Setup] section) that
        is embedded in the installer and read by the wininst stub."""
        # Return data describing the installation.
        lines = []
        metadata = self.distribution.metadata
        # Write the [metadata] section.
        lines.append("[metadata]")
        # 'info' will be displayed in the installer's dialog box,
        # describing the items to be installed.
        info = (metadata.long_description or '') + '\n'
        # Escape newline characters
        def escape(s):
            return s.replace("\n", "\\n")
        for name in ["author", "author_email", "description", "maintainer",
                     "maintainer_email", "name", "url", "version"]:
            data = getattr(metadata, name, "")
            if data:
                info = info + ("\n %s: %s" % \
                              (name.capitalize(), escape(data)))
                lines.append("%s=%s" % (name, escape(data)))
        # The [setup] section contains entries controlling
        # the installer runtime.
        lines.append("\n[Setup]")
        if self.install_script:
            lines.append("install_script=%s" % self.install_script)
        lines.append("info=%s" % escape(info))
        lines.append("target_compile=%d" % (not self.no_target_compile))
        lines.append("target_optimize=%d" % (not self.no_target_optimize))
        if self.target_version:
            lines.append("target_version=%s" % self.target_version)
        if self.user_access_control:
            lines.append("user_access_control=%s" % self.user_access_control)
        title = self.title or self.distribution.get_fullname()
        lines.append("title=%s" % escape(title))
        import time
        import distutils
        build_info = "Built %s with distutils-%s" % \
                     (time.ctime(time.time()), distutils.__version__)
        lines.append("build_info=%s" % build_info)
        return "\n".join(lines)
    def create_exe(self, arcname, fullname, bitmap=None):
        """Concatenate stub exe + optional bitmap + config data +
        struct header + zip archive into the final installer file."""
        import struct
        self.mkpath(self.dist_dir)
        cfgdata = self.get_inidata()
        installer_name = self.get_installer_filename(fullname)
        self.announce("creating %s" % installer_name)
        if bitmap:
            # NOTE(review): file object is never explicitly closed.
            bitmapdata = open(bitmap, "rb").read()
            bitmaplen = len(bitmapdata)
        else:
            bitmaplen = 0
        # NOTE(review): 'file' (shadows the py2 builtin) is never closed.
        file = open(installer_name, "wb")
        file.write(self.get_exe_bytes())
        if bitmap:
            file.write(bitmapdata)
        # Convert cfgdata from unicode to ascii, mbcs encoded
        if isinstance(cfgdata, str):
            cfgdata = cfgdata.encode("mbcs")
        # Append the pre-install script
        cfgdata = cfgdata + b"\0"
        if self.pre_install_script:
            # We need to normalize newlines, so we open in text mode and
            # convert back to bytes. "latin1" simply avoids any possible
            # failures.
            with open(self.pre_install_script, "r",
                      encoding="latin1") as script:
                script_data = script.read().encode("latin1")
            cfgdata = cfgdata + script_data + b"\n\0"
        else:
            # empty pre-install script
            cfgdata = cfgdata + b"\0"
        file.write(cfgdata)
        # The 'magic number' 0x1234567B is used to make sure that the
        # binary layout of 'cfgdata' is what the wininst.exe binary
        # expects. If the layout changes, increment that number, make
        # the corresponding changes to the wininst.exe sources, and
        # recompile them.
        header = struct.pack("<iii",
                             0x1234567B, # tag
                             len(cfgdata), # length
                             bitmaplen, # number of bytes in bitmap
                             )
        file.write(header)
        # NOTE(review): the archive is read via an unclosed open() too.
        file.write(open(arcname, "rb").read())
    def get_installer_filename(self, fullname):
        """Return the output .exe path; includes the target Python
        version when one was requested."""
        # Factored out to allow overriding in subclasses
        if self.target_version:
            # if we create an installer for a specific python version,
            # it's better to include this in the name
            installer_name = os.path.join(self.dist_dir,
                                          "%s.%s-py%s.exe" %
                                           (fullname, self.plat_name, self.target_version))
        else:
            installer_name = os.path.join(self.dist_dir,
                                          "%s.%s.exe" % (fullname, self.plat_name))
        return installer_name
    def get_exe_bytes(self):
        """Return the raw bytes of the matching wininst-x.y[-suffix].exe
        stub located next to this module."""
        from distutils.msvccompiler import get_build_version
        # If a target-version other than the current version has been
        # specified, then using the MSVC version from *this* build is no good.
        # Without actually finding and executing the target version and parsing
        # its sys.version, we just hard-code our knowledge of old versions.
        # NOTE: Possible alternative is to allow "--target-version" to
        # specify a Python executable rather than a simple version string.
        # We can then execute this program to obtain any info we need, such
        # as the real sys.version string for the build.
        cur_version = get_python_version()
        if self.target_version and self.target_version != cur_version:
            # If the target version is *later* than us, then we assume they
            # use what we use
            # string compares seem wrong, but are what sysconfig.py itself uses
            if self.target_version > cur_version:
                bv = get_build_version()
            else:
                if self.target_version < "2.4":
                    bv = 6.0
                else:
                    bv = 7.1
        else:
            # for current version - use authoritative check.
            bv = get_build_version()
        # wininst-x.y.exe is in the same directory as this file
        directory = os.path.dirname(__file__)
        # we must use a wininst-x.y.exe built with the same C compiler
        # used for python. XXX What about mingw, borland, and so on?
        # if plat_name starts with "win" but is not "win32"
        # we want to strip "win" and leave the rest (e.g. -amd64)
        # for all other cases, we don't want any suffix
        if self.plat_name != 'win32' and self.plat_name[:3] == 'win':
            sfix = self.plat_name[3:]
        else:
            sfix = ''
        filename = os.path.join(directory, "wininst-%.1f%s.exe" % (bv, sfix))
        f = open(filename, "rb")
        try:
            return f.read()
        finally:
            f.close()
|
{
"content_hash": "d4ab2b7f00a4243c170e7c41f6bc2871",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 91,
"avg_line_length": 42.92219020172911,
"alnum_prop": 0.5519672351282395,
"repo_name": "MalloyPower/parsing-python",
"id": "cfa65e4a1b91288e3c97c36083d732c31131bd6a",
"size": "14894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.2/Lib/distutils/command/bdist_wininst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
import warnings
warnings.warn(
"The django.test._doctest module is deprecated; "
"use the doctest module from the Python standard library instead.",
DeprecationWarning)
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from django.utils import six
from django.utils.six.moves import StringIO, xrange
if sys.platform.startswith('java'):
    # On Jython, isclass() reports some modules as classes. Patch it.
    def patch_isclass(isclass):
        # Wrap the original predicate: only objects that also carry a
        # __module__ attribute are treated as classes.
        def patched_isclass(obj):
            return isclass(obj) and hasattr(obj, '__module__')
        return patched_isclass
    inspect.isclass = patch_isclass(inspect.isclass)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
    """Return the option-flag bit for `name`, allocating the next free
    bit the first time the name is seen."""
    if name not in OPTIONFLAGS_BY_NAME:
        OPTIONFLAGS_BY_NAME[name] = 1 << len(OPTIONFLAGS_BY_NAME)
    return OPTIONFLAGS_BY_NAME[name]
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, six.string_types):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if hasattr(package, '__loader__'):
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
with open(filename) as fp:
return fp.read(), filename
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    # Stand-in for sys.stdout while examples run: guarantees captured
    # output ends with a newline and clears the Python-2 print
    # "softspace" state so one example cannot leak it into the next.
    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure there's a trailing
        # newline. There's no way for the expected output to indicate
        # that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        # Prevent softspace from screwing up the next test case, in
        # case they used print with a trailing comma in an example.
        if hasattr(self, "softspace"):
            del self.softspace
        return result
    def truncate(self, size=None):
        StringIO.truncate(self, size)
        # Truncation also invalidates any pending softspace state.
        if hasattr(self, "softspace"):
            del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Match `want` against `got`, where each '...' in `want` may match
    any substring of `got` (including the empty string).

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got
    pieces = want.split(ELLIPSIS_MARKER)
    assert len(pieces) >= 2
    lo, hi = 0, len(got)
    # A non-empty first piece must match exactly at the start.
    head = pieces[0]
    if head:
        if not got.startswith(head):
            return False
        lo = len(head)
        del pieces[0]
    # A non-empty last piece must match exactly at the end.
    tail = pieces[-1]
    if tail:
        if not got.endswith(tail):
            return False
        hi -= len(tail)
        del pieces[-1]
    if lo > hi:
        # The fixed ends together need more characters than `got` has,
        # as in _ellipsis_match('aa...aa', 'aaa').
        return False
    # Each remaining piece needs only its leftmost non-overlapping
    # match; if no overall match exists that way, none exists at all.
    for piece in pieces:
        # `piece` may be '' (consecutive ellipses, or an ellipsis at
        # either end of `want`).  find('') succeeds without advancing
        # `lo`, which is exactly the behavior we want.
        lo = got.find(piece, lo, hi)
        if lo < 0:
            return False
        lo += len(piece)
    return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # `out` receives all debugger UI output.  The double-underscore
        # attributes are name-mangled, so subclasses cannot collide.
        self.__out = out
        self.__debugger_used = False
        pdb.Pdb.__init__(self, stdout=out)
    def set_trace(self, frame=None):
        # Record that the debugger was actually entered (see
        # set_continue below), then delegate to pdb.
        self.__debugger_used = True
        if frame is None:
            # Default to the caller's frame, like pdb.set_trace().
            frame = sys._getframe().f_back
        pdb.Pdb.set_trace(self, frame)
    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)
    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            # Always restore stdout, even if dispatch raises.
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError('Expected a module: %r' % module)
if path.startswith('/'):
raise ValueError('Module-relative files may not have absolute paths')
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: one piece of source code paired with its
    expected output.

    Attributes:
      source  -- a single Python statement, always newline-terminated
                 (the constructor appends '\\n' when missing).
      want    -- expected output (stdout text, or a traceback when an
                 exception is expected); newline-terminated unless it
                 is the empty string.
      exc_msg -- the exception message the example is expected to
                 produce (compared against the output of
                 traceback.format_exception_only()), newline-terminated,
                 or None when no exception is expected.
      lineno  -- zero-based line offset of this example within the
                 enclosing DocTest string.
      indent  -- number of space characters before the example's first
                 prompt in the DocTest string.
      options -- per-example overrides of the runner's option flags,
                 mapping flag -> True/False; empty by default.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: store source/want/exc_msg newline-terminated.
        if not source.endswith('\n'):
            source = source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        self.source = source
        self.want = want
        self.exc_msg = exc_msg
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:
      - examples: the list of examples.
      - globs: The namespace (aka globals) that the examples should
        be run in.
      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).
      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.
      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.
      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, six.string_types), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # Copy so that running the examples cannot mutate the caller's
        # namespace dict.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno
    def __repr__(self):
        # Human-readable summary: name, location, and example count.
        if len(self.examples) == 0:
            examples = 'no examples'
        elif len(self.examples) == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % len(self.examples)
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))
    # This lets us sort tests by name:
    def _cmpkey(self):
        # id(self) breaks ties so distinct objects never compare equal.
        return (self.name, self.filename, self.lineno, id(self))
    def __cmp__(self, other):
        # Python 2 comparison protocol; on Python 3 the rich
        # comparison methods below are used instead.
        if not isinstance(other, DocTest):
            return -1
        return cmp(self._cmpkey(), other._cmpkey())
    def __lt__(self, other):
        return self._cmpkey() < other._cmpkey()
    def __le__(self, other):
        return self._cmpkey() <= other._cmpkey()
    def __gt__(self, other):
        return self._cmpkey() > other._cmpkey()
    def __ge__(self, other):
        return self._cmpkey() >= other._cmpkey()
    def __eq__(self, other):
        return self._cmpkey() == other._cmpkey()
    def __ne__(self, other):
        return self._cmpkey() != other._cmpkey()
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value if its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.

        Note: comment-only examples are dropped from the result but
        still advance the line counter.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
        output = []
        # charno/lineno track the position just past the previous
        # example, in characters and lines respectively.
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                       lineno=lineno,
                                       indent=min_indent+len(m.group('indent')),
                                       options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Extract one example from a regular expression match of
        `_EXAMPLE_RE` (`m`).

        Return a 4-tuple `(source, options, want, exc_msg)`, where
        `source` is the matched example's source code (with prompts and
        indentation stripped); `options` is a dict mapping option flags
        to booleans (see `_find_options`); `want` is the example's
        expected output (with indentation stripped); and `exc_msg` is
        the expected exception message extracted from `want`, or None
        if no traceback is expected.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))
        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # Each line drops `indent` spaces plus the 4-character prompt
        # ('>>> ' or '... ').
        source = '\n'.join([sl[indent+4:] for sl in source_lines])
        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])
        # If `want` contains a traceback message, then extract it.
        # (Note: `m` is rebound here to the exception-pattern match.)
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None
        # Extract options from the source.
        options = self._find_options(source, name, lineno)
        return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """

    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty

    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests: if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        return tests

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.isfunction(object):
            return module.__dict__ is six.get_function_globals(object)
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print('Finding tests in %s' % name)

        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, six.string_types):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, six.string_types)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).__func__

                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, six.string_types):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, six.string_types):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''

        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)

        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)

    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        """
        lineno = None

        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0

        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break

        # Find the line number for functions & methods.
        if inspect.ismethod(obj): obj = obj.__func__
        if inspect.isfunction(obj): obj = six.get_function_code(obj)
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = getattr(obj, 'co_firstlineno', None)-1

        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            # Raw string: `\s` / `\w` in a non-raw literal are invalid
            # escape sequences and warn on modern Python.
            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

    >>> tests = DocTestFinder().find(_TestClass)
    >>> runner = DocTestRunner(verbose=False)
    >>> for test in tests:
    ...     print(runner.run(test))
    (0, 2)
    (0, 1)
    (0, 2)
    (0, 2)

    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:

    >>> runner.summarize(verbose=1)
    4 items passed all tests:
       2 tests in _TestClass
       2 tests in _TestClass.__init__
       2 tests in _TestClass.get
       1 tests in _TestClass.square
    7 tests in 4 items.
    7 passed and 0 failed.
    Test passed.
    (0, 7)

    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:

    >>> runner.tries
    7
    >>> runner.failures
    0

    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.

    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        self._name2ft = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        # Build the standard "File ..., line ..., in ..." header that
        # precedes every failure report.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Doctest and Py3 issue:
            # If the current example that we wish to run is going to fail
            # because it expects a leading u"", then use an alternate displayhook
            original_displayhook = sys.displayhook

            if six.PY3:
                # only set alternate displayhook if Python 3.x or after
                lines = []
                def py3_displayhook(value):
                    if value is None:
                        # None should not be considered at all
                        return original_displayhook(value)

                    # Collect the repr output in one variable
                    s = repr(value)
                    # Strip b"" and u"" prefixes from the repr and expected output
                    # TODO: better way of stripping the prefixes?
                    # NOTE(review): this removes EVERY "u"/"b" character from
                    # both strings, not just literal prefixes, so examples
                    # whose values legitimately contain those letters compare
                    # with them removed on both sides.  Crude, but applied
                    # symmetrically -- left as-is to preserve behavior.
                    expected = example.want
                    expected = expected.strip() # be wary of newlines
                    s = s.replace("u", "")
                    s = s.replace("b", "")
                    expected = expected.replace("u", "")
                    expected = expected.replace("b", "")
                    # single quote vs. double quote should not matter
                    # default all quote marks to double quote
                    s = s.replace("'", '"')
                    expected = expected.replace("'", '"')

                    # In case of multi-line expected result
                    lines.append(s)

                    # let them match
                    if s == expected:     # be wary of false positives here
                        # they should be the same, print expected value
                        sys.stdout.write("%s\n" % example.want.strip())
                    # multi-line expected output, doctest uses loop
                    elif len(expected.split("\n")) == len(lines):
                        if "\n".join(lines) == expected:
                            sys.stdout.write("%s\n" % example.want.strip())
                        else:
                            sys.stdout.write("%s\n" % repr(value))
                    elif len(expected.split("\n")) != len(lines):
                        # we are not done looping yet, do not print anything!
                        pass
                    else:
                        sys.stdout.write("%s\n" % repr(value))
                sys.displayhook = py3_displayhook

            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                six.exec_(compile(example.source, filename, "single",
                                  compileflags, 1), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                # Deliberate bare except: any other exception is captured
                # and later compared against the example's expected one.
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            finally:
                # restore the original displayhook
                sys.displayhook = original_displayhook

            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)

            # Python 3.1 requires seek after truncate
            self._fakeout.seek(0)

            outcome = FAILURE   # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception:  check if it was expected.
            else:
                exc_msg = traceback.format_exception_only(*exception[:2])[-1]

                if six.PY3:
                    # module name will be in group(1) and the expected
                    # exception message will be in group(2)
                    m = re.match(r'(.*)\.(\w+:.+\s)', exc_msg)
                    # make sure there's a match
                    if m is not None:
                        f_name = m.group(1)
                        # check to see if m.group(1) contains the module name
                        if f_name == exception[0].__module__:
                            # strip the module name from exc_msg
                            exc_msg = m.group(2)

                if not quiet:
                    got += _exception_traceback(exception)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exception)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t

    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Serve the example's own source for our synthetic <doctest ...>
        # filenames; delegate everything else to the saved real getlines.
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename, module_globals)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        try:
            return self.__run(test, compileflags, out)
        finally:
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()

    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print("%d items had no tests:" % len(notests))
                notests.sort()
                for thing in notests:
                    print("    %s" % thing)
            if passed:
                print("%d items passed all tests:" % len(passed))
                passed.sort()
                for thing, count in passed:
                    print(" %3d tests in %s" % (count, thing))
        if failed:
            print(self.DIVIDER)
            print("%d items had failures:" % len(failed))
            failed.sort()
            for thing, (f, t) in failed:
                print(" %3d of %3d in %s" % (f, t, thing))
        if verbose:
            # Fixed: arguments were swapped ("%d tests in % d items" %
            # (len(self._name2ft), totalt)), which printed the item count
            # as the test count and vice versa, contradicting the class
            # docstring's "7 tests in 4 items." example.
            print("%d tests in %d items." % (totalt, len(self._name2ft)))
            print("%d passed and %d failed." % (totalt - totalf, totalf))
        if totalf:
            print("***Test Failed*** %d failures." % totalf)
        elif verbose:
            print("Test passed.")
        return totalf, totalt

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        # Fold another runner's per-test (failures, tries) counts into ours.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print("*** DocTestRunner.merge: '" + name + "' in both" \
                      " testers; summing outcomes.")
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        # (Raw strings below: `\s` in a non-raw literal is an invalid
        # escape sequence and warns on modern Python.)
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False

    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False

        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False

        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True

        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2

    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub(r'(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))

        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised (in debugging mode) when an example's actual output does
    not match its expected output.

    Attributes:
      test:    the DocTest object being run
      example: the Example whose output did not match
      got:     the actual output that was produced
    """

    def __init__(self, test, example, got):
        self.got = got
        self.example = example
        self.test = test

    def __str__(self):
        # Identify the failing DocTest, not the mismatched output.
        return str(self.test)
class UnexpectedException(Exception):
    """Raised (in debugging mode) when an example raises an exception
    that its expected output did not predict.

    Attributes:
      test:     the DocTest object being run
      example:  the Example that raised
      exc_info: the (type, value, traceback) triple of the original
                exception
    """

    def __init__(self, test, example, exc_info):
        self.exc_info = exc_info
        self.example = example
        self.test = test

    def __str__(self):
        # Identify the DocTest in which the exception occurred.
        return str(self.test)
class DebugRunner(DocTestRunner):
    """A DocTestRunner that aborts at the first problem.

    Instead of recording results, report_failure raises DocTestFailure
    and report_unexpected_exception raises UnexpectedException; both
    carry the DocTest, the Example and (for exceptions) the original
    exc_info so the calling application can do post-mortem debugging.

    Globals handling: after a clean run the test globals are cleared
    (unless clear_globs=False); when a failure or exception aborts the
    run, the globals are deliberately left intact for inspection.
    """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Always invoke the base class with clear_globs=False: if an
        # example fails, we want test.globs preserved for debugging.
        result = DocTestRunner.run(self, test, compileflags, out, False)
        # Only a run that completed without raising gets here; clear
        # the globals now, if the caller asked for that.
        if clear_globs:
            test.globs.clear()
        return result

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort immediately, surfacing the original exception info.
        raise UnexpectedException(test, example, exc_info)

    def report_failure(self, out, test, example, got):
        # Abort immediately on the first output mismatch.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
# `master` accumulates merged results across testmod()/testfile() calls
# so a final doctest.master.summarize() can report on every test run in
# this process.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """Run the doctest examples found in module `m`.

    Examples are collected from docstrings reachable from `m` (the
    `__main__` module when `m` is omitted), starting with `m.__doc__`,
    and also from the optional dict `m.__test__`, which maps names to
    functions, classes and strings (functions and classes are searched
    even if private; strings are treated directly as docstrings).

    Returns a `(#failures, #tests)` pair.

    Keyword arguments:
      name           -- name for the module; defaults to `m.__name__`.
      globs          -- globals dict for the examples (a fresh copy is
                        used per docstring); defaults to `m.__dict__`.
      extraglobs     -- extra names merged into those globals.
      verbose        -- print everything when true, only failures when
                        false; defaults to `'-v' in sys.argv`.
      report         -- print a summary at the end (detailed in verbose
                        mode, brief otherwise).
      optionflags    -- or'ed doctest option flags (ELLIPSIS, SKIP,
                        REPORT_*, ...); defaults to 0.
      raise_on_error -- raise on the first failure or unexpected
                        exception, allowing post-mortem debugging,
                        instead of reporting and continuing.
      exclude_empty  -- skip objects that contain no doctests.

    Results are also merged into the module-global `master` runner so
    doctest.master.summarize(verbose) can report across calls.
    """
    global master

    if m is None:
        # Fall back to the importing script's module.  m may still end
        # up None if we weren't invoked from the command line, in which
        # case the TypeError below is about as good an error as any.
        m = sys.modules.get('__main__')

    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    if name is None:
        name = m.__name__

    # Collect and run every test in the module.
    finder = DocTestFinder(exclude_empty=exclude_empty)
    runner_class = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)
    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Fold this run into the global accumulator.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """Run the doctest examples in the given file.

    Returns a `(#failures, #tests)` pair.

    Keyword arguments:
      module_relative -- when true (the default), `filename` is an
          os-independent, "/"-separated path relative to the calling
          module's directory (or to `package`, when given) and may not
          be absolute; when false, it is an os-specific path resolved
          against the current working directory.
      name       -- name for the test; defaults to the file's basename.
      package    -- package (or package name) anchoring module-relative
          paths; may only be given together with module_relative=True.
      globs      -- globals dict for the examples (a copy is used);
          defaults to {}.
      extraglobs -- extra names merged into those globals.
      verbose    -- print everything when true, only failures when
          false; defaults to `'-v' in sys.argv`.
      report     -- print a summary at the end.
      optionflags -- or'ed doctest option flags; defaults to 0.
      raise_on_error -- raise on the first failure or unexpected
          exception instead of reporting and continuing, allowing
          post-mortem debugging.
      parser     -- DocTestParser (or subclass) used to extract tests.
      encoding   -- encoding used to decode the file to unicode.

    Results are also merged into the module-global `master` runner so
    doctest.master.summarize(verbose) can report across calls.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate and read the test file (also resolves the final filename).
    text, filename = _load_testfile(filename, package, module_relative)

    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals the examples will run in, never mutating the
    # caller's dict.
    globs = {} if globs is None else globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)

    runner_class = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)

    if encoding is not None:
        text = text.decode(encoding)

    # Parse the whole file into a single DocTest and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Fold this run into the global accumulator.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """Run the examples in the docstring of object `f`, using `globs`
    as the globals.

    `name` is used in failure messages.  When `verbose` is true,
    output is produced even if all examples pass.  `compileflags`
    overrides the compiler flags used for the examples (default: the
    future-import flags that apply to `globs`).  `optionflags` are
    standard doctest option flags; see `testmod` for details.
    """
    # Only f itself is searched (recurse=False): contained objects are
    # deliberately ignored.
    doc_finder = DocTestFinder(verbose=verbose, recurse=False)
    doc_runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for doc_test in doc_finder.find(f, name, globs=globs):
        doc_runner.run(doc_test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
    """Deprecated compatibility shim for the pre-2.4 doctest API.

    Wraps a DocTestFinder/DocTestRunner pair behind the old Tester
    interface.  Construction emits a DeprecationWarning; new code
    should use DocTestRunner directly.

    Parameters:
      mod         -- module whose __dict__ supplies the example globals
                     (required unless `globs` is given).
      globs       -- explicit globals dict for the examples.
      verbose     -- passed through to DocTestRunner.
      optionflags -- or'ed doctest option flags.
    """

    def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs
        self.verbose = verbose
        self.optionflags = optionflags
        self.testfinder = DocTestFinder()
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)

    def runstring(self, s, name):
        # Parse and run the examples embedded in the string `s`;
        # returns (#failures, #tries).
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print("Running string %s" % name)
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print("%s of %s examples failed in string %s" % (f, t, name))
        return (f,t)

    def rundoc(self, object, name=None, module=None):
        # Run every doctest reachable from `object`; returns the summed
        # (#failures, #tries) over all of them.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return (f,t)

    def rundict(self, d, name, module=None):
        import types
        # BUG FIX: the Python-2-only `new` module was removed in
        # Python 3; types.ModuleType is the portable replacement for
        # new.module.  Build a throwaway module holding d's contents.
        m = types.ModuleType(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)

    def run__test__(self, d, name):
        import types
        # Same portability fix as rundict: synthesize the module with
        # types.ModuleType instead of the removed `new` module.
        m = types.ModuleType(name)
        m.__test__ = d
        return self.rundoc(m, name)

    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)

    def merge(self, other):
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
# Default reporting flags applied to DocTestCase runs whose own
# optionflags carry no reporting flags; changed only through
# set_unittest_reportflags() below.
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> old = _unittest_reportflags
    >>> set_unittest_reportflags(REPORT_NDIFF |
    ...                          REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> import doctest
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags
    # Reject anything that isn't purely a combination of the REPORT_*
    # reporting flags.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    # Swap in the new module-wide default and hand back the previous one.
    old = _unittest_reportflags
    _unittest_reportflags = flags
    return old
class DocTestCase(unittest.TestCase):
    """A unittest.TestCase wrapper around a single DocTest."""

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None, runner=DocTestRunner):
        unittest.TestCase.__init__(self)
        self._dt_test = test
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_runner = runner
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown

    def setUp(self):
        # The user-supplied setUp hook receives the DocTest object (and
        # can reach the test globals through its `globs` attribute).
        if self._dt_setUp is not None:
            self._dt_setUp(self._dt_test)

    def tearDown(self):
        # The user-supplied tearDown hook also receives the DocTest.
        if self._dt_tearDown is not None:
            self._dt_tearDown(self._dt_test)
        # Release the test globals regardless of outcome.
        self._dt_test.globs.clear()

    def runTest(self):
        test = self._dt_test
        saved_stdout = sys.stdout
        capture = StringIO()

        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # No reporting flags were requested, so apply the
            # module-wide defaults set via set_unittest_reportflags().
            optionflags |= _unittest_reportflags

        runner = self._dt_runner(optionflags=optionflags,
                                 checker=self._dt_checker, verbose=False)
        try:
            runner.DIVIDER = "-" * 70
            failures, tries = runner.run(
                test, out=capture.write, clear_globs=False)
        finally:
            sys.stdout = saved_stdout

        if failures:
            raise self.failureException(self.format_failure(capture.getvalue()))

    def format_failure(self, err):
        # Build a unittest-style failure message locating the doctest.
        test = self._dt_test
        lineno = 'unknown line number' if test.lineno is None else '%s' % test.lineno
        lname = test.name.split('.')[-1]
        return ('Failed doctest test for %s\n'
                ' File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )

    def debug(self):
        """Run the test without collecting results and without catching
        exceptions, to support post-mortem debugging.

        A mismatching example raises DocTestFailure; an example that
        raises unexpectedly is wrapped in UnexpectedException.  Both
        carry the test, the example, and (for exceptions) the original
        exc_info, so the caller can inspect exactly what went wrong.
        """
        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()

    def id(self):
        return self._dt_test.name

    def __repr__(self):
        parts = self._dt_test.name.split('.')
        return "%s (%s)" % (parts[-1], '.'.join(parts[:-1]))

    __str__ = __repr__

    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 test_class=DocTestCase, **options):
    """Build a unittest test suite from the doctests in `module`.

    Every documentation string in the module that contains doctest
    examples becomes one test case of type `test_class`; if any example
    in it fails, that case fails with a message naming the file and a
    (sometimes approximate) line number.

    `module` may be a module object or an importable module name, and
    defaults to the calling module.  `globs` provides the initial
    globals for the examples (default: the module's __dict__) and
    `extraglobs` is merged on top.  Remaining keyword `options`
    (setUp, tearDown, optionflags, ...) are forwarded to `test_class`.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Fail loudly: silently returning an empty suite would hide a
        # likely configuration bug.
        raise ValueError(module, "has no tests")

    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            # Point the case at the module source, not the bytecode.
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(test_class(test, **options))

    return suite
class DocFileCase(DocTestCase):
    """A DocTestCase for doctests loaded from a standalone text file
    rather than from a docstring."""

    def id(self):
        # File-based tests use underscores instead of dots in their id.
        return self._dt_test.name.replace('.', '_')

    def __repr__(self):
        return self._dt_test.filename

    __str__ = __repr__

    def format_failure(self, err):
        # File tests always report line 0 (the whole file is one test).
        return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
                % (self._dt_test.name, self._dt_test.filename, err)
                )
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    """Create a DocFileCase for the doctest file at `path`.

    See DocFileSuite for the meaning of the keyword arguments; the
    remaining `options` are forwarded to DocFileCase.
    """
    # Never mutate the caller's globals dict.
    globs = {} if globs is None else globs.copy()

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate and read the file (also resolves the final path).
    doc, path = _load_testfile(path, package, module_relative)

    # Make __file__ available to the examples unless already provided.
    globs.setdefault("__file__", path)

    name = os.path.basename(path)

    # Decode the raw text first if an encoding was requested.
    if encoding is not None:
        doc = doc.decode(encoding)

    # Parse the whole file as one test and wrap it for unittest.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """Build a unittest suite running the doctest files at `paths`.

    Keyword arguments:
      module_relative -- when true (the default) each path is an
          os-independent, "/"-separated path relative to the calling
          module's directory (or to `package`, when given) and may not
          be absolute; when false, paths are os-specific and resolved
          against the current working directory.
      package  -- package (or package name) anchoring module-relative
          paths; may only be used with module_relative=True.
      setUp / tearDown -- callables invoked before/after each file's
          tests; each receives the DocTest object, whose `globs`
          attribute exposes the test globals.
      globs    -- dict of initial global variables for the tests.
      optionflags -- or'ed doctest option flags.
      parser   -- DocTestParser (or subclass) used to extract the tests.
      encoding -- encoding used to convert the files to unicode.
    """
    suite = unittest.TestSuite()

    # Resolve the package here rather than inside DocFileTest so that
    # _normalize_module guesses from *our* caller, not from this module.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    for path in paths:
        suite.addTest(DocFileTest(path, **kw))

    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Convert text containing doctest examples into a Python script.

    Example source lines become ordinary code.  Expected output lines
    become '## '-prefixed comments, introduced by an '# Expected:'
    marker.  All other (narrative) text becomes '# '-prefixed comment
    lines.  Leading and trailing runs of empty comment lines are
    trimmed from the result, which is returned as a single string.
    """
    output = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # The example's source becomes real code (its trailing
            # newline is dropped; lines are joined at the end).
            output.append(piece.source[:-1])
            want = piece.want
            if want:
                # Expected output becomes double-commented lines.
                output.append('# Expected:')
                output.extend('## ' + line
                              for line in want.split('\n')[:-1])
        else:
            # Narrative text becomes single-commented lines.
            output.extend(_comment_line(line)
                          for line in piece.split('\n')[:-1])

    # Trim empty comment lines from both ends.
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)

    return '\n'.join(output)
def testsource(module, name):
    """Return the doctests of object `name` in `module`, rendered as a
    standalone Python script.

    `module` may be a module object or a dotted module name; `name` is
    the (module-qualified) name of the object whose docstring holds the
    tests to be debugged.  Raises ValueError if no matching test is
    found.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring, given as the string `src`."""
    # Convert the docstring to a script, then hand off to debug_script.
    debug_script(script_from_examples(src), pm, globs)
def debug_script(src, pm=False, globs=None):
    """Debug a test script.  `src` is the script, as a string.

    The script is written to a temporary file and executed in a copy of
    `globs` (or fresh globals).  With pm=True the script runs normally
    and the post-mortem debugger is entered only if it raises; with
    pm=False the whole script is run under pdb from the start.
    """
    import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and executing it needs to open it again.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    with open(srcfilename, 'w') as fp:
        fp.write(src)
    try:
        # Run in a copy so the caller's dict is never mutated.
        globs = globs.copy() if globs else {}
        if pm:
            try:
                # BUG FIX: execfile() was removed in Python 3; read and
                # exec the file contents instead (compile() keeps the
                # filename in tracebacks for the debugger).
                with open(srcfilename) as fp:
                    exec(compile(fp.read(), srcfilename, "exec"),
                         globs, globs)
            except:
                print(sys.exc_info()[1])
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here.  '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("exec(compile(open(%r).read(), %r, 'exec'))"
                    % (srcfilename, srcfilename), globs, globs)
    finally:
        os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug the doctests of object `name` within `module`.

    `module` may be a module object or a dotted module name; `name` is
    the (module-qualified) name of the object whose docstring tests are
    to be debugged.  `pm` selects post-mortem mode (see debug_script).
    """
    module = _normalize_module(module)
    # Render the tests as a script and run it in the module's namespace.
    debug_script(testsource(module, name), pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print(t.get())
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print(x.get())
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print('foo\n\nbar\n')
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print(range(1000)) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print(list(xrange(30))) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    """Run this module's own doctest suite under a unittest runner."""
    runner = unittest.TextTestRunner()
    runner.run(DocTestSuite())
# Allow "python _doctest.py" to exercise the module's own tests.
if __name__ == "__main__":
    _test()
|
{
"content_hash": "11376b5206fc520dfdeb1d01d6893ce5",
"timestamp": "",
"source": "github",
"line_count": 2742,
"max_line_length": 82,
"avg_line_length": 37.42013129102845,
"alnum_prop": 0.5659415628715669,
"repo_name": "makinacorpus/django",
"id": "50d772cfadc5590d90cefba4c426eb6cd9d9f909",
"size": "103156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/test/_doctest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "98175"
},
{
"name": "Python",
"bytes": "8391980"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
"""
Django settings for webapp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y38m4d0i%l284b--q^e85em@eo(!tn7%+of*a+8r)15&68oni!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'end_customer',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'webapp.urls'
WSGI_APPLICATION = 'webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "7c8ee3443900fddf53419ee7bf039478",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 71,
"avg_line_length": 24.547619047619047,
"alnum_prop": 0.7235693501454898,
"repo_name": "tejesh95/TransApp",
"id": "c633dd10fc6563758930b73c38909f6ef7b4bb7c",
"size": "2062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/webapp/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "624"
},
{
"name": "HTML",
"bytes": "1628"
},
{
"name": "JavaScript",
"bytes": "2613"
},
{
"name": "Python",
"bytes": "4505"
}
],
"symlink_target": ""
}
|
import os, sys, traceback
def _genAll(verbose=1):
    """Run the documentation-generator scripts (currently only the
    userguide), plus the pyRXP RML document when its source tree is
    present next to this checkout.

    NOTE(review): the .rml branch passes the module-global `d` as
    outDir; `d` is only assigned in the __main__ block below, so calling
    _genAll() from an importing module would raise NameError if an .rml
    file is found -- presumably script-only usage is intended; confirm.
    """
    from reportlab.lib.testutils import setOutDir
    setOutDir(__name__)
    from reportlab.lib.testutils import testsFolder
    topDir=os.path.dirname(testsFolder)
    # Generator scripts to run (reference/graphdoc builds are disabled).
    L = [os.path.join(topDir,f) for f in (
            #'docs/reference/genreference.py',
            'docs/userguide/genuserguide.py',
            #'tools/docco/graphdocpy.py',
            )
        ]
    # Append the first extra .rml document that exists on disk.
    for f in ('src/rl_addons/pyRXP/docs/PyRXP_Documentation.rml',
            ):
        f = os.path.join(topDir,f)
        if os.path.isfile(f):
            L += [f]
            break
    for p in L:
        # Each generator expects to be run from its own directory.
        os.chdir(os.path.dirname(p))
        if p[-4:]=='.rml':
            # RML documents are rendered in-process via rlextra (best
            # effort: any failure is only reported in verbose mode).
            try:
                from rlextra.rml2pdf.rml2pdf import main
                main(exe=0,fn=[os.path.basename(p)], quiet=not verbose, outDir=d)
            except:
                if verbose: traceback.print_exc()
        else:
            # Python generators run in a subprocess; '-s' asks the
            # child script to be silent.
            cmd = '"%s" %s %s' % (sys.executable,os.path.basename(p), not verbose and '-s' or '')
            if verbose: print(cmd)
            os.system(cmd)
"""Runs the manual-building scripts"""
if __name__=='__main__':
#need a quiet mode for the test suite
if '-s' in sys.argv: # 'silent
verbose = 0
else:
verbose = 1
d = os.path.dirname(sys.argv[0])
if not d:
d = os.getcwd()
elif not os.path.isabs(d):
d = os.path.abspath(d)
sys.path.insert(0,os.path.dirname(d))
_genAll(verbose)
|
{
"content_hash": "00f3d2ee6207e7d460a80c04180cef5b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 97,
"avg_line_length": 32.86666666666667,
"alnum_prop": 0.5409060175794456,
"repo_name": "Distrotech/reportlab",
"id": "18b74ea1ff4568f8a65c3bd96630182fc8b071fd",
"size": "1497",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "docs/genAll.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "721758"
},
{
"name": "C++",
"bytes": "668"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "2988317"
},
{
"name": "Shell",
"bytes": "2506"
}
],
"symlink_target": ""
}
|
"""TensorFlow Debugger: Tools for debugging gradients."""
import re
import uuid
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import variables
_GRADIENT_DEBUG_TAG = "gradient_debug_"
_gradient_debuggers = {}
def _tensor_to_grad_debug_op_name(tensor, grad_debugger_uuid):
op_name, slot = debug_graphs.parse_node_or_tensor_name(tensor.name)
return "%s_%d/%s%s" % (op_name, slot, _GRADIENT_DEBUG_TAG, grad_debugger_uuid)
def _parse_grad_debug_op_name(op_name):
"""Parse the name of a debug gradient op.
Args:
op_name: the name of the debug gradient op.
Returns:
1) The UUID of the GradientsDebugger that created the debug gradient op.
2) Name of the original tensor whose gradient is debugged by the debug
gradient op.
"""
name_items = op_name.split("/")
assert len(name_items) > 1
assert name_items[-1].startswith(_GRADIENT_DEBUG_TAG)
grad_debugger_uuid = name_items[-1][len(_GRADIENT_DEBUG_TAG):]
if "_" in grad_debugger_uuid:
grad_debugger_uuid = grad_debugger_uuid[:grad_debugger_uuid.index("_")]
orig_tensor_slot = int(name_items[-2][name_items[-2].rfind("_") + 1:])
orig_base_op_name = name_items[-2][:name_items[-2].rfind("_")]
orig_tensor_name = ("/".join(name_items[:-2] + [orig_base_op_name]) +
":%d" % orig_tensor_slot)
return grad_debugger_uuid, orig_tensor_name
class GradientsDebugger:
  """Gradients Debugger.
  Allows retrieval of gradient tensors created by TensorFlow's automatic
  differentiation algorithm, i.e., `tf.gradients` and optimizer classes that
  use it.
  """
  # TODO(cais): Add examples code in the doc string?
  def __init__(self, y_tensor=None):
    """Constructor of GradientsDebugger.
    Args:
      y_tensor: optional: the `tf.Tensor` to be differentiated, i.e., the tensor
        on the numerator of the differentiation.
    """
    # Unique key for this debugger in the module-level registry; it is
    # embedded in debug-op names so the registered gradient functions can
    # find their owning debugger (see _parse_grad_debug_op_name).
    self._uuid = uuid.uuid4().hex
    _gradient_debuggers[self._uuid] = self
    # A dict mapping x-tensor names to gradient tensor. x-tensor refers to the
    # independent tf.Tensor, i.e., the tensor on the denominator of the
    # differentiation.
    self._gradient_tensors = {}
    self._y_tensor = y_tensor
    self._graph = None
    if y_tensor:
      self._graph = y_tensor.graph
    # True while inside a `with self:` block; see register_gradient_tensor
    # for how this gates gradient capture.
    self._is_active_context = False
  @property
  def y_tensor(self):
    """The `tf.Tensor` being differentiated, or None if not supplied."""
    return self._y_tensor
  @property
  def graph(self):
    """The `tf.Graph` this debugger is bound to (None until known)."""
    return self._graph
  def __enter__(self):
    """Mark this debugger active so it captures registered gradients."""
    self._is_active_context = True
  def __exit__(self, unused_type, unused_value, unused_traceback):
    """Deactivate this debugger on leaving the `with` block."""
    self._is_active_context = False
  def identify_gradient(self, input_tensor):
    """Create a debug identity tensor that registers and forwards gradients.
    The side effect of this method is that when gradient tensor(s) are created
    with respect to the any paths that include the `input_tensor`, the gradient
    tensor(s) with respect to `input_tensor` will be registered with this
    `GradientsDebugger` instance and can later be retrieved, with the
    methods `gradient_tensor` and `gradient_tensors`.
    Example:
    ```python
    x = tf.Variable(1.0)
    y = tf.add(x, x)
    grad_debugger = tf_debug.GradientsDebugger()
    debug_y = grad_debugger.identify_gradient(y)
    z = tf.square(debug_y)
    # Create a train op under the grad_debugger context.
    with grad_debugger:
      train_op = tf.compat.v1.train.GradientDescentOptimizer(z)
    # Now we can reflect through grad_debugger to get the gradient tensor
    # with respect to y.
    y_grad = grad_debugger.gradient_tensor(y)
    ```
    Args:
      input_tensor: the input `tf.Tensor` object whose related gradient tensors
        are to be registered with this `GradientsDebugger` instance when they
        are created, e.g., during `tf.gradients` calls or the construction
        of optimization (training) op that uses `tf.gradients`.
    Returns:
      A forwarded identity of `input_tensor`, as a `tf.Tensor`.
    Raises:
      ValueError: If an op with name that duplicates the gradient-debugging op
        already exists in the graph (highly unlikely).
    """
    # TODO(cais): Allow overriding gradient.
    # TODO(cais): Implement value_stack.
    grad_debug_op_name = _tensor_to_grad_debug_op_name(input_tensor, self._uuid)
    # pylint: disable=protected-access
    # Ref dtypes need the ref variant of the debug identity op.
    identity_op = (
        gen_array_ops.debug_gradient_ref_identity
        if input_tensor.dtype._is_ref_dtype else
        gen_array_ops.debug_gradient_identity)
    # pylint: enable=protected-access
    debug_grad_identity = identity_op(input_tensor, name=grad_debug_op_name)
    assert debug_grad_identity.dtype == input_tensor.dtype
    # A name clash means TF appended a uniquifying suffix, which would break
    # the name-based round trip in _parse_grad_debug_op_name.
    if debug_grad_identity.op.name != grad_debug_op_name:
      raise ValueError(
          "The graph already contains an op named %s" % grad_debug_op_name)
    return debug_grad_identity
  def watch_gradients_by_tensors(self, graph, tensors):
    """Watch gradient tensors by x-tensor(s).
    The side effect of this method is that when gradient tensor(s) are created
    with respect to the any paths that include the `x_tensor`s, the gradient
    tensor(s) with respect to the tensor will be registered with this
    `GradientsDebugger` instance and can later be retrieved, with the
    methods `gradient_tensor` and `gradient_tensors`.
    Unlike the method `identify_gradient`, this method is used to retrieve
    gradient tensors after the construction of the forward subgraph has
    completed (but before the construction of the backward subgraph).
    This method is the same as `watch_gradients_by_x_tensor_names` except that
    the tensors are specified by the Python `tf.Tensor` or `tf.Variable`
    objects, instead by name patterns.
    Example:
    ```python
    x = tf.Variable(1.0)
    y = tf.add(x, x, name="y")
    z = tf.square(debug_y)
    # Create a train op under the grad_debugger context.
    grad_debugger = tf_debug.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensors(y):
      train_op = tf.compat.v1.train.GradientDescentOptimizer(z)
    # Now we can reflect through grad_debugger to get the gradient tensor
    # with respect to y.
    y_grad = grad_debugger.gradient_tensor(y)
    # or
    y_grad = grad_debugger.gradient_tensor("y:0")
    ```
    Args:
      graph: the `tf.Graph` to watch the gradients on.
      tensors: a `tf.Tensor` or `tf.Variable` object, or a list of such objects.
    Returns:
      The GradientsDebugger instance itself.
    """
    if not isinstance(tensors, list):
      tensors = [tensors]
    # Build an exact-match alternation pattern from the tensor names and
    # delegate to the name-based variant.
    tensor_name_regex = []
    for tensor in tensors:
      tensor_name_regex.append(re.escape(tensor.name) + "$")
    tensor_name_regex = "(" + "|".join(tensor_name_regex) + ")"
    return self.watch_gradients_by_tensor_names(graph, tensor_name_regex)
  def watch_gradients_by_tensor_names(self, graph, tensor_name_regex):
    """Watch gradient tensors by name(s) of the x-tensor(s).
    The side effect of this method is that when gradient tensor(s) are created
    with respect to the x-tensors, the gradient tensor(s) will be registered
    with this `GradientsDebugger` instance and can later be retrieved.
    Unlike the `identify_gradient` method, this method is used after the
    construction of the forward graph has completed. Unlike the
    `watch_gradients_by_tensor` method, this method does not use handles to the
    tensors of interest; it uses their names.
    This method is the same as `watch_gradients_by_tensors` except that the
    x-tensors are specified by name patterns, instead of `tf.Tensor` or
    `tf.Variable` objects.
    Example:
    ```python
    x = tf.Variable(1.0, name="x")
    y = tf.add(x, x, name="y")
    z = tf.square(debug_y)
    # Create a train op under the grad_debugger context.
    grad_debugger = tf_debug.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensor_names(r"(x|y):0$"):
      train_op = tf.compat.v1.train.GradientDescentOptimizer(z)
    # Now we can reflect through grad_debugger to get the gradient tensor
    # with respect to x and y.
    x_grad = grad_debugger.gradient_tensor("x:0")
    y_grad = grad_debugger.gradient_tensor("y:0")
    ```
    Args:
      graph: the `tf.Graph` to watch the gradients on.
      tensor_name_regex: the regular-expression pattern of the name(s) of the
        x-tensor(s) to watch. x-tensor refers to the tensors on the denominator
        of the differentiation.
    Returns:
      The GradientsDebugger instance itself.
    """
    tensor_name_pattern = re.compile(tensor_name_regex)
    with graph.as_default():
      for op in graph.get_operations():
        for output in op.outputs:
          if tensor_name_pattern.match(output.name):
            # Splice a debug identity between the matched output and each
            # of its existing consumers.
            debug_op = self.identify_gradient(output)
            # Make a copy of output.consumers() since we'll modify the consumers
            # TODO(skyewm): this is unnecessary once the C API is enabled
            for consumer in list(output.consumers()):
              if consumer == debug_op.op:
                continue
              # Locate the slot index of the original input.
              for i, consumer_input in enumerate(consumer.inputs):
                if consumer_input == output:
                  consumer._update_input(i, debug_op)  # pylint: disable=protected-access
    return self
  def _check_same_graph(self, tensor):
    """Bind to `tensor`'s graph on first use; reject tensors from others.
    Raises:
      ValueError: if `tensor` belongs to a different graph than the one this
        debugger is already bound to.
    """
    if self._graph is None:
      self._graph = tensor.graph
    elif self._graph != tensor.graph:
      raise ValueError(
          "The graph of the value (%s) is not the same as the graph %s" %
          (tensor.graph, self._graph))
  def register_gradient_tensor(self,
                               x_tensor_name,
                               gradient_tensor):
    """Register the gradient tensor for an x-tensor.
    Args:
      x_tensor_name: (`str`) the name of the independent `tf.Tensor`, i.e.,
        the tensor on the denominator of the differentiation.
      gradient_tensor: the gradient `tf.Tensor`.
    """
    # Capture only when this is the sole debugger, or when this debugger is
    # the active `with` context — so multiple debuggers don't capture each
    # other's gradients.
    if len(_gradient_debuggers) == 1 or self._is_active_context:
      self._check_same_graph(gradient_tensor)
      self._gradient_tensors[x_tensor_name] = gradient_tensor
  def gradient_tensor(self, x_tensor):
    """Get the gradient tensor of an x-tensor.
    Args:
      x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
        name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
        on the denominator of the differentiation.
    Returns:
      If found, the gradient tensor.
    Raises:
      TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
      LookupError: If the `x_tensor` has not been registered with a gradient
        tensor.
    """
    x_tensor_name = self._get_tensor_name(x_tensor)
    if x_tensor_name not in self._gradient_tensors:
      raise LookupError(
          "This GradientsDebugger has not received any gradient tensor for "
          "x-tensor %s" % x_tensor_name)
    return self._gradient_tensors[x_tensor_name]
  def gradient_tensors(self):
    """Get the gradient tensors that this object is aware of.
    Returns:
      A dict mapping x-tensor names to gradient tensor objects. x-tensor refers
      to the tensors on the denominator of the differentiation.
    """
    return self._gradient_tensors
  def _get_tensor_name(self, tensor):
    """Normalize a tensor/variable/name argument to a tensor name string."""
    if isinstance(tensor, (ops.Tensor, variables.Variable)):
      return tensor.name
    elif isinstance(tensor, str):
      return tensor
    else:
      raise TypeError(
          "x_tensor must be a str or tf.Tensor or tf.Variable, "
          "but instead has type %s" % type(tensor))
def clear_gradient_debuggers():
  """Remove every gradient debugger from the global registry."""
  # Empty the dict in place so any existing references observe the removal.
  while _gradient_debuggers:
    _gradient_debuggers.popitem()
@ops.RegisterGradient("DebugGradientIdentity")
def _identify_gradient_grad(op, dy):
"""Gradient function for the DebugIdentity op."""
# TODO(cais): Allow overriding gradient.
grad_debugger_uuid, orig_tensor_name = _parse_grad_debug_op_name(op.name)
grad_debugger = _gradient_debuggers[grad_debugger_uuid]
grad_debugger.register_gradient_tensor(orig_tensor_name, dy)
return dy
@ops.RegisterGradient("DebugGradientRefIdentity")
def _identify_gradient_grad_ref(op, dy):
"""Gradient function for the DebugIdentity op."""
return _identify_gradient_grad(op, dy)
def gradient_values_from_dump(grad_debugger, x_tensor, dump):
  """Find gradient values from a `DebugDumpDir` object.

  Args:
    grad_debugger: the `tf_debug.GradientsDebugger` instance to be used.
    x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
      name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
      on the denominator of the differentiation.
    dump: A `tfdbg.DebugDumpDir` object.

  Returns:
    If this `GradientsDebugger` instance has the gradient tensor of `x_tensor`
    registered: a list of `numpy.ndarray` representing the value of the
    gradient tensor from `dump`. The list could be empty, if the gradient
    tensor is not executed in the `tf.Session.run()` call that generated
    the `dump`. The list could also contain multiple values of the gradient
    tensor, e.g., if gradient tensor is computed repeatedly in a
    `tf.while_loop` during the run that generated the `dump`.

  Raises:
    LookupError: If this `GradientsDebugger` instance does not have the
      gradient tensor of `x_tensor` registered.
    ValueError: If this `GradientsDebugger` has a `tf.Graph` object that
      does not match the `tf.Graph` object of the `dump`.
    TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
  """
  # TODO(cais): Use this method in LocalCLIDebugWrapperSession to present the
  # gradient tensors to the TFDBG CLI.
  # When both graphs are known, make sure they agree before any lookup.
  debugger_graph = grad_debugger.graph
  dump_graph = dump.python_graph
  if dump_graph and debugger_graph and dump_graph != debugger_graph:
    raise ValueError(
        "This GradientsDebugger instance has a graph (%s) that differs from "
        "the graph of the DebugDumpDir object (%s)." %
        (debugger_graph, dump_graph))
  grad_tensor = grad_debugger.gradient_tensor(x_tensor)
  node, slot = debug_graphs.parse_node_or_tensor_name(grad_tensor.name)
  try:
    return dump.get_tensors(node, slot, "DebugIdentity")
  except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:
    # The gradient op simply did not execute in the dumped run.
    return []
|
{
"content_hash": "29da8c01b88d32a132390c81c2b1801a",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 89,
"avg_line_length": 36.876574307304786,
"alnum_prop": 0.6801912568306011,
"repo_name": "tensorflow/tensorflow-pywrap_tf_optimizer",
"id": "8d202c9e1a0e9bcbb08b994d5b1fd5d88de71d53",
"size": "15329",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/python/debug/lib/debug_gradients.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1360509"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "124617937"
},
{
"name": "CMake",
"bytes": "183407"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416070"
},
{
"name": "Go",
"bytes": "2104698"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074471"
},
{
"name": "Jupyter Notebook",
"bytes": "789401"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11175525"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "169288"
},
{
"name": "Objective-C++",
"bytes": "294187"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42599764"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "619753"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14607"
},
{
"name": "Starlark",
"bytes": "7521293"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
"""Forms."""
from django import forms
from imager_images.models import Image, Album
class PhotoForm(forms.ModelForm):
    """ModelForm for creating/editing an Image.

    Server-managed fields are excluded; the view is expected to set them.
    """
    class Meta:
        model = Image
        # owner and the date_* fields are populated by the view/model,
        # never by the submitting user.
        exclude = ['owner', 'date_modified', 'date_published', 'date_uploaded']
class AlbumForm(forms.ModelForm):
    """ModelForm for creating/editing an Album.

    Server-managed fields are excluded; the view is expected to set them.
    """
    class Meta:
        model = Album
        # owner and the date_* fields are populated by the view/model,
        # never by the submitting user.
        exclude = ['owner', 'date_modified', 'date_published', 'date_created']
|
{
"content_hash": "04c80f4b60796c4205ddbe5f9f89a425",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 22.047619047619047,
"alnum_prop": 0.6241900647948164,
"repo_name": "ellezv/django_imager",
"id": "8ea3f14a80a5569baa270d61a5d7418ee93708fa",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_images/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14005"
},
{
"name": "HTML",
"bytes": "28173"
},
{
"name": "JavaScript",
"bytes": "8503"
},
{
"name": "Python",
"bytes": "70380"
},
{
"name": "Shell",
"bytes": "315"
}
],
"symlink_target": ""
}
|
import RPi.GPIO as GPIO
import time
import sys
import json
# Usage: gpio_set.py <pin> <in|out> [high|low]
# Reads or drives one GPIO pin and prints the outcome as a JSON object.
# NOTE(review): written for Python 2 (print statements) — do not run under
# Python 3 without porting.
if len(sys.argv) < 3:
    print "Too few parameters!"
    sys.exit()
# Physical board pin numbering (1-40), not BCM channel numbering.
GPIO.setmode(GPIO.BOARD)
mPort = int(sys.argv[1])
if mPort <= 0 or mPort > 40:
    print "id must be 1-40!"
    sys.exit()
mMode = sys.argv[2]
if mMode == "in":
    # Input mode: sample the pin level once and report it.
    GPIO.setup(mPort,GPIO.IN)
    inputValue = GPIO.input(mPort)
    data = {'name':'gpio','id':mPort,'mode':mMode,'var':inputValue}
    print json.dumps(data)
elif mMode == "out":
    # Output mode: a third argument selects the level to drive.
    GPIO.setup(mPort,GPIO.OUT)
    if len(sys.argv) < 4:
        print "Voltage not specified!"
        sys.exit()
    mVol = sys.argv[3]
    if mVol == "low":
        GPIO.output(mPort,GPIO.LOW)
        data = {'name':'gpio','id':mPort,'mode':mMode,'var':mVol}
        print json.dumps(data)
    elif mVol == "high":
        GPIO.output(mPort,GPIO.HIGH)
        data = {'name':'gpio','id':mPort,'mode':mMode,'var':mVol}
        print json.dumps(data)
    else:
        print "voltage must be high or low!"
else:
    print "mode must be in or out!"
|
{
"content_hash": "8133abf18ee5704c5d2694076fe4710a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 67,
"avg_line_length": 25.55,
"alnum_prop": 0.5949119373776908,
"repo_name": "thatblstudio/belong",
"id": "387ef6e38de2c2c46c019b6c89c053c9c895da43",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/www/gpio/gpio_set.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "158200"
},
{
"name": "PHP",
"bytes": "761"
},
{
"name": "Python",
"bytes": "2426"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
from django.db.models.signals import post_save, post_delete
from django.conf import settings
class AccountsReceivableConfig(AppConfig):
    """App config for cbe.accounts_receivable.

    On startup, wires CustomerPayment saves up to the MQ notification
    machinery provided by drf_nest.
    """
    name = 'cbe.accounts_receivable'
    def ready(self):
        """Connect post_save for CustomerPayment to an MQ-publishing handler."""
        # Local imports: app/model loading must be complete before these run.
        import drf_nest.signals
        from cbe.accounts_receivable.models import CustomerPayment
        from cbe.accounts_receivable.serializers import CustomerPaymentSerializer
        # Exchange name becomes "<prefix><app name>.CustomerPayment".
        exchange_prefix = settings.MQ_FRAMEWORK['EXCHANGE_PREFIX'] + self.name
        exchange_header_list = ('vendor','channel')
        # weak=False keeps the decorated handler closure alive for the
        # lifetime of the process (it has no other strong reference).
        post_save.connect( drf_nest.signals.notify_extra_args( serializer=CustomerPaymentSerializer,
                                                exchange_prefix=exchange_prefix + ".CustomerPayment",
                                                exchange_header_list=exchange_header_list)(drf_nest.signals.notify_save_instance),
                            sender=CustomerPayment, weak=False)
|
{
"content_hash": "8f4896014064d412f7a0e2760e1932b7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 147,
"avg_line_length": 49.45,
"alnum_prop": 0.6329625884732053,
"repo_name": "cdaf/cbe",
"id": "5cd24283c8efa12310aa95a3af3c5b076521c8a5",
"size": "989",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cbe/cbe/accounts_receivable/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2292"
},
{
"name": "HTML",
"bytes": "3112"
},
{
"name": "PowerShell",
"bytes": "20448"
},
{
"name": "Python",
"bytes": "241197"
}
],
"symlink_target": ""
}
|
"""Test listing meters.
"""
import base64
import datetime
import logging
import testscenarios
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.tests.api.v2 import FunctionalTest
from ceilometer.tests import db as tests_db
load_tests = testscenarios.load_tests_apply_scenarios
LOG = logging.getLogger(__name__)
class TestListEmptyMeters(FunctionalTest,
                          tests_db.MixinTestsWithBackendScenarios):
    """GET /meters against an empty backend returns an empty list."""
    def test_empty(self):
        data = self.get_json('/meters')
        self.assertEqual([], data)
class TestListMeters(FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
def setUp(self):
super(TestListMeters, self).setUp()
for cnt in [
sample.Sample(
'meter.test',
'cumulative',
'',
1,
'user-id',
'project-id',
'resource-id',
timestamp=datetime.datetime(2012, 7, 2, 10, 40),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample',
'size': 123,
'util': 0.75,
'is_public': True},
source='test_source'),
sample.Sample(
'meter.test',
'cumulative',
'',
3,
'user-id',
'project-id',
'resource-id',
timestamp=datetime.datetime(2012, 7, 2, 11, 40),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample1',
'size': 0,
'util': 0.47,
'is_public': False},
source='test_source'),
sample.Sample(
'meter.mine',
'gauge',
'',
1,
'user-id',
'project-id',
'resource-id2',
timestamp=datetime.datetime(2012, 7, 2, 10, 41),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample2',
'size': 456,
'util': 0.64,
'is_public': False},
source='test_source'),
sample.Sample(
'meter.test',
'cumulative',
'',
1,
'user-id2',
'project-id2',
'resource-id3',
timestamp=datetime.datetime(2012, 7, 2, 10, 42),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample3',
'size': 0,
'util': 0.75,
'is_public': False},
source='test_source'),
sample.Sample(
'meter.mine',
'gauge',
'',
1,
'user-id4',
'project-id2',
'resource-id4',
timestamp=datetime.datetime(2012, 7, 2, 10, 43),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample4',
'properties': {
'prop_1': 'prop_value',
'prop_2': {'sub_prop_1':
'sub_prop_value'}
},
'size': 0,
'util': 0.58,
'is_public': True},
source='test_source1')]:
msg = utils.meter_message_from_counter(
cnt,
self.CONF.publisher.metering_secret)
self.conn.record_metering_data(msg)
def test_list_meters(self):
data = self.get_json('/meters')
self.assertEqual(4, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id',
'resource-id2',
'resource-id3',
'resource-id4']))
self.assertEqual(set(r['name'] for r in data),
set(['meter.test', 'meter.mine']))
self.assertEqual(set(r['source'] for r in data),
set(['test_source', 'test_source1']))
def test_list_samples(self):
data = self.get_json('/samples')
self.assertEqual(5, len(data))
def test_list_meters_with_dict_metadata(self):
data = self.get_json('/meters/meter.mine',
q=[{'field':
'metadata.properties.prop_2.sub_prop_1',
'op': 'eq',
'value': 'sub_prop_value',
}])
self.assertEqual(1, len(data))
self.assertEqual('resource-id4', data[0]['resource_id'])
metadata = data[0]['resource_metadata']
self.assertIsNotNone(metadata)
self.assertEqual('self.sample4', metadata['tag'])
self.assertEqual('prop_value', metadata['properties.prop_1'])
def test_list_samples_with_dict_metadata(self):
data = self.get_json('/samples',
q=[{'field':
'metadata.properties.prop_2.sub_prop_1',
'op': 'eq',
'value': 'sub_prop_value',
}])
self.assertTrue('id' in data[0])
del data[0]['id'] # Randomly generated
self.assertEqual(data, [{
u'user_id': u'user-id4',
u'resource_id': u'resource-id4',
u'timestamp': u'2012-07-02T10:43:00',
u'meter': u'meter.mine',
u'volume': 1.0,
u'project_id': u'project-id2',
u'type': u'gauge',
u'unit': u'',
u'metadata': {u'display_name': u'test-server',
u'properties.prop_2:sub_prop_1': u'sub_prop_value',
u'util': u'0.58',
u'tag': u'self.sample4',
u'properties.prop_1': u'prop_value',
u'is_public': u'True',
u'size': u'0'}
}])
def test_list_meters_metadata_query(self):
data = self.get_json('/meters/meter.test',
q=[{'field': 'metadata.tag',
'op': 'eq',
'value': 'self.sample1',
}],)
self.assertEqual(1, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.test']))
def test_list_meters_resource_metadata_query(self):
# NOTE(jd) Same test as above, but with the alias resource_metadata
# as query field
data = self.get_json('/meters/meter.test',
q=[{'field': 'resource_metadata.tag',
'op': 'eq',
'value': 'self.sample1',
}],)
self.assertEqual(1, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.test']))
def test_list_meters_multi_metadata_query(self):
data = self.get_json('/meters/meter.test',
q=[{'field': 'metadata.tag',
'op': 'eq',
'value': 'self.sample1',
},
{'field': 'metadata.display_name',
'op': 'eq',
'value': 'test-server',
}],)
self.assertEqual(1, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.test']))
def test_list_meters_query_integer_metadata(self):
data = self.get_json('/meters/meter.test',
q=[{'field': 'metadata.size',
'op': 'eq',
'value': '0',
'type': 'integer'}]
)
self.assertEqual(2, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id',
'resource-id3']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.test']))
self.assertEqual(set(r['resource_metadata']['size'] for r in data),
set(['0']))
def test_list_meters_query_float_metadata(self):
data = self.get_json('/meters/meter.test',
q=[{'field': 'metadata.util',
'op': 'eq',
'value': '0.75',
'type': 'float'}]
)
self.assertEqual(2, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id',
'resource-id3']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.test']))
self.assertEqual(set(r['resource_metadata']['util'] for r in data),
set(['0.75']))
def test_list_meters_query_boolean_metadata(self):
data = self.get_json('/meters/meter.mine',
q=[{'field': 'metadata.is_public',
'op': 'eq',
'value': 'False',
'type': 'boolean'}]
)
self.assertEqual(1, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id2']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.mine']))
self.assertEqual(set(r['resource_metadata']['is_public'] for r
in data), set(['False']))
# FIXME(gordc): verify no false positive (Bug#1236496)
def test_list_meters_query_string_metadata(self):
data = self.get_json('/meters/meter.test',
q=[{'field': 'metadata.tag',
'op': 'eq',
'value': 'self.sample'}]
)
self.assertEqual(1, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.test']))
self.assertEqual(set(r['resource_metadata']['tag'] for r in data),
set(['self.sample']))
def test_list_meters_query_integer_float_metadata_without_type(self):
data = self.get_json('/meters/meter.test',
q=[{'field': 'metadata.size',
'op': 'eq',
'value': '0'},
{'field': 'metadata.util',
'op': 'eq',
'value': '0.75'}]
)
self.assertEqual(1, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id3']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.test']))
self.assertEqual(set(r['resource_metadata']['size'] for r in data),
set(['0']))
self.assertEqual(set(r['resource_metadata']['util'] for r in data),
set(['0.75']))
def test_with_resource(self):
data = self.get_json('/meters', q=[{'field': 'resource_id',
'value': 'resource-id',
}])
nids = set(r['name'] for r in data)
self.assertEqual(set(['meter.test']), nids)
sids = set(r['source'] for r in data)
self.assertEqual(set(['test_source']), sids)
def test_with_resource_and_source(self):
data = self.get_json('/meters', q=[{'field': 'resource_id',
'value': 'resource-id4',
},
{'field': 'source',
'value': 'test_source1',
}])
nids = set(r['name'] for r in data)
self.assertEqual(set(['meter.mine']), nids)
sids = set(r['source'] for r in data)
self.assertEqual(set(['test_source1']), sids)
def test_with_resource_and_metadata_query(self):
data = self.get_json('/meters/meter.mine',
q=[{'field': 'resource_id',
'op': 'eq',
'value': 'resource-id2',
},
{'field': 'metadata.tag',
'op': 'eq',
'value': 'self.sample2',
}])
self.assertEqual(1, len(data))
self.assertEqual(set(r['resource_id'] for r in data),
set(['resource-id2']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.mine']))
def test_with_source(self):
data = self.get_json('/meters', q=[{'field': 'source',
'value': 'test_source',
}])
rids = set(r['resource_id'] for r in data)
self.assertEqual(set(['resource-id',
'resource-id2',
'resource-id3']), rids)
sids = set(r['source'] for r in data)
self.assertEqual(set(['test_source']), sids)
def test_with_source_and_metadata_query(self):
data = self.get_json('/meters/meter.mine',
q=[{'field': 'source',
'op': 'eq',
'value': 'test_source',
},
{'field': 'metadata.tag',
'op': 'eq',
'value': 'self.sample2',
}])
self.assertEqual(1, len(data))
self.assertEqual(set(r['source'] for r in data), set(['test_source']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.mine']))
def test_with_source_non_existent(self):
data = self.get_json('/meters',
q=[{'field': 'source',
'value': 'test_source_doesnt_exist',
}],
)
assert not data
def test_with_user(self):
data = self.get_json('/meters',
q=[{'field': 'user_id',
'value': 'user-id',
}],
)
uids = set(r['user_id'] for r in data)
self.assertEqual(set(['user-id']), uids)
nids = set(r['name'] for r in data)
self.assertEqual(set(['meter.mine', 'meter.test']), nids)
rids = set(r['resource_id'] for r in data)
self.assertEqual(set(['resource-id', 'resource-id2']), rids)
sids = set(r['source'] for r in data)
self.assertEqual(set(['test_source']), sids)
def test_with_user_and_source(self):
data = self.get_json('/meters',
q=[{'field': 'user_id',
'value': 'user-id4',
},
{'field': 'source',
'value': 'test_source1',
}],
)
uids = set(r['user_id'] for r in data)
self.assertEqual(set(['user-id4']), uids)
sids = set(r['source'] for r in data)
self.assertEqual(set(['test_source1']), sids)
def test_with_user_and_metadata_query(self):
data = self.get_json('/meters/meter.test',
q=[{'field': 'user_id',
'op': 'eq',
'value': 'user-id',
},
{'field': 'metadata.tag',
'op': 'eq',
'value': 'self.sample1',
}])
self.assertEqual(1, len(data))
self.assertEqual(set(r['user_id'] for r in data), set(['user-id']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.test']))
def test_with_user_non_existent(self):
data = self.get_json('/meters',
q=[{'field': 'user_id',
'value': 'user-id-foobar123',
}],
)
self.assertEqual(data, [])
def test_with_project(self):
data = self.get_json('/meters',
q=[{'field': 'project_id',
'value': 'project-id2',
}],
)
rids = set(r['resource_id'] for r in data)
self.assertEqual(set(['resource-id3', 'resource-id4']), rids)
sids = set(r['source'] for r in data)
self.assertEqual(set(['test_source', 'test_source1']), sids)
def test_with_project_and_source(self):
data = self.get_json('/meters',
q=[{'field': 'project_id',
'value': 'project-id2',
},
{'field': 'source',
'value': 'test_source1',
}],
)
rids = set(r['resource_id'] for r in data)
self.assertEqual(set(['resource-id4']), rids)
sids = set(r['source'] for r in data)
self.assertEqual(set(['test_source1']), sids)
def test_with_project_and_metadata_query(self):
data = self.get_json('/meters/meter.test',
q=[{'field': 'project_id',
'op': 'eq',
'value': 'project-id',
},
{'field': 'metadata.tag',
'op': 'eq',
'value': 'self.sample1',
}])
self.assertEqual(1, len(data))
self.assertEqual(set(r['project_id'] for r in data),
set(['project-id']))
self.assertEqual(set(r['counter_name'] for r in data),
set(['meter.test']))
def test_with_project_non_existent(self):
data = self.get_json('/meters',
q=[{'field': 'project_id',
'value': 'jd-was-here',
}],
)
self.assertEqual(data, [])
    def test_list_meters_meter_id(self):
        """Every meter's meter_id is base64("<resource_id>+<name>")."""
        data = self.get_json('/meters')
        for i in data:
            # NOTE(review): base64.encodestring is Python 2 only -- it was
            # deprecated in 3.1 and removed in 3.9 (encodebytes on a bytes
            # payload is the replacement). Confirm the target interpreter
            # before porting; on py2 str/bytes compare equal so this works.
            expected = base64.encodestring('%s+%s' % (i['resource_id'],
                                                      i['name']))
            self.assertEqual(expected, i['meter_id'])
|
{
"content_hash": "70352668dd92693f318a64ada1f3df48",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 78,
"avg_line_length": 43.125256673511295,
"alnum_prop": 0.38982001714122466,
"repo_name": "lexxito/monitoring",
"id": "067cc39ef5065a9427430bb976e4ff741c4b36cd",
"size": "21683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/tests/api/v2/test_list_meters_scenarios.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6284"
},
{
"name": "HTML",
"bytes": "5892"
},
{
"name": "JavaScript",
"bytes": "63538"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2077479"
},
{
"name": "Shell",
"bytes": "8171"
}
],
"symlink_target": ""
}
|
"""Tests for cement.ext.ext_jinja2."""
import os
import sys
import random
from shutil import copyfile, rmtree
from cement.core import exc, foundation, handler, backend, controller
from cement.utils import test
class Jinja2ExtTestCase(test.CementExtTestCase):
    """Exercise the cement jinja2 output-handler extension."""

    def setUp(self):
        super(Jinja2ExtTestCase, self).setUp()
        self.app = self.make_app('tests',
                                 extensions=['jinja2'],
                                 output_handler='jinja2',
                                 argv=[]
                                 )

    def test_jinja2(self):
        """Render a simple ASCII template through the jinja2 handler."""
        self.app.setup()
        value = random.random()
        rendered = self.app.render(dict(foo=value), 'test_template.jinja2')
        self.eq(rendered, "foo equals %s\n" % value)

    def test_jinja2_utf8(self):
        """Render a template containing non-ASCII (UTF-8) text."""
        self.app.setup()
        value = random.random()
        rendered = self.app.render(dict(foo=value),
                                   'test_template_utf8.jinja2')
        self.eq(rendered, u"foo est égal à %s\n" % value)

    def test_jinja2_filesystemloader(self):
        """Templates resolve from template_dirs via the filesystem loader."""
        self.app.setup()
        self.app._meta.template_dirs = [self.tmp_dir]
        # make sure it doesn't load from the tests directory module regardless
        self.app._meta.template_module = 'some.bogus.module.path'
        tests_dir = os.path.dirname(os.path.dirname(__file__))
        # Copy parent then child so the {% extends %} chain resolves from
        # tmp_dir alone.
        for name in ('test_template_parent.jinja2',
                     'test_template_child.jinja2'):
            copyfile(os.path.join(tests_dir, 'templates', name),
                     os.path.join(self.tmp_dir, name))
        value = random.random()
        rendered = self.app.render(dict(foo=value),
                                   'test_template_child.jinja2')
        self.eq(rendered, "foo equals %s\n" % value)

    def test_jinja2_packageloader(self):
        """Templates resolve from template_module via the package loader."""
        self.app.setup()
        self.app._meta.template_module = 'tests.templates'
        self.app._meta.template_dirs = []
        value = random.random()
        rendered = self.app.render(dict(foo=value),
                                   'test_template_child.jinja2')
        self.eq(rendered, "foo equals %s\n" % value)

    @test.raises(exc.FrameworkError)
    def test_jinja2_bad_template(self):
        """A syntactically broken template raises FrameworkError."""
        self.app.setup()
        self.app.render(dict(foo='bar'), 'bad_template2.jinja2')

    @test.raises(exc.FrameworkError)
    def test_jinja2_nonexistent_template(self):
        """A missing template raises FrameworkError."""
        self.app.setup()
        self.app.render(dict(foo='bar'), 'missing_template.jinja2')

    @test.raises(exc.FrameworkError)
    def test_jinja2_none_template(self):
        """Passing None as the template path raises with a clear message."""
        self.app.setup()
        try:
            self.app.render(dict(foo='bar'), None)
        except exc.FrameworkError as e:
            self.eq(e.msg, "Invalid template path 'None'.")
            raise

    @test.raises(exc.FrameworkError)
    def test_jinja2_bad_module(self):
        """A bogus template_module raises FrameworkError on render."""
        self.app.setup()
        self.app._meta.template_module = 'this_is_a_bogus_module'
        self.app.render(dict(foo='bar'), 'bad_template.jinja2')
|
{
"content_hash": "68fc37ef768f94b834a3a233d88dfc17",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 35.76086956521739,
"alnum_prop": 0.59209726443769,
"repo_name": "akhilman/cement",
"id": "7696e4fa825b7e0bc3945ad2219e0995d57ac620",
"size": "3316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ext/jinja2_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "186"
},
{
"name": "Makefile",
"bytes": "317"
},
{
"name": "PowerShell",
"bytes": "2184"
},
{
"name": "Python",
"bytes": "512585"
},
{
"name": "Shell",
"bytes": "1964"
}
],
"symlink_target": ""
}
|
"""An ox_herd plugin for posting messages to github.
This module provides a single class: PostToGitHub to post messages to github.
It is useful as a tool for the pylint plugin and perhaps others.
Provided that you include this plugin in the OX_PLUGINS variable in
ox_herd.settings or in your OX_PLUGINS environment variable, this will be
picked up automatically and used as a plugin.
In general, there are a variety of more complicated things you can do
in configuring plugins. See documentation on plugins or see the pytest_plugin
example for more details.
"""
import configparser
import json
from eyap.core import github_comments
from ox_herd.core.plugins import base
from ox_herd.core.ox_tasks import OxHerdTask
class PostToGitHub(OxHerdTask, base.OxPluginComponent):
    """Task to post a message to a github issue comment thread."""

    def __init__(self, msg, full_repo, title, number, conf_file, conf_sec,
                 *args, **kw):
        """Initializer.

        :arg msg: String message to post.

        :arg full_repo: Full name of github repo (e.g., 'aocks/ox_herd').

        :arg title: String title of issue to post to *ONLY* if no
                    github_issue is specified in the conf file (see below).
                    This will be ignored if github_issue is provided.

        :arg number: Optional issue number. This is useful if you have
                     multiple issues with the same title and need to
                     distinguish between them.

        :arg conf_file: Path to configuration file to be read by python
                        configparser module.

        :arg conf_sec: String name of section in conf_file to read.
                       This section should have entries for the following:

                           github_user: Name of github user for login.
                           github_token: Token or password to access github.
                           github_issue: Optional issue title to use
                                         instead of title argument.

        :arg *args: Arguments to OxHerdTask.__init__.

        :arg **kw: Keyword arguments to OxHerdTask.__init__.
        """
        OxHerdTask.__init__(self, *args, **kw)
        base.OxPluginComponent.__init__(self)
        self.msg = msg
        self.full_repo = full_repo
        self.title = title
        self.number = number
        self.conf_file = conf_file
        self.conf_sec = conf_sec

    @classmethod
    def main_call(cls, ox_herd_task):
        """Main method to post to github.

        :arg ox_herd_task: Instance of a PostToGitHub task perhaps containing
                           additional data (e.g., ox_herd_task.name).

        :returns: Dictionary with 'return_value' and 'json_blob' as
                  required for OxPluginComponent.
        """
        my_config = configparser.ConfigParser()
        my_config.read(ox_herd_task.conf_file)
        my_csec = my_config[ox_herd_task.conf_sec]
        cthread = cls.prep_comment_thread(
            ox_herd_task.title, ox_herd_task.number, ox_herd_task.full_repo,
            my_csec)
        # allow_create=True makes a new issue if no matching thread exists.
        cthread.add_comment(ox_herd_task.msg, allow_create=True)
        return {
            'return_value': 'Task %s completed successfully.' % (
                ox_herd_task.name), 'json_blob': json.dumps({})}

    @staticmethod
    def prep_comment_thread(title, number, full_repo, my_conf):
        """Prepare a CommentThread object to use in posting comments.

        :arg title: Issue title used when the config section does not
                    provide a github_issue entry.

        :arg number: Optional issue number; blank or whitespace-only
                     strings are treated as "no number".

        :arg full_repo: 'owner/repo' style repository name.

        :arg my_conf: Config section (mapping) providing github_user,
                      github_token, and optionally github_issue.

        :returns: A GitHubCommentThread object.

        PURPOSE: Read the github parameters from my_conf and create a
        GitHubCommentThread we can use in posting comments.
        """
        user = my_conf['github_user']
        token = my_conf['github_token']
        topic = my_conf.get('github_issue')
        if topic is None:
            # No configured issue title: fall back to the caller's title
            # and use the optional number to pick among same-title issues.
            topic = title
            thread_id = number
            if isinstance(thread_id, str):
                # Blank/whitespace-only numbers mean "no number".
                thread_id = thread_id.strip() or None
        else:
            # An explicitly configured issue wins; ignore title and number.
            thread_id = None
        owner, repo = full_repo.split('/')
        return github_comments.GitHubCommentThread(
            owner, repo, topic, user, token, thread_id=thread_id)
|
{
"content_hash": "0b18f65f0926d568708c4916228479b9",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 79,
"avg_line_length": 36.27407407407407,
"alnum_prop": 0.5570757606697978,
"repo_name": "aocks/ox_herd",
"id": "0835a53c94a3cc18aa31c0946655ec9783661f73",
"size": "4897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ox_herd/core/plugins/post_to_github_plugin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "30097"
},
{
"name": "Dockerfile",
"bytes": "3198"
},
{
"name": "HTML",
"bytes": "20012"
},
{
"name": "Makefile",
"bytes": "314"
},
{
"name": "Python",
"bytes": "159512"
},
{
"name": "Shell",
"bytes": "718"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build and return the tatooine hermit creature template object."""
    creature = Creature()
    creature.template = "object/mobile/shared_dressed_tatooine_hermit.iff"
    creature.attribute_template_id = 9
    creature.stfName("theme_park_name","hermit")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return creature
|
{
"content_hash": "d1becae3a5c0d5a7b3287e6aaae23f18",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 23.153846153846153,
"alnum_prop": 0.6976744186046512,
"repo_name": "anhstudios/swganh",
"id": "ae37e74cf9edc943bc77e880dd5ddda46a193b6e",
"size": "446",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_tatooine_hermit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
future.backports package
"""
from __future__ import absolute_import
import sys
__future_module__ = True
from future.standard_library import import_top_level_modules
if sys.version_info[0] >= 3:
import_top_level_modules()
from .misc import (ceil,
OrderedDict,
Counter,
ChainMap,
check_output,
count,
recursive_repr,
_count_elements,
cmp_to_key
)
|
{
"content_hash": "64ef87207a5318c611119f9a093bf9da",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 60,
"avg_line_length": 20.384615384615383,
"alnum_prop": 0.49433962264150944,
"repo_name": "PythonCharmers/python-future",
"id": "c71e065354c4a8522b5c3e6d2ab91942bd9b9e11",
"size": "530",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/future/backports/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2465543"
},
{
"name": "Shell",
"bytes": "68"
}
],
"symlink_target": ""
}
|
import flask.helpers
import flask.sessions
import itsdangerous

# Parameters for the session-cookie experiment driven by the script below.
cookie_name = 'session'
cookie_value = ''
# NOTE(review): secret_key is assigned twice; the second value ('drone')
# overrides the first and is what the rest of the script actually uses.
secret_key = 'drones'
secret_key = 'drone'
def open_session_original(self, app, request):
    """Apparent reference copy of Flask's open_session, kept verbatim.

    Loads the session from the request's cookie via the app's signing
    serializer; compare with open_session_modified below.
    """
    s = self.get_signing_serializer(app)
    if s is None:
        return None
    val = request.cookies.get(app.session_cookie_name)
    if not val:
        return self.session_class()
    max_age = flask.helpers.total_seconds(app.permanent_session_lifetime)
    try:
        data = s.loads(val, max_age=max_age)
        return self.session_class(data)
    except itsdangerous.BadSignature:
        # Tampered or expired cookie: start a fresh empty session.
        return self.session_class()
def get_signing_serializer_original(self, app):
    """Apparent reference copy of Flask's get_signing_serializer.

    Builds the itsdangerous serializer from app.secret_key; compare
    with get_signing_serializer_modified below.
    """
    if not app.secret_key:
        return None
    signer_kwargs = dict(
        key_derivation=self.key_derivation,
        digest_method=self.digest_method
    )
    return itsdangerous.URLSafeTimedSerializer(app.secret_key, salt=self.salt,
                                               serializer=self.serializer,
                                               signer_kwargs=signer_kwargs)
def open_session_modified(self, app, request):
    """Variant of open_session that decodes the module-level cookie_value.

    Differences from the original: get_signing_serializer is called with
    ``self`` passed explicitly (these functions are patched onto the
    instance as plain attributes, so they are unbound), and the cookie
    payload comes from the global ``cookie_value`` rather than the request.
    """
    s = self.get_signing_serializer(self, app)
    if s is None:
        return None
    val = cookie_value
    if not val:
        return self.session_class()
    max_age = flask.helpers.total_seconds(app.permanent_session_lifetime)
    try:
        data = s.loads(val, max_age=max_age)
        return self.session_class(data)
    except itsdangerous.BadSignature:
        # Bad or expired signature: fall back to an empty session.
        return self.session_class()
def get_signing_serializer_modified(self, app):
    """Variant of get_signing_serializer using the module-level secret_key.

    Identical to the original except the key comes from the global
    ``secret_key`` instead of ``app.secret_key``.
    """
    if not secret_key:
        return None
    signer_kwargs = dict(
        key_derivation=self.key_derivation,
        digest_method=self.digest_method
    )
    return itsdangerous.URLSafeTimedSerializer(secret_key, salt=self.salt,
                                               serializer=self.serializer,
                                               signer_kwargs=signer_kwargs)
# Drive the modified session interface inside a fake request context.
app = flask.Flask(__name__)
app.secret_key = secret_key
with app.test_request_context('/?name=Peter'):
    assert flask.request.path == '/'
    assert flask.request.args['name'] == 'Peter'
    print(app)
    # Patch the unbound *_modified functions onto an interface instance;
    # they must therefore be called with the instance passed explicitly.
    s = flask.sessions.SecureCookieSessionInterface() # import flask.sessions
    # session.get_signing_serializer_modified = get_signing_serializer_modified
    s.open_session = open_session_modified
    s.get_signing_serializer = get_signing_serializer_modified
    s.open_session(s, app, flask.request)
    print(s)
    print(dir(s))
    # print(flask.session)
    # print(flask.session['session'])
|
{
"content_hash": "7ab475ed27b8fa51c716ccfa515eaa0e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 30.607142857142858,
"alnum_prop": 0.6374951380785686,
"repo_name": "benhunter/py-stuff",
"id": "015fa3149f9d674540a646664502053972fd119b",
"size": "2955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netcat/drones-flask-cookie.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2945"
},
{
"name": "Jupyter Notebook",
"bytes": "2209"
},
{
"name": "PHP",
"bytes": "3366"
},
{
"name": "Python",
"bytes": "1132716"
}
],
"symlink_target": ""
}
|
import maya.cmds as cmds
def multObjImport():
files_to_import = cmds.fileDialog2(fileFilter = '*.obj', dialogStyle = 2, caption = 'import multiple object files', fileMode = 4)
for file_to_import in files_to_import:
names_list = file_to_import.split('/')
object_name = names_list[-1].replace('.obj', '')
returnedNodes = cmds.file('%s' % file_to_import, i = True, type = "OBJ", rnn=True, ignoreVersion = True, options = "mo=0", loadReferenceDepth = "all" )
cmds.rename( returnedNodes[0], object_name)
for nd in returnedNodes:
if '|' in nd and 'Shape' not in nd:
cmds.rename(nd, object_name)
multObjImport()
|
{
"content_hash": "71e041b2e517a4a2b1b64038e066f4c2",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 162,
"avg_line_length": 55.5,
"alnum_prop": 0.6411411411411412,
"repo_name": "aaronfang/personal_scripts",
"id": "545d302adea13b2bb6bd2c2a0408614f4eaae47c",
"size": "666",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "af_scripts/misc/multObjImport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "319303"
},
{
"name": "Python",
"bytes": "154066"
}
],
"symlink_target": ""
}
|
"""Main Window.
"""
import sys
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from mainwindow import Ui_MainWindow
import addpi_tela
import addtipocred_tela
import addcomite_tela
import addoi_tela
import editlimitescomite_tela
import editlimitesoi_tela
import buscaoi1_tela
import buscapi_tela
import buscacomite_tela
class MainWindow(QtWidgets.QMainWindow):
    """Application main window hosting the form/search dock widgets."""

    # tipo -> (widget factory, whether the dock stays user-closable).
    # Replaces the previous nine-branch if/elif chain in add_widget.
    _WIDGETS = {
        'pi': (addpi_tela.add_pi, True),
        'tipocred': (addtipocred_tela.add_tipocred, True),
        'comite': (addcomite_tela.add_comite, True),
        'oi': (addoi_tela.add_oi, True),
        'limitescomite': (editlimitescomite_tela.edit_limitescomite, False),
        'limitesoi': (editlimitesoi_tela.edit_limitesoi, False),
        'buscaoi': (buscaoi1_tela.busca_oi1, False),
        'buscapi': (buscapi_tela.busca_pi, False),
        'buscacomite': (buscacomite_tela.busca_comite, False),
    }

    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.connect_signals()
        self.cont = 0
        self.title = self.windowTitle()

    def connect_signals(self):
        """Wire each menu action to add_widget with its widget key."""
        bindings = [
            (self.ui.actionProfissional_de_Imprensa, 'pi'),
            (self.ui.actionTipo_Credencial, 'tipocred'),
            (self.ui.actionComite, 'comite'),
            (self.ui.actionOrg_o_de_Imprensa, 'oi'),
            (self.ui.actionLimites_Comite, 'limitescomite'),
            (self.ui.actionLimites_Org_o_de_Imprensa, 'limitesoi'),
            (self.ui.actionOrg_os_de_Imprensa, 'buscaoi'),
            (self.ui.actionProfissionais_de_Imprensa, 'buscapi'),
            (self.ui.actionComit, 'buscacomite'),
        ]
        for action, tipo in bindings:
            # Bind tipo as a default argument so each lambda captures its
            # own value (avoids the late-binding closure pitfall).
            action.triggered.connect(
                lambda checked=False, tipo=tipo: self.add_widget(tipo))

    def add_widget(self, tipo):
        """Show the widget registered for *tipo* in a fresh left dock.

        Raises KeyError for an unknown *tipo* (previously an unknown key
        would silently reuse the stale widget from the last call).
        """
        factory, closable = self._WIDGETS[tipo]
        # Hide dock widgets left over from previous invocations.
        for child in self.children():
            if isinstance(child, QtWidgets.QDockWidget):
                child.hide()
        self.dw = QtWidgets.QDockWidget(self)
        self.dw.setMinimumWidth(400)
        if closable:
            self.dw.setFeatures(QtWidgets.QDockWidget.DockWidgetClosable)
        else:
            self.dw.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
        self.widget = factory(self.dw.widget())
        self.setWindowTitle(self.widget.windowTitle())
        self.dw.setWidget(self.widget)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dw)

    def change_text(self):
        # Counter bumped externally; no visible reader in this file.
        self.cont += 1
def main():
    """Launch the Qt application and block until the event loop exits."""
    application = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(application.exec_())


if __name__ == '__main__':
    main()
|
{
"content_hash": "57e253781e80736759a137d5b10c0d03",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 85,
"avg_line_length": 37.916666666666664,
"alnum_prop": 0.6384615384615384,
"repo_name": "zsinx6/elainethegoddess",
"id": "9e4b6b7ded0f4f9d7d4cd17c73634af302382904",
"size": "3674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tela.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "12834"
},
{
"name": "Python",
"bytes": "77479"
}
],
"symlink_target": ""
}
|
"""
Vis: Plot a map using the Orthographic map projection and filled contours
"""
from fatiando import gridder, utils
from fatiando.vis import mpl
# Generate some data to plot
area = (-40, 0, 10, -50)
shape = (100, 100)
lon, lat = gridder.regular(area, shape)
data = utils.gaussian2d(lon, lat, 10, 20, -20, -20, angle=-45)
# Now get a basemap to plot with some projection
bm = mpl.basemap(area, 'ortho')
# And now plot everything passing the basemap to the plotting functions
mpl.figure()
bm.bluemarble()
mpl.contourf(lon, lat, data, shape, 12, basemap=bm)
mpl.colorbar()
mpl.show()
|
{
"content_hash": "a34394c8e853ee0f863fa3639848fb42",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 27.904761904761905,
"alnum_prop": 0.7201365187713311,
"repo_name": "mtb-za/fatiando",
"id": "813d7a5f82e1056445853ec59bb569386468d62c",
"size": "586",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cookbook/vis_mpl_basemap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "454779"
},
{
"name": "Makefile",
"bytes": "1756"
},
{
"name": "Python",
"bytes": "1024422"
},
{
"name": "Shell",
"bytes": "3825"
}
],
"symlink_target": ""
}
|
''' Models for various kinds of arrow heads that can be added to
Arrow annotations.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.has_props import abstract
from ..core.properties import Float, Include, Override
from ..core.property_mixins import ScalarFillProps, ScalarLineProps
from .annotations import Annotation
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ArrowHead',
'NormalHead',
'OpenHead',
'TeeHead',
'VeeHead',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class ArrowHead(Annotation):
    ''' Base class for arrow heads.

    Not usable directly; concrete subclasses (``OpenHead``, ``NormalHead``,
    ``TeeHead``, ``VeeHead``) add size and styling properties.
    '''
class OpenHead(ArrowHead):
    ''' Render an open-body arrow head.

    Only an outline is drawn; there are no fill properties.
    '''
    size = Float(default=25, help="""
    The size, in pixels, of the arrow head.
    """)
    line_props = Include(ScalarLineProps, use_prefix=False, help="""
    The %s values for the arrow head outline.
    """)
class NormalHead(ArrowHead):
    ''' Render a closed-body arrow head.

    Has both outline and fill properties; the fill defaults to black.
    '''
    size = Float(default=25, help="""
    The size, in pixels, of the arrow head.
    """)
    line_props = Include(ScalarLineProps, use_prefix=False, help="""
    The %s values for the arrow head outline.
    """)
    fill_props = Include(ScalarFillProps, use_prefix=False, help="""
    The %s values for the arrow head interior.
    """)
    # Override the inherited fill default so heads are solid black.
    fill_color = Override(default="black")
class TeeHead(ArrowHead):
    ''' Render a tee-style arrow head.

    Only an outline is drawn; there are no fill properties.
    '''
    size = Float(default=25, help="""
    The size, in pixels, of the arrow head.
    """)
    line_props = Include(ScalarLineProps, use_prefix=False, help="""
    The %s values for the arrow head outline.
    """)
class VeeHead(ArrowHead):
    ''' Render a vee-style arrow head.

    Has both outline and fill properties; the fill defaults to black.
    '''
    size = Float(default=25, help="""
    The size, in pixels, of the arrow head.
    """)
    line_props = Include(ScalarLineProps, use_prefix=False, help="""
    The %s values for the arrow head outline.
    """)
    fill_props = Include(ScalarFillProps, use_prefix=False, help="""
    The %s values for the arrow head interior.
    """)
    # Override the inherited fill default so heads are solid black.
    fill_color = Override(default="black")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
{
"content_hash": "4a2430b9592cca51e51b60e9ae40369e",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 27.462184873949578,
"alnum_prop": 0.42441860465116277,
"repo_name": "ericmjl/bokeh",
"id": "270741a790a2ed04703ef61332c60a8575c6387b",
"size": "3599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/models/arrow_heads.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
}
|
"""
Backwards-compatibility plugin for the Qt reactor.
This provides a Qt reactor plugin named C{qt} which emits a deprecation
warning and a pointer to the separately distributed Qt reactor plugins.
"""
import warnings
from twisted.application.reactors import Reactor, NoSuchReactor
wikiURL = 'http://twistedmatrix.com/trac/wiki/QTReactor'
errorMessage = ('qtreactor is no longer a part of Twisted due to licensing '
'issues. Please see %s for details.' % (wikiURL,))
class QTStub(Reactor):
    """
    Reactor plugin which emits a deprecation warning on the successful
    installation of its reactor, or points to further information if an
    ImportError occurs while attempting to install it.
    """

    def __init__(self):
        super(QTStub, self).__init__(
            'qt', 'qtreactor', 'QT integration reactor')

    def install(self):
        """
        Install the Qt reactor with a deprecation warning, or try to point
        the user to further information if it cannot be installed.
        """
        try:
            super(QTStub, self).install()
        except (ValueError, ImportError):
            # qtreactor is no longer bundled; direct the user to the wiki.
            raise NoSuchReactor(errorMessage)
        # Only reached on success (the except clause always raises), so
        # this fall-through is equivalent to the conventional try/else.
        warnings.warn(
            "Please use -r qt3 to import qtreactor",
            category=DeprecationWarning)


qt = QTStub()
|
{
"content_hash": "59603ebcfe0b0bbf43fd5841bb3cfcc3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 32.16279069767442,
"alnum_prop": 0.6377440347071583,
"repo_name": "timkrentz/SunTracker",
"id": "0e15bcd3d7e1a85578eb6655b817ef27a9e4608f",
"size": "1457",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/plugins/twisted_qtstub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
}
|
XXXXXXXXX XXXXX
XXXXXX
XXXXXX
XXXXX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX XXXXXXX X XXXXX XXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXX
XX XXXXXX XXXX XXX XXXX XXXXX XXXXXXX XX
XXX XX X XXXXXXXXXXXX XXXXXXX X
XXXXXXXXXXXXXXXXXXXXXX X
XXXXXX XXXXXX
XXXXXXX X XXXXX
X
XX XXXXXXX XXX XXX XXXXX XXXXXXX XX
XXXXXXXXX
XXXXXXXX X
XXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXX
XXXXXXXXXXX XXX XXXXX XXXXX XXXXXXXXXXX
XXXXXXXXXXXXXX XXXX XXXXXXXXXXX
X
XXXXXXXXXXXXXXXXXXXXXXXX XXXXX X
XXXXXXXXXXXXXX XXX XXXXX XXXXX XXXXXXXXXXX
X
XXXXXXXXXXXXXXXXXXXXXXXX XX X
XXXXXXXXXXXXXX XXX XXXXX XXXXX XXXXXXXXXXX
X
XXXXXXXXXXXXXXXXXXXXXXXX X
XXXXXXXXXXXXX XXX XXXXX XXXXXX
X
XXXXXXXXXXXXXXXXXXXXXXXX XX X
XXXXXXXXXXX XXX XXXXX XXXXX XXXXXXXXXXX
X
XXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX X
XXX XXXXX X XXXXXXXXXXXXXXXXXXXXXXXX X
XXXXXXXX XXXXXXXX
XXXXXXXX XXXXX
XXXXXXXXXXXXXXX XXXXX
XXXXXXX XXXXXX
XXXXXXXXXXX X X
XXXXXXXXX XXXXXX
XXXXXXXX XXXXXXXX
XXXXXXXX X
X XX
XXXXXX XX XX XXXXX XX
X XX
XXXXXXXXX XXXXXXXXX XXXXXXXXXXX XXXXXXXX XX X
XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX XXXXXX XX X
XXXXXXXXXXXXXX X XXXX
X XX
X XXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XX
X XX
XXXXXXXXX
XXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX XXXXXXX XXXXXXXXXXX XXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXX
XXXX XXXXXXX XXXXXXXXXXX XX XXXX XX XXXXXXX XXXX X XXXXX XXXXXXX XX XX XXXXX XXXXXXX X XXXXXX XXX XXX XXXX XXX XX XXXXXXXX XXXX XXXXXXXXXXXX XX XXXXX XX XXXX
XXXXXXXX XXXXXXXX XX XXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXXXX XXX XXXXXXXXXXX XXXX XXXXX XX XXXX XXXXXXX XX XXX XXX
XXXXX XXXXXX XXX XX XXXXXX XXXX XXX XX XXXX XX XXXX XXXXXXXXXXXXXXXX
XXXXXX
XXXXXX XXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX
XXXX
XXXXXXXXX
XXXXXXXXX XXXXXXXXX
XXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXX
XXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXX
XXXXXXXX
XXXXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXX XXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXX XXXXXXX XXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX XXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXX XXXXXXX XXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXX XXXXXXX XXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXX XXXXXXX XXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX XXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXX XXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXX XXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXX XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXX XXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXX XXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX
XXXXXXXXX
XXXXXXXXXXXXXX
XXXXXXXXXXXXXXX
XXXXXXXXXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXXXX
XXXXXXXXXXX
XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXXXXXX
XXXXXXXX
XXX XXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXX
XXXXXXXXXXXXX
XXXXXXXXXXXXXXX XXXXXXXXXXX
XXXXX
XXXX XXXXXXXXXXXXX
XXXX XXXXXXXXXXX
XXXXXX XXXXXXXXXX XXXXX XXXXX XX XXXX XX XXXXXXXXXX XXX XXXXX XXXXX XX XXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X
XXX XXXXX X XXXXXXXXXXXXXXXXXXXXXXXX X
XXXXXXXX XXXXXXXXXXXXXXXXXX
XXXXXXXX XXXXX
XXXXXXXXXXXXXXX XXXXX
XXXXXXX XXXXXX
XXXXXXXXXXX X X
XXXXXXXXX XXXXXX
XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXX
XXXXXXXX X
X XX
XXXXXX XX XX XXXXX XX
X XX
XXXXXXXXX XXXXXXXXX XXXXXXXXXXX XXXXXXXX XX X
XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX XXXXXX XX X
XXXXXXXXXXXXXX X XXXX
X XX
X XXXXXXXXX
XXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XX
X XXXXXXXXX
XXXXX XXXXXXXX XX XXX XXXXX XXXXX XXX XXXXXXXXX XXXXXXXXXX XXXXXXX XXXXX XXX XXXXXX XXX XXX XX XXXX XXXXXXXXXXXX
XXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXX XXXXXXXXXXXXXX
XXXXXX XXXX XXXXX XXXXX XX XXX XXX XXXX XXXXX XXXXXXXX XXXXXX XX XXX XXXX XXXXXXXX XX XXXXXXXXXXXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XXXXX
XXXXXXX XXXXXXX XXXX X XXXXXX XXX XX XXXXXXXXXX XXX XXXXXX XXXX XX XXXXXX XXXX XXX XXXXXXX XXXXX XXXXXXXX XX XXXXX XX XXXXXXXXX XXXXXXX XXX XXXXXX XXX
XXXXXXXXXX XXX XXXX XX XXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXX XXXX XXX XXXX XXXXX XXXXXXX XX
XXX XX X XXXXXXXXXXXX XXXXXXX X
XXXXXXXXXXXXXXXXXXXXXX X
XXXXXX XXXXXX
XXXXXXX X XXXXX
X
XX XXXXXXX XXX XXX XXXXX XXXXXXX XX
XXXXXXXXX
XXXXXXXX X
XXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXX
XXXXXXXXXXX XXX XXXXX XXXXX XXXXXXXXXXX
XXXXXXXXXXXXXX XXXX XXXXXXXXXXX
X
XXXXXXXXXXXXXXXXXXXXXXXX XXXXX X
XXXXXXXXXXXXXX XXX XXXXX XXXXX XXXXXXXXXXX
X
XXXXXXXXXXXXXXXXXXXXXXXX XX X
XXXXXXXXXXXXXX XXX XXXXX XXXXX XXXXXXXXXXX
X
XXXXXXXXXXXXXXXXXXXXXXXX X
XXXXXXXXXXXXX XXX XXXXX XXXXXX
X
XXXXXXXXXXXXXXXXXXXXXXXX XX X
XXXXXXXXXXX XXX XXXXX XXXXX XXXXXXXXXXX
XXXXXXXX
XXXXXX
XXXXXX XXXXXXXXX XXX XXXXXXX XXXXX XXX XXXXXX XXX XXX XX XXXX XXXXXXX XX XXXXXXX XXX XXXXXXX XX XXX XXXXXXXXXX
XXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXX XXXXXXXXXXXXX
XXXXXXX XXXXX XXXXX XXXX XX XXXXX XXX XXXXXX XXXX XXXX XXX XXXX XXXXXX XX XXXXX XXXXXX XXXX XXXX XXXX XXXXXX XXXXXXXXXXXXX XX XXX XXXXXXXXXX XXXX XX
XXXXXXXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XXXXXX XXXXXX XXXX XX XXXXXXX XXX XXXXXXXXXXX XXXXXXXXXX XXX XXXX XXXXX XX XXXXX XXXXXX XXXXXX XXXX XXXX XXXX XX XXXX XX XXXXXXX XXXXXX XXXXX XXXX XXXXXXXXXXX
XXXXXXXXXX XXXXXXX XXX XX XXXXXXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX XXXXXXXXX XX XXX XXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX
XXXXXX
XXXXXX
XXXXXXXXXX
XXXXXX
XXXXXXXXX
XXXX XXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXX
XXXXXXXXX XXXXXXXXXXXXX
XXXX XXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXX XXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXX XXXXX XXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXXXX
XXXX XXXXXXXXXXXXXXXXX
XXXXXXXXX XXXXX XX XXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXX XXX XXXX XXXXXXXXXXX XXXXX XXX XXX XXXXXXXXXX XXX XXXXXXXXXXXX
XXXXXXXXXXXXX XXXXX XXX X XXXX XXXXX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXX XXX XXXXXXXXXXXX XX XXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX XXX XXXXXXX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX XXXXXX XXXXXXXXXXXXX
XXXXXXXXXX XX XXXXXXXX XXXXX XXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXX
XXXXXX
XXXXXX
XXXXXXXXXX
XXXXXXX
XXXXXXX
|
{
"content_hash": "89d487b0f902d857befbb3b79a0af3b7",
"timestamp": "",
"source": "github",
"line_count": 932,
"max_line_length": 167,
"avg_line_length": 25.488197424892704,
"alnum_prop": 0.7363923384550621,
"repo_name": "dnaextrim/django_adminlte_x",
"id": "3c466a96fdb0a1f53617c265682d58dab43d9a30",
"size": "23755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adminlte/static/plugins/datatables/extensions/FixedColumns/examples/index_column.html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "487538"
},
{
"name": "HTML",
"bytes": "1939871"
},
{
"name": "JavaScript",
"bytes": "2949324"
},
{
"name": "PHP",
"bytes": "3841"
},
{
"name": "Python",
"bytes": "11030"
}
],
"symlink_target": ""
}
|
"""
Created on Tue Nov 29 14:36:12 2016
@author: Dominik
"""
import socket
import sys
import time_manager as tm
class TCPManager():
    """Simple single-threaded TCP command server.

    Listens on (tcp_ip, tcp_port), handles one connection at a time and
    dispatches text commands to the time_manager module (imported as tm).
    """
    def __init__(self, tcp_ip, tcp_port, buffer_size):
        """
        :param tcp_ip: address to bind the listening socket to
        :param tcp_port: port to listen on (0 lets the OS pick one)
        :param buffer_size: maximum number of bytes read per request
        """
        self.TCP_IP = tcp_ip
        self.TCP_PORT = tcp_port
        self.BUFFER_SIZE = buffer_size
        self.create_socket()
        self.has_server_stopped = False
    def stop_server(self):
        """Shut down and close the listening socket."""
        print("Shutting down socket...")
        try:
            self.s.shutdown(socket.SHUT_RDWR)
        except OSError:
            # A listening (never-connected) socket may refuse shutdown() on
            # some platforms; closing it below is sufficient in that case.
            pass
        print("Closing socket...")
        self.s.close()
        self.has_server_stopped = True
    def create_socket(self):
        """Create and bind the TCP listening socket (backlog of 5)."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.bind((self.TCP_IP, self.TCP_PORT))
        self.s.listen(5)
    def start_input_loop(self):
        """Accept connections until "-stop" is received or an error occurs.

        Protocol (UTF-8 text):
            ""       -> reports an error and echoes back
            "-stop"  -> echoes back and shuts the server down
            "-check" -> replies "check" (liveness probe)
            anything else -> split on spaces and passed to handle_data()
        """
        try:
            while not self.has_server_stopped:
                print("Waiting for connection...")
                conn, addr = self.s.accept()
                print("Connection address:", addr)
                # BUGFIX: use the instance's configured buffer size instead
                # of the module-global BUFFER_SIZE (which only exists when
                # the file is run as a script -> NameError otherwise).
                data = conn.recv(self.BUFFER_SIZE)
                data = data.decode("utf-8")
                print("Received data:", data)
                if data == "":
                    print("Error: Empty data received.")
                    conn.send(data.encode())
                    conn.close()
                elif data == "-stop":
                    print("Closing connection...")
                    conn.send(data.encode())
                    conn.close()
                    break
                elif data == "-check":
                    print("Checking.")
                    conn.send("check".encode())
                    conn.close()
                else:
                    self.handle_data(data.split(" "), conn)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # still propagate (the finally clause below runs either way).
            print(sys.exc_info()[0])
        finally:
            self.stop_server()
    def handle_data(self, data, conn):
        """Dispatch a tokenized command to time_manager and reply.

        :param data: list of whitespace-separated command tokens
        :param conn: the accepted client socket (always closed on return)
        """
        if "-a" in data:
            # add seconds to the stored end time
            delta_seconds = int(data[1])
            tm.add_secs_to_end_time(delta_seconds)
            conn.send("check".encode())
        elif "-s" in data:
            # subtract seconds from the stored end time
            delta_seconds = int(data[1])
            tm.add_secs_to_end_time(-delta_seconds)
            conn.send("check".encode())
        elif "-settext" in data:
            # every token after the flag forms the hint text
            text = " ".join(data[1:])
            tm.save_hint_text_to_txt(text)
            conn.send("check".encode())
        elif "-getremainingtime" in data:
            conn.send(tm.get_hint_text_from_txt("remaining_time.txt").encode())
        elif "-gethinttext" in data:
            conn.send(tm.get_hint_text_from_txt("hint_text.txt").encode())
        elif "-getendtime" in data:
            end_time = str(tm.get_time_from_txt("end_time.txt"))
            print(end_time)
            conn.send(end_time.encode())
        conn.close()
if __name__ == "__main__":
    TCP_PORT = 5005
    BUFFER_SIZE = 1024
    # Discover this machine's outbound IP address: connecting a UDP socket
    # sends no packets, but forces the OS to choose the local interface.
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('8.8.8.8', 0))
        TCP_IP = probe.getsockname()[0]
    finally:
        # BUGFIX: the probe socket was never closed (resource leak); the
        # stray "sdfsd"/"asdf" debug prints were removed as well.
        probe.close()
    tcp_manager = TCPManager(TCP_IP, TCP_PORT, BUFFER_SIZE)
    print("Created TCP Socket at %s:%s." % (TCP_IP, TCP_PORT))
    tcp_manager.start_input_loop()
    print("Stopped TCP Server.")
|
{
"content_hash": "936fcb4899fae5a2d09d36165159660e",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 81,
"avg_line_length": 31.541284403669724,
"alnum_prop": 0.4909831297265852,
"repo_name": "zapfdk/broadcast-clock",
"id": "3b366864e8ecb22a174f9ff79d7bc08697422efd",
"size": "3484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tcp_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23633"
}
],
"symlink_target": ""
}
|
""" manage logging configuration
USAGE:
### perform once as the application begins to initialize logging system-wide
from putil.logging import config
config.add_configuration("some/path/logging.yml") # could be normal file
config.add_configuration("or/resource/logging.local.yml") # or resource within egg
# define special fields for GELF records
config.set_logging_fields( {"username":"user", "conversation-id":"session"}, {"system":"alpha"} )
### now throughout codebase, can write log records
from putil.logging import log
log.info("up and running now")
### but can also go back and change configuration later
def oh_please_make_it_stop():
config.set_level("pyon.net.endpoint", logging.ERROR)
config.set_level("pyon.ion.endpoint", logging.ERROR)
"""
from logging import NOTSET
import logging.config
import errno
import yaml
import collections
from pkg_resources import resource_string
import putil.logging
import logger
import sys
import traceback
class _LoggingConfiguration(object):
def __init__(self):
self.current_config = {}
self.debug = False
#### can't use normal logging to debug the logging! flimsy method to use STDOUT...
def set_debug(self, value=True):
""" log all calls to public methods:
True: enabled
False: disabled
"verbose": also log the stack to see where the call is coming from
"""
self.debug = value
def _debug(self, message):
""" conditionally log the message and call stack """
if self.debug:
self._log(message)
if self.debug=='verbose':
try:
self._log('Stack trace:\n' + '\t\n'.join(['%s:%d %s'%(f,l,c) for f,l,m,c in traceback.extract_stack()]))
except:
self._log('Failed to get stack information')
def _log(self, message):
""" print a message to STDOUT """
print >> sys.stderr, message
def add_configuration(self, configuration, initial=False):
self._debug('DEBUG LOGGING: add_configuration: %r' % configuration)
if not configuration:
return # no config = no-op
if isinstance(configuration, dict):
self._add_dictionary_configuration(configuration, initial)
elif isinstance(configuration, str):
# is a configuration file or resource -- try both
contents = self._read_file(configuration) or self._read_resource(configuration)
if not contents:
raise IOError('failed to locate logging configuration: ' + configuration)
parsed = yaml.load(contents)
self.add_configuration(parsed, initial)
elif isinstance(configuration, list) or isinstance(configuration, tuple):
for item in configuration:
self.add_configuration(item, initial)
else:
raise Exception("ERROR: unable to configure logging from a %s: %s" % (configuration.__class__.__name__, repr(configuration)))
def _add_dictionary_configuration(self, configuration, initial):
if not initial:
self._warn_about_supplemental_handlers(configuration)
if 'context' in configuration:
if 'context' in self.current_config:
self._log("WARNING: logging context filters are additive")
self._handle_context_entries(configuration)
if 'disable_existing_loggers' not in configuration:
self.current_config['disable_existing_loggers'] = False
self._add_dictionary(self.current_config, configuration)
logging.config.dictConfig(self.current_config)
self._debug('DEBUG LOGGING: configuration: %r' % self.current_config)
def _warn_about_supplemental_handlers(self, configuration):
if not self.debug:
return
do_warn = 'root' in configuration and 'handlers' in configuration['root']
if not do_warn and 'loggers' in configuration:
logger_config = configuration['loggers']
for key in logger_config:
if 'handlers' in logger_config:
do_warn = True
break
if do_warn:
self._log('WARNING: supplemental file contains handlers (usually supplemental logging config files should just contain level overrides)')
def _handle_context_entries(self, configuration):
attribute_name = configuration['context']['attribute'] if 'attribute' in configuration['context'] else None
static = configuration['context']['static'] if 'static' in configuration['context'] else {}
dynamic = configuration['context']['thread-local'] if 'thread-local' in configuration['context'] else {}
self.set_logging_fields(dynamic, static, attribute_name)
def replace_configuration(self, configuration):
self.current_config.clear()
self.add_configuration(configuration, initial=True)
def set_level(self, scope, level, recursive=False):
self._debug('DEBUG LOGGING: set_level: %s: %s' % (scope,level))
if scope:
changes = { scope: {'level':level }}
if recursive:
first_part = scope + '.'
if 'loggers' in self.current_config:
for name in self.current_config['loggers'].keys():
if name.startswith(first_part):
changes[name] = NOTSET
config = { 'loggers': changes }
self.add_configuration(config)
else:
config = { 'root': self.current_config['root'] }
config['root']['level'] = level
if recursive:
config['loggers'] = {}
if 'loggers' in self.current_config:
for name in self.current_config['loggers'].keys():
config['loggers'][name] = NOTSET
self.add_configuration(config)
def set_all_levels(self, level):
self._debug('DEBUG LOGGING: set_all_levels: %s' % level)
changes = {'root':{'level':level}}
if 'loggers' in self.current_config:
for scope in self.current_config['loggers'].keys():
changes[scope] = {'level':'NOTSET'}
self.add_configuration(changes)
def get_configuration(self):
return self.current_config
def _read_file(self, filename):
try:
with open(filename, 'r') as infile:
return infile.read()
except IOError, e:
if e.errno != errno.ENOENT:
self._log('ERROR: error reading logging configuration file %r: %s' % (filename, e))
return None
def _read_resource(self, resource_name):
try:
return resource_string('', resource_name)
except IOError, e:
if e.errno != errno.ENOENT:
self._log('ERROR: error reading logging configuration file %r: %s' % (resource_name, e))
return None
def _add_dictionary(self, current, added):
""" from pyon.core.common, except allow recursion (logging config isn't too deep) """
if added:
for key in added:
if key in current and isinstance(current[key], collections.Mapping):
self._add_dictionary(current[key], added[key])
else:
current[key] = added[key]
def add_filter(self, filter):
""" add a filter to all new loggers created """
putil.logging.log._add_filter(filter)
def set_logging_fields(self, thread_local_fields, constant_fields, attribute_name):
"""WARNING: calling multiple times is currently additive -- will not replace fields"""
filter = logger.AddFields(attribute_name, thread_local_fields, constant_fields)
self.add_filter(filter)
|
{
"content_hash": "97e84fd79f4c2ad84717d1b292f210ab",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 149,
"avg_line_length": 42.40860215053763,
"alnum_prop": 0.6102941176470589,
"repo_name": "scionrep/scioncc",
"id": "a202baf3722686de25a46b057a0317dfc71b9e8c",
"size": "7888",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/putil/logging/configure.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "689"
},
{
"name": "JavaScript",
"bytes": "11408"
},
{
"name": "PLpgSQL",
"bytes": "10932"
},
{
"name": "Python",
"bytes": "2699420"
},
{
"name": "Shell",
"bytes": "12708"
}
],
"symlink_target": ""
}
|
"""
Utility vote handler.
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 3.0.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import functools
import itertools
import logging
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Version
__version_info__ = (1, 0, 1)
__version__ = ".".join(map(str, __version_info__))
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class CoupdEtat(Exception):
    """
    Raised when a candidate forces the result of the election
    """
    def __init__(self, claimant):
        """
        Sets up members
        :param claimant: The candidate that claims to be elected
        """
        # The claimant is stored as-is, even when it is None
        self.claimant = claimant
        super(CoupdEtat, self).__init__(str(self))
    def __str__(self):
        """
        String representation
        """
        return "Coup d'État by {0:s}.".format(self.claimant)
class NextTurn(Exception):
    """
    Raised when the election is undecided and another round is required
    """
    def __init__(self, candidates=None):
        """
        Sets up members
        :param candidates: Candidates for the next vote
        """
        # Normalize None (and any falsy value) to an empty list so the
        # candidates attribute is always safely iterable
        self.candidates = candidates if candidates else []
        super(NextTurn, self).__init__(str(self))
    def __str__(self):
        """
        String representation
        """
        listing = ', '.join(str(candidate) for candidate in self.candidates)
        return 'Next turn with {0}.'.format(listing)
# ------------------------------------------------------------------------------
@functools.total_ordering
class _Vote(object):
"""
Associates a candidates to votes
"""
def __init__(self, candidate):
"""
Sets up members
:param candidate: The candidate associated to the vote
"""
self.__candidate = candidate
self.__votes = 0
def __hash__(self):
"""
Vote hash is the one if the candidate
"""
return hash(self.__candidate)
def __eq__(self, other):
"""
Equality based on the candidate
"""
return self.__candidate == other.__candidate
def __str__(self):
"""
String representation
"""
return '{0:s} ({1:d} votes)'.format(self.__candidate, self.__votes)
__repr__ = __str__
def __lt__(self, other):
"""
Lesser than other if less votes or "lesser" string representation
"""
if self.__votes == other.__votes:
return str(self.__candidate) < str(other.__candidate)
return self.__votes < other.__votes
@property
def candidate(self):
"""
The candidate associated to the vote
"""
return self.__candidate
@property
def votes(self):
"""
The number of votes for this candidate
"""
return self.__votes
def reset(self):
"""
Resets the number of votes
"""
self.__votes = 0
def vote(self):
"""
Vote for this candidate
"""
self.__votes += 1
# ------------------------------------------------------------------------------
class MatchVote(object):
    """
    Election of the candidate matching the given element
    """
    def __init__(self, electors):
        """
        Sets up members
        :param electors: Electors for this vote
        """
        # Frozen so the voter count stays stable for the whole election
        self._electors = frozenset(electors)
    def _compute_majority(self, votes, default=None):
        """
        Returns the candidate with nb_voters+1 votes, or the set of candidates
        for the next turn.
        :param votes: A set of _Vote beans
        :param default: Result if no votes given
        :return: The candidate elected by majority
        :raise NextTurn: A new turn is necessary
        """
        nb_voters = len(self._electors)
        # Absolute majority: more than half the voters.
        # BUGFIX: floor division keeps the threshold an integer under
        # Python 3 (true division yields a float, shifting the threshold
        # by 0.5 for odd voter counts); result is unchanged under Python 2
        majority = (nb_voters // 2) + 1
        # Sort by number of votes
        results = sorted(votes, reverse=True)
        if results[0].votes >= majority:
            # Elected by majority
            return results[0].candidate
        # Threshold to go on next turn: > 10% of voters (same // fix)
        threshold = (nb_voters // 10) + 1
        def predicate(result):
            """
            Predicate to filter candidates according to their results
            """
            return result.votes >= threshold
        # Call for next turn
        candidates = {result.candidate
                      for result in itertools.takewhile(predicate, results)}
        raise NextTurn(candidates)
    def _compute_results(self, votes, default=None):
        """
        Computes the results of an election
        :param votes: A set of _Vote beans
        :param default: Result if no votes given
        :return: The elected candidate, or a new neutral one
        :raise NextTurn: No candidate with majority
        """
        if not votes:
            # No one elected: return the fallback candidate
            return default
        elif len(votes) == 1:
            # Only 1 of the candidates has been retained
            _logger.critical("Only 1 of the candidates has been retained")
            return next(iter(votes)).candidate
        else:
            # Compute the majority winner or raise a NextTurn exception
            # (default forwarded for signature consistency)
            return self._compute_majority(votes, default)
    def vote(self, subject, initial_candidates, default=None, max_turns=3):
        """
        Votes for one of the given candidates or creates a new one
        :param subject: Subject of election
        :param initial_candidates: Initial candidates of the election
        :param default: Candidate to return if no one has been elected
        :param max_turns: Maximum number of turns
        :return: The elected candidate
        """
        # Candidate -> Votes
        candidates = tuple(_Vote(candidate)
                           for candidate in initial_candidates)
        elected = None
        try:
            for _ in range(max_turns):
                try:
                    for elector in self._electors:
                        # Let each elector vote
                        elector.vote(subject, candidates)
                    # Get the results
                    elected = self._compute_results(candidates, default)
                    break
                except NextTurn as ex:
                    # Still not decided: restart with the retained candidates
                    candidates = tuple(_Vote(candidate)
                                       for candidate in ex.candidates)
        except CoupdEtat as ex:
            # Well, that escalated quickly...
            _logger.critical("Coup d'État for %s", ex.claimant)
            elected = ex.claimant
        if elected is None:
            # Election failed
            return default
        # Return the elected isolate
        return elected
|
{
"content_hash": "11637b1cc63e17a7acac73a23634d52f",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 80,
"avg_line_length": 28.29259259259259,
"alnum_prop": 0.5355413012174368,
"repo_name": "ahmadshahwan/cohorte-runtime",
"id": "8b73396963e05f359edb2f4a62f744b7c52226d9",
"size": "7697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/cohorte/utils/vote.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "15319"
},
{
"name": "HTML",
"bytes": "12092"
},
{
"name": "Java",
"bytes": "857542"
},
{
"name": "JavaScript",
"bytes": "43722"
},
{
"name": "Python",
"bytes": "3678527"
},
{
"name": "Shell",
"bytes": "800"
}
],
"symlink_target": ""
}
|
import abc
import collections
import contextlib
import dataclasses
import logging
import os
import pathlib
import re
import tempfile
from types import MappingProxyType
from typing import (
Any,
Callable,
Counter,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from uqbar.objects import new
import supriya.nonrealtime # noqa
import supriya.realtime # noqa
from supriya import commands, nonrealtime, realtime
from supriya.assets.synthdefs.default import default
from supriya.enums import AddAction, CalculationRate, ParameterRate
from supriya.nonrealtime import Session
from supriya.realtime import AsyncServer, BaseServer, Server
from supriya.scsynth import Options
from supriya.synthdefs import SynthDef
from supriya.typing import AddActionLike, HeaderFormatLike, SampleFormatLike
logger = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class Proxy:
    """Base class for immutable handles onto resources managed by a Provider."""
    provider: "Provider"
@dataclasses.dataclass(frozen=True)
class BufferProxy:
    """
    A reference to a buffer managed by a Provider.

    The identifier is either a nonrealtime ``Buffer`` or a realtime buffer id;
    ``int(proxy)`` resolves it to the numeric buffer id in both cases.
    """
    provider: "Provider"
    identifier: Union["supriya.nonrealtime.Buffer", int]
    channel_count: Optional[int] = None
    frame_count: Optional[int] = None
    file_path: Optional[os.PathLike] = None
    starting_frame: Optional[int] = None
    def __float__(self) -> float:
        return float(int(self))
    def __int__(self) -> int:
        return int(self.identifier)
    def close(self) -> None:
        # No-op placeholder; presumably kept for API parity with other
        # buffer interfaces -- TODO confirm.
        pass
    def free(self) -> None:
        """Release this buffer back to its provider."""
        self.provider.free_buffer(self)
    def normalize(self, new_maximum: float = 1.0) -> None:
        # No-op placeholder (see ``close``).
        pass
    def read(self, file_path: os.PathLike, leave_open: bool = False) -> None:
        # No-op placeholder (see ``close``).
        pass
    def write(
        self,
        file_path: os.PathLike,
        frame_count: Optional[int] = None,
        header_format: HeaderFormatLike = "aiff",
        leave_open: bool = False,
        sample_format: SampleFormatLike = "int24",
        starting_frame: Optional[int] = None,
    ) -> None:
        # No-op placeholder (see ``close``).
        pass
    def as_allocate_request(
        self,
    ) -> Union[
        commands.BufferAllocateRequest,
        commands.BufferAllocateReadRequest,
        commands.BufferAllocateReadChannelRequest,
    ]:
        """
        Build the server request that allocates this buffer.

        Request type depends on which fields are set: no file path -> plain
        allocate; file path without channel count -> allocate-read; file path
        with channel count -> allocate-read restricted to the first
        ``channel_count`` channels.
        """
        kwargs: Dict[str, Any] = dict(buffer_id=int(self), frame_count=self.frame_count)
        if self.file_path is None:
            return commands.BufferAllocateRequest(
                **kwargs, channel_count=self.channel_count
            )
        kwargs["file_path"] = self.file_path
        kwargs["starting_frame"] = self.starting_frame
        if self.channel_count is None:
            return commands.BufferAllocateReadRequest(**kwargs)
        return commands.BufferAllocateReadChannelRequest(
            **kwargs, channel_indices=list(range(self.channel_count))
        )
    def as_free_request(self) -> commands.BufferFreeRequest:
        """Build the server request that frees this buffer."""
        return commands.BufferFreeRequest(buffer_id=int(self))
@dataclasses.dataclass(frozen=True)
class OscCallbackProxy(Proxy):
    """A reference to an OSC callback registered with a Provider."""
    provider: "Provider"
    identifier: Any
    def unregister(self) -> None:
        """Remove this callback from the provider's OSC protocol."""
        self.provider.unregister_osc_callback(self)
@dataclasses.dataclass(frozen=True)
class BusProxy(Proxy):
    """A reference to a single audio- or control-rate bus."""
    calculation_rate: CalculationRate
    provider: "Provider"
    identifier: Union["supriya.nonrealtime.Bus", int]
    def __float__(self) -> float:
        return float(int(self))
    def __int__(self) -> int:
        return int(self.identifier)
    def free(self) -> None:
        """Release this bus back to its provider."""
        self.provider.free_bus(self)
    def set_(self, value) -> None:
        """Write ``value`` to this bus via the provider."""
        self.provider.set_bus(self, value)
    @property
    def map_symbol(self) -> str:
        """The scsynth map symbol for this bus: ``aN`` (audio) or ``cN`` (control)."""
        prefix = "a" if self.calculation_rate == CalculationRate.AUDIO else "c"
        return f"{prefix}{int(self)}"
@dataclasses.dataclass(frozen=True)
class BusGroupProxy(Proxy):
    """
    A reference to a contiguous group of buses sharing one calculation rate.

    ``buses`` is derived in ``__post_init__``: one ``BusProxy`` per channel.
    """
    calculation_rate: CalculationRate
    channel_count: int
    identifier: Union["supriya.nonrealtime.BusGroup", int]
    provider: "Provider"
    buses: Sequence["BusProxy"] = dataclasses.field(init=False)
    def __post_init__(self):
        # An integer identifier is the id of the first bus of a contiguous
        # range; otherwise the identifier is sliced to enumerate its buses.
        if isinstance(self.identifier, int):
            bus_identifiers = range(
                self.identifier, self.identifier + self.channel_count
            )
        else:
            bus_identifiers = self.identifier[:]
        # object.__setattr__ is required because the dataclass is frozen.
        object.__setattr__(
            self,
            "buses",
            tuple(
                BusProxy(
                    calculation_rate=self.calculation_rate,
                    provider=self.provider,
                    identifier=bus_identifier,
                )
                for bus_identifier in bus_identifiers
            ),
        )
    def __float__(self) -> float:
        return float(int(self))
    def __getitem__(self, item) -> BusProxy:
        return self.buses[item]
    def __int__(self) -> int:
        return int(self.identifier)
    def __len__(self) -> int:
        return self.channel_count
    def free(self) -> None:
        """Release the whole bus group back to its provider."""
        self.provider.free_bus_group(self)
    @property
    def map_symbol(self) -> str:
        # The first bus's map symbol stands for the whole group.
        return self[0].map_symbol
@dataclasses.dataclass(frozen=True)
class NodeProxy(Proxy):
    """
    A reference to a server node (group or synth) managed by a Provider.
    """
    identifier: Union["supriya.nonrealtime.Node", int]
    provider: "Provider"
    def __float__(self) -> float:
        return float(int(self))
    def __int__(self) -> int:
        return int(self.identifier)
    def __setitem__(self, key, value) -> None:
        """Set a single control on this node via the provider."""
        self.provider.set_node(self, **{key: value})
    def add_group(
        self,
        *,
        add_action: AddActionLike = AddAction.ADD_TO_HEAD,
        name: Optional[str] = None,
        parallel: bool = False,
    ) -> "GroupProxy":
        """
        Add a new group relative to this node.

        Fix: ``name`` was previously accepted but silently dropped; it is now
        forwarded so the provider can record it in its annotation map.
        """
        return self.provider.add_group(
            add_action=add_action, name=name, target_node=self, parallel=parallel
        )
    def add_synth(
        self,
        *,
        synthdef: Optional[SynthDef] = None,
        add_action: AddActionLike = AddAction.ADD_TO_HEAD,
        name: Optional[str] = None,
        **settings,
    ) -> "SynthProxy":
        """
        Add a new synth relative to this node.

        Fix: ``name`` was previously accepted but silently dropped; it is now
        forwarded so the provider can record it in its annotation map.
        """
        return self.provider.add_synth(
            add_action=add_action,
            name=name,
            synthdef=synthdef,
            target_node=self,
            **settings,
        )
    def as_move_request(
        self, add_action: AddActionLike, target_node: "NodeProxy"
    ) -> commands.MoveRequest:
        """Build the server request that moves this node relative to ``target_node``."""
        request_classes: Dict[int, Type[commands.MoveRequest]] = {
            AddAction.ADD_TO_HEAD: commands.GroupHeadRequest,
            AddAction.ADD_TO_TAIL: commands.GroupTailRequest,
            AddAction.ADD_BEFORE: commands.NodeBeforeRequest,
            AddAction.ADD_AFTER: commands.NodeAfterRequest,
        }
        request_class: Type[commands.MoveRequest] = request_classes[
            AddAction.from_expr(add_action)
        ]
        return request_class(
            node_id_pairs=[request_class.NodeIdPair(int(self), int(target_node))]
        )
    def as_set_request(self, **settings) -> commands.NodeSetRequest:
        """Build a node-set request, rewriting bus proxies as map symbols."""
        coerced_settings = {}
        for key, value in settings.items():
            if isinstance(value, (BusProxy, BusGroupProxy)):
                # Audio-rate buses map with an "a" prefix, control-rate with "c".
                if value.calculation_rate == CalculationRate.AUDIO:
                    value = f"a{value.identifier}"
                else:
                    value = f"c{value.identifier}"
            coerced_settings[key] = value
        return commands.NodeSetRequest(node_id=int(self), **coerced_settings)
    def dispose(self) -> None:
        """Detach this node without sending free requests (provider-defined)."""
        self.provider.dispose(self)
    def free(self) -> None:
        """Free this node via the provider."""
        self.provider.free_node(self)
    def move(self, add_action: AddActionLike, target_node: "NodeProxy") -> None:
        """Move this node relative to ``target_node`` via the provider."""
        self.provider.move_node(self, add_action, target_node)
@dataclasses.dataclass(frozen=True)
class GroupProxy(NodeProxy):
    """A reference to a (possibly parallel) group node."""
    identifier: Union["supriya.nonrealtime.Node", int]
    provider: "Provider"
    parallel: bool = False
    def as_add_request(
        self, add_action, target_node
    ) -> Union[commands.GroupNewRequest, commands.ParallelGroupNewRequest]:
        """Build the server request that creates this group."""
        request_method = (
            commands.ParallelGroupNewRequest if self.parallel else commands.GroupNewRequest
        )
        item = request_method.Item(
            node_id=int(self.identifier),
            add_action=add_action,
            target_node_id=int(target_node),
        )
        return request_method(items=[item])
    def as_free_request(self, force=False) -> commands.NodeFreeRequest:
        """Build the server request that frees this group (``force`` is ignored)."""
        return commands.NodeFreeRequest(node_ids=[int(self)])
@dataclasses.dataclass(frozen=True)
class SynthProxy(NodeProxy):
    """A reference to a synth node, plus the settings it was created with."""
    identifier: Union["supriya.nonrealtime.Node", int]
    provider: "Provider"
    synthdef: SynthDef
    settings: Dict[str, Union[float, BusGroupProxy]]
    def as_add_request(self, add_action, target_node) -> commands.SynthNewRequest:
        """
        Build the server request that creates this synth.

        Settings equal to the synthdef's default are omitted; bus proxies map
        to map symbols except for scalar-rate and ``in_``/``out`` parameters,
        which are cast to float.
        """
        # TODO: Handle map symbols
        # If arg is a bus proxy, and synth param is scalar, cast to int
        # Elif arg is a bus proxy, and synth param not scalar, map
        # Else cast to float
        synthdef = self.synthdef or default
        synthdef_kwargs: Dict[str, Union[float, str]] = {}
        for _, parameter in synthdef.indexed_parameters:
            if parameter.name not in self.settings:
                continue
            value = self.settings[parameter.name]
            if value == parameter.value:
                # Skip values that match the synthdef default.
                continue
            if parameter.parameter_rate == ParameterRate.SCALAR:
                synthdef_kwargs[parameter.name] = float(value)
            elif parameter.name in ("in_", "out"):
                synthdef_kwargs[parameter.name] = float(value)
            elif isinstance(value, (BusProxy, BusGroupProxy)):
                synthdef_kwargs[parameter.name] = value.map_symbol
            else:
                synthdef_kwargs[parameter.name] = float(value)
        return commands.SynthNewRequest(
            node_id=int(self.identifier),
            add_action=add_action,
            target_node_id=int(target_node),
            synthdef=synthdef,
            **synthdef_kwargs,
        )
    def as_free_request(
        self, force=False
    ) -> Union[commands.NodeFreeRequest, commands.NodeSetRequest]:
        """
        Build the request that frees this synth.

        Synths with a ``gate`` parameter are released gracefully by setting
        ``gate=0`` unless ``force`` is true.
        """
        if force or "gate" not in self.synthdef.parameters:
            return commands.NodeFreeRequest(node_ids=[int(self)])
        return commands.NodeSetRequest(node_id=int(self), gate=0)
@dataclasses.dataclass(frozen=True)
class ProviderMoment:
    """
    A transaction-like batch of changes applied at a single logical time.

    Used as a (sync or async) context manager: mutations requested through
    the owning ``Provider`` while the moment is open accumulate in the lists
    below and are flushed as OSC request bundles on exit.
    """
    provider: "Provider"
    # Logical timestamp in seconds; may be None (send "now").
    seconds: float
    bus_settings: List[Tuple[BusProxy, float]] = dataclasses.field(default_factory=list)
    buffer_additions: List[BufferProxy] = dataclasses.field(default_factory=list)
    buffer_removals: List[BufferProxy] = dataclasses.field(default_factory=list)
    node_reorderings: List[Tuple[NodeProxy, AddAction, NodeProxy]] = dataclasses.field(
        default_factory=list
    )
    node_additions: List[Tuple[NodeProxy, AddAction, NodeProxy]] = dataclasses.field(
        default_factory=list
    )
    node_removals: List[NodeProxy] = dataclasses.field(default_factory=list)
    node_settings: List[
        Tuple[NodeProxy, Dict[str, Union[float, BusGroupProxy]]]
    ] = dataclasses.field(default_factory=list)
    wait: bool = dataclasses.field(default=False)
    # Holds the nonrealtime session's ``at()`` context while the moment is open.
    exit_stack: contextlib.ExitStack = dataclasses.field(
        init=False, default_factory=contextlib.ExitStack, compare=False
    )
    async def __aenter__(self):
        # Async entry is only valid against an AsyncServer (or no server).
        if self.provider.server and not isinstance(self.provider.server, AsyncServer):
            raise RuntimeError(repr(self.provider.server))
        return self._enter()
    async def __aexit__(self, *args):
        results = self._exit()
        if not results:
            return
        timestamp, request_bundle, synthdefs = results
        server = self.provider.server
        # The underlying asyncio UDP transport will silently drop oversize packets
        if len(request_bundle.to_datagram()) <= 8192:
            if self.wait:
                # If waiting, the original ProviderMoment timestamp can be ignored
                await request_bundle.communicate_async(server=server, sync=True)
            else:
                server.send(request_bundle.to_osc())
        else:
            # If over the UDP packet limit, partition the message
            requests = request_bundle.contents
            # Always wait for SynthDefs to load.
            if synthdefs:
                synthdef_request = requests[0]
                requests = synthdef_request.callback.contents or []
                synthdef_request = new(synthdef_request, callback=None)
                await synthdef_request.communicate_async(sync=True, server=server)
            if self.wait:
                # If waiting, the original ProviderMoment timestamp can be ignored
                for bundle in commands.RequestBundle.partition(requests):
                    await bundle.communicate_async(server=server, sync=True)
            else:
                for bundle in commands.RequestBundle.partition(
                    requests, timestamp=timestamp
                ):
                    server.send(bundle.to_osc())
    def __enter__(self):
        # In nonrealtime, open the session's own moment at the same time.
        if self.provider.session is not None:
            self.exit_stack.enter_context(self.provider.session.at(self.seconds or 0))
        # Sync entry is only valid against a (sync) Server, or no server.
        if self.provider.server and not isinstance(self.provider.server, Server):
            raise RuntimeError(repr(self.provider.server))
        return self._enter()
    def __exit__(self, *args):
        results = self._exit()
        if not results:
            return
        timestamp, request_bundle, synthdefs = results
        try:
            self.provider.server.send(request_bundle.to_osc())
        except OSError:
            # Oversize datagram: send the synthdef receive first, then the
            # remaining requests partitioned into UDP-sized bundles.
            requests = request_bundle.contents
            if synthdefs:
                synthdef_request = requests[0]
                requests = synthdef_request.callback.contents or []
                synthdef_request = new(synthdef_request, callback=None)
                synthdef_request.communicate(sync=True, server=self.provider.server)
            for bundle in commands.RequestBundle.partition(
                requests, timestamp=timestamp
            ):
                self.provider.server.send(bundle.to_osc())
    def _enter(self):
        # Reference-count nested moments at the same timestamp.
        self.provider._moments.append(self)
        self.provider._counter[self.seconds] += 1
        return self
    def _exit(self):
        """
        Pop this moment and, if it is the outermost one at its timestamp,
        build and return ``(timestamp, request_bundle, synthdefs)``.

        Returns None when nothing needs to be sent (nested moment, no server,
        or no accumulated requests).
        """
        self.exit_stack.close()
        self.provider._moments.pop()
        self.provider._counter[self.seconds] -= 1
        if not self.provider._counter[self.seconds]:
            self.provider._counter.pop(self.seconds)
        if not self.provider.server:
            return
        elif self.provider._counter[self.seconds]:
            # An enclosing moment at this timestamp will flush later.
            return
        requests = []
        synthdefs = set()
        new_nodes = set()
        for buffer_proxy in self.buffer_additions:
            requests.append(buffer_proxy.as_allocate_request())
        for node_proxy, add_action, target_node in self.node_additions:
            request = node_proxy.as_add_request(add_action, target_node)
            if isinstance(request, commands.SynthNewRequest):
                # Only ship synthdefs the server does not already know.
                if request.synthdef not in self.provider.server:
                    synthdefs.add(request.synthdef)
            requests.append(request)
            new_nodes.add(node_proxy.identifier)
        for node_proxy, add_action, target_node in self.node_reorderings:
            requests.append(node_proxy.as_move_request(add_action, target_node))
        for node_proxy, settings in self.node_settings:
            requests.append(node_proxy.as_set_request(**settings))
        for node_proxy in self.node_removals:
            # Nodes added within this same moment are hard-freed (no release).
            requests.append(
                node_proxy.as_free_request(force=node_proxy.identifier in new_nodes)
            )
        for buffer_proxy in self.buffer_removals:
            requests.append(buffer_proxy.as_free_request())
        if self.bus_settings:
            # Last write per bus wins; send in ascending bus-index order.
            sorted_pairs = sorted(
                dict(
                    (int(bus_proxy.identifier), value)
                    for bus_proxy, value in self.bus_settings
                ).items()
            )
            request = commands.ControlBusSetRequest(index_value_pairs=sorted_pairs)
            requests.append(request)
        if not requests:
            return
        timestamp = self.seconds
        if timestamp is not None:
            timestamp += self.provider._latency
        if synthdefs:
            # Wrap all requests in a synthdef-receive callback so they run
            # only after the synthdefs have loaded.
            request_bundle = commands.RequestBundle(
                timestamp=timestamp,
                contents=[
                    commands.SynthDefReceiveRequest(
                        synthdefs=sorted(synthdefs, key=lambda x: x.actual_name),
                        callback=commands.RequestBundle(contents=requests),
                    )
                ],
            )
            # check bundle size, write synthdefs to disk and do /d_load
            if len(request_bundle.to_datagram(with_placeholders=True)) > 8192:
                directory_path = pathlib.Path(tempfile.mkdtemp())
                for synthdef in synthdefs:
                    name = synthdef.anonymous_name
                    if synthdef.name:
                        name += "-" + re.sub(r"[^\w]", "-", synthdef.name)
                    file_name = "{}.scsyndef".format(name)
                    synthdef_path = directory_path / file_name
                    synthdef_path.write_bytes(synthdef.compile())
                request_bundle = commands.RequestBundle(
                    timestamp=timestamp,
                    contents=[
                        supriya.commands.SynthDefLoadDirectoryRequest(
                            directory_path=directory_path,
                            callback=commands.RequestBundle(contents=requests),
                        )
                    ],
                )
        else:
            request_bundle = commands.RequestBundle(
                timestamp=timestamp, contents=requests
            )
        for synthdef in synthdefs:
            synthdef._register_with_local_server(server=self.provider.server)
        return timestamp, request_bundle, synthdefs
class Provider(metaclass=abc.ABCMeta):
    """
    Provides limited realtime/non-realtime compatibility layer.

    Concrete subclasses translate proxy operations into either nonrealtime
    session commands or realtime server requests. All mutations must happen
    inside a moment opened via :py:meth:`at`.
    """
    ### INITIALIZER ###
    def __init__(self, latency=0.1) -> None:
        # Stack of currently-open moments (innermost last).
        self._moments: List[ProviderMoment] = []
        # Reference count of open moments per timestamp.
        self._counter: Counter[float] = collections.Counter()
        self._latency: float = latency
        # Optional human-readable names per node identifier.
        self._annotation_map: Dict[Union["supriya.nonrealtime.Node", int], str] = {}
    ### PUBLIC METHODS ###
    @abc.abstractmethod
    def add_buffer(
        self,
        *,
        channel_count: Optional[int] = None,
        file_path: Optional[os.PathLike] = None,
        frame_count: Optional[int] = None,
        starting_frame: Optional[int] = None,
    ) -> BufferProxy:
        raise NotImplementedError
    @abc.abstractmethod
    def add_bus(self, calculation_rate=CalculationRate.CONTROL) -> BusProxy:
        raise NotImplementedError
    @abc.abstractmethod
    def add_bus_group(
        self, channel_count=1, calculation_rate=CalculationRate.CONTROL
    ) -> BusGroupProxy:
        raise NotImplementedError
    @abc.abstractmethod
    def add_group(
        self,
        *,
        target_node=None,
        add_action=AddAction.ADD_TO_HEAD,
        name: Optional[str] = None,
        parallel: bool = False,
    ) -> GroupProxy:
        raise NotImplementedError
    @abc.abstractmethod
    def add_synth(
        self,
        *,
        synthdef: Optional[SynthDef] = None,
        target_node=None,
        add_action=AddAction.ADD_TO_HEAD,
        name: Optional[str] = None,
        **settings,
    ) -> SynthProxy:
        raise NotImplementedError
    @abc.abstractmethod
    def dispose(self, node_proxy: NodeProxy) -> None:
        raise NotImplementedError
    @abc.abstractmethod
    def free_buffer(self, buffer_proxy) -> None:
        raise NotImplementedError
    @abc.abstractmethod
    def free_bus(self, bus_proxy: BusProxy) -> None:
        raise NotImplementedError
    @abc.abstractmethod
    def free_bus_group(self, bus_group_proxy: BusGroupProxy) -> None:
        raise NotImplementedError
    @abc.abstractmethod
    def free_node(self, node_proxy: NodeProxy) -> None:
        raise NotImplementedError
    @abc.abstractmethod
    def move_node(
        self, node_proxy: NodeProxy, add_action: AddActionLike, target_node: NodeProxy
    ) -> None:
        raise NotImplementedError
    @abc.abstractmethod
    def set_bus(self, bus_proxy: BusProxy, value: float) -> None:
        raise NotImplementedError
    @abc.abstractmethod
    def set_node(self, node_proxy: NodeProxy, **settings) -> None:
        raise NotImplementedError
    def at(self, seconds=None, wait=False) -> ProviderMoment:
        """
        Open (or re-use) a moment at ``seconds``.

        Re-uses the innermost open moment when the timestamp matches, so
        nested ``with provider.at(t)`` blocks share a single flush.
        """
        # NOTE(review): ``wait`` is ignored when an existing moment is
        # re-used -- confirm this is intended.
        if self._moments and self._moments[-1].seconds == seconds:
            provider_moment = self._moments[-1]
        else:
            provider_moment = ProviderMoment(provider=self, seconds=seconds, wait=wait)
        return provider_moment
    @classmethod
    def from_context(cls, context, latency=0.1) -> "Provider":
        """Build the appropriate provider for a Session or BaseServer context."""
        if isinstance(context, Session):
            return NonrealtimeProvider(context, latency=latency)
        elif isinstance(context, BaseServer):
            return RealtimeProvider(context, latency=latency)
        raise ValueError("Unknown context")
    @classmethod
    def nonrealtime(cls) -> "NonrealtimeProvider":
        """Build a provider around a fresh nonrealtime Session."""
        session = Session()
        return cast("NonrealtimeProvider", cls.from_context(session))
    @classmethod
    def realtime(
        cls, *, options: Optional[Options] = None, **kwargs
    ) -> "RealtimeProvider":
        """Boot a synchronous Server and build a provider around it."""
        server = Server()
        server.boot(options=options, **kwargs)
        return cast("RealtimeProvider", cls.from_context(server))
    @classmethod
    async def realtime_async(
        cls, *, options: Optional[Options] = None, **kwargs
    ) -> "RealtimeProvider":
        """Boot an AsyncServer and build a provider around it."""
        server = AsyncServer()
        await server.boot(options=options, **kwargs)
        return cast("RealtimeProvider", cls.from_context(server))
    @abc.abstractmethod
    def register_osc_callback(
        self, pattern: Tuple[Union[str, float], ...], procedure: Callable
    ) -> OscCallbackProxy:
        raise NotImplementedError
    @abc.abstractmethod
    def unregister_osc_callback(self, proxy: OscCallbackProxy) -> None:
        raise NotImplementedError
    ### PUBLIC PROPERTIES ###
    @property
    def annotation_map(self) -> Mapping[Union["supriya.nonrealtime.Node", int], str]:
        """Read-only view of node-identifier -> name annotations."""
        return MappingProxyType(self._annotation_map)
    @property
    def latency(self) -> float:
        return self._latency
    @property
    def moment(self) -> Optional[ProviderMoment]:
        """The innermost open moment, or None."""
        if self._moments:
            return self._moments[-1]
        return None
    @property
    def server(self) -> Optional[BaseServer]:
        # Overridden by RealtimeProvider.
        return None
    @property
    def session(self) -> Optional[Session]:
        # Overridden by NonrealtimeProvider.
        return None
class NonrealtimeProvider(Provider):
    """Provider backed by a nonrealtime Session; mutations edit the session."""
    ### INITIALIZER ###
    def __init__(self, session: Session, latency: float = 0.1):
        if not isinstance(session, Session):
            raise ValueError(f"Expected session, got {session}")
        Provider.__init__(self, latency=latency)
        self._session: Session = session
    ### SPECIAL METHODS ###
    def __str__(self) -> str:
        return f"<{type(self).__name__} {self._session!r}>"
    ### PRIVATE METHODS ###
    def _resolve_target_node(self, target_node) -> nonrealtime.Node:
        # None -> the session root; NodeProxy -> its underlying session node.
        if target_node is None:
            target_node = self._session.root_node
        elif isinstance(target_node, NodeProxy):
            target_node = target_node.identifier
        return target_node
    ### PUBLIC METHODS ###
    def add_buffer(
        self,
        *,
        channel_count: Optional[int] = None,
        file_path: Optional[os.PathLike] = None,
        frame_count: Optional[int] = None,
        starting_frame: Optional[int] = None,
    ) -> BufferProxy:
        """Add a buffer to the session; must be called inside a moment."""
        if not self.moment:
            raise ValueError("No current moment")
        identifier = self._session.add_buffer(
            channel_count=channel_count,
            file_path=file_path,
            frame_count=frame_count,
            starting_frame=starting_frame,
        )
        return BufferProxy(
            channel_count=channel_count,
            file_path=file_path,
            frame_count=frame_count,
            identifier=identifier,
            provider=self,
            starting_frame=starting_frame,
        )
    def add_bus(self, calculation_rate=CalculationRate.CONTROL) -> BusProxy:
        """Add a single audio- or control-rate bus to the session."""
        if not self.moment:
            raise ValueError("No current moment")
        calculation_rate = CalculationRate.from_expr(calculation_rate)
        if calculation_rate not in (CalculationRate.AUDIO, CalculationRate.CONTROL):
            raise ValueError(f"Invalid calculation rate: {calculation_rate!r}")
        identifier = self._session.add_bus(calculation_rate=calculation_rate)
        return BusProxy(
            calculation_rate=calculation_rate, identifier=identifier, provider=self
        )
    def add_bus_group(
        self, channel_count=1, calculation_rate=CalculationRate.CONTROL
    ) -> BusGroupProxy:
        """Add a contiguous bus group to the session."""
        if not self.moment:
            raise ValueError("No current moment")
        calculation_rate = CalculationRate.from_expr(calculation_rate)
        if calculation_rate not in (CalculationRate.AUDIO, CalculationRate.CONTROL):
            raise ValueError(f"Invalid calculation rate: {calculation_rate!r}")
        if channel_count < 1:
            raise ValueError("Channel-count must be positive, non-zero integer")
        identifier = self._session.add_bus_group(
            bus_count=channel_count, calculation_rate=calculation_rate
        )
        return BusGroupProxy(
            calculation_rate=calculation_rate,
            channel_count=channel_count,
            identifier=identifier,
            provider=self,
        )
    def add_group(
        self,
        *,
        target_node=None,
        add_action=AddAction.ADD_TO_HEAD,
        name: Optional[str] = None,
        parallel: bool = False,
    ) -> GroupProxy:
        """
        Add a group relative to ``target_node``.

        NOTE(review): ``name`` is accepted but unused here, and ``parallel``
        is only stored on the proxy, not passed to the session -- confirm
        whether nonrealtime sessions support either.
        """
        if not self.moment:
            raise ValueError("No current moment")
        identifier = self._resolve_target_node(target_node).add_group(
            add_action=add_action
        )
        proxy = GroupProxy(identifier=identifier, provider=self, parallel=parallel)
        return proxy
    def add_synth(
        self,
        *,
        synthdef: Optional[SynthDef] = None,
        target_node=None,
        add_action=AddAction.ADD_TO_HEAD,
        name: Optional[str] = None,
        **settings,
    ) -> SynthProxy:
        """
        Add a synth relative to ``target_node``.

        Bus proxies in ``settings`` are unwrapped to their session identifiers
        before being handed to the session; the proxy keeps the raw settings.
        """
        if not self.moment:
            raise ValueError("No current moment")
        sanitized_settings = {}
        for key, value in settings.items():
            if isinstance(value, (BusProxy, BusGroupProxy)):
                value = value.identifier
            sanitized_settings[key] = value
        identifier = self._resolve_target_node(target_node).add_synth(
            add_action=add_action, synthdef=synthdef, **sanitized_settings
        )
        proxy = SynthProxy(
            identifier=identifier,
            provider=self,
            synthdef=synthdef or default,
            settings=settings,
        )
        return proxy
    def free_buffer(self, buffer_: BufferProxy) -> None:
        if not self.moment:
            raise ValueError("No current moment")
        return  # This is currently a no-op
    def dispose(self, node_proxy: NodeProxy) -> None:
        if not self.moment:
            raise ValueError("No current moment")
        return  # This is currently a no-op
    def free_bus(self, bus: BusProxy) -> None:
        if not self.moment:
            raise ValueError("No current moment")
        return  # This is currently a no-op
    def free_bus_group(self, bus_group: BusGroupProxy) -> None:
        if not self.moment:
            raise ValueError("No current moment")
        return  # This is currently a no-op
    def free_node(self, node_proxy: NodeProxy) -> None:
        """Free the underlying session node."""
        if not self.moment:
            raise ValueError("No current moment")
        cast(nonrealtime.Node, node_proxy.identifier).free()
    def move_node(
        self,
        node_proxy: NodeProxy,
        add_action: AddActionLike,
        target_node: Union[NodeProxy, nonrealtime.Node],
    ) -> None:
        """Move a node relative to ``target_node`` within the session."""
        if not self.moment:
            raise ValueError("No current moment")
        self._resolve_target_node(target_node).move_node(
            node_proxy.identifier, add_action=add_action
        )
    def set_bus(self, bus_proxy: BusProxy, value: float) -> None:
        """Set a control-rate bus value in the session."""
        if not self.moment:
            raise ValueError("No current moment")
        elif bus_proxy.calculation_rate != CalculationRate.CONTROL:
            raise ValueError("Can only set control-rate buses")
        cast(nonrealtime.Bus, bus_proxy.identifier).set_(value)
    def set_node(self, node_proxy: NodeProxy, **settings) -> None:
        """Set controls on the underlying session node."""
        if not self.moment:
            raise ValueError("No current moment")
        for key, value in settings.items():
            if isinstance(value, (BusProxy, BusGroupProxy)):
                value = value.identifier
            cast(nonrealtime.Node, node_proxy.identifier)[key] = value
    def register_osc_callback(
        self, pattern: Tuple[Union[str, float], ...], procedure: Callable
    ) -> OscCallbackProxy:
        # Nonrealtime sessions receive no OSC; return an inert proxy.
        return OscCallbackProxy(provider=self, identifier=None)
    def unregister_osc_callback(self, proxy: OscCallbackProxy) -> None:
        pass  # no-op
    @property
    def session(self) -> Optional[Session]:
        return self._session
class RealtimeProvider(Provider):
    """
    Provider backed by a realtime server; mutations queue onto the current
    moment and are flushed as OSC bundles when the moment exits.
    """
    ### INITIALIZER ###
    def __init__(self, server: BaseServer, latency: float = 0.1):
        if not isinstance(server, BaseServer):
            raise ValueError(f"Expected Server, got {server}")
        Provider.__init__(self, latency=latency)
        self._server = server
    ### SPECIAL METHODS ###
    def __str__(self) -> str:
        return f"<{type(self).__name__} {self._server!r}>"
    ### PRIVATE METHODS ###
    def _resolve_target_node(self, target_node):
        if target_node is None:
            # TODO: Will this work with AsyncServer?
            target_node = self._server.default_group
        return target_node
    ### PUBLIC METHODS ###
    def add_buffer(
        self,
        *,
        channel_count: Optional[int] = None,
        file_path: Optional[os.PathLike] = None,
        frame_count: Optional[int] = None,
        starting_frame: Optional[int] = None,
    ) -> BufferProxy:
        """Allocate a buffer id and queue the allocate request on the moment."""
        if not self.moment:
            raise ValueError("No current moment")
        identifier = self._server.buffer_allocator.allocate(1)
        proxy = BufferProxy(
            channel_count=channel_count,
            file_path=file_path,
            frame_count=frame_count,
            identifier=identifier,
            provider=self,
            starting_frame=starting_frame,
        )
        self.moment.buffer_additions.append(proxy)
        return proxy
    def add_bus(self, calculation_rate=CalculationRate.CONTROL) -> BusProxy:
        """Allocate a single bus id from the rate-appropriate allocator."""
        if not self.moment:
            raise ValueError("No current moment")
        calculation_rate = CalculationRate.from_expr(calculation_rate)
        if calculation_rate not in (CalculationRate.AUDIO, CalculationRate.CONTROL):
            raise ValueError(f"Invalid calculation rate: {calculation_rate!r}")
        allocator = realtime.Bus._get_allocator(calculation_rate, server=self._server)
        identifier = allocator.allocate(1)
        return BusProxy(
            calculation_rate=calculation_rate, identifier=identifier, provider=self
        )
    def add_bus_group(
        self, channel_count=1, calculation_rate=CalculationRate.CONTROL
    ) -> BusGroupProxy:
        """Allocate a contiguous range of bus ids."""
        if not self.moment:
            raise ValueError("No current moment")
        calculation_rate = CalculationRate.from_expr(calculation_rate)
        if calculation_rate not in (CalculationRate.AUDIO, CalculationRate.CONTROL):
            raise ValueError(f"Invalid calculation rate: {calculation_rate!r}")
        if channel_count < 1:
            raise ValueError("Channel-count must be positive, non-zero integer")
        allocator = realtime.Bus._get_allocator(calculation_rate, server=self._server)
        identifier = allocator.allocate(channel_count)
        # Allocator exhaustion returns None.
        if identifier is None:
            raise RuntimeError
        return BusGroupProxy(
            calculation_rate=calculation_rate,
            channel_count=channel_count,
            identifier=identifier,
            provider=self,
        )
    def add_group(
        self,
        *,
        target_node=None,
        add_action=AddAction.ADD_TO_HEAD,
        name: Optional[str] = None,
        parallel: bool = False,
    ) -> GroupProxy:
        """Allocate a node id and queue a group-new request on the moment."""
        if not self.moment:
            raise ValueError("No current moment")
        target_node = self._resolve_target_node(target_node)
        identifier = self._server.node_id_allocator.allocate_node_id(1)
        proxy = GroupProxy(identifier=identifier, provider=self, parallel=parallel)
        self.moment.node_additions.append((proxy, add_action, target_node))
        if name:
            self._annotation_map[identifier] = name
        return proxy
    def add_synth(
        self,
        *,
        synthdef: Optional[SynthDef] = None,
        target_node=None,
        add_action=AddAction.ADD_TO_HEAD,
        name: Optional[str] = None,
        **settings,
    ) -> SynthProxy:
        """Allocate a node id and queue a synth-new request on the moment."""
        if not self.moment:
            raise ValueError("No current moment")
        target_node = self._resolve_target_node(target_node)
        identifier = self._server.node_id_allocator.allocate_node_id(1)
        proxy = SynthProxy(
            identifier=identifier,
            provider=self,
            synthdef=synthdef or default,
            settings=settings,
        )
        self.moment.node_additions.append((proxy, add_action, target_node))
        if name:
            self._annotation_map[identifier] = name
        return proxy
    def dispose(self, node_proxy: NodeProxy) -> None:
        if not self.moment:
            raise ValueError("No current moment")
        return  # This is currently a no-op
    def free_buffer(self, buffer_: BufferProxy) -> None:
        """Release the buffer id and queue a buffer-free request."""
        if not self.moment:
            raise ValueError("No current moment")
        self._server.buffer_allocator.free(cast(int, buffer_.identifier))
        self.moment.buffer_removals.append(buffer_)
    def free_bus(self, bus_proxy: BusProxy) -> None:
        # Releases the id locally only; no server request is queued
        # (control buses have no server-side free).
        if not self.moment:
            raise ValueError("No current moment")
        allocator = realtime.Bus._get_allocator(
            bus_proxy.calculation_rate, server=self._server
        )
        allocator.free(cast(int, bus_proxy.identifier))
    def free_bus_group(self, bus_group_proxy: BusGroupProxy) -> None:
        # Releases the id range locally only (see free_bus).
        if not self.moment:
            raise ValueError("No current moment")
        allocator = realtime.Bus._get_allocator(
            bus_group_proxy.calculation_rate, server=self._server
        )
        allocator.free(cast(int, bus_group_proxy.identifier))
    def free_node(self, node_proxy: NodeProxy) -> None:
        """Queue a node-free request and drop the node's annotation."""
        if not self.moment:
            raise ValueError("No current moment")
        self.moment.node_removals.append(node_proxy)
        self._annotation_map.pop(node_proxy.identifier, None)
    def move_node(
        self, node_proxy: NodeProxy, add_action: AddActionLike, target_node: NodeProxy
    ) -> None:
        """Queue a node-move request on the moment."""
        if not self.moment:
            raise ValueError("No current moment")
        target_node = self._resolve_target_node(target_node)
        self.moment.node_reorderings.append(
            (node_proxy, AddAction.from_expr(add_action), target_node)
        )
    def set_bus(self, bus_proxy: BusProxy, value: float) -> None:
        """Queue a control-bus set on the moment."""
        if not self.moment:
            raise ValueError("No current moment")
        elif bus_proxy.calculation_rate != CalculationRate.CONTROL:
            raise ValueError("Can only set control-rate buses")
        self.moment.bus_settings.append((bus_proxy, value))
    def set_node(self, node_proxy: NodeProxy, **settings) -> None:
        """Queue a node-set on the moment."""
        if not self.moment:
            raise ValueError("No current moment")
        self.moment.node_settings.append((node_proxy, settings))
    def register_osc_callback(
        self, pattern: Tuple[Union[str, float], ...], procedure: Callable
    ) -> OscCallbackProxy:
        """Register ``procedure`` with the server's OSC protocol."""
        identifier = self._server.osc_protocol.register(
            pattern=pattern, procedure=procedure
        )
        return OscCallbackProxy(provider=self, identifier=identifier)
    def unregister_osc_callback(self, proxy: OscCallbackProxy) -> None:
        self._server.osc_protocol.unregister(proxy.identifier)
    @property
    def server(self) -> Optional[BaseServer]:
        return self._server
|
{
"content_hash": "4fbded495fb2b15adef574202fb5a0bc",
"timestamp": "",
"source": "github",
"line_count": 1056,
"max_line_length": 88,
"avg_line_length": 35.18465909090909,
"alnum_prop": 0.6126497106715112,
"repo_name": "josiah-wolf-oberholtzer/supriya",
"id": "c2d28a22f216b20318f1944f635f19dbf25b2d04",
"size": "37155",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "supriya/providers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "26333"
},
{
"name": "Makefile",
"bytes": "1792"
},
{
"name": "Python",
"bytes": "2331463"
},
{
"name": "SuperCollider",
"bytes": "318"
}
],
"symlink_target": ""
}
|
""" Astropy coordinate class for the Sagittarius coordinate system """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third-party
import numpy as np
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.angles import rotation_matrix
import astropy.coordinates as coord
import astropy.units as u
__all__ = ["Orphan"]
class Orphan(coord.BaseCoordinateFrame):
    """
    A Heliocentric spherical coordinate system defined by the orbit
    of the Orphan stream, as described in
    `this paper <http://iopscience.iop.org/0004-637X/711/1/32/pdf/apj_711_1_32.pdf>`_.
    For more information about this class, see the Astropy documentation
    on `Coordinate Frames <http://docs.astropy.org/en/latest/coordinates/frames.html>`_.
    Parameters
    ----------
    representation : `BaseRepresentation` or None
        A representation object or None to have no data (or use the other keywords)
    Lambda : `Angle`, optional, must be keyword
        The longitude-like angle aligned with the Orphan stream's orbit.
    Beta : `Angle`, optional, must be keyword
        The latitude-like angle perpendicular to the Orphan stream's orbit.
    distance : `Quantity`, optional, must be keyword
        The Distance for this object along the line-of-sight.
    """
    # Map the generic spherical component names onto this frame's
    # Lambda/Beta naming convention.
    default_representation = coord.SphericalRepresentation
    frame_specific_representation_info = {
        'spherical': [coord.RepresentationMapping('lon', 'Lambda'),
                      coord.RepresentationMapping('lat', 'Beta'),
                      coord.RepresentationMapping('distance', 'distance')],
        'unitspherical': [coord.RepresentationMapping('lon', 'Lambda'),
                          coord.RepresentationMapping('lat', 'Beta')]
    }
# Define the Euler angles of the rotation from Galactic to Orphan-stream
# coordinates (inputs in degrees, converted to radians).
phi = np.radians(128.79)
theta = np.radians(54.39)
psi = np.radians(90.70)
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(phi, "z", unit=u.radian)
C = rotation_matrix(theta, "x", unit=u.radian)
B = rotation_matrix(psi, "z", unit=u.radian)
# NOTE(review): the name ``sgr_matrix`` appears inherited from the
# Sagittarius module this file was adapted from; the transform functions
# below depend on it, so it is kept as-is.
sgr_matrix = np.array(B.dot(C).dot(D))
# Galactic to Orphan-stream coordinates
@frame_transform_graph.transform(coord.FunctionTransform, coord.Galactic, Orphan)
def galactic_to_orp(gal_coord, sgr_frame):
    """Compute the transformation from Galactic spherical coordinates to
    heliocentric Orphan-stream coordinates.
    """
    lon = np.atleast_1d(gal_coord.l.radian)
    lat = np.atleast_1d(gal_coord.b.radian)
    # Unit vector in Galactic cartesian coordinates.
    xyz = np.array([
        np.cos(lat) * np.cos(lon),
        np.cos(lat) * np.sin(lon),
        np.sin(lat),
    ])
    # Rotate into the stream frame.
    x_s, y_s, z_s = sgr_matrix.dot(xyz)
    # Angular coordinates, with Lambda wrapped into [0, 2pi).
    Lambda = np.arctan2(y_s, x_s) * u.radian
    Lambda[Lambda < 0] = Lambda[Lambda < 0] + 2. * np.pi * u.radian
    Beta = np.arcsin(z_s / np.sqrt(x_s * x_s + y_s * y_s + z_s * z_s)) * u.radian
    return Orphan(Lambda=Lambda, Beta=Beta,
                  distance=gal_coord.distance)
# Orphan-stream to Galactic coordinates
@frame_transform_graph.transform(coord.FunctionTransform, Orphan, coord.Galactic)
def orp_to_galactic(orp_coord, gal_frame):
    """Transform heliocentric Sgr (stream) coordinates back to spherical
    Galactic coordinates.
    """
    lam = np.atleast_1d(orp_coord.Lambda.radian)
    beta = np.atleast_1d(orp_coord.Beta.radian)

    # Unit vector in the stream frame; rotate back with the transpose
    # (inverse) of the rotation matrix.
    stream_xyz = np.array([np.cos(beta) * np.cos(lam),
                           np.cos(beta) * np.sin(lam),
                           np.sin(beta)])
    X, Y, Z = sgr_matrix.T.dot(stream_xyz)

    # Angular coordinates; wrap l into [0, 2pi).
    l = np.arctan2(Y, X) * u.radian
    b = np.arcsin(Z / np.sqrt(X * X + Y * Y + Z * Z)) * u.radian
    wrap = l < 0
    l[wrap] = l[wrap] + 2 * np.pi * u.radian

    return coord.Galactic(l=l, b=b, distance=orp_coord.distance)
|
{
"content_hash": "947b5f62efa6a94447e9b2e340df3498",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 88,
"avg_line_length": 34.76190476190476,
"alnum_prop": 0.6745205479452054,
"repo_name": "abonaca/gary",
"id": "f0b0b24aa2842a341cf87a39bb6097b7e05640c8",
"size": "3667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gary/coordinates/orphan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "67332"
},
{
"name": "C++",
"bytes": "7004"
},
{
"name": "Python",
"bytes": "490956"
}
],
"symlink_target": ""
}
|
"""
dj-stripe PaymentIntent Model Tests.
"""
from copy import deepcopy
from unittest.mock import patch
import pytest
import stripe
from django.test import TestCase
from djstripe.enums import PaymentIntentStatus
from djstripe.models import Account, Customer, PaymentIntent
from tests import (
FAKE_ACCOUNT,
FAKE_CUSTOMER,
FAKE_PAYMENT_INTENT_DESTINATION_CHARGE,
FAKE_PAYMENT_INTENT_I,
FAKE_PAYMENT_METHOD_I,
AssertStripeFksMixin,
)
# Give every test in this module access to the Django test database.
pytestmark = pytest.mark.django_db
class TestStrPaymentIntent:
    """Tests for ``PaymentIntent.__str__`` across the four combinations of
    having/not having an on_behalf_of account and a customer."""

    #
    # Helpers
    #
    # NOTE: these helpers are deliberately plain functions (no ``self``):
    # they are invoked at class-body evaluation time by the ``parametrize``
    # decorator below, before any instance exists.
    def get_fake_payment_intent_destination_charge_no_customer():
        """Return the destination-charge fixture with the customer removed."""
        fake_intent = deepcopy(FAKE_PAYMENT_INTENT_DESTINATION_CHARGE)
        fake_intent["customer"] = None
        return fake_intent

    def get_fake_payment_intent_i_no_customer():
        """Return the plain payment-intent fixture with the customer removed."""
        fake_intent = deepcopy(FAKE_PAYMENT_INTENT_I)
        fake_intent["customer"] = None
        return fake_intent

    @pytest.mark.parametrize(
        "fake_intent_data, has_account, has_customer",
        [
            (FAKE_PAYMENT_INTENT_I, False, True),
            (FAKE_PAYMENT_INTENT_DESTINATION_CHARGE, True, True),
            (get_fake_payment_intent_destination_charge_no_customer(), True, False),
            (get_fake_payment_intent_i_no_customer(), False, False),
        ],
    )
    def test___str__(self, fake_intent_data, has_account, has_customer, monkeypatch):
        def mock_customer_get(*args, **kwargs):
            return deepcopy(FAKE_CUSTOMER)

        def mock_account_get(*args, **kwargs):
            data = deepcopy(FAKE_ACCOUNT)
            # Otherwise Account.api_retrieve will invoke File.api_retrieve...
            data["settings"]["branding"] = {}
            return data

        def mock_payment_method_get(*args, **kwargs):
            return deepcopy(FAKE_PAYMENT_METHOD_I)

        # monkeypatch stripe.Account.retrieve, stripe.Customer.retrieve, and
        # stripe.PaymentMethod.retrieve calls to return the desired json response.
        monkeypatch.setattr(stripe.Account, "retrieve", mock_account_get)
        monkeypatch.setattr(stripe.Customer, "retrieve", mock_customer_get)
        monkeypatch.setattr(stripe.PaymentMethod, "retrieve", mock_payment_method_get)

        pi = PaymentIntent.sync_from_stripe_data(fake_intent_data)
        account = Account.objects.filter(id=fake_intent_data["on_behalf_of"]).first()
        customer = Customer.objects.filter(id=fake_intent_data["customer"]).first()

        if has_account and has_customer:
            assert (
                f"{pi.human_readable_amount} ({PaymentIntentStatus.humanize(fake_intent_data['status'])}) "
                f"for {account} "
                f"by {customer}"
            ) == str(pi)
        elif has_account and not has_customer:
            assert (
                f"{pi.human_readable_amount} for {account}. {PaymentIntentStatus.humanize(fake_intent_data['status'])}"
            ) == str(pi)
        elif has_customer and not has_account:
            assert (
                f"{pi.human_readable_amount} by {customer}. {PaymentIntentStatus.humanize(fake_intent_data['status'])}"
            ) == str(pi)
        else:
            # BUG FIX: this branch was previously a bare comparison expression
            # with no ``assert``, so it never verified anything.
            assert (
                f"{pi.human_readable_amount} ({PaymentIntentStatus.humanize(fake_intent_data['status'])})"
            ) == str(pi)
class PaymentIntentTest(AssertStripeFksMixin, TestCase):
    """Sync and field-validation tests for the PaymentIntent model."""

    @patch(
        "stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
    )
    def test_sync_from_stripe_data(self, customer_retrieve_mock):
        intent_data = deepcopy(FAKE_PAYMENT_INTENT_I)
        synced_intent = PaymentIntent.sync_from_stripe_data(intent_data)

        self.assert_fks(
            synced_intent,
            expected_blank_fks={
                "djstripe.Customer.coupon",
                "djstripe.Customer.default_payment_method",
                "djstripe.Customer.subscriber",
                "djstripe.PaymentIntent.invoice (related name)",
                "djstripe.PaymentIntent.on_behalf_of",
                "djstripe.PaymentIntent.payment_method",
                "djstripe.PaymentIntent.upcominginvoice (related name)",
            },
        )

        # TODO - PaymentIntent should probably sync invoice (reverse OneToOneField)
        # self.assertIsNotNone(payment_intent.invoice)

    @patch(
        "stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
    )
    def test_status_enum(self, customer_retrieve_mock):
        intent_data = deepcopy(FAKE_PAYMENT_INTENT_I)
        all_statuses = (
            "requires_payment_method",
            "requires_confirmation",
            "requires_action",
            "processing",
            "requires_capture",
            "canceled",
            "succeeded",
        )
        for status in all_statuses:
            intent_data["status"] = status
            synced_intent = PaymentIntent.sync_from_stripe_data(intent_data)
            # trigger model field validation (including enum value choices check)
            synced_intent.full_clean()

    @patch(
        "stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
    )
    def test_canceled_intent(self, customer_retrieve_mock):
        intent_data = deepcopy(FAKE_PAYMENT_INTENT_I)
        intent_data["status"] = "canceled"
        intent_data["canceled_at"] = 1567524169

        all_reasons = (
            None,
            "duplicate",
            "fraudulent",
            "requested_by_customer",
            "abandoned",
            "failed_invoice",
            "void_invoice",
            "automatic",
        )
        for reason in all_reasons:
            intent_data["cancellation_reason"] = reason
            synced_intent = PaymentIntent.sync_from_stripe_data(intent_data)
            if reason is None:
                # enums nulls are coerced to "" by StripeModel._stripe_object_to_record
                self.assertEqual(synced_intent.cancellation_reason, "")
            else:
                self.assertEqual(synced_intent.cancellation_reason, reason)
            # trigger model field validation (including enum value choices check)
            synced_intent.full_clean()
|
{
"content_hash": "c7aedb91d849a5334a612e07705bede0",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 120,
"avg_line_length": 37.172413793103445,
"alnum_prop": 0.6239950525664811,
"repo_name": "pydanny/dj-stripe",
"id": "6e15519470641889bc369459cc88f8eb06338aef",
"size": "6468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_payment_intent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "21431"
},
{
"name": "Python",
"bytes": "322111"
}
],
"symlink_target": ""
}
|
import math
import sys
from numpy import (
float32,
putmask,
shape,
zeros,
)
# Default background nucleotide frequencies for A/C/G/T position weight
# matrices. This is the average of all species in the alignment outside of
# exons (R session output kept for provenance):
# > mean(r)
#         A         T         C         G
# 0.2863776 0.2878264 0.2129560 0.2128400
# > sd(r)
#          A          T          C          G
# 0.01316192 0.01371148 0.01293836 0.01386655
ENCODE_NONCODING_BACKGROUND = {"A": 0.2863776, "T": 0.2878264, "G": 0.2128400, "C": 0.2129560}
class Align:
    """A simple alignment block: a list of equal-length sequence rows.

    :param seqrows: list of row strings, one per sequence/species
    :param headers: optional per-row headers (e.g. (chrom, start, stop))
    :raises ValueError: when rows do not all share the same length
    """

    def __init__(self, seqrows, headers=None):
        self.rows = seqrows
        self.nrows = len(seqrows)
        ncol = None
        for rownum, row in enumerate(self.rows):
            try:
                rowlen = len(row)
            except TypeError as err:
                # A row without a length is a malformed block; chain the
                # original cause instead of a blank Exception("").
                print(row)
                raise Exception("alignment row %d has no length" % rownum) from err
            if ncol is None:
                ncol = rowlen
            elif ncol != rowlen:
                # BUG FIX: this informative ValueError was previously caught
                # by a blanket except and re-raised as Exception(""),
                # discarding the message; now it propagates intact.
                raise ValueError(
                    "Align: __init__:alignment block:row %d does not have %d columns, it has %d"
                    % (rownum, ncol, rowlen)
                )
        self.ncols = ncol
        self.dims = (self.nrows, self.ncols)
        self.headers = headers

    def __str__(self):
        """Return the rows joined by newlines."""
        return "\n".join(self.rows)
class AlignScoreMatrix:
    """A (nrows x ncols) float32 matrix of per-position scores parallel to an
    Align block; cells start as NaN meaning "no score computed"."""

    def __init__(self, align):
        # Fill every cell with NaN in a single vectorized assignment
        # (the original element-by-element loop was O(rows*cols) in Python).
        matrix = zeros((align.nrows, align.ncols), float32)
        matrix[:] = float("nan")
        self.matrix = matrix

    def __len__(self):
        # Number of alignment columns.
        return shape(self.matrix)[1]

    def __str__(self):
        # BUG FIX: this method previously printed the matrix and implicitly
        # returned None, so str(obj) raised TypeError ("__str__ returned
        # non-string"); __str__ must return a string.
        return str(self.matrix)
def score_align_motif(align, motif, gapmask=None, byPosition=True):
    """Score consensus *motif* against every row and column of *align*.

    For each non-gap start column, gathers len(motif) non-gap characters,
    matches both strands against the consensus, and records the better
    (boolean, as 0/1) score. Gap columns are masked to NaN.

    :param align: Align block to scan
    :param motif: consensus pattern string (IUPAC symbols)
    :param gapmask: optional precomputed gap mask; derived from *align* if None
    :param byPosition: if True score only the start column, else spread the
        score over the motif's footprint keeping the max per column
    :returns: (nrows x ncols) float matrix of scores, NaN where masked
    """
    chrom, chrom_start, chrom_stop = align.headers[0]

    # a blank score matrix
    nrows, ncols = align.dims
    ascoremax = AlignScoreMatrix(align)
    scoremax = ascoremax.matrix

    minSeqLen = len(motif)
    for ir in range(nrows):
        # BUG FIX: removed a stray `pass` statement that led off this loop.
        # row is missing data
        if isnan(align.rows[ir][0]):
            continue

        for start in range(ncols):
            if align.rows[ir][start] == "-":
                continue
            elif align.rows[ir][start] == "n":
                continue
            elif align.rows[ir][start] == "N":
                continue

            # get enough sequence for the weight matrix
            subseq = ""
            end = 0
            ic = start
            while len(subseq) < minSeqLen:
                if ic >= len(align.rows[ir]):
                    break
                char = align.rows[ir][ic].upper()
                ic += 1
                if char == "-" or char == "N":
                    continue
                else:
                    subseq += char

            if len(subseq) == minSeqLen:
                end = ic + 1
                # score both strands and keep the better match
                for_score = int(match_consensus(subseq, motif))
                revseq = reverse_complement(subseq)
                rev_score = int(match_consensus(revseq, motif))
                score = max(for_score, rev_score)

                # replace the alignment positions with the result
                if byPosition:
                    scoremax[ir][start] = score
                else:
                    # replace positions matching the width of the pwm
                    for i in range(start, end):
                        if isnan(scoremax[ir][i]):
                            scoremax[ir][i] = score
                        elif score > scoremax[ir][i]:
                            scoremax[ir][i] = score
    # mask gap characters
    if gapmask is None:
        gapmask = score_align_gaps(align)
    putmask(scoremax, gapmask, float("nan"))
    return scoremax
# -----------
#
# WeightMatrix--
# A position weight matrix (PWM) representation of a motif.
#
# ----------
# construction arguments:
# id: id (name) of the motif
# rows: the matrix; each row is a hash from symbol to weight, with
# .. the weight in string form
# alphabet: symbols allowed
# background: hash from symbol to background probability of that symbol; if
# .. not specified, ENCODE_NONCODING_BACKGROUND is used
# internal fields:
# rows: the matrix; each row is a hash from symbol to log-odds score
# .. of that symbol for that row of the weight matrix
# counts: the matrix; count[row][sym] is the weight, as an integer
# probs: the matrix; probs[row][sym] is the weight, as a probability
# ----------
class PositionWeightMatrix:
    """A position weight matrix (PWM) representation of a motif.

    Rows are supplied as strings (integer counts or decimal probabilities);
    they are rescaled to a common integer basis, converted to probabilities,
    and finally to per-position log-odds scores against a background
    distribution.
    """

    # translation table used to reverse-complement nucleotide strings
    complementMap = str.maketrans("ACGTacgt", "TGCAtgca")

    # IUPAC-IUB degenerate symbol definitions
    symbols = {
        "A": frozenset(["A"]),
        "C": frozenset(["C"]),
        "G": frozenset(["G"]),
        "T": frozenset(["T"]),
        "R": frozenset(["A", "G"]),
        "Y": frozenset(["C", "T"]),
        "M": frozenset(["A", "C"]),
        "K": frozenset(["G", "T"]),
        "S": frozenset(["G", "C"]),
        "W": frozenset(["A", "T"]),
        "H": frozenset(["A", "C", "T"]),
        "B": frozenset(["G", "T", "C"]),
        "V": frozenset(["G", "C", "A"]),
        "D": frozenset(["G", "T", "A"]),
    }

    def __init__(self, id, rows, alphabet, background=None, score_correction=True):
        """Build a PWM.

        :param id: id (name) of the motif
        :param rows: the matrix; each row holds one weight (in string form)
            per alphabet symbol followed by the consensus symbol
        :param alphabet: symbols allowed (upper-cased in place)
        :param background: symbol -> background probability; when None,
            ENCODE_NONCODING_BACKGROUND is used for A/C/G/T, else uniform
        :param score_correction: use pseudocount-corrected probabilities when
            True, plain frequencies when False
        :raises ValueError: when a weight cannot be parsed
        """
        self.id = id
        self.alphabet = alphabet
        nsymbols = len(self.alphabet)
        for i in range(len(self.alphabet)):
            self.alphabet[i] = self.alphabet[i].upper()

        if background is not None:
            self.background = background
        else:
            self.background = {}
            sorted_alphabet = []
            sorted_alphabet[:] = self.alphabet[:]
            sorted_alphabet.sort()
            if ["A", "C", "G", "T"] == sorted_alphabet:
                self.background = ENCODE_NONCODING_BACKGROUND
            else:
                for x in self.alphabet:
                    self.background[x] = float(1) / len(self.alphabet)

        if score_correction:
            self.score_correction = self.corrected_probability_score
        else:
            self.score_correction = self.simple_probability

        # partition counts from consensus symbol
        # in order to properly handle scaling in the presence of non-integers,
        # we prescan the matrix to figure out the largest scale factor, then go
        # back through and scale 'em all (some rows may be integer counts,
        # others may be probabilities)
        self.consensus = []
        scale = 1
        for i in range(len(rows)):
            fields, consensus = rows[i][:nsymbols], rows[i][-1]
            for x, count in enumerate(fields):
                try:
                    (w, s) = self.parse_weight(count)
                except ValueError as err:
                    # BUG FIX: the message previously interpolated ``w``,
                    # which is unbound when parse_weight itself raised
                    # (UnboundLocalError masked the real error); report the
                    # offending token and chain the cause instead.
                    raise ValueError(
                        "pwm row {} has bad weight {}".format(" ".join(fields), count)
                    ) from err
                # replace row counts with (values,scale)
                rows[i][x] = (w, s)
                scale = max(s, scale)
            self.consensus.append(consensus)

        hashRows = []
        self.matrix_base_counts = {}  # for pseudocounts
        self.counts = []  # for scaled counts
        self.probs = []  # for probabilities

        # scale counts to integers
        for i in range(len(rows)):
            hashRows.append(dict())
            for x, sym in enumerate(alphabet):
                (w, s) = rows[i][x]
                hashRows[i][sym] = w * scale / s
                assert hashRows[i][sym] >= 0
                if sym not in self.matrix_base_counts:
                    self.matrix_base_counts[sym] = 0
                self.matrix_base_counts[sym] += hashRows[i][sym]
            self.counts.append(hashRows[i].copy())
            self.probs.append(hashRows[i].copy())
            totalWeight = float(sum(self.probs[i].values()))
            for sym in self.probs[i]:
                self.probs[i][sym] /= totalWeight
        # total observation count, taken from the first row
        self.sites = sum(hashRows[0].values())

        # scan pwm to pre-compute logs of probabilities and min and max
        # log-odds scores (over the whole PWM) for scaling; note that the same
        # min and max applies for scaling log-odds scores for quantum
        # comparisons
        self.information_content = []
        minSum = 0
        maxSum = 0
        for i in range(len(hashRows)):
            self.information_content.append(self.information_content_calculation(i, hashRows))
            newHashRow = {}
            for base in self.alphabet:
                newHashRow[base] = self.pwm_score(base, i, hashRows)
            hashRows[i] = newHashRow
            minSum += min(hashRows[i].values())
            maxSum += max(hashRows[i].values())
        self.minSum = minSum
        self.maxSum = maxSum
        self.rows = hashRows

    # Reference 1: Wasserman and Sandelin: Nat Rev Genet. 2004 Apr;5(4):276-87.
    # Reference 2: Gertz et al.: Genome Res. 2005 Aug;15(8):1145-52.
    def information_content_calculation(self, i, counts):
        """Information content of row *i* (Reference 1 formulation)."""
        return 2 + sum(self.information_base_content(base, i, counts) for base in self.alphabet)
        # Reference 2)
        # return sum( [ self.information_base_content(base,i,counts) for base in self.alphabet ] )

    def information_base_content(self, base, i, counts):
        """Per-base contribution to row *i*'s information content."""
        return self.score_correction(counts, base, i) * self.pwm_score(base, i, counts)

    def __call__(self, seq):
        """Calling the PWM scores a sequence; alias for score_seq."""
        return self.score_seq(seq)

    def __add__(self, other):
        """Sum two PWMs after aligning them at their maximum correlation."""
        assert self.alphabet == other.alphabet
        r, (p, q) = self.max_correlation(other)

        # offsets (p, q) come from slide_correlation: at most one is non-zero
        if p == q == 0:
            width = max(len(self), len(other))
        elif p > 0:
            width = max(len(other) + p, len(self))
        elif q > 0:
            width = max(len(self) + q, len(other))

        sumx = zeros((width, len(self.alphabet)), dtype="int")
        selfx = self.to_count_matrix()
        otherx = other.to_count_matrix()

        if p == q == 0:
            sumx[: len(self)] += selfx
            sumx[: len(other)] += otherx
        elif p > 0:
            sumx[p : p + len(other)] += otherx
            sumx[: len(self)] += selfx
        else:
            sumx[: len(other)] += otherx
            sumx[q : q + len(self)] += selfx

        newRows = []
        for x in sumx:
            y = list(x)
            y.append(consensus_symbol(y))
            y = [str(yi) for yi in y]
            newRows.append(y)
        return PositionWeightMatrix(self.id + other.id, newRows, self.alphabet, self.background)

    def __old_add__(self, other, maxp=None):
        """Legacy addition: align at offset *maxp* (or best correlation)."""
        assert self.alphabet == other.alphabet
        bigN = max(len(self), len(other))
        smallN = min(len(self), len(other))
        if not maxp:
            prsq = self.correlation(other)
            maxp = prsq.index(max(prsq))

        leftpad = " " * maxp
        rightsize = bigN - smallN
        rightpad = " " * rightsize
        leftStrings = []
        rightStrings = []

        if len(self) > len(other):
            larger = self
            smaller = other
            leftStrings = self.consensus
            rightStrings = list(leftpad) + other.consensus + list(rightpad)
        else:
            smaller = self
            larger = other
            leftStrings = list(leftpad) + self.consensus + list(rightpad)
            rightStrings = other.consensus

        sumx = zeros([bigN, len(self.alphabet)])
        sumx += larger.to_count_matrix()
        sumx[maxp : maxp + smallN] += smaller.to_count_matrix()

        newRows = []
        for i, x in enumerate(sumx):
            y = list(x)
            y.append(leftStrings[i] + rightStrings[i])
            y = [str(yi) for yi in y]
            newRows.append(y)
        return PositionWeightMatrix(self.id + other.id, newRows, self.alphabet, self.background)

    def to_matrix(self):
        """Return the log-odds scores as a (len x alphabet) float matrix."""
        m = zeros([len(self), len(self.alphabet)])
        for i in range(len(self)):
            for j, a in enumerate(self.alphabet):
                m[i][j] = self[i][a]
        return m

    def to_count_matrix(self):
        """Return the scaled counts as a (len x alphabet) int matrix."""
        m = zeros([len(self), len(self.alphabet)], dtype="int")
        for i in range(len(self)):
            for j, a in enumerate(self.alphabet):
                m[i][j] = self.counts[i][a]
        return m

    def max_correlation(self, otherwmx):
        """Return (best r^2, (p, q)) over all relative offsets of the PWMs."""
        rsq, ixtuple = self.slide_correlation(otherwmx)
        max_rsq = max(rsq)
        maxp, maxq = ixtuple[rsq.index(max_rsq)]
        return max_rsq, (maxp, maxq)

    def slide_correlation(self, other):
        """Average per-column r^2 for every relative offset of two PWMs.

        Returns (rsq_list, offset_list) where each offset is a (p, q) pair:
        p shifts *other* right of self, q shifts self right of *other*.
        """
        assert self.alphabet == other.alphabet
        selfx = self.to_count_matrix()
        otherx = other.to_count_matrix()
        rsq = []
        ixtuple = []
        # self staggered over other, scan self backwards until flush
        for q in range(len(other) - 1, -1, -1):
            r = 0
            n = 0
            for p in range(len(self)):
                if q + p < len(other):
                    r += rsquared(list(selfx[p]), list(otherx[q + p]))
                    n += 1
                else:
                    n += 1
            rsq.append(r / n)
            ixtuple.append((0, q))
        # other staggered below self, scan other forward
        for p in range(1, len(self)):
            r = 0
            n = 0
            for q in range(len(other)):
                if p + q < len(self):
                    r += rsquared(list(selfx[p + q]), list(otherx[q]))
                    n += 1
                else:
                    n += 1
            rsq.append(r / n)
            ixtuple.append((p, 0))
        return rsq, ixtuple

    def correlation(self, otherwmx):
        """Average r^2 of the smaller PWM slid along the larger one."""
        assert self.alphabet == otherwmx.alphabet
        if len(self) > len(otherwmx):
            larger = self.to_count_matrix()
            smaller = otherwmx.to_count_matrix()
        else:
            smaller = self.to_count_matrix()
            larger = otherwmx.to_count_matrix()

        bigN = len(larger)
        smallN = len(smaller)
        position_rsq = []
        # slide small over large, for ave rsq
        for p in range(bigN):
            if p + smallN <= bigN:
                r = 0
                for q in range(smallN):
                    r += rsquared(list(smaller[q]), list(larger[p + q]))
                position_rsq.append(r / smallN)
        return position_rsq

    def score_align(self, align, gapmask=None, byPosition=True):
        """Score this PWM against every row/column of an Align block.

        Mirrors score_align_motif but scores with the PWM on both strands;
        returns a (nrows x ncols) matrix with NaN at masked positions.
        """
        # a blank score matrix
        nrows, ncols = align.dims
        ascoremax = AlignScoreMatrix(align)
        scoremax = ascoremax.matrix

        minSeqLen = len(self)
        for ir in range(nrows):
            # row is missing data
            if isnan(align.rows[ir][0]):
                continue

            for start in range(ncols):
                if align.rows[ir][start] == "-":
                    continue
                elif align.rows[ir][start] == "n":
                    continue
                elif align.rows[ir][start] == "N":
                    continue

                # get enough sequence for the weight matrix
                subseq = ""
                end = 0
                for ic in range(start, ncols):
                    char = align.rows[ir][ic]
                    if char == "-" or char == "N":
                        continue
                    else:
                        subseq += char

                    if len(subseq) == minSeqLen:
                        end = ic + 1
                        # forward
                        scores = self.score_seq(subseq)
                        raw, forward_score = scores[0]
                        # reverse
                        scores = self.score_reverse_seq(subseq)
                        raw, reverse_score = scores[0]

                        score = max(forward_score, reverse_score)

                        # replace the alignment positions with the result
                        if byPosition:
                            scoremax[ir][start] = score
                        else:
                            # replace positions matching the width of the pwm
                            for i in range(start, end):
                                if isnan(scoremax[ir][i]):
                                    scoremax[ir][i] = score
                                elif score > scoremax[ir][i]:
                                    scoremax[ir][i] = score
        # mask gap characters
        if gapmask is None:
            gapmask = score_align_gaps(align)
        putmask(scoremax, gapmask, float("nan"))
        return scoremax

    # seq can be a string, a list of characters, or a quantum sequence (a list
    # of hashes from symbols to probability)
    def score_seq(self, seq):
        """Score every full-width window of *seq*.

        Returns a list of (raw_log_odds, scaled) tuples, one per window;
        windows containing unknown symbols score as (nan, nan).
        """
        if isinstance(seq[0], dict):
            return self.score_quantum_seq(seq)

        scores = []
        for start in range(len(seq)):
            if start + len(self) > len(seq):
                break
            subseq = seq[start : start + len(self)]
            raw = 0
            try:
                for i, nt in enumerate(subseq):
                    raw += self.rows[i][nt.upper()]
                scaled = self.scaled(raw)
            except KeyError:
                raw, scaled = float("nan"), float("nan")
            scores.append((raw, scaled))
        return scores

    def score_quantum_seq(self, seq):
        """Score a quantum sequence (list of symbol->probability dicts)."""
        scores = []
        for start in range(len(seq)):
            if start + len(self) > len(seq):
                break
            subseq = seq[start : start + len(self)]
            raw = 0
            try:
                for i, nt in enumerate(subseq):
                    numer = sum(subseq[i][nt] * self.probs[i][nt] for nt in subseq[i])
                    denom = sum(subseq[i][nt] * self.background[nt] for nt in subseq[i])
                    raw += math.log(numer / denom, 2)
                scaled = self.scaled(raw)
            except KeyError:
                raw, scaled = float("nan"), float("nan")
            except OverflowError:
                raw, scaled = float("nan"), float("nan")
            except ValueError:
                raw, scaled = float("nan"), float("nan")
            scores.append((raw, scaled))
        return scores

    def score_reverse_seq(self, seq):
        """Score the reverse complement of *seq*, reported in forward order."""
        revSeq = reverse_complement(seq)
        scores = self.score_seq(revSeq)
        scores.reverse()
        return scores

    def scaled(self, val):
        """Linearly rescale a raw log-odds score into [0, 1]."""
        return (val - self.minSum) / (self.maxSum - self.minSum)

    def pseudocount(self, base=None):
        """sqrt-based pseudocount for *base* (or for the total site count)."""
        def f(count):
            return math.sqrt(count + 1)

        if base in self.alphabet:
            return f(self.matrix_base_counts[base])
        elif base is None:
            return f(self.sites)
        else:
            return float("nan")

    def simple_probability(self, freq, base, i):
        # p(base,i) = f(base,i)
        #             ----------------------
        #             sum(f(base,{A,C,G,T}))
        return float(freq[i][base]) / sum(freq[i][nt] for nt in self.alphabet)

    def corrected_probability_score(self, freq, base, i):
        # p(base,i) = f(base,i) + s(base)
        #             --------------------
        #              N + sum(s(A,C,T,G))
        f = float(freq[i][base])
        s = self.pseudocount(base)
        N = self.sites

        assert (f + s) > 0
        return (f + s) / (N + self.pseudocount())

    def pwm_score(self, base, i, freq, background=None):
        """Log2 odds of *base* at row *i* against the background."""
        if background is None:
            background = self.background
        p = self.score_correction(freq, base, i)
        b = background[base]
        try:
            return math.log(p / b, 2)
        except OverflowError:
            return float("nan")
        except ValueError:
            return float("nan")

    def parse_weight(self, weightString):
        """Parse a decimal weight string into (value, scale).

        "2" -> (2, 1); "1.5" -> (15, 10): value/scale gives the weight,
        with scale a power of 10.

        :raises ValueError: on malformed input
        """
        fields = weightString.split(".")
        if len(fields) > 2:
            raise ValueError
        w = int(fields[0])
        s = 1
        if len(fields) == 2:
            for _ in range(0, len(fields[1])):
                s *= 10
            w = s * w + int(fields[1])
        return (w, s)  # w = the weight
        # s = the scale used (a power of 10)

    def __str__(self):
        """TRANSFAC-like text dump: counts, row sums, and consensus."""
        lines = [self.id]
        headers = ["%s" % nt for nt in self.alphabet]
        lines.append("P0\t" + "\t".join(headers))
        for ix in range(0, len(self.rows)):
            weights = ["%d" % self.counts[ix][nt] for nt in self.alphabet]
            lines.append(
                ("%02d\t" % ix)
                + "\t".join(weights)
                + "\t"
                + str(sum(self.counts[ix].values()))
                + "\t"
                + self.consensus[ix]
            )
        return "\n".join(lines)

    def __getitem__(self, key):
        return self.rows[key]

    def __setitem__(self, key, value):
        self.rows[key] = value

    def __len__(self):
        return len(self.rows)
def score_align_gaps(align):
    """Build a gap mask for *align*: 1.0 where a row has '-', 0.0 elsewhere.

    Rows flagged as missing data keep their NaN fill.
    """
    nrows, ncols = align.dims
    gapmatrix = AlignScoreMatrix(align).matrix
    for row_index in range(nrows):
        # row is missing data
        if isnan(align.rows[row_index][0]):
            continue
        row = align.rows[row_index]
        # scan for gaps
        for col in range(ncols):
            gapmatrix[row_index][col] = 1 if row[col] == "-" else 0
    return gapmatrix
# -----------
#
# WeightMatrix Reader--
# Read position weight matrices (PWM) from a file.
#
# -----------
class Reader:
    """Iterate over all interesting weight matrices in a file."""

    def __init__(self, file, tfIds=None, name=None, format="basic", background=None, score_correction=True):
        """
        :param file: open file-like object to read matrices from
        :param tfIds: optional collection of matrix ids to keep (transfac only)
        :param name: optional display name used in error locations
        :param format: "basic" ('>'-delimited counts) or "transfac"
        :param background: passed through to PositionWeightMatrix
        :param score_correction: passed through to PositionWeightMatrix
        """
        self.tfIds = tfIds
        self.file = file
        self.name = name
        self.lineNumber = 0
        self.format = format
        self.background = background
        self.score_correction = score_correction

    def close(self):
        """Close the underlying file."""
        self.file.close()

    def where(self):
        """Human-readable current position, for error messages."""
        if self.name is None:
            return "line %d" % self.lineNumber
        else:
            return "line %d in %s" % (self.lineNumber, self.name)

    def __iter__(self):
        if self.format == "basic":
            return self.read_as_basic()
        elif self.format == "transfac":
            return self.read_as_transfac()
        else:
            raise ValueError("unknown weight matrix file format: '%s'" % self.format)

    def read_as_basic(self):
        """Yield PWMs from '>'-delimited blocks of digit-led count lines.

        NOTE(review): unlike read_as_transfac, this reader does not filter by
        ``self.tfIds`` — confirm whether that is intentional.
        """
        tfId = None
        pwmRows = None

        alphabet = ["A", "C", "G", "T"]
        while True:
            line = self.file.readline()
            if not line:
                break
            line = line.strip()
            self.lineNumber += 1
            if not line:
                # ROBUSTNESS FIX: skip blank lines; previously ``line[0]``
                # below raised IndexError on an empty stripped line.
                continue
            if line.startswith(">"):
                if pwmRows is not None:
                    # CONSISTENCY FIX: pass score_correction here too, as the
                    # final yield below already does.
                    yield PositionWeightMatrix(
                        tfId,
                        pwmRows,
                        alphabet,
                        background=self.background,
                        score_correction=self.score_correction,
                    )
                tfId = line.strip()[1:]
                pwmRows = []
            elif line[0].isdigit():
                tokens = line.strip().split()
                tokens.append(consensus_symbol(line))
                pwmRows.append(tokens)
        if pwmRows is not None:  # we've finished collecting a desired matrix
            yield PositionWeightMatrix(
                tfId, pwmRows, alphabet, background=self.background, score_correction=self.score_correction
            )

    def read_as_transfac(self):
        """Yield PWMs from TRANSFAC-format records, filtered by self.tfIds."""
        self.tfToPwm = {}
        tfId = None
        pwmRows = None

        while True:
            line = self.file.readline()
            if not line:
                break
            line = line.strip()
            self.lineNumber += 1
            # handle an ID line
            if line.startswith("ID"):
                if pwmRows is not None:  # we've finished collecting a desired matrix
                    try:
                        # FIXME: alphabet is undefined here!
                        yield PositionWeightMatrix(
                            tfId,
                            pwmRows,
                            alphabet,  # noqa: F821
                            background=self.background,
                            score_correction=self.score_correction,
                        )
                    except Exception:
                        print("Failed to read", tfId, file=sys.stderr)
                    tfId = None
                    pwmRows = None

                tokens = line.split(None, 2)
                if len(tokens) != 2:
                    raise ValueError("bad line, need two fields (%s)" % self.where())
                tfId = tokens[1]
                if self.tfIds is not None and (tfId not in self.tfIds):
                    continue  # ignore it, this isn't a desired matrix
                if tfId in self.tfToPwm:
                    raise ValueError(f"transcription factor {tfId} appears twice ({self.where()})")
                pwmRows = []  # start collecting a desired matrix
                continue

            # if we're not collecting, skip this line
            if pwmRows is None:
                continue
            if len(line) < 1:
                continue

            # name, if present, added to ID
            if line.startswith("NA"):
                words = line.strip().split()
                tfId = tfId + "\t" + " ".join(words[1:])

            # handle a P0 line
            if line.startswith("P0"):
                alphabet = line.split()[1:]
                if len(alphabet) < 2:
                    raise ValueError("bad line, need more dna (%s)" % self.where())
                continue

            # handle a 01,02,etc. line
            if line[0].isdigit():
                tokens = line.split()
                try:
                    index = int(tokens[0])
                    if index != len(pwmRows) + 1:
                        raise ValueError
                except Exception:
                    raise ValueError("bad line, bad index (%s)" % self.where())
                pwmRows.append(tokens[1:])
                continue

            # skip low quality entries
            if line.startswith("CC TRANSFAC Sites of quality"):
                print(line.strip(), tfId, file=sys.stderr)
                pwmRows = None
                continue
        if pwmRows is not None:  # we've finished collecting a desired matrix
            yield PositionWeightMatrix(
                tfId, pwmRows, alphabet, background=self.background, score_correction=self.score_correction
            )
        # clean up
        self.tfToPwm = None
def isnan(x):
    """Return True when *x* is NaN.

    Relies on NaN being the only value that is not equal to itself, so it
    works without importing math/numpy and is False for every other value.
    """
    return x != x
def reverse_complement(nukes):
    """Return the reverse complement of a nucleotide string."""
    complemented = nukes.translate(PositionWeightMatrix.complementMap)
    return complemented[::-1]
def rsquared(x, y):
    """Squared Pearson correlation of two equal-length sequences.

    Returns 0 when either sequence has zero variance (instead of NaN),
    matching the historical behavior of this module.
    """
    try:
        covariance_sq = sum_of_squares(x, y) ** 2
        return covariance_sq / (sum_of_squares(x) * sum_of_squares(y))
    except ZeroDivisionError:
        return 0
def sum_of_squares(x, y=None):
    """Return the corrected sum of products sum((xi - mean(x)) * (yi - mean(y))).

    With a single argument this is the corrected sum of squares of *x*.

    :raises ZeroDivisionError: when a sequence is empty
    """
    # BUG FIX: was `if not y`, which also hijacked falsy sequences (e.g. an
    # empty list) and raises "truth value is ambiguous" on numpy arrays.
    if y is None:
        y = x
    xmean = float(sum(x)) / len(x)
    ymean = float(sum(y)) / len(y)

    assert len(x) == len(y)
    return sum(float(xi) * float(yi) for xi, yi in zip(x, y)) - len(x) * xmean * ymean
def consensus_symbol(pattern):
    """Return the IUPAC consensus symbol for one column of base counts.

    :param pattern: whitespace-separated count string, list of counts in
        A,C,G,T order, or a dict keyed by base
    :returns: single base when one count dominates, a double-degenerate IUPAC
        symbol when the top two dominate together, else "N"
    :raises ValueError: when a count string cannot be parsed
    """
    if isinstance(pattern, str):
        try:
            pattern = [int(x) for x in pattern.split()]
        except ValueError as e:
            print(pattern, file=sys.stderr)
            # keep the raised type, but preserve the original cause
            raise ValueError(e) from e

    # IUPAC-IUB nomenclature for wobblers
    wobblers = {
        "R": frozenset(["A", "G"]),
        "Y": frozenset(["C", "T"]),
        "M": frozenset(["A", "C"]),
        "K": frozenset(["G", "T"]),
        "S": frozenset(["G", "C"]),
        "W": frozenset(["A", "T"]),
        "H": frozenset(["A", "C", "T"]),
        "B": frozenset(["G", "T", "C"]),
        "V": frozenset(["G", "C", "A"]),
        "D": frozenset(["G", "T", "A"]),
    }

    symbols = ["A", "C", "G", "T"]
    if isinstance(pattern, dict):
        pattern = [pattern[u] for u in symbols]
    total = sum(pattern)
    # tiny index-dependent offset breaks ties so f.index() below is unambiguous
    f = [(space / 1e5) + (float(x) / total) for space, x in enumerate(pattern)]
    copy = sorted(f)

    # http://www.genomatix.de/online_help/help_matinspector/matrix_help.html --
    # url says consensus must be greater than 50%, and at least twice the freq
    # of the second-most frequent. A double-degenerate symbol can be used
    # if the top two account for 75% or more of the nt, if each is less than 50%
    # Otherwise, N is used in the consensus.
    tops = copy[-2:]
    if tops[1] > 0.5 and tops[1] >= 2 * tops[0]:
        return symbols[f.index(tops[1])]
    elif tops[0] < 0.5 and sum(tops) >= 0.75:
        degen = frozenset(symbols[f.index(v)] for v in tops)
        for degenSymbol, wobbles in wobblers.items():
            if degen == wobbles:
                return degenSymbol
    else:
        return "N"
    # should be unreachable: any pair of distinct bases is a known wobbler
    print(pattern, file=sys.stderr)
    raise Exception("?")
# import C extensions
try:
    from ._position_weight_matrix import c_match_consensus

    def match_consensus(sequence, pattern, size=None):
        """Match *sequence* against consensus *pattern* (C implementation).

        *size* is accepted for signature compatibility and ignored; the
        length is taken from the sequence itself.
        """
        return c_match_consensus(sequence, pattern, len(sequence))

except ImportError:

    def match_consensus(sequence, pattern, size=None):
        """Pure-python fallback: True when every base of *sequence* is
        allowed by the corresponding IUPAC symbol in *pattern*.

        BUG FIX: *size* is now optional (and unused). Previously the C
        wrapper took two arguments while this fallback required three, so
        in-file callers passing two arguments raised TypeError whenever the
        C extension was unavailable.
        """
        for s, p in zip(sequence, pattern):
            if p == "N":
                continue
            if s not in PositionWeightMatrix.symbols[p]:
                return False
        return True
|
{
"content_hash": "c3fd27cde17f2051e1b409ac531c7298",
"timestamp": "",
"source": "github",
"line_count": 912,
"max_line_length": 123,
"avg_line_length": 33.9484649122807,
"alnum_prop": 0.49995155195245633,
"repo_name": "bxlab/bx-python",
"id": "4edef1af1d31ba82f871a0274fcfe5b3964d7ba3",
"size": "30984",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lib/bx/pwm/position_weight_matrix.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "100576"
},
{
"name": "Cython",
"bytes": "136438"
},
{
"name": "PostScript",
"bytes": "1169"
},
{
"name": "Python",
"bytes": "1101207"
},
{
"name": "Shell",
"bytes": "421"
}
],
"symlink_target": ""
}
|
from imp import load_source
from pathlib import Path
from os.path import expanduser
from pprint import pformat
from subprocess import Popen, PIPE
import os
import sys
from psutil import Process, TimeoutExpired
import colorama
import six
from . import logs, conf, types, shells
def setup_user_dir():
    """Return the user's ~/.thefuck config dir, creating it on first run."""
    base = Path(expanduser('~/.thefuck'))
    rules_path = base.joinpath('rules')
    if not rules_path.is_dir():
        rules_path.mkdir(parents=True)
    conf.initialize_settings_file(base)
    return base
def load_rule(rule):
    """Import the rule module at path *rule* and wrap it in a Rule tuple."""
    module_name = rule.name[:-3]  # strip the '.py' suffix
    rule_module = load_source(module_name, str(rule))
    return types.Rule(module_name,
                      rule_module.match,
                      rule_module.get_new_command,
                      getattr(rule_module, 'enabled_by_default', True),
                      getattr(rule_module, 'side_effect', None),
                      getattr(rule_module, 'priority', conf.DEFAULT_PRIORITY))
def _get_loaded_rules(rules, settings):
    """Yield every loadable rule that is enabled in the user's settings."""
    for rule_path in rules:
        if rule_path.name == '__init__.py':
            continue
        loaded = load_rule(rule_path)
        if loaded in settings.rules:
            yield loaded
def get_rules(user_dir, settings):
    """Return all enabled rules (bundled then user), sorted by priority."""
    bundled = sorted(Path(__file__).parent.joinpath('rules').glob('*.py'))
    user = sorted(user_dir.joinpath('rules').glob('*.py'))
    loaded = _get_loaded_rules(bundled + user, settings)

    def _effective_priority(rule):
        # per-rule override from settings wins over the rule's own priority
        return settings.priority.get(rule.name, rule.priority)

    return sorted(loaded, key=_effective_priority)
def wait_output(settings, popen):
    """Return True when the command finishes within `settings.wait_command`
    seconds; otherwise kill the whole process tree and return False.
    """
    proc = Process(popen.pid)
    try:
        proc.wait(settings.wait_command)
    except TimeoutExpired:
        # kill children first, then the command itself
        for child in proc.children(recursive=True):
            child.kill()
        proc.kill()
        return False
    return True
def get_command(settings, args):
    """Build and execute a Command from the CLI *args*; return it or None."""
    raw = args[1:]
    if six.PY2:
        raw = [arg.decode('utf-8') for arg in raw]
    script = ' '.join(raw)
    if not script:
        return
    script = shells.from_shell(script)
    logs.debug('Call: {}'.format(script), settings)
    # LANG=C keeps error messages in English so rules can match them.
    result = Popen(script, shell=True, stdout=PIPE, stderr=PIPE,
                   env=dict(os.environ, LANG='C'))
    if not wait_output(settings, result):
        return
    return types.Command(script,
                         result.stdout.read().decode('utf-8'),
                         result.stderr.read().decode('utf-8'))
def get_matched_rule(command, rules, settings):
    """Return the first rule whose ``match`` accepts *command*, if any."""
    for rule in rules:
        try:
            logs.debug(u'Trying rule: {}'.format(rule.name), settings)
            matched = rule.match(command, settings)
        except Exception:
            # A broken rule must not abort matching; report and move on.
            logs.rule_failed(rule, sys.exc_info(), settings)
        else:
            if matched:
                return rule
def confirm(new_command, side_effect, settings):
    """Return ``True`` when running of the new command is confirmed."""
    if not settings.require_confirmation:
        logs.show_command(new_command, side_effect, settings)
        return True

    logs.confirm_command(new_command, side_effect, settings)
    try:
        # Any keypress confirms; Ctrl-C aborts.
        sys.stdin.read(1)
    except KeyboardInterrupt:
        logs.failed('Aborted', settings)
        return False
    return True
def run_rule(rule, command, settings):
    """Apply *rule* to *command*: confirm, run side effect, print the fix."""
    fixed = shells.to_shell(rule.get_new_command(command, settings))
    if not confirm(fixed, rule.side_effect, settings):
        return
    if rule.side_effect:
        rule.side_effect(command, settings)
    shells.put_to_history(fixed)
    print(fixed)
def main():
    """Entry point: read the failed command, find a matching rule, fix it."""
    colorama.init()
    user_dir = setup_user_dir()
    settings = conf.get_settings(user_dir)
    logs.debug('Run with settings: {}'.format(pformat(settings)), settings)

    command = get_command(settings, sys.argv)
    if command:
        logs.debug('Received stdout: {}'.format(command.stdout), settings)
        logs.debug('Received stderr: {}'.format(command.stderr), settings)
        rules = get_rules(user_dir, settings)
        names = ', '.join(rule.name for rule in rules)
        logs.debug('Loaded rules: {}'.format(names), settings)
        rule = get_matched_rule(command, rules, settings)
        if rule:
            logs.debug('Matched rule: {}'.format(rule.name), settings)
            run_rule(rule, command, settings)
            return
    logs.failed('No fuck given', settings)
|
{
"content_hash": "f524a852f72f4e815cca783f299ae4b1",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 78,
"avg_line_length": 32.34228187919463,
"alnum_prop": 0.6219132600124507,
"repo_name": "suxinde2009/thefuck",
"id": "8d1a3dacac63fcf0f81530c1ce162ac81782380f",
"size": "4819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thefuck/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132514"
}
],
"symlink_target": ""
}
|
"""
Modifies the segment identifier column of a PDB file (default is an empty segment).
Usage:
python pdb_seg.py -<segment id> <pdb file>
Example:
python pdb_seg.py -C 1CTF.pdb
This program is part of the `pdb-tools` suite of utilities and should not be
distributed isolatedly. The `pdb-tools` were created to quickly manipulate PDB
files using the terminal, and can be used sequentially, with one tool streaming
data to another. They are based on old FORTRAN77 code that was taking too much
effort to maintain and compile. RIP.
"""
import os
import sys
__author__ = "Joao Rodrigues"
__email__ = "j.p.g.l.m.rodrigues@gmail.com"
def check_input(args):
    """Checks whether to read from stdin/file and validates user input/options.

    Returns a ``(file_handle, option)`` tuple; exits with status 1 on any
    usage error (writing the module usage text to stderr).
    """
    # Defaults: data piped on stdin, blank segment id.
    option = ' '
    fh = sys.stdin  # file handle

    n_args = len(args)
    if n_args == 0:
        # Reading from pipe with default option
        if sys.stdin.isatty():
            sys.stderr.write(__doc__)
            sys.exit(1)
    elif n_args == 1:
        # Either an option with piped data, or a file with the default option.
        if args[0].startswith('-'):
            option = args[0][1:]
            if sys.stdin.isatty():  # ensure the PDB data is streamed in
                emsg = 'ERROR!! No data to process!\n'
                sys.stderr.write(emsg)
                sys.stderr.write(__doc__)
                sys.exit(1)
        elif not os.path.isfile(args[0]):
            emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
            sys.stderr.write(emsg.format(args[0]))
            sys.stderr.write(__doc__)
            sys.exit(1)
        else:
            fh = open(args[0], 'r')
    elif n_args == 2:
        # Two options: option & File
        if not args[0].startswith('-'):
            emsg = 'ERROR! First argument is not an option: \'{}\'\n'
            sys.stderr.write(emsg.format(args[0]))
            sys.stderr.write(__doc__)
            sys.exit(1)
        if not os.path.isfile(args[1]):
            emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
            sys.stderr.write(emsg.format(args[1]))
            sys.stderr.write(__doc__)
            sys.exit(1)
        option = args[0][1:]
        fh = open(args[1], 'r')
    else:  # Whatever ...
        sys.stderr.write(__doc__)
        sys.exit(1)

    # Validate option
    if len(option) > 4:
        emsg = 'ERROR!! Segment id must be max. four characters: \'{}\'\n'
        sys.stderr.write(emsg.format(option))
        sys.exit(1)

    return (fh, option)
def pad_line(line):
    """Return *line* padded/truncated to exactly 80 characters plus newline.

    The previous implementation mis-counted when the input had no trailing
    newline: the ``+ 1`` slack plus the final ``[:81]`` slice could return
    81 characters of content with the newline chopped off.  Long lines
    (> 80 chars of content) similarly lost their newline.  Normalising via
    rstrip/slice/ljust handles every case uniformly.
    """
    # Strip the newline, force the record to width 80, then re-attach '\n'.
    return line.rstrip('\n')[:80].ljust(80) + '\n'
def run(fhandle, segment_id):
    """
    Set the segment identifier column in all ATOM/HETATM records to a value.

    This function is a generator.

    Parameters
    ----------
    fhandle : an iterable giving the PDB file line-by-line.
    segment_id : str
        The new segment ID.

    Yields
    ------
    str (line-by-line)
        The modified (or not) PDB line.
    """
    seg_field = segment_id.ljust(4)
    for line in fhandle:
        if not line.startswith(('ATOM', 'HETATM')):
            yield line
            continue
        padded = pad_line(line)
        # Columns 73-76 of a PDB record hold the segment identifier.
        yield padded[:72] + seg_field + padded[76:]
alter_segid = run
def main():
    """Script entry point: parse args, rewrite segment ids, stream to stdout."""
    pdbfh, segment_id = check_input(sys.argv[1:])

    modified = run(pdbfh, segment_id)

    chunk_size = 5000  # flush to stdout every N lines
    chunk = []
    try:
        for lineno, line in enumerate(modified):
            if lineno % chunk_size == 0:
                sys.stdout.write(''.join(chunk))
                chunk = []
            chunk.append(line)
        sys.stdout.write(''.join(chunk))
        sys.stdout.flush()
    except IOError:
        # Swallow broken pipes (e.g. piping into `head` or `tail`) silently.
        pass

    # Safe to close even when the handle is sys.stdin.
    pdbfh.close()
    sys.exit(0)
if __name__ == '__main__':
    main()
|
{
"content_hash": "56b48a7ba510bb781598c08516542cb3",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 83,
"avg_line_length": 27.373417721518987,
"alnum_prop": 0.5569942196531792,
"repo_name": "haddocking/pdb-tools",
"id": "4a4e8e2fc61327488128206a74a33b639f8c96d3",
"size": "4958",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pdbtools/pdb_seg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "578989"
},
{
"name": "TeX",
"bytes": "980"
}
],
"symlink_target": ""
}
|
import logging
import os
import re
import shutil
import subprocess
from os.path import splitext
from . import image, utils
from .settings import Status, get_thumb
from .utils import is_valid_html5_video
class SubprocessException(Exception):
    """Raised when an external converter command exits with an error."""
def check_subprocess(cmd, source, outname):
    """Run *cmd*; drop the partial output file on interrupt or failure.

    Raises ``SubprocessException`` when the command exits non-zero.
    """
    logger = logging.getLogger(__name__)
    try:
        res = subprocess.run(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    except KeyboardInterrupt:
        logger.debug('Process terminated, removing file %s', outname)
        if os.path.isfile(outname):
            os.remove(outname)
        raise

    if not res.returncode:
        return
    logger.debug('STDOUT:\n %s', res.stdout.decode('utf8'))
    logger.debug('STDERR:\n %s', res.stderr.decode('utf8'))
    if os.path.isfile(outname):
        logger.debug('Removing file %s', outname)
        os.remove(outname)
    raise SubprocessException('Failed to process ' + source)
def video_size(source, converter='ffmpeg'):
    """Return the ``(width, height)`` of the video, or ``(0, 0)`` if the
    converter output cannot be parsed."""
    res = subprocess.run([converter, '-i', source], stderr=subprocess.PIPE)
    stderr = res.stderr.decode('utf8')

    size_match = re.search(r'Stream.*Video.* ([0-9]+)x([0-9]+)', stderr)
    if size_match:
        x, y = int(size_match.group(1)), int(size_match.group(2))
    else:
        x = y = 0

    # A 90/270 degree rotation flag means width and height are swapped.
    if re.search(r'rotate\s*:\s*-?(90|270)', stderr):
        x, y = y, x
    return x, y
def generate_video(source, outname, settings, options=None):
    """Video processor.

    Transcodes ``source`` to ``outname`` with the configured converter,
    or simply copies the file when the extension already matches and the
    frame fits within the configured ``video_size``.

    :param source: path to a video
    :param outname: path to the generated video
    :param settings: settings dict
    :param options: array of options passed to ffmpeg
    """
    logger = logging.getLogger(__name__)
    # Don't transcode if source is in the required format and
    # has fitting dimensions, copy instead.
    converter = settings['video_converter']
    w_src, h_src = video_size(source, converter=converter)
    w_dst, h_dst = settings['video_size']
    logger.debug('Video size: %i, %i -> %i, %i', w_src, h_src, w_dst, h_dst)
    base, src_ext = splitext(source)
    base, dst_ext = splitext(outname)
    if dst_ext == src_ext and w_src <= w_dst and h_src <= h_dst:
        logger.debug('Video is smaller than the max size, copying it instead')
        shutil.copy(source, outname)
        return
    # Pick the scale filter on the dimension that limits the fit, keeping
    # the aspect ratio and forcing even sizes (trunc(.../2)*2).
    # http://stackoverflow.com/questions/8218363/maintaining-ffmpeg-aspect-ratio
    # + I made a drawing on paper to figure this out
    if h_dst * w_src < h_src * w_dst:
        # biggest fitting dimension is height
        resize_opt = ['-vf', "scale=trunc(oh*a/2)*2:%i" % h_dst]
    else:
        # biggest fitting dimension is width
        resize_opt = ['-vf', "scale=%i:trunc(ow/a/2)*2" % w_dst]
    # do not resize if input dimensions are smaller than output dimensions
    if w_src <= w_dst and h_src <= h_dst:
        resize_opt = []
    # Encoding options improved, thanks to
    # http://ffmpeg.org/trac/ffmpeg/wiki/vpxEncodingGuide
    cmd = [converter, '-i', source, '-y']  # -y to overwrite output files
    if options is not None:
        cmd += options
    cmd += resize_opt + [outname]
    logger.debug('Processing video: %s', ' '.join(cmd))
    check_subprocess(cmd, source, outname)
def generate_thumbnail(source, outname, box, delay, fit=True, options=None,
                       converter='ffmpeg'):
    """Create a thumbnail image for the video source, based on ffmpeg."""
    logger = logging.getLogger(__name__)
    tmpfile = outname + ".tmp.jpg"

    # Grab a single frame `delay` seconds into the video into a temp jpg.
    dump_cmd = [converter, '-i', source, '-an', '-r', '1',
                '-ss', delay, '-vframes', '1', '-y', tmpfile]
    logger.debug('Create thumbnail for video: %s', ' '.join(dump_cmd))
    check_subprocess(dump_cmd, source, outname)

    # Resize the dumped frame through the regular image pipeline, then
    # discard the intermediate file.
    image.generate_thumbnail(tmpfile, outname, box, fit=fit, options=options)
    os.unlink(tmpfile)
def process_video(filepath, outpath, settings):
    """Process a video: resize, create thumbnail.

    Returns ``Status.SUCCESS`` or ``Status.FAILURE``.  Any exception is
    swallowed and mapped to FAILURE unless the logger is at DEBUG level,
    in which case it is re-raised for easier debugging.
    """
    logger = logging.getLogger(__name__)
    filename = os.path.split(filepath)[1]
    basename, ext = splitext(filename)
    try:
        if settings['use_orig'] and is_valid_html5_video(ext):
            # Source already plays in browsers: copy (or symlink) it as-is.
            outname = os.path.join(outpath, filename)
            utils.copy(filepath, outname, symlink=settings['orig_link'])
        else:
            valid_formats = ['mp4', 'webm']
            video_format = settings['video_format']
            if video_format not in valid_formats:
                logger.error('Invalid video_format. Please choose one of: %s',
                             valid_formats)
                raise ValueError
            outname = os.path.join(outpath, basename + '.' + video_format)
            # Per-format encoder options live under e.g. 'mp4_options'.
            generate_video(filepath, outname, settings,
                           options=settings.get(video_format + '_options'))
    except Exception:
        if logger.getEffectiveLevel() == logging.DEBUG:
            raise
        else:
            return Status.FAILURE
    if settings['make_thumbs']:
        thumb_name = os.path.join(outpath, get_thumb(settings, filename))
        try:
            # Thumbnail is generated from the *output* video, not the source.
            generate_thumbnail(
                outname, thumb_name, settings['thumb_size'],
                settings['thumb_video_delay'], fit=settings['thumb_fit'],
                options=settings['jpg_options'],
                converter=settings['video_converter'])
        except Exception:
            if logger.getEffectiveLevel() == logging.DEBUG:
                raise
            else:
                return Status.FAILURE
    return Status.SUCCESS
|
{
"content_hash": "0ac6669374762fc800f064174a650891",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 80,
"avg_line_length": 34.28654970760234,
"alnum_prop": 0.6118028313150264,
"repo_name": "kontza/sigal",
"id": "4a2a9d14cbec0eeb02e37cf985d001510fba452f",
"size": "7014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sigal/video.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60639"
},
{
"name": "HTML",
"bytes": "33913"
},
{
"name": "Makefile",
"bytes": "873"
},
{
"name": "Python",
"bytes": "132374"
},
{
"name": "Shell",
"bytes": "1019"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from happy_api.models import Category, Note, Text, Reminder, Checklist, Image, Group
from datetime import datetime
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializer for User accounts.

    Exposes ``date_joined`` under the name ``ctime`` and ensures passwords
    are always stored hashed.
    """
    ctime = serializers.DateTimeField(source='date_joined')

    class Meta:
        model = User
        fields = ('id', 'username', 'password', 'email', 'ctime')
        write_only_fields = ('password',)

    def create(self, validated_data):
        """Create a new user; ``create_user`` hashes the password itself.

        The previous version called ``set_password`` again after
        ``create_user`` without saving, leaving the in-memory hash out of
        sync with the database and hashing twice for nothing.
        """
        # `ctime` maps onto `date_joined`, which Django sets automatically.
        validated_data.pop('date_joined', None)
        return User.objects.create_user(**validated_data)

    def update(self, instance, validated_data):
        """Update email and, when provided, the password (hashed).

        Uses ``.get`` so partial updates without a password no longer
        raise ``KeyError``.
        """
        instance.email = validated_data.get('email', instance.email)
        password = validated_data.get('password')
        if password:
            instance.set_password(password)
        instance.save()
        return instance
class CategorySerializer(serializers.ModelSerializer):
    """ Serializer for Category Data"""
    # Owner is read-only: presumably filled in by the view from the
    # request user — verify against the viewset before changing.
    user = serializers.ReadOnlyField( source='user.id')
    class Meta:
        model = Category
        fields = ('id', 'user', 'title')
class TextSerializer(serializers.ModelSerializer):
    """Serializer for Text objects."""
    # NOTE(review): none of these Meta classes declare `fields`/`exclude`.
    # Older DRF implicitly serializes every model field; newer DRF requires
    # an explicit declaration. Verify the pinned DRF version before upgrading.
    class Meta:
        model = Text
class ReminderSerializer(serializers.ModelSerializer):
    """Serializer for Reminder objects."""
    class Meta:
        model = Reminder
class ChecklistSerializer(serializers.ModelSerializer):
    """Serializer for Checklist objects."""
    class Meta:
        model = Checklist
class ImageSerializer(serializers.ModelSerializer):
    """Serializer for Image objects."""
    class Meta:
        model = Image
class GroupSerializer(serializers.ModelSerializer):
    """Serializer for Group objects."""
    class Meta:
        model = Group
class NoteListSerializer(serializers.ModelSerializer):
    """Compact Note representation for list views (metadata only, no
    content payload fields)."""
    # Owner is read-only; presumably set by the view — verify.
    user = serializers.ReadOnlyField(source='user.id')
    class Meta:
        model = Note
        fields = ('id', 'user', 'category', 'cdate', 'mdate', 'revision', 'title')
class NoteSerializer(serializers.ModelSerializer):
    """Full Note representation including the content payload fields."""
    class Meta:
        model = Note
        fields = ('id', 'user', 'category', 'cdate', 'mdate', 'revision', 'title', 'plaintext_data', 'reminder_data', 'checklist_data')
|
{
"content_hash": "3019349cf46312ef9f3420d01facf33f",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 135,
"avg_line_length": 33.2112676056338,
"alnum_prop": 0.6683630195080577,
"repo_name": "nazco/todo-of-happiness-api",
"id": "f4e24f5b0b7b2cc4c4b28572217c96bdada34a48",
"size": "2358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "happy_server/happy_api/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21590"
},
{
"name": "TeX",
"bytes": "36716"
}
],
"symlink_target": ""
}
|
"""
Management class for Storage-related functions (attach, detach, etc).
"""
from oslo_log import log as logging
from oslo_vmware import exceptions as oslo_vmw_exceptions
from oslo_vmware import vim_util as vutil
from nova.compute import power_state
import nova.conf
from nova import exception
from nova.i18n import _
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vm_util
CONF = nova.conf.CONF  # global nova config (CONF.vmware.* is read below)
LOG = logging.getLogger(__name__)  # module-level logger
class VolumeMoRefProxy(session.StableMoRefProxy):
    """Stable moref proxy for a volume's backing VM.

    The initial moref comes from the connection info ('volume' key) when
    present; ``fetch_moref`` can re-resolve it by uuid lookup.
    """

    def __init__(self, connection_info_data):
        volume_ref_value = connection_info_data.get('volume')
        moref = (vutil.get_moref(volume_ref_value, 'VirtualMachine')
                 if volume_ref_value else None)
        super(VolumeMoRefProxy, self).__init__(moref)
        self._connection_info_data = connection_info_data

    def fetch_moref(self, session):
        # Prefer 'volume_id'; fall back to the legacy 'name' key.
        volume_id = (self._connection_info_data.get('volume_id') or
                     self._connection_info_data.get('name'))
        if volume_id:
            self.moref = vm_util._get_vm_ref_from_vm_uuid(session, volume_id)
class VMwareVolumeOps(object):
"""Management class for Volume-related tasks."""
    def __init__(self, session, cluster=None):
        # session: VMware API session used for every vCenter call below.
        # cluster: optional cluster moref, used when picking a host ref.
        self._session = session
        self._cluster = cluster
    def attach_disk_to_vm(self, vm_ref, instance,
                          adapter_type, disk_type, vmdk_path=None,
                          disk_size=None, linked_clone=False,
                          device_name=None, disk_io_limits=None):
        """Attach disk to VM by reconfiguration.

        :param vm_ref: moref of the VM to reconfigure.
        :param instance: nova instance (used for logging context).
        :param adapter_type: controller adapter type for the new disk.
        :param disk_type: vmdk disk type passed to the attach spec.
        :param vmdk_path: path of an existing vmdk to attach, if any.
        :param disk_size: size of the disk, if one is to be created.
        :param device_name: device name for raw device attachments.
        :param disk_io_limits: IO limits applied to the disk.
        """
        instance_name = instance.name
        client_factory = self._session.vim.client.factory
        devices = vm_util.get_hardware_devices(self._session, vm_ref)
        # Find a controller with a free unit number; controller_spec is
        # non-None when a new controller has to be created as well.
        (controller_key, unit_number,
         controller_spec) = vm_util.allocate_controller_key_and_unit_number(
            client_factory,
            devices,
            adapter_type)
        vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
            client_factory, disk_type, vmdk_path,
            disk_size, linked_clone, controller_key,
            unit_number, device_name, disk_io_limits)
        if controller_spec:
            # Create controller and disk in the same reconfigure call.
            vmdk_attach_config_spec.deviceChange.append(controller_spec)
        LOG.debug("Reconfiguring VM instance %(instance_name)s to attach "
                  "disk %(vmdk_path)s or device %(device_name)s with type "
                  "%(disk_type)s",
                  {'instance_name': instance_name, 'vmdk_path': vmdk_path,
                   'device_name': device_name, 'disk_type': disk_type},
                  instance=instance)
        vm_util.reconfigure_vm(self._session, vm_ref, vmdk_attach_config_spec)
        LOG.debug("Reconfigured VM instance %(instance_name)s to attach "
                  "disk %(vmdk_path)s or device %(device_name)s with type "
                  "%(disk_type)s",
                  {'instance_name': instance_name, 'vmdk_path': vmdk_path,
                   'device_name': device_name, 'disk_type': disk_type},
                  instance=instance)
def _update_volume_details(self, vm_ref, volume_uuid, device_uuid):
# Store the uuid of the volume_device
volume_option = 'volume-%s' % volume_uuid
extra_opts = {volume_option: device_uuid}
client_factory = self._session.vim.client.factory
extra_config_specs = vm_util.get_vm_extra_config_spec(
client_factory, extra_opts)
vm_util.reconfigure_vm(self._session, vm_ref, extra_config_specs)
def _get_volume_uuid(self, vm_ref, volume_uuid):
prop = 'config.extraConfig["volume-%s"]' % volume_uuid
opt_val = self._session._call_method(vutil,
'get_object_property',
vm_ref,
prop)
if opt_val is not None:
return opt_val.value
    def detach_disk_from_vm(self, vm_ref, instance, device,
                            destroy_disk=False):
        """Detach disk from VM by reconfiguration.

        :param vm_ref: moref of the VM to reconfigure.
        :param instance: nova instance (used for logging context).
        :param device: the virtual disk device to remove.
        :param destroy_disk: also delete the backing file when True.
        """
        instance_name = instance.name
        client_factory = self._session.vim.client.factory
        vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(
            client_factory, device, destroy_disk)
        disk_key = device.key
        LOG.debug("Reconfiguring VM instance %(instance_name)s to detach "
                  "disk %(disk_key)s",
                  {'instance_name': instance_name, 'disk_key': disk_key},
                  instance=instance)
        vm_util.reconfigure_vm(self._session, vm_ref, vmdk_detach_config_spec)
        LOG.debug("Reconfigured VM instance %(instance_name)s to detach "
                  "disk %(disk_key)s",
                  {'instance_name': instance_name, 'disk_key': disk_key},
                  instance=instance)
    def _iscsi_get_target(self, data):
        """Return the iSCSI Target given a volume info.

        Returns a ``(device_name, uuid)`` tuple, or ``(None, None)`` when
        the target cannot be resolved from the host's storage topology.
        """
        target_portal = data['target_portal']
        target_iqn = data['target_iqn']
        host_mor = vm_util.get_host_ref(self._session, self._cluster)
        # Fetch HBAs, SCSI topology and LUNs in one property collector call.
        lst_properties = ["config.storageDevice.hostBusAdapter",
                          "config.storageDevice.scsiTopology",
                          "config.storageDevice.scsiLun"]
        prop_dict = self._session._call_method(vutil,
                                               "get_object_properties_dict",
                                               host_mor,
                                               lst_properties)
        result = (None, None)
        hbas_ret = None
        scsi_topology = None
        scsi_lun_ret = None
        if prop_dict:
            hbas_ret = prop_dict.get('config.storageDevice.hostBusAdapter')
            scsi_topology = prop_dict.get('config.storageDevice.scsiTopology')
            scsi_lun_ret = prop_dict.get('config.storageDevice.scsiLun')
        # Meaning there are no host bus adapters on the host
        if hbas_ret is None:
            return result
        host_hbas = hbas_ret.HostHostBusAdapter
        if not host_hbas:
            return result
        # Locate the iSCSI HBA; the for/else bails out when none exists.
        for hba in host_hbas:
            if hba.__class__.__name__ == 'HostInternetScsiHba':
                hba_key = hba.key
                break
        else:
            return result
        if scsi_topology is None:
            return result
        host_adapters = scsi_topology.adapter
        if not host_adapters:
            return result
        scsi_lun_key = None
        # Walk the topology under the iSCSI adapter to find the disk LUN of
        # the target matching both the requested portal address and IQN.
        for adapter in host_adapters:
            if adapter.adapter == hba_key:
                if not getattr(adapter, 'target', None):
                    return result
                for target in adapter.target:
                    if (getattr(target.transport, 'address', None) and
                        target.transport.address[0] == target_portal and
                            target.transport.iScsiName == target_iqn):
                        if not target.lun:
                            return result
                        for lun in target.lun:
                            if 'host.ScsiDisk' in lun.scsiLun:
                                scsi_lun_key = lun.scsiLun
                                break
                        break
                break
        if scsi_lun_key is None:
            return result
        if scsi_lun_ret is None:
            return result
        host_scsi_luns = scsi_lun_ret.ScsiLun
        if not host_scsi_luns:
            return result
        # Map the LUN key back to a concrete device name/uuid.
        for scsi_lun in host_scsi_luns:
            if scsi_lun.key == scsi_lun_key:
                return (scsi_lun.deviceName, scsi_lun.uuid)
        return result
def _iscsi_add_send_target_host(self, storage_system_mor, hba_device,
target_portal):
"""Adds the iscsi host to send target host list."""
client_factory = self._session.vim.client.factory
send_tgt = client_factory.create('ns0:HostInternetScsiHbaSendTarget')
(send_tgt.address, send_tgt.port) = target_portal.split(':')
LOG.debug("Adding iSCSI host %s to send targets", send_tgt.address)
self._session._call_method(
self._session.vim, "AddInternetScsiSendTargets",
storage_system_mor, iScsiHbaDevice=hba_device, targets=[send_tgt])
    def _iscsi_rescan_hba(self, target_portal):
        """Rescan the iSCSI HBA to discover iSCSI targets.

        When *target_portal* is given, it is first added to the HBA's
        send-target list (if missing) so the rescan can find it.
        """
        host_mor = vm_util.get_host_ref(self._session, self._cluster)
        storage_system_mor = self._session._call_method(
            vutil,
            "get_object_property",
            host_mor,
            "configManager.storageSystem")
        hbas_ret = self._session._call_method(
            vutil,
            "get_object_property",
            storage_system_mor,
            "storageDeviceInfo.hostBusAdapter")
        # Meaning there are no host bus adapters on the host
        if hbas_ret is None:
            return
        host_hbas = hbas_ret.HostHostBusAdapter
        if not host_hbas:
            return
        # for/else: return early when no iSCSI HBA is present at all.
        for hba in host_hbas:
            if hba.__class__.__name__ == 'HostInternetScsiHba':
                hba_device = hba.device
                if target_portal:
                    # Check if iscsi host is already in the send target host
                    # list
                    send_targets = getattr(hba, 'configuredSendTarget', [])
                    send_tgt_portals = ['%s:%s' % (s.address, s.port) for s in
                                        send_targets]
                    if target_portal not in send_tgt_portals:
                        self._iscsi_add_send_target_host(storage_system_mor,
                                                         hba_device,
                                                         target_portal)
                break
        else:
            return
        LOG.debug("Rescanning HBA %s", hba_device)
        self._session._call_method(self._session.vim,
                                   "RescanHba", storage_system_mor, hbaDevice=hba_device)
        LOG.debug("Rescanned HBA %s ", hba_device)
    def _iscsi_discover_target(self, data):
        """Get iSCSI target, rescanning the HBA if necessary.

        Returns a ``(device_name, uuid)`` tuple; both elements are ``None``
        when the target cannot be found even after a rescan.
        """
        target_portal = data['target_portal']
        target_iqn = data['target_iqn']
        LOG.debug("Discovering iSCSI target %(target_iqn)s from "
                  "%(target_portal)s.",
                  {'target_iqn': target_iqn, 'target_portal': target_portal})
        device_name, uuid = self._iscsi_get_target(data)
        if device_name:
            LOG.debug("Storage target found. No need to discover")
            return (device_name, uuid)
        # Rescan iSCSI HBA with iscsi target host
        self._iscsi_rescan_hba(target_portal)
        # Find iSCSI Target again
        device_name, uuid = self._iscsi_get_target(data)
        if device_name:
            LOG.debug("Discovered iSCSI target %(target_iqn)s from "
                      "%(target_portal)s.",
                      {'target_iqn': target_iqn,
                       'target_portal': target_portal})
        else:
            # NOTE(review): grammar typo ("Unable to discovered") kept as-is;
            # runtime strings are not changed in a documentation pass.
            LOG.debug("Unable to discovered iSCSI target %(target_iqn)s "
                      "from %(target_portal)s.",
                      {'target_iqn': target_iqn,
                       'target_portal': target_portal})
        return (device_name, uuid)
def _iscsi_get_host_iqn(self, instance):
"""Return the host iSCSI IQN."""
try:
host_mor = vm_util.get_host_ref_for_vm(self._session, instance)
except exception.InstanceNotFound:
host_mor = vm_util.get_host_ref(self._session, self._cluster)
hbas_ret = self._session._call_method(
vutil,
"get_object_property",
host_mor,
"config.storageDevice.hostBusAdapter")
# Meaning there are no host bus adapters on the host
if hbas_ret is None:
return
host_hbas = hbas_ret.HostHostBusAdapter
if not host_hbas:
return
for hba in host_hbas:
if hba.__class__.__name__ == 'HostInternetScsiHba':
return hba.iScsiName
def get_volume_connector(self, instance):
"""Return volume connector information."""
try:
vm_ref = vm_util.get_vm_ref(self._session, instance)
except exception.InstanceNotFound:
vm_ref = None
iqn = self._iscsi_get_host_iqn(instance)
connector = {'ip': CONF.vmware.host_ip,
'initiator': iqn,
'host': CONF.vmware.host_ip}
if vm_ref:
connector['instance'] = vutil.get_moref_value(vm_ref)
return connector
    @staticmethod
    def _get_volume_ref(connection_info_data):
        """Get the volume moref from the "data" field in connection_info ."""
        # Wrapped in a proxy so the moref can be re-fetched if it goes stale.
        return VolumeMoRefProxy(connection_info_data)
def _get_vmdk_base_volume_device(self, volume_ref):
# Get the vmdk file name that the VM is pointing to
hardware_devices = vm_util.get_hardware_devices(self._session,
volume_ref)
return vm_util.get_vmdk_volume_disk(hardware_devices)
    def _attach_volume_vmdk(self, connection_info, instance,
                            adapter_type=None):
        """Attach vmdk volume storage to VM instance.

        :param connection_info: dict whose 'data' entry identifies the volume.
        :param instance: the nova instance to attach to.
        :param adapter_type: optional override; defaults to the adapter type
            reported for the volume's vmdk.
        """
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        LOG.debug("_attach_volume_vmdk: %s", connection_info,
                  instance=instance)
        data = connection_info['data']
        volume_ref = self._get_volume_ref(data)
        # Get details required for adding disk device such as
        # adapter_type, disk_type
        vmdk = vm_util.get_vmdk_info(self._session, volume_ref)
        adapter_type = adapter_type or vmdk.adapter_type
        # IDE does not support disk hotplug
        if adapter_type == constants.ADAPTER_TYPE_IDE:
            state = vm_util.get_vm_state(self._session, instance)
            if state != power_state.SHUTDOWN:
                raise exception.Invalid(_('%s does not support disk '
                                          'hotplug.') % adapter_type)
        # Attach the disk to virtual machine instance
        self.attach_disk_to_vm(vm_ref, instance, adapter_type, vmdk.disk_type,
                               vmdk_path=vmdk.path)
        # Store the uuid of the volume_device so detach can find it later.
        self._update_volume_details(vm_ref, data['volume_id'],
                                    vmdk.device.backing.uuid)
        LOG.debug("Attached VMDK: %s", connection_info, instance=instance)
    def _attach_volume_iscsi(self, connection_info, instance,
                             adapter_type=None):
        """Attach iscsi volume storage to VM instance.

        Discovers the iSCSI target (rescanning the HBA when needed) and
        attaches it as a disk of type 'rdmp'.
        """
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        # Attach Volume to VM
        LOG.debug("_attach_volume_iscsi: %s", connection_info,
                  instance=instance)
        data = connection_info['data']
        # Discover iSCSI Target
        device_name = self._iscsi_discover_target(data)[0]
        if device_name is None:
            raise exception.StorageError(
                reason=_("Unable to find iSCSI Target"))
        if adapter_type is None:
            # Fall back to the SCSI adapter type already present on the VM.
            hardware_devices = vm_util.get_hardware_devices(self._session,
                                                            vm_ref)
            adapter_type = vm_util.get_scsi_adapter_type(hardware_devices)
        self.attach_disk_to_vm(vm_ref, instance,
                               adapter_type, 'rdmp',
                               device_name=device_name)
        LOG.debug("Attached ISCSI: %s", connection_info, instance=instance)
def _get_controller_key_and_unit(self, vm_ref, adapter_type):
LOG.debug("_get_controller_key_and_unit vm: %(vm_ref)s, adapter: "
"%(adapter)s.",
{'vm_ref': vm_ref, 'adapter': adapter_type})
client_factory = self._session.vim.client.factory
devices = self._session._call_method(vutil,
"get_object_property",
vm_ref,
"config.hardware.device")
return vm_util.allocate_controller_key_and_unit_number(
client_factory, devices, adapter_type)
    def _attach_fcd(self, vm_ref, adapter_type, fcd_id, ds_ref_val):
        """Attach the FCD disk *fcd_id* (on datastore *ds_ref_val*) to the VM.

        When no suitable controller exists, it is created in a separate
        reconfigure call before the disk itself is attached.
        """
        (controller_key, unit_number,
         controller_spec) = self._get_controller_key_and_unit(
            vm_ref, adapter_type)
        if controller_spec:
            # No controller available to attach, create one first.
            config_spec = self._session.vim.client.factory.create(
                'ns0:VirtualMachineConfigSpec')
            config_spec.deviceChange = [controller_spec]
            vm_util.reconfigure_vm(self._session, vm_ref, config_spec)
            # Re-query so the freshly created controller supplies key/unit.
            (controller_key, unit_number,
             controller_spec) = self._get_controller_key_and_unit(
                vm_ref, adapter_type)
        vm_util.attach_fcd(
            self._session, vm_ref, fcd_id, ds_ref_val, controller_key,
            unit_number)
    def _attach_volume_fcd(self, connection_info, instance):
        """Attach fcd volume storage to VM instance."""
        LOG.debug("_attach_volume_fcd: %s", connection_info, instance=instance)
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        data = connection_info['data']
        adapter_type = data['adapter_type']
        # IDE disks cannot be hot-plugged: require the VM to be powered off.
        if adapter_type == constants.ADAPTER_TYPE_IDE:
            state = vm_util.get_vm_state(self._session, instance)
            if state != power_state.SHUTDOWN:
                raise exception.Invalid(_('%s does not support disk '
                                          'hotplug.') % adapter_type)
        self._attach_fcd(vm_ref, adapter_type, data['id'], data['ds_ref_val'])
        LOG.debug("Attached fcd: %s", connection_info, instance=instance)
def attach_volume(self, connection_info, instance, adapter_type=None):
"""Attach volume storage to VM instance."""
driver_type = connection_info['driver_volume_type']
LOG.debug("Volume attach. Driver type: %s", driver_type,
instance=instance)
if driver_type == constants.DISK_FORMAT_VMDK:
self._attach_volume_vmdk(connection_info, instance, adapter_type)
elif driver_type == constants.DISK_FORMAT_ISCSI:
self._attach_volume_iscsi(connection_info, instance, adapter_type)
elif driver_type == constants.DISK_FORMAT_FCD:
self._attach_volume_fcd(connection_info, instance)
else:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
def _get_host_of_vm(self, vm_ref):
"""Get the ESX host of given VM."""
return self._session._call_method(vutil, 'get_object_property',
vm_ref, 'runtime').host
def _get_res_pool_of_host(self, host):
"""Get the resource pool of given host's cluster."""
# Get the compute resource, the host belongs to
compute_res = self._session._call_method(vutil,
'get_object_property',
host,
'parent')
# Get resource pool from the compute resource
return self._session._call_method(vutil,
'get_object_property',
compute_res,
'resourcePool')
def _get_res_pool_of_vm(self, vm_ref):
"""Get resource pool to which the VM belongs."""
# Get the host, the VM belongs to
host = self._get_host_of_vm(vm_ref)
# Get the resource pool of host's cluster.
return self._get_res_pool_of_host(host)
    def _consolidate_vmdk_volume(self, instance, vm_ref, device, volume_ref,
                                 adapter_type=None, disk_type=None):
        """Consolidate volume backing VMDK files if needed.

        The volume's VMDK file attached to an instance can be moved by SDRS
        if enabled on the cluster.
        By this the VMDK files can get copied onto another datastore and the
        copy on this new location will be the latest version of the VMDK file.
        So at the time of detach, we need to consolidate the current backing
        VMDK file with the VMDK file in the new location.

        We need to ensure that the VMDK chain (snapshots) remains intact during
        the consolidation. SDRS retains the chain when it copies VMDK files
        over, so for consolidation we relocate the backing with move option
        as moveAllDiskBackingsAndAllowSharing and then delete the older version
        of the VMDK file attaching the new version VMDK file.

        In the case of a volume boot we need to ensure that the volume
        is on the datastore of the instance.
        """
        original_device = self._get_vmdk_base_volume_device(volume_ref)
        original_device_path = original_device.backing.fileName
        current_device_path = device.backing.fileName
        if original_device_path == current_device_path:
            # The volume is not moved from its original location.
            # No consolidation is required.
            LOG.debug("The volume has not been displaced from "
                      "its original location: %s. No consolidation "
                      "needed.", current_device_path)
            return
        # The volume has been moved from its original location.
        # Need to consolidate the VMDK files.
        LOG.info("The volume's backing has been relocated to %s. Need to "
                 "consolidate backing disk file.", current_device_path)
        # Pick the host and resource pool on which the instance resides.
        # Move the volume to the datastore where the new VMDK file is present.
        host = self._get_host_of_vm(vm_ref)
        res_pool = self._get_res_pool_of_host(host)
        datastore = device.backing.datastore
        detached = False
        LOG.debug("Relocating volume's backing: %(backing)s to resource "
                  "pool: %(rp)s, datastore: %(ds)s, host: %(host)s.",
                  {'backing': volume_ref, 'rp': res_pool, 'ds': datastore,
                   'host': host})
        try:
            vm_util.relocate_vm(self._session, volume_ref, res_pool, datastore,
                                host)
        except oslo_vmw_exceptions.FileNotFoundException:
            # Volume's vmdk was moved; remove the device so that we can
            # relocate the volume.
            LOG.warning("Virtual disk: %s of volume's backing not found.",
                        original_device_path, exc_info=True)
            LOG.debug("Removing disk device of volume's backing and "
                      "reattempting relocate.")
            self.detach_disk_from_vm(volume_ref, instance, original_device)
            detached = True
            vm_util.relocate_vm(self._session, volume_ref, res_pool, datastore,
                                host)
        # Volume's backing is relocated now; detach the old vmdk if not done
        # already.
        if not detached:
            try:
                self.detach_disk_from_vm(volume_ref, instance,
                                         original_device, destroy_disk=True)
            except oslo_vmw_exceptions.FileNotFoundException:
                LOG.debug("Original volume backing %s is missing, no need "
                          "to detach it", original_device.backing.fileName)
        # Attach the current volume to the volume_ref
        self.attach_disk_to_vm(volume_ref, instance,
                               adapter_type, disk_type,
                               vmdk_path=current_device_path)
def _get_vmdk_backed_disk_device(self, vm_ref, connection_info_data):
# Get the vmdk file name that the VM is pointing to
hardware_devices = vm_util.get_hardware_devices(self._session, vm_ref)
# Get disk uuid
disk_uuid = self._get_volume_uuid(vm_ref,
connection_info_data['volume_id'])
device = vm_util.get_vmdk_backed_disk_device(hardware_devices,
disk_uuid)
if not device:
raise exception.DiskNotFound(message=_("Unable to find volume"))
return device
def _detach_volume_vmdk(self, connection_info, instance):
"""Detach volume storage to VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Detach Volume from VM
LOG.debug("_detach_volume_vmdk: %s", connection_info,
instance=instance)
data = connection_info['data']
volume_ref = self._get_volume_ref(data)
device = self._get_vmdk_backed_disk_device(vm_ref, data)
hardware_devices = vm_util.get_hardware_devices(self._session, vm_ref)
adapter_type = None
for hw_device in hardware_devices:
if hw_device.key == device.controllerKey:
adapter_type = vm_util.CONTROLLER_TO_ADAPTER_TYPE.get(
hw_device.__class__.__name__)
break
# IDE does not support disk hotplug
if adapter_type == constants.ADAPTER_TYPE_IDE:
state = vm_util.get_vm_state(self._session, instance)
if state != power_state.SHUTDOWN:
raise exception.Invalid(_('%s does not support disk '
'hotplug.') % adapter_type)
disk_type = vm_util._get_device_disk_type(device)
self._consolidate_vmdk_volume(instance, vm_ref, device, volume_ref,
adapter_type=adapter_type,
disk_type=disk_type)
self.detach_disk_from_vm(vm_ref, instance, device)
# Remove key-value pair <volume_id, vmdk_uuid> from instance's
# extra config. Setting value to empty string will remove the key.
self._update_volume_details(vm_ref, data['volume_id'], "")
LOG.debug("Detached VMDK: %s", connection_info, instance=instance)
def _detach_volume_iscsi(self, connection_info, instance):
"""Detach volume storage to VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Detach Volume from VM
LOG.debug("_detach_volume_iscsi: %s", connection_info,
instance=instance)
data = connection_info['data']
# Discover iSCSI Target
device_name, uuid = self._iscsi_get_target(data)
if device_name is None:
raise exception.StorageError(
reason=_("Unable to find iSCSI Target"))
# Get the vmdk file name that the VM is pointing to
hardware_devices = vm_util.get_hardware_devices(self._session, vm_ref)
device = vm_util.get_rdm_disk(hardware_devices, uuid)
if device is None:
raise exception.DiskNotFound(message=_("Unable to find volume"))
self.detach_disk_from_vm(vm_ref, instance, device, destroy_disk=True)
LOG.debug("Detached ISCSI: %s", connection_info, instance=instance)
def _detach_volume_fcd(self, connection_info, instance):
"""Detach fcd volume storage to VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
data = connection_info['data']
adapter_type = data['adapter_type']
if adapter_type == constants.ADAPTER_TYPE_IDE:
state = vm_util.get_vm_state(self._session, instance)
if state != power_state.SHUTDOWN:
raise exception.Invalid(_('%s does not support disk '
'hotplug.') % adapter_type)
vm_util.detach_fcd(self._session, vm_ref, data['id'])
def detach_volume(self, connection_info, instance):
"""Detach volume storage to VM instance."""
driver_type = connection_info['driver_volume_type']
LOG.debug("Volume detach. Driver type: %s", driver_type,
instance=instance)
if driver_type == constants.DISK_FORMAT_VMDK:
self._detach_volume_vmdk(connection_info, instance)
elif driver_type == constants.DISK_FORMAT_ISCSI:
self._detach_volume_iscsi(connection_info, instance)
elif driver_type == constants.DISK_FORMAT_FCD:
self._detach_volume_fcd(connection_info, instance)
else:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
def attach_root_volume(self, connection_info, instance,
datastore, adapter_type=None):
"""Attach a root volume to the VM instance."""
driver_type = connection_info['driver_volume_type']
LOG.debug("Root volume attach. Driver type: %s", driver_type,
instance=instance)
if driver_type == constants.DISK_FORMAT_VMDK:
vm_ref = vm_util.get_vm_ref(self._session, instance)
data = connection_info['data']
# Get the volume ref
volume_ref = self._get_volume_ref(data)
# Pick the resource pool on which the instance resides. Move the
# volume to the datastore of the instance.
res_pool = self._get_res_pool_of_vm(vm_ref)
vm_util.relocate_vm(self._session, volume_ref, res_pool, datastore)
self.attach_volume(connection_info, instance, adapter_type)
|
{
"content_hash": "6ff5a3d2dea95c332664db122c9885d0",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 79,
"avg_line_length": 46.39665653495441,
"alnum_prop": 0.5566182973566117,
"repo_name": "mahak/nova",
"id": "e1d60cc75135bdff289b5397163171795e8dfddf",
"size": "31201",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/virt/vmwareapi/volumeops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
}
|
import importlib
import os
import hypothesis
from hypothesis import strategies as st
import numpy as np
import pytest
from pandas.compat import PY3
import pandas.util._test_decorators as td
import pandas as pd
# Register and activate a "ci" Hypothesis profile so property-based tests
# share one deadline/health-check configuration across the test suite.
hypothesis.settings.register_profile(
    "ci",
    # Hypothesis timing checks are tuned for scalars by default, so we bump
    # them from 200ms to 500ms per test case as the global default. If this
    # is too short for a specific test, (a) try to make it faster, and (b)
    # if it really is slow add `@settings(deadline=...)` with a working value,
    # or `deadline=None` to entirely disable timeouts for that test.
    deadline=500,
    timeout=hypothesis.unlimited,
    suppress_health_check=(hypothesis.HealthCheck.too_slow,)
)
hypothesis.settings.load_profile("ci")
def pytest_addoption(parser):
    """Register pandas-specific command line options with pytest."""
    options = [
        ("--skip-slow", "skip slow tests"),
        ("--skip-network", "skip network tests"),
        ("--run-high-memory", "run high memory tests"),
        ("--only-slow", "run only slow tests"),
        ("--strict-data-files",
         "Fail if a test is skipped for missing data file."),
    ]
    # All options are simple boolean flags.
    for flag, help_text in options:
        parser.addoption(flag, action="store_true", help=help_text)
def pytest_runtest_setup(item):
    """Skip tests whose markers conflict with the given command line options."""
    keywords = item.keywords
    get_option = item.config.getoption
    is_slow = 'slow' in keywords
    if is_slow and get_option("--skip-slow"):
        pytest.skip("skipping due to --skip-slow")
    if not is_slow and get_option("--only-slow"):
        pytest.skip("skipping due to --only-slow")
    if 'network' in keywords and get_option("--skip-network"):
        pytest.skip("skipping due to --skip-network")
    # High-memory tests are opt-in rather than opt-out.
    if 'high_memory' in keywords and not get_option(
            "--run-high-memory"):
        pytest.skip(
            "skipping high memory test since --run-high-memory was not set")
# Configurations for all tests and all test modules
@pytest.fixture(autouse=True)
def configure_tests():
    # Make chained-assignment misuse raise instead of warn in every test.
    pd.set_option('chained_assignment', 'raise')
# For running doctests: make np and pd names available
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
    doctest_namespace['np'] = np
    doctest_namespace['pd'] = pd
@pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])
def spmatrix(request):
    """Parametrized fixture returning each scipy.sparse matrix class."""
    from scipy import sparse
    return getattr(sparse, request.param + '_matrix')
@pytest.fixture(params=[0, 1, 'index', 'columns'],
                ids=lambda x: "axis {!r}".format(x))
def axis(request):
    """
    Fixture for returning the axis numbers of a DataFrame.
    """
    return request.param
# Alias so tests can make DataFrame-axis intent explicit.
axis_frame = axis
@pytest.fixture(params=[0, 'index'], ids=lambda x: "axis {!r}".format(x))
def axis_series(request):
    """
    Fixture for returning the axis numbers of a Series.
    """
    return request.param
@pytest.fixture
def ip():
    """
    Get an instance of IPython.InteractiveShell.
    Will raise a skip if IPython is not installed.
    """
    pytest.importorskip('IPython', minversion="6.0.0")
    from IPython.core.interactiveshell import InteractiveShell
    return InteractiveShell()
@pytest.fixture(params=[True, False, None])
def observed(request):
    """ pass in the observed keyword to groupby for [True, False]
    This indicates whether categoricals should return values for
    values which are not in the grouper [False / None], or only values which
    appear in the grouper [True]. [None] is supported for future compatibility
    if we decide to change the default (and would need to warn if this
    parameter is not passed)"""
    return request.param
# Dunder names of the arithmetic operators exercised by several test modules.
_all_arithmetic_operators = ['__add__', '__radd__',
                             '__sub__', '__rsub__',
                             '__mul__', '__rmul__',
                             '__floordiv__', '__rfloordiv__',
                             '__truediv__', '__rtruediv__',
                             '__pow__', '__rpow__',
                             '__mod__', '__rmod__']
if not PY3:
    # Python 2 additionally has the classic-division dunders.
    _all_arithmetic_operators.extend(['__div__', '__rdiv__'])
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
    """
    Fixture for dunder names for common arithmetic operations
    """
    return request.param
_all_numeric_reductions = ['sum', 'max', 'min',
                           'mean', 'prod', 'std', 'var', 'median',
                           'kurt', 'skew']
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
    """
    Fixture for numeric reduction names
    """
    return request.param
_all_boolean_reductions = ['all', 'any']
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
    """
    Fixture for boolean reduction names
    """
    return request.param
# (function, name) pairs from SelectionMixin's cython dispatch table.
_cython_table = pd.core.base.SelectionMixin._cython_table.items()
@pytest.fixture(params=list(_cython_table))
def cython_table_items(request):
    """Fixture yielding each (function, name) pair of the cython table."""
    return request.param
def _get_cython_table_params(ndframe, func_names_and_expected):
    """Combine a frame with functions from SelectionMixin._cython_table.

    Parameters
    ----------
    ndframe : DataFrame or Series
    func_names_and_expected : Sequence of two items
        The first item is a name of a NDFrame method ('sum', 'prod') etc.
        The second item is the expected return value.

    Returns
    -------
    results : list
        List of three items (DataFrame, function, expected result), including
        one entry per cython-table function registered under each name.
    """
    results = []
    for func_name, expected in func_names_and_expected:
        results.append((ndframe, func_name, expected))
        # Also pair up every cython-table callable registered for this name.
        results.extend(
            (ndframe, table_func, expected)
            for table_func, table_name in _cython_table
            if table_name == func_name
        )
    return results
@pytest.fixture(params=['__eq__', '__ne__', '__le__',
                        '__lt__', '__ge__', '__gt__'])
def all_compare_operators(request):
    """
    Fixture for dunder names for common compare operations
    * >=
    * >
    * ==
    * !=
    * <
    * <=
    """
    return request.param
@pytest.fixture(params=[None, 'gzip', 'bz2', 'zip',
                        pytest.param('xz', marks=td.skip_if_no_lzma)])
def compression(request):
    """
    Fixture for trying common compression types in compression tests
    """
    return request.param
@pytest.fixture(params=['gzip', 'bz2', 'zip',
                        pytest.param('xz', marks=td.skip_if_no_lzma)])
def compression_only(request):
    """
    Fixture for trying common compression types in compression tests excluding
    uncompressed case
    """
    return request.param
@pytest.fixture(params=[True, False])
def writable(request):
    """
    Fixture parametrizing whether an array is writable (True/False).
    """
    return request.param
@pytest.fixture(scope='module')
def datetime_tz_utc():
    # Python 3 only: stdlib UTC tzinfo singleton.
    from datetime import timezone
    return timezone.utc
@pytest.fixture(params=['inner', 'outer', 'left', 'right'])
def join_type(request):
    """
    Fixture for trying all types of join operations
    """
    return request.param
@pytest.fixture
def datapath(request):
    """Get the path to a data file.
    Parameters
    ----------
    path : str
        Path to the file, relative to ``pandas/tests/``
    Returns
    -------
    path : path including ``pandas/tests``.
    Raises
    ------
    ValueError
        If the path doesn't exist and the --strict-data-files option is set.
    """
    BASE_PATH = os.path.join(os.path.dirname(__file__), 'tests')
    # The fixture returns a callable so tests can join path segments lazily.
    def deco(*args):
        path = os.path.join(BASE_PATH, *args)
        if not os.path.exists(path):
            if request.config.getoption("--strict-data-files"):
                msg = "Could not find file {} and --strict-data-files is set."
                raise ValueError(msg.format(path))
            else:
                msg = "Could not find {}."
                pytest.skip(msg.format(path))
        return path
    return deco
@pytest.fixture
def iris(datapath):
    """The iris dataset as a DataFrame."""
    return pd.read_csv(datapath('data', 'iris.csv'))
@pytest.fixture(params=['nlargest', 'nsmallest'])
def nselect_method(request):
    """
    Fixture for trying all nselect methods
    """
    return request.param
@pytest.fixture(params=['left', 'right', 'both', 'neither'])
def closed(request):
    """
    Fixture for trying all interval closed parameters
    """
    return request.param
@pytest.fixture(params=['left', 'right', 'both', 'neither'])
def other_closed(request):
    """
    Secondary closed fixture to allow parametrizing over all pairs of closed
    """
    return request.param
# NOTE: the last param was previously np.float('NaN'); np.float was only a
# deprecated alias of the builtin float (removed in NumPy 1.24), so builtin
# float('NaN') produces the exact same value and type without the alias.
@pytest.fixture(params=[None, np.nan, pd.NaT, float('nan'), float('NaN')])
def nulls_fixture(request):
    """
    Fixture for each null type in pandas
    """
    return request.param
nulls_fixture2 = nulls_fixture  # Generate cartesian product of nulls_fixture
@pytest.fixture(params=[None, np.nan, pd.NaT])
def unique_nulls_fixture(request):
    """
    Fixture for each null type in pandas, each null type exactly once
    """
    return request.param
# Generate cartesian product of unique_nulls_fixture:
unique_nulls_fixture2 = unique_nulls_fixture
# Timezone spellings covering pytz-style and dateutil-style names.
TIMEZONES = [None, 'UTC', 'US/Eastern', 'Asia/Tokyo', 'dateutil/US/Pacific',
             'dateutil/Asia/Singapore']
@td.parametrize_fixture_doc(str(TIMEZONES))
@pytest.fixture(params=TIMEZONES)
def tz_naive_fixture(request):
    """
    Fixture for trying timezones including default (None): {0}
    """
    return request.param
@td.parametrize_fixture_doc(str(TIMEZONES[1:]))
@pytest.fixture(params=TIMEZONES[1:])
def tz_aware_fixture(request):
    """
    Fixture for trying explicit timezones: {0}
    """
    return request.param
# Dtype groupings used by the parametrized dtype fixtures below. The builtin
# Python types (int, float, complex, str) are included alongside the numpy
# string spellings.
UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"]
SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
FLOAT_DTYPES = [float, "float32", "float64"]
COMPLEX_DTYPES = [complex, "complex64", "complex128"]
STRING_DTYPES = [str, 'str', 'U']
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = ALL_REAL_DTYPES + COMPLEX_DTYPES + STRING_DTYPES
@pytest.fixture(params=STRING_DTYPES)
def string_dtype(request):
    """Parametrized fixture for string dtypes.
    * str
    * 'str'
    * 'U'
    """
    return request.param
@pytest.fixture(params=FLOAT_DTYPES)
def float_dtype(request):
    """
    Parameterized fixture for float dtypes.
    * float
    * float32
    * float64
    """
    return request.param
@pytest.fixture(params=COMPLEX_DTYPES)
def complex_dtype(request):
    """
    Parameterized fixture for complex dtypes.
    * complex
    * complex64
    * complex128
    """
    return request.param
@pytest.fixture(params=SIGNED_INT_DTYPES)
def sint_dtype(request):
    """
    Parameterized fixture for signed integer dtypes.
    * int
    * int8
    * int16
    * int32
    * int64
    """
    return request.param
@pytest.fixture(params=UNSIGNED_INT_DTYPES)
def uint_dtype(request):
    """
    Parameterized fixture for unsigned integer dtypes.
    * uint8
    * uint16
    * uint32
    * uint64
    """
    return request.param
@pytest.fixture(params=ALL_INT_DTYPES)
def any_int_dtype(request):
    """
    Parameterized fixture for any integer dtypes.
    * int
    * int8
    * uint8
    * int16
    * uint16
    * int32
    * uint32
    * int64
    * uint64
    """
    return request.param
@pytest.fixture(params=ALL_REAL_DTYPES)
def any_real_dtype(request):
    """
    Parameterized fixture for any (purely) real numeric dtypes.
    * int
    * int8
    * uint8
    * int16
    * uint16
    * int32
    * uint32
    * int64
    * uint64
    * float
    * float32
    * float64
    """
    return request.param
@pytest.fixture(params=ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
    """
    Parameterized fixture for all numpy dtypes.
    * int
    * int8
    * uint8
    * int16
    * uint16
    * int32
    * uint32
    * int64
    * uint64
    * float
    * float32
    * float64
    * complex
    * complex64
    * complex128
    * str
    * 'str'
    * 'U'
    """
    return request.param
@pytest.fixture
def mock():
    """
    Fixture providing the 'mock' module.
    Uses 'unittest.mock' for Python 3. Attempts to import the 3rd party 'mock'
    package for Python 2, skipping if not present.
    """
    if PY3:
        return importlib.import_module("unittest.mock")
    else:
        return pytest.importorskip("mock")
# ----------------------------------------------------------------
# Global setup for tests using Hypothesis
# Registering these strategies makes them globally available via st.from_type,
# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py
for name in 'MonthBegin MonthEnd BMonthBegin BMonthEnd'.split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(cls, st.builds(
        cls,
        n=st.integers(-99, 99),
        normalize=st.booleans(),
    ))
for name in 'YearBegin YearEnd BYearBegin BYearEnd'.split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(cls, st.builds(
        cls,
        n=st.integers(-5, 5),
        normalize=st.booleans(),
        month=st.integers(min_value=1, max_value=12),
    ))
for name in 'QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd'.split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(cls, st.builds(
        cls,
        n=st.integers(-24, 24),
        normalize=st.booleans(),
        # Quarter offsets take startingMonth rather than month.
        startingMonth=st.integers(min_value=1, max_value=12)
    ))
|
{
"content_hash": "851bcac45b1d24601c6d40d630e6c3c3",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 78,
"avg_line_length": 25.134191176470587,
"alnum_prop": 0.6266364367732027,
"repo_name": "harisbal/pandas",
"id": "03e09175bdb09af4cf8fbc7f5a3dc18fbb4d3604",
"size": "13673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4907"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14298777"
},
{
"name": "Shell",
"bytes": "28914"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
}
|
import os
from AppKit import *
import vanilla
from defconAppKit.controls.placardScrollView import DefconAppKitPlacardNSScrollView, PlacardPopUpButton
# -------
# Sorting
# -------
def fontFileNameSort(fonts):
    """Sort fonts by file name.

    Fonts without a path (unsaved fonts) sort by a "Family-Style" string
    built from their info, substituting "Untitled Family"/"Untitled Style"
    for missing names.

    Cleanup vs. the previous version: removed the never-read noPathCounter
    and a dead ``s = []`` assignment that was immediately overwritten.
    """
    sortable = []
    for font in fonts:
        if font.path is not None:
            s = os.path.basename(font.path)
        else:
            if font.info.familyName is not None:
                s = font.info.familyName
            else:
                s = "Untitled Family"
            if font.info.styleName is not None:
                s += "-" + font.info.styleName
            else:
                s += "-Untitled Style"
        sortable.append((s, font))
    fonts = [item[-1] for item in sorted(sortable)]
    return fonts
def _isItalic(font):
isItalic = False
if font.info.styleMapStyleName is not None and "italic" in font.info.styleMapStyleName:
isItalic = True
elif font.info.italicAngle != 0:
isItalic = True
return isItalic
def fontWidthWeightSort(fonts):
    """Sort fonts by family name, width class, weight class, italic flag,
    style name and file name (in that order of precedence)."""
    sortable = []
    for font in fonts:
        info = font.info
        fileName = None
        if font.path is not None:
            fileName = os.path.basename(font.path)
        record = (
            info.familyName,
            info.openTypeOS2WidthClass,
            info.openTypeOS2WeightClass,
            _isItalic(font),
            info.styleName,
            fileName,
            font,
        )
        sortable.append(record)
    return [record[-1] for record in sorted(sortable)]
# -----------
# Main Object
# -----------
class FontList(vanilla.List):
    """
    This object presents the user with a standard list showing fonts.
    It follows the same API as vanilla.List. When you set objects into
    the view, you always pass font objects. The object will then extract
    the relevant data to display.
    Constructor Arguments:
    All of the vanilla.List constructor arguments apply, with the
    following modifications.
    columnDescriptions
    This sets up the columns in the list. These follow the same format
    of the column descriptions in vanilla.List. The only exception is that
    you need to provide an "attribute" key/value pair. This is the font
    attribute that the list will extract display values from. For example:
    dict(title="Font Path", key="fontPath", attribute="path")
    If no columnDescriptions is provided, the font will be shown in a
    single column represented with its file name or a combination of its
    family and style names.
    The list may show an "Options..." placard if either of the following is given:
    placardSortItems
    A list of dictionaries describing font sorting options. The dictionaries
    must follow this form:
    dict(title=string, callback=callback)
    The title must begin with "Sort by" for this to work properly. The callback
    must accept one argument: fonts. This will be a list of all fonts in the list.
    The callback should return a list of sorted fonts.
    placardItems
    A list of dictionaries describing arbitrary items to show in the placard.
    The dictionaries must follow this form:
    dict(title=string, callback=callback)
    The callback must accept one argument, sender, which will be the font list.
    """
    nsScrollViewClass = DefconAppKitPlacardNSScrollView
    # NOTE(review): the list defaults below are mutable default arguments —
    # they are shared across all instances. Harmless as long as no caller
    # mutates them; consider None-sentinels if that ever changes.
    def __init__(self, posSize, items,
        placardSortItems=[
            dict(title="Sort by File Name", callback=fontFileNameSort),
            dict(title="Sort by Weight and Width", callback=fontWidthWeightSort),
        ],
        placardItems=[],
        **kwargs):
        # make default column descriptions if needed
        if not kwargs.get("columnDescriptions"):
            kwargs["columnDescriptions"] = [fontListFontNameColumnDescription]
            kwargs["showColumnTitles"] = False
        # set some defaults
        kwargs["autohidesScrollers"] = False
        # build the internal column reference:
        # _keyToAttribute: list key -> font attribute to display
        # _orderedListKeys: keys in column order
        # _wrappedListItems: font -> NSMutableDictionary wrapper shown in the list
        self._keyToAttribute = {}
        self._orderedListKeys = []
        self._wrappedListItems = {}
        for columnDescription in kwargs["columnDescriptions"]:
            title = columnDescription["title"]
            key = columnDescription.get("key", title)
            attribute = columnDescription["attribute"]
            self._keyToAttribute[key] = attribute
            self._orderedListKeys.append(key)
        # wrap the items
        items = [self._wrapFontForList(font) for font in items]
        # start the list
        super(FontList, self).__init__(posSize, items, **kwargs)
        # set the initial sort mode
        self._sortMode = None
        self._placardSortOptions = {}
        self._placardOptions = {}
        # placard
        if len(placardSortItems) + len(placardItems):
            # build the sort options
            if placardSortItems:
                self._sortMode = placardSortItems[0]["title"]
                for d in placardSortItems:
                    title = d["title"]
                    # the placard callback dispatches on this prefix
                    assert title.startswith("Sort by")
                    self._placardSortOptions[title] = d["callback"]
            # build the other options
            if placardItems:
                for d in placardItems:
                    self._placardOptions[d["title"]] = d["callback"]
            # build
            placardW = 65
            placardH = 16
            self._placard = vanilla.Group((0, 0, placardW, placardH))
            # make a default item (hidden; serves as the popup's static title)
            item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_("Options...", None, "")
            item.setHidden_(True)
            items = [item]
            # add the items
            items += [d["title"] for d in placardSortItems]
            items += [d["title"] for d in placardItems]
            self._placard.optionsButton = PlacardPopUpButton((0, 0, placardW, placardH), items,
                callback=self._placardCallback, sizeStyle="mini")
            button = self._placard.optionsButton.getNSPopUpButton()
            button.setTitle_("Options...")
            self._nsObject.setPlacard_(self._placard.getNSView())
        # update the sort
        self._updateSort()
    def _breakCycles(self):
        # Drop all font wrappers and observer registrations so the view can
        # be garbage collected.
        # NOTE(review): deleting from the dict while iterating .keys() is
        # only safe on Python 2, where keys() returns a list — confirm
        # before porting to Python 3.
        for font in self._wrappedListItems.keys():
            del self._wrappedListItems[font]
            self._unsubscribeFromFont(font)
        self._placard = None
        self._placardSortOptions = {}
        super(FontList, self)._breakCycles()
    def setSortMode(self, mode):
        """
        Set the sort mode in the popup.
        """
        self._sortMode = mode
        self._updateSort()
    # -------------------
    # Placard and Sorting
    # -------------------
    def _placardCallback(self, sender):
        # Dispatch a placard popup selection: sort titles resort the list,
        # anything else runs the registered arbitrary callback.
        index = sender.get()
        title = sender.getItems()[index]
        # title item
        if title == "Options...":
            return
        # sorting
        elif title.startswith("Sort by"):
            self._sortMode = title
            self._updateSort()
        # other
        else:
            self._placardOptions[title](self)
        # reset the popup back to the "Options..." title item
        sender.set(0)
    def _updateSort(self):
        # Resort the list contents with the active sort callback while
        # preserving the selection.
        if self._sortMode is None:
            return
        # gather the wrappers and the selection states
        oldSelection = self.getSelection()
        fontToWrapper = {}
        for index, wrapper in enumerate(self._arrayController.content()):
            fontToWrapper[wrapper["_font"]] = (wrapper, index in oldSelection)
        # sort the fonts
        # NOTE(review): .keys() as a list is Python 2 behavior; sort
        # callbacks receive a list there, a view on Python 3.
        fonts = fontToWrapper.keys()
        sortFunction = self._placardSortOptions[self._sortMode]
        fonts = sortFunction(fonts)
        # clear the list
        count = len(self)
        for index in range(count):
            count -= 1
            super(FontList, self).__delitem__(count)
        # reset the items
        sortedWrappers = []
        newSelection = []
        for index, font in enumerate(fonts):
            wrapper, selected = fontToWrapper[font]
            sortedWrappers.append(wrapper)
            if selected:
                newSelection.append(index)
        super(FontList, self).set(sortedWrappers)
        # reset the selection
        self.setSelection(newSelection)
    # -------------
    # list behavior
    # -------------
    def _subscribeToFont(self, font):
        # keep the wrapper in sync with defcon Font.Changed notifications
        font.addObserver(self, "_fontChanged", "Font.Changed")
    def _unsubscribeFromFont(self, font):
        font.removeObserver(self, "Font.Changed")
    def _fontChanged(self, notification):
        # Refresh the wrapper's display values after the font changed.
        font = notification.object
        if font not in self._wrappedListItems:
            return
        d = self._wrappedListItems[font]
        for key, attr in self._keyToAttribute.items():
            if attr == defaultFontIDAttribute:
                value = makeDefaultIDString(font)
            else:
                value = getattr(font, attr)
            d[key] = value
    # editing
    def _listEditCallback(self, sender):
        # Push edits made in the list UI back onto the font object.
        # NOTE(review): _listEditChangingFont/_listEditChangingAttribute are
        # never initialized in __init__, so the first read below would raise
        # AttributeError unless set elsewhere — confirm against vanilla.List.
        # skip if in an edit loop
        if self._listEditChangingFont is not None:
            return
        if not self.getSelection():
            return
        columnIndex, rowIndex = sender.getEditedColumnAndRow()
        if columnIndex == -1 or rowIndex == -1:
            rowIndex = self.getSelection()[0]
            editedKey = None
            editedAttribute = None
        else:
            editedKey = self._orderedListKeys[columnIndex]
            editedAttribute = self._keyToAttribute[editedKey]
        item = super(FontList, self).__getitem__(rowIndex)
        # NOTE(review): _wrapFontForList stores the font object directly in
        # "_font", but this call treats it as a callable (weakref style) —
        # one of the two looks wrong; verify which convention is intended.
        font = item["_font"]()
        self._listEditChangingAttribute = editedAttribute
        self._listEditChangingFont = font
        # known attribute. process it individually.
        if editedAttribute is not None:
            # set the attribute
            value = item[editedKey]
            fontValue = getattr(font, editedAttribute)
            if value != fontValue:
                setattr(font, editedAttribute, value)
        # unknown attribute. process all.
        else:
            for key, attribute in self._keyToAttribute.items():
                value = getattr(font, attribute)
                if value != item[key]:
                    setattr(font, attribute, item[key])
        # update the dict contents
        for key, attribute in self._keyToAttribute.items():
            if key == editedKey and attribute == editedAttribute:
                continue
            value = getattr(font, attribute)
            if value != item[key]:
                item[key] = value
        self._listEditChangingAttribute = None
        self._listEditChangingFont = None
    # wrapping
    def _wrapFontForList(self, font):
        # Build (or refresh) the NSMutableDictionary wrapper that the list
        # actually displays for this font.
        changed = False
        if font in self._wrappedListItems:
            d = self._wrappedListItems[font]
        else:
            d = NSMutableDictionary.dictionary()
            self._subscribeToFont(font)
        for key, attribute in self._keyToAttribute.items():
            if attribute == defaultFontIDAttribute:
                value = makeDefaultIDString(font)
            else:
                value = getattr(font, attribute)
            if not key in d or d.get(key) != value:
                d[key] = value
                changed = True
        # store the font object itself for later unwrapping
        d["_font"] = font
        if changed:
            self._wrappedListItems[font] = d
        return d
    def _unwrapListItems(self, items=None):
        if items is None:
            items = super(FontList, self).get()
        fonts = [d["_font"] for d in items]
        return fonts
    # standard API
    def __contains__(self, font):
        return font in self._wrappedListItems
    def __getitem__(self, index):
        item = super(FontList, self).__getitem__(index)
        font = self._unwrapListItems([item])[0]
        return font
    def __setitem__(self, index, font):
        # NOTE(review): `existing` is an unwrapped font (self[index]) yet is
        # later indexed with ["_font"], and the super().__setitem__ call
        # passes the raw `font` instead of the wrapped `item` — both look
        # inconsistent with the rest of the class; verify before relying on
        # item assignment.
        existing = self[index]
        item = self._wrapFontForList(font)
        super(FontList, self).__setitem__(index, font)
        if not super(FontList, self).__contains__(existing):
            otherFont = existing["_font"]
            del self._wrappedListItems[otherFont]
            self._unsubscribeFromFont(otherFont)
    def __delitem__(self, index):
        item = super(FontList, self).__getitem__(index)
        super(FontList, self).__delitem__(index)
        # only drop the wrapper when the item no longer appears in the list
        if not super(FontList, self).__contains__(item):
            font = item["_font"]
            del self._wrappedListItems[font]
            self._unsubscribeFromFont(font)
    def append(self, font):
        item = self._wrapFontForList(font)
        super(FontList, self).append(item)
    def remove(self, font):
        item = self._wrappedListItems[font]
        super(FontList, self).remove(item)
        if not super(FontList, self).__contains__(item):
            font = item["_font"]
            del self._wrappedListItems[font]
            self._unsubscribeFromFont(font)
    def index(self, font):
        item = self._wrappedListItems[font]
        return super(FontList, self).index(item)
    def insert(self, index, font):
        item = self._wrapFontForList(font)
        super(FontList, self).insert(index, item)
    def extend(self, fonts):
        items = [self._wrapFontForList(font) for font in fonts]
        super(FontList, self).extend(items)
    def set(self, fonts):
        """
        Set the fonts in the list.
        """
        # remove removed wrapped items
        removedFonts = set(self._wrappedListItems) - set(fonts)
        for font in removedFonts:
            del self._wrappedListItems[font]
            self._unsubscribeFromFont(font)
        # wrap the fonts for the list
        wrappedFonts = [self._wrapFontForList(font) for font in fonts]
        # set the list
        super(FontList, self).set(wrappedFonts)
    def get(self):
        """
        Get the fonts in the list.
        """
        return self._unwrapListItems()
# --------------------------
# Formatters, Cells and Such
# --------------------------
class DirtyStatusIndicatorCell(NSActionCell):
    """Table cell drawing the dirty-state dot image for its object value."""

    def drawWithFrame_inView_(self, frame, view):
        # Fix: the previous version assigned `image` (twice) only inside
        # `if not value:`, so any truthy (dirty) value hit the draw call
        # below with `image` undefined and raised NameError. Fetch the
        # image exactly once, for either value.
        value = self.objectValue()
        image = _drawDirtyStateImage(value)
        image.drawAtPoint_fromRect_operation_fraction_(frame.origin, ((0, 0), (13, 17)), NSCompositeSourceOver, 1.0)
def _drawDirtyStateImage(value):
    """Return the 13x17 dirty-state image for *value*, drawing and caching
    it under a named NSImage on first use. A truthy value gets a red dot;
    a falsy value gets a blank image of the same size."""
    if value:
        imageName = "defconAppKitFontListDirtyStateTrue"
    else:
        imageName = "defconAppKitFontListDirtyStateFalse"
    # named-image lookup acts as the cache
    image = NSImage.imageNamed_(imageName)
    if image is None:
        # make the image
        width = 13
        height = 17
        image = NSImage.alloc().initWithSize_((width, height))
        image.lockFocus()
        # draw if dirty
        if value:
            rect = ((2, 4), (9, 9))
            path = NSBezierPath.bezierPathWithOvalInRect_(rect)
            path.addClip()
            # colors
            color1 = NSColor.colorWithCalibratedRed_green_blue_alpha_(1.0, 0.1, 0.1, 1)
            color2 = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.5, 0.0, 0.0, 1)
            # fill
            color1.set()
            path.fill()
            # shadow
            # NSGradient may be unavailable on older OS versions; in that
            # case skip the gradient and keep the flat fill.
            try:
                gradient = NSGradient.alloc().initWithColors_([color1, color2])
                gradient.drawInBezierPath_angle_(path, -90)
            except NameError:
                pass
            # stroke
            color2.set()
            path.setLineWidth_(2)
            path.stroke()
        image.unlockFocus()
        # register under the name so the next call hits the cache
        image.setName_(imageName)
        image = NSImage.imageNamed_(imageName)
    return image
class FilePathFormatter(NSFormatter):
    """NSFormatter that displays file paths truncated at the head, so the
    (most informative) file name end of the path stays visible."""

    def stringForObjectValue_(self, obj):
        # None/NSNull display as an empty string
        if obj is None or isinstance(obj, NSNull):
            return ""
        return obj

    def attributedStringForObjectValue_withDefaultAttributes_(self, obj, attrs):
        if obj is None or isinstance(obj, NSNull):
            obj = ""
        # truncate at the head ("...end/of/path") rather than the tail
        paragraph = NSMutableParagraphStyle.alloc().init()
        paragraph.setLineBreakMode_(NSLineBreakByTruncatingHead)
        attrs = dict(attrs)
        attrs[NSParagraphStyleAttributeName] = paragraph
        return NSAttributedString.alloc().initWithString_attributes_(obj, attrs)

    def objectValueForString_(self, string):
        # paths round-trip unchanged
        return string
def makeDefaultIDString(font):
    """Return a display string identifying *font*: its file name when it has
    a path, otherwise "Family-Style" built from its info with
    "Untitled Family"/"Untitled Style" substituted for missing names."""
    if font.path is not None:
        return os.path.basename(font.path)
    familyName = font.info.familyName
    styleName = font.info.styleName
    family = familyName if familyName is not None else "Untitled Family"
    style = styleName if styleName is not None else "Untitled Style"
    return family + "-" + style
# --------------------------
# Common Column Descriptions
# --------------------------
# Sentinel attribute name: columns using it display makeDefaultIDString(font)
# instead of a real font attribute (see _wrapFontForList/_fontChanged).
defaultFontIDAttribute = "defconAppKitFontIDString"
# Ready-made column descriptions for the most common FontList columns.
fontListFontNameColumnDescription = dict(title="Font", attribute=defaultFontIDAttribute, editable=False)
fontListFontPathColumnDescription = dict(title="Path", attribute="path", editable=False, formatter=FilePathFormatter.alloc().init())
fontListDirtyStateColoumnDescription = dict(title="Dirty", attribute="dirty", cell=DirtyStatusIndicatorCell.alloc().init(), width=13, editable=False)
|
{
"content_hash": "3c3e08e205e9393c8b87c3fdf374fa5f",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 149,
"avg_line_length": 34.72489959839358,
"alnum_prop": 0.5916844966171283,
"repo_name": "Ye-Yong-Chi/defconAppKit",
"id": "86d19dcdf1f8266b349a190ee4572adf62e78dc7",
"size": "17293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/defconAppKit/controls/fontList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "302141"
}
],
"symlink_target": ""
}
|
import datetime
from tempest.api.compute import base
from tempest import test
from tempest.test import attr
import time
class TenantUsagesV3Test(base.BaseV3ComputeAdminTest):
    """Admin tests for the Nova v3 simple tenant usage API."""
    _interface = 'json'
    @classmethod
    def setUpClass(cls):
        super(TenantUsagesV3Test, cls).setUpClass()
        cls.adm_client = cls.tenant_usages_admin_client
        cls.client = cls.tenant_usages_client
        cls.identity_client = cls._get_identity_admin_client()
        resp, tenants = cls.identity_client.list_tenants()
        # Resolve the id of the tenant the (non-admin) client runs under.
        cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
                         cls.client.tenant_name][0]
        # Create a server in the demo tenant
        resp, server = cls.create_test_server(wait_until='ACTIVE')
        # Brief pause so the server's usage gets recorded before querying.
        time.sleep(2)
        # Query a +/- one day window around "now" so the new usage falls inside it.
        now = datetime.datetime.now()
        cls.start = cls._parse_strtime(now - datetime.timedelta(days=1))
        cls.end = cls._parse_strtime(now + datetime.timedelta(days=1))
    @classmethod
    def _parse_strtime(cls, at):
        # Returns formatted datetime
        return at.strftime('%Y-%m-%dT%H:%M:%S.%f')
    @test.skip_because(bug='1265416')
    @attr(type='gate')
    def test_list_usage_all_tenants(self):
        # Get usage for all tenants
        params = {'start': self.start,
                  'end': self.end,
                  'detailed': int(bool(True))}
        resp, tenant_usage = self.adm_client.list_tenant_usages(params)
        self.assertEqual(200, resp.status)
        # 8 entries expected in the usage response — presumably the number of
        # usage fields; TODO confirm against the API schema.
        self.assertEqual(len(tenant_usage), 8)
    @test.skip_because(bug='1265416')
    @attr(type='gate')
    def test_get_usage_tenant(self):
        # Get usage for a specific tenant
        params = {'start': self.start,
                  'end': self.end}
        resp, tenant_usage = self.adm_client.get_tenant_usage(
            self.tenant_id, params)
        self.assertEqual(200, resp.status)
        self.assertEqual(len(tenant_usage), 8)
    @test.skip_because(bug='1265416')
    @attr(type='gate')
    def test_get_usage_tenant_with_non_admin_user(self):
        # Get usage for a specific tenant with non admin user
        params = {'start': self.start,
                  'end': self.end}
        resp, tenant_usage = self.client.get_tenant_usage(
            self.tenant_id, params)
        self.assertEqual(200, resp.status)
        self.assertEqual(len(tenant_usage), 8)
|
{
"content_hash": "1e8307afd5944ce6aed8c5eb4c773c70",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 72,
"avg_line_length": 33.91428571428571,
"alnum_prop": 0.6141533277169334,
"repo_name": "ntymtsiv/tempest",
"id": "e16332fa47cd55760ce7c64b72c6c1844a8f1e3e",
"size": "3005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/compute/v3/admin/test_simple_tenant_usage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2312198"
},
{
"name": "Shell",
"bytes": "9160"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
# AWX
import awx.main.fields
from awx.main.models import CredentialType
from awx.main.utils.common import set_current_apps
def setup_tower_managed_defaults(apps, schema_editor):
    """Data-migration step: (re)create Tower-managed credential types.

    ``set_current_apps(apps)`` installs the historical app registry first,
    presumably so model lookups inside CredentialType resolve against the
    migration state rather than the live models — TODO confirm.
    """
    set_current_apps(apps)
    CredentialType.setup_tower_managed_defaults()
class Migration(migrations.Migration):
    # Generated schema migration: adds the CredentialInputSource model,
    # extends CredentialType.kind choices with 'external', and runs the
    # data migration that installs Tower-managed credential type defaults.
    # NOTE: generated migrations are frozen history — do not edit by hand.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('taggit', '0002_auto_20150616_2121'),
        ('main', '0066_v350_inventorysource_custom_virtualenv'),
    ]
    operations = [
        migrations.CreateModel(
            name='CredentialInputSource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(default=None, editable=False)),
                ('modified', models.DateTimeField(default=None, editable=False)),
                ('description', models.TextField(blank=True, default='')),
                ('input_field_name', models.CharField(max_length=1024)),
                ('metadata', awx.main.fields.DynamicCredentialInputField(blank=True, default=dict)),
                # NOTE(review): the related_name values below look like a
                # mangled '%(class)s' template — verify against the original
                # generated migration before relying on the reverse accessors.
                ('created_by', models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{'class': 'credentialinputsource', 'model_name': 'credentialinputsource', 'app_label': 'main'}(class)s_created+", to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="{'class': 'credentialinputsource', 'model_name': 'credentialinputsource', 'app_label': 'main'}(class)s_modified+", to=settings.AUTH_USER_MODEL)),
                ('source_credential', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target_input_sources', to='main.Credential')),
                ('tags', taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
                ('target_credential', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='input_sources', to='main.Credential')),
            ],
        ),
        migrations.AlterField(
            model_name='credentialtype',
            name='kind',
            field=models.CharField(choices=[('ssh', 'Machine'), ('vault', 'Vault'), ('net', 'Network'), ('scm', 'Source Control'), ('cloud', 'Cloud'), ('insights', 'Insights'), ('external', 'External')], max_length=32),
        ),
        migrations.AlterUniqueTogether(
            name='credentialinputsource',
            unique_together=set([('target_credential', 'input_field_name')]),
        ),
        migrations.RunPython(setup_tower_managed_defaults),
    ]
|
{
"content_hash": "7327fba02c92100bfa18cb21ce26ef4d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 296,
"avg_line_length": 56,
"alnum_prop": 0.6610449735449735,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "32190b2bf21c634fa9678cb9a3c82b157e13d24b",
"size": "3048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/main/migrations/0067_v350_credential_plugins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
class Account(object):
    """A simple bank account with an overdraft credit line.

    The balance may go negative, but never below ``-CreditLine``;
    withdrawals and transfers that would exceed the credit line are
    refused and return False.
    """

    def __init__(self, holder, number, balance, credit_line=1500):
        self.Holder = holder            # account holder's name
        self.Number = number            # account number
        self.Balance = balance          # current balance (may be negative)
        self.CreditLine = credit_line   # maximum allowed overdraft

    def _covers(self, amount):
        # True if withdrawing `amount` keeps the balance within the credit line.
        return self.Balance - amount >= -self.CreditLine

    def deposit(self, amount):
        """Add `amount` to the balance.

        Bug fix: the original assigned ``self.Balance = amount``, which
        discarded the existing balance instead of adding to it.
        """
        self.Balance += amount

    def withdraw(self, amount):
        """Withdraw `amount`; return True on success, False if the
        withdrawal would exceed the credit line."""
        if not self._covers(amount):
            # coverage insufficient
            return False
        self.Balance -= amount
        return True

    def balance(self):
        """Return the current balance."""
        return self.Balance

    def transfer(self, target, amount):
        """Move `amount` from this account to `target`; return True on
        success, False if the credit line would be exceeded."""
        if not self._covers(amount):
            # coverage insufficient
            return False
        self.Balance -= amount
        target.Balance += amount
        return True
|
{
"content_hash": "11bc380be7ec966ce7a3268dc4c25120",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 66,
"avg_line_length": 28.448275862068964,
"alnum_prop": 0.5587878787878788,
"repo_name": "jimkiiru/james-kiiru-bc17-week1",
"id": "7acf443dc785a717b6b30adaebf86bd24c58b0c8",
"size": "825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day2/OOP.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2567"
},
{
"name": "HTML",
"bytes": "1502"
},
{
"name": "Python",
"bytes": "5387"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from setuptools import setup
from pyxl320.version import __version__ as VERSION
from build_utils import BuildCommand
from build_utils import PublishCommand
from build_utils import BinaryDistribution
PACKAGE_NAME = 'pyxl320'
# Wire the project's custom build/publish commands to this package/version.
BuildCommand.pkg = PACKAGE_NAME
PublishCommand.pkg = PACKAGE_NAME
PublishCommand.version = VERSION
# Package metadata; long_description and install_requires are read from
# README.rst / requirements.txt at build time.
setup(
	author='Kevin Walchko',
	author_email='walchko@users.noreply.github.com',
	name=PACKAGE_NAME,
	version=VERSION,
	description='A library to control dynamixel XL-320 servos with python',
	long_description=open('README.rst').read(),
	url='http://github.com/walchko/{}'.format(PACKAGE_NAME),
	classifiers=[
		'Development Status :: 4 - Beta',
		'Intended Audience :: Developers',
		'License :: OSI Approved :: MIT License',
		'Operating System :: OS Independent',
		'Programming Language :: Python :: 2.7',
		'Programming Language :: Python :: 3.6',
		'Topic :: Software Development :: Libraries',
		'Topic :: Software Development :: Libraries :: Python Modules',
		'Topic :: Software Development :: Libraries :: Application Frameworks'
	],
	license='MIT',
	keywords=['dynamixel', 'xl320', 'xl-320', 'servo', 'actuator', 'library', 'robotics', 'robot', 'smart', 'spider'],
	packages=[PACKAGE_NAME],
	install_requires=open('requirements.txt').readlines(),
	cmdclass={
		'publish': PublishCommand,
		'make': BuildCommand
	},
	# Command-line helper scripts installed alongside the library.
	scripts=[
		'bin/set_id.py',
		'bin/servo_ping.py',
		'bin/set_angle.py',
		'bin/set_baud_rate.py',
		'bin/servo_reboot.py',
		'bin/servo_reset.py',
		'bin/get_angle.py'
	]
)
|
{
"content_hash": "5a55f5a4fd87b663e398b773ff808944",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 115,
"avg_line_length": 30.745098039215687,
"alnum_prop": 0.7098214285714286,
"repo_name": "walchko/pyxl320",
"id": "1e957e53ce4f167fdc1c88b998c5c20bbcb55b50",
"size": "1753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44646"
}
],
"symlink_target": ""
}
|
from pywb.warcserver.index.indexsource import MementoIndexSource
from pywb.warcserver.index.indexsource import RemoteIndexSource
from pywb.warcserver.index.indexsource import WBMementoIndexSource
from webrecorder.load.wamloader import WAMLoader
# ============================================================================
class WAMSourceLoader(WAMLoader):
    """WAM loader that additionally builds a warcserver index source per
    archive (and per listed collection), collected in ``self.sources``."""
    def __init__(self, memento_cls=None, remote_cls=None, wb_memento_cls=None):
        # Index-source classes are injectable (e.g. for tests); default to
        # the standard pywb implementations.
        self.sources = {}
        self.memento_cls = memento_cls or MementoIndexSource
        self.remote_cls = remote_cls or RemoteIndexSource
        self.wb_memento_cls = wb_memento_cls or WBMementoIndexSource
        super(WAMSourceLoader, self).__init__()
    def load_archive(self, pk, webarchive):
        # if archive was not loaded, don't init source
        if not super(WAMSourceLoader, self).load_archive(pk, webarchive):
            return False
        collections = webarchive.get('collections')
        replay_url = self.replay_info[pk]['replay_url']
        apis = webarchive['apis']
        if collections and isinstance(collections, list):
            # Explicit collection list: one source per collection,
            # keyed "<pk>:<collection id>".
            for coll in collections:
                coll_name = pk + ':' + coll['id']
                self.add_source(replay_url, apis, coll_name, coll['id'])
        else:
            coll = ''
            if collections:
                if 'cdx' not in apis:
                    # regex collections only supported with cdx for now
                    # NOTE(review): returns None here while other paths
                    # return True/False — falsy either way, but inconsistent.
                    return
                coll = '{src_coll}'
            self.add_source(replay_url, apis, pk, collection=coll)
        return True
    def add_source(self, replay, apis, pk, collection=''):
        """Create an index source for *pk*: prefer the memento API, then
        cdx, otherwise fall back to a wb-memento source on the replay url."""
        replay = replay.replace('{collection}', collection)
        source = None
        if 'memento' in apis:
            timegate = apis['memento']['timegate'].replace('{collection}', collection) + '{url}'
            timemap = apis['memento']['timemap'].replace('{collection}', collection) + '{url}'
            source = self.memento_cls(timegate, timemap, replay)
        elif 'cdx' in apis:
            query = apis['cdx']['query'].replace('{collection}', collection)
            source = self.remote_cls(query, replay)
        else:
            source = self.wb_memento_cls(replay, '', replay)
        if source:
            self.sources[pk] = source
|
{
"content_hash": "60c57d11e385c3aef46170d3ececbc3f",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 96,
"avg_line_length": 35.66153846153846,
"alnum_prop": 0.5867126833477135,
"repo_name": "webrecorder/webrecorder",
"id": "7b7c2cdfe44d9337e37ce6066f8ae3ca3ef68578",
"size": "2318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webrecorder/webrecorder/load/wamsourceloader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "186476"
},
{
"name": "Dockerfile",
"bytes": "1370"
},
{
"name": "HTML",
"bytes": "258583"
},
{
"name": "JavaScript",
"bytes": "869251"
},
{
"name": "Python",
"bytes": "892243"
},
{
"name": "Shell",
"bytes": "2598"
}
],
"symlink_target": ""
}
|
from mparts.manager import Task
from mparts.host import HostInfo
from support import ResultsProvider, SetCPUs, FileSystem, SystemMonitor, waitForLog
import os, signal, re
# Public names exported by this benchmark module; appended incrementally
# next to each definition below.
__all__ = []
__all__.append("EximDaemon")
class EximDaemon(Task):
    """Task that configures, starts, and stops an Exim SMTP daemon on a host."""
    __info__ = ["host", "eximPath", "eximBuild", "mailDir", "spoolDir", "port"]
    def __init__(self, host, eximPath, eximBuild, mailDir, spoolDir, port):
        Task.__init__(self, host = host)
        self.host = host
        self.eximPath = eximPath
        self.eximBuild = eximBuild
        self.mailDir = mailDir
        self.spoolDir = spoolDir
        self.port = port
        # Handle of the running exim process; None until start() is called.
        self.__proc = None
    def start(self):
        # Create configuration
        config = self.host.outDir(self.name + ".configure")
        self.host.r.run(
            [os.path.join(self.eximPath, "mkconfig"),
             os.path.join(self.eximPath, self.eximBuild),
             self.mailDir, self.spoolDir],
            stdout = config)
        # Start Exim
        self.__proc = self.host.r.run(
            [os.path.join(self.eximPath, self.eximBuild, "bin", "exim"),
             "-bdf", "-oX", str(self.port), "-C", config],
            wait = False)
        # Block until the daemon logs that it is accepting SMTP connections.
        waitForLog(self.host, os.path.join(self.spoolDir, "log", "mainlog"),
                   "exim", 5, "listening for SMTP")
    def stop(self):
        # Ugh, there's no way to cleanly shut down Exim, so we can't
        # check for a sensible exit code.
        self.__proc.kill(signal.SIGTERM)
    def reset(self):
        # Kill a leftover daemon if one is still running from a prior run.
        if self.__proc:
            self.stop()
# Export the load-generator task alongside the daemon.
__all__.append("EximLoad")
class EximLoad(Task, ResultsProvider):
    """Task that drives the smtpbm client against the Exim daemon and
    reports the delivered-message throughput as the benchmark result."""
    __info__ = ["host", "trial", "eximPath", "clients", "port", "*sysmonOut"]
    # XXX Control warmup/duration
    def __init__(self, host, trial, eximPath, cores, clients, port, sysmon):
        Task.__init__(self, host = host, trial = trial)
        ResultsProvider.__init__(self, cores)
        self.host = host
        self.trial = trial
        self.eximPath = eximPath
        self.clients = clients
        self.port = port
        self.sysmon = sysmon
    def wait(self):
        # We may want to wipe out old mail files, but it doesn't seem
        # to make a difference.
        cmd = [os.path.join(self.eximPath, "run-smtpbm"),
               str(self.clients), str(self.port)]
        # Wrap the benchmark command with system monitoring markers.
        cmd = self.sysmon.wrap(cmd, "Starting", "Stopped")
        # Run
        logPath = self.host.getLogPath(self)
        self.host.r.run(cmd, stdout = logPath)
        # XXX Sanity check no paniclog or rejectlog, non-empty mboxes,
        # non-empty mainlog
        # Get result
        log = self.host.r.readFile(logPath)
        self.sysmonOut = self.sysmon.parseLog(log)
        ms = re.findall("(?m)^([0-9]+) messages", log)
        if len(ms) != 1:
            # Bug fix: the original passed the format string and count as two
            # separate RuntimeError args, so "%d" was never interpolated.
            raise RuntimeError("Expected 1 message count in log, got %d" %
                               len(ms))
        self.setResults(int(ms[0]), "message", "messages",
                        self.sysmonOut["time.real"])
class EximRunner(object):
    """Benchmark runner: wires up the Exim daemon, CPU setup, filesystem,
    system monitor, and one EximLoad task per trial, then runs the manager."""
    def __str__(self):
        return "exim"
    @staticmethod
    def run(m, cfg):
        # Exim is benchmarked across core counts, which requires CPU hotplug.
        if not cfg.hotplug:
            raise RuntimeError("The Exim benchmark requires hotplug = True. "
                               "Either enable hotplug or disable the Exim "
                               "benchmark in config.py.")
        host = cfg.primaryHost
        m += host
        m += HostInfo(host)
        fs = FileSystem(host, cfg.fs, clean = True)
        m += fs
        eximPath = os.path.join(cfg.benchRoot, "exim")
        m += SetCPUs(host = host, num = cfg.cores)
        # Mail dir and spool dir live on the benchmark filesystem.
        m += EximDaemon(host, eximPath, cfg.eximBuild,
                        os.path.join(fs.path + "0"),
                        os.path.join(fs.path + "spool"),
                        cfg.eximPort)
        sysmon = SystemMonitor(host)
        m += sysmon
        for trial in range(cfg.trials):
            # XXX It would be a pain to make clients dependent on
            # cfg.cores.
            m += EximLoad(host, trial, eximPath, cfg.cores,
                          cfg.clients, cfg.eximPort, sysmon)
        # m += cfg.monitors
        m.run()
__all__.append("runner")
# Module-level runner instance picked up by the benchmark harness.
runner = EximRunner()
|
{
"content_hash": "4cd3ff0a8c1e58221a9badcb58857635",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 83,
"avg_line_length": 34.58196721311475,
"alnum_prop": 0.5517895235837876,
"repo_name": "KMU-embedded/mosbench-ext",
"id": "459b9067500c43d2794344104a7358540783d9e6",
"size": "4219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exim/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "8491"
},
{
"name": "Awk",
"bytes": "45243"
},
{
"name": "Batchfile",
"bytes": "15130"
},
{
"name": "C",
"bytes": "38923116"
},
{
"name": "C++",
"bytes": "644544"
},
{
"name": "CSS",
"bytes": "38896"
},
{
"name": "DTrace",
"bytes": "12271"
},
{
"name": "Erlang",
"bytes": "312670"
},
{
"name": "Frege",
"bytes": "146785"
},
{
"name": "Groff",
"bytes": "255736"
},
{
"name": "HTML",
"bytes": "1026176"
},
{
"name": "Lex",
"bytes": "149807"
},
{
"name": "Makefile",
"bytes": "368369"
},
{
"name": "Objective-C",
"bytes": "20461"
},
{
"name": "PLpgSQL",
"bytes": "808278"
},
{
"name": "Perl",
"bytes": "336526"
},
{
"name": "Perl6",
"bytes": "11115"
},
{
"name": "Prolog",
"bytes": "11284"
},
{
"name": "Python",
"bytes": "198848"
},
{
"name": "SQLPL",
"bytes": "105796"
},
{
"name": "Shell",
"bytes": "982753"
},
{
"name": "SourcePawn",
"bytes": "6894"
},
{
"name": "TeX",
"bytes": "2582"
},
{
"name": "XS",
"bytes": "4040"
},
{
"name": "XSLT",
"bytes": "10992"
},
{
"name": "Yacc",
"bytes": "569728"
}
],
"symlink_target": ""
}
|
"""The IPython Controller Hub with 0MQ
This is the master object that handles connections from engines and clients,
and monitors traffic through the various queues.
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import json
import os
import sys
import time
from datetime import datetime
import zmq
from zmq.eventloop import ioloop
from zmq.eventloop.zmqstream import ZMQStream
# internal:
from IPython.utils.importstring import import_item
from IPython.utils.jsonutil import extract_dates
from IPython.utils.localinterfaces import localhost
from IPython.utils.py3compat import cast_bytes, unicode_type, iteritems
from IPython.utils.traitlets import (
HasTraits, Instance, Integer, Unicode, Dict, Set, Tuple, CBytes, DottedObjectName
)
from IPython.parallel import error, util
from IPython.parallel.factory import RegistrationFactory
from IPython.kernel.zmq.session import SessionFactory
from .heartmonitor import HeartMonitor
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def _passer(*args, **kwargs):
return
def _printer(*args, **kwargs):
print (args)
print (kwargs)
def empty_record():
"""Return an empty dict with all record keys."""
return {
'msg_id' : None,
'header' : None,
'metadata' : None,
'content': None,
'buffers': None,
'submitted': None,
'client_uuid' : None,
'engine_uuid' : None,
'started': None,
'completed': None,
'resubmitted': None,
'received': None,
'result_header' : None,
'result_metadata' : None,
'result_content' : None,
'result_buffers' : None,
'queue' : None,
'pyin' : None,
'pyout': None,
'pyerr': None,
'stdout': '',
'stderr': '',
}
def init_record(msg):
    """Initialize a TaskRecord dict from a request message *msg*.

    Request fields (header, content, metadata, buffers, submission time)
    are copied from the message; all result/routing fields start as None
    and the stream captures as empty strings.
    """
    header = msg['header']
    pending = ('client_uuid', 'engine_uuid', 'started', 'completed',
               'resubmitted', 'received', 'result_header',
               'result_metadata', 'result_content', 'result_buffers',
               'queue', 'pyin', 'pyout', 'pyerr')
    record = {
        'msg_id': header['msg_id'],
        'header': header,
        'content': msg['content'],
        'metadata': msg['metadata'],
        'buffers': msg['buffers'],
        'submitted': header['date'],
    }
    record.update(dict.fromkeys(pending))
    record['stdout'] = ''
    record['stderr'] = ''
    return record
class EngineConnector(HasTraits):
    """A simple object for accessing the various zmq connections of an object.
    Attributes are:
    id (int): engine ID
    uuid (unicode): engine UUID
    pending: set of msg_ids
    stallback: DelayedCallback for stalled registration
    """
    id = Integer(0)  # integer engine ID assigned by the Hub
    uuid = Unicode()  # engine UUID string
    pending = Set()  # msg_ids outstanding on this engine
    stallback = Instance(ioloop.DelayedCallback)  # fires if registration stalls
# Short, case-insensitive names accepted for HubFactory.db_class, mapped to
# the dotted import path of the backing database implementation
# (resolved via import_item in HubFactory.init_hub).
_db_shortcuts = {
    'sqlitedb' : 'IPython.parallel.controller.sqlitedb.SQLiteDB',
    'mongodb' : 'IPython.parallel.controller.mongodb.MongoDB',
    'dictdb' : 'IPython.parallel.controller.dictdb.DictDB',
    'nodb' : 'IPython.parallel.controller.dictdb.NoDB',
}
class HubFactory(RegistrationFactory):
    """The Configurable for setting up a Hub.

    Chooses ports (random by default), builds the engine/client
    connection-info dicts, and wires up all ZMQ streams before
    constructing the Hub itself in init_hub().
    """
    # port-pairs for monitoredqueues:
    # (index 0 is the client-facing port, index 1 the engine-facing port —
    # see how init_hub() indexes them into client_info/engine_info)
    hb = Tuple(Integer,Integer,config=True,
        help="""PUB/ROUTER Port pair for Engine heartbeats""")
    def _hb_default(self):
        return tuple(util.select_random_ports(2))
    mux = Tuple(Integer,Integer,config=True,
        help="""Client/Engine Port pair for MUX queue""")
    def _mux_default(self):
        return tuple(util.select_random_ports(2))
    task = Tuple(Integer,Integer,config=True,
        help="""Client/Engine Port pair for Task queue""")
    def _task_default(self):
        return tuple(util.select_random_ports(2))
    control = Tuple(Integer,Integer,config=True,
        help="""Client/Engine Port pair for Control queue""")
    def _control_default(self):
        return tuple(util.select_random_ports(2))
    iopub = Tuple(Integer,Integer,config=True,
        help="""Client/Engine Port pair for IOPub relay""")
    def _iopub_default(self):
        return tuple(util.select_random_ports(2))
    # single ports:
    mon_port = Integer(config=True,
        help="""Monitor (SUB) port for queue traffic""")
    def _mon_port_default(self):
        return util.select_random_ports(1)[0]
    notifier_port = Integer(config=True,
        help="""PUB port for sending engine status notifications""")
    def _notifier_port_default(self):
        return util.select_random_ports(1)[0]
    engine_ip = Unicode(config=True,
        help="IP on which to listen for engine connections. [default: loopback]")
    def _engine_ip_default(self):
        return localhost()
    engine_transport = Unicode('tcp', config=True,
        help="0MQ transport for engine connections. [default: tcp]")
    client_ip = Unicode(config=True,
        help="IP on which to listen for client connections. [default: loopback]")
    client_transport = Unicode('tcp', config=True,
        help="0MQ transport for client connections. [default : tcp]")
    monitor_ip = Unicode(config=True,
        help="IP on which to listen for monitor messages. [default: loopback]")
    monitor_transport = Unicode('tcp', config=True,
        help="0MQ transport for monitor messages. [default : tcp]")
    # client and monitor IPs share the engine IP's default (localhost)
    _client_ip_default = _monitor_ip_default = _engine_ip_default
    monitor_url = Unicode('')
    db_class = DottedObjectName('NoDB',
        config=True, help="""The class to use for the DB backend
        Options include:
        SQLiteDB: SQLite
        MongoDB : use MongoDB
        DictDB : in-memory storage (fastest, but be mindful of memory growth of the Hub)
        NoDB : disable database altogether (default)
        """)
    registration_timeout = Integer(0, config=True,
        help="Engine registration timeout in seconds [default: max(30,"
             "10*heartmonitor.period)]" )
    def _registration_timeout_default(self):
        if self.heartmonitor is None:
            # early initialization, this value will be ignored
            return 0
        # heartmonitor period is in milliseconds, so 10x in seconds is .01
        return max(30, int(.01 * self.heartmonitor.period))
    # not configurable
    db = Instance('IPython.parallel.controller.dictdb.BaseDB')
    heartmonitor = Instance('IPython.parallel.controller.heartmonitor.HeartMonitor')
    def _ip_changed(self, name, old, new):
        # setting `ip` fans out to all three listener IPs
        self.engine_ip = new
        self.client_ip = new
        self.monitor_ip = new
        self._update_monitor_url()
    def _update_monitor_url(self):
        self.monitor_url = "%s://%s:%i" % (self.monitor_transport, self.monitor_ip, self.mon_port)
    def _transport_changed(self, name, old, new):
        # setting `transport` fans out to all three transports
        self.engine_transport = new
        self.client_transport = new
        self.monitor_transport = new
        self._update_monitor_url()
    def __init__(self, **kwargs):
        super(HubFactory, self).__init__(**kwargs)
        self._update_monitor_url()
    def construct(self):
        self.init_hub()
    def start(self):
        self.heartmonitor.start()
        self.log.info("Heartmonitor started")
    def client_url(self, channel):
        """return full zmq url for a named client channel"""
        return "%s://%s:%i" % (self.client_transport, self.client_ip, self.client_info[channel])
    def engine_url(self, channel):
        """return full zmq url for a named engine channel"""
        return "%s://%s:%i" % (self.engine_transport, self.engine_ip, self.engine_info[channel])
    def init_hub(self):
        """construct Hub object"""
        ctx = self.context
        loop = self.loop
        if 'TaskScheduler.scheme_name' in self.config:
            scheme = self.config.TaskScheduler.scheme_name
        else:
            from .scheduler import TaskScheduler
            scheme = TaskScheduler.scheme_name.get_default_value()
        # build connection dicts
        # (index [1] of each port pair is engine-facing, [0] client-facing)
        engine = self.engine_info = {
            'interface' : "%s://%s" % (self.engine_transport, self.engine_ip),
            'registration' : self.regport,
            'control' : self.control[1],
            'mux' : self.mux[1],
            'hb_ping' : self.hb[0],
            'hb_pong' : self.hb[1],
            'task' : self.task[1],
            'iopub' : self.iopub[1],
            }
        client = self.client_info = {
            'interface' : "%s://%s" % (self.client_transport, self.client_ip),
            'registration' : self.regport,
            'control' : self.control[0],
            'mux' : self.mux[0],
            'task' : self.task[0],
            'task_scheme' : scheme,
            'iopub' : self.iopub[0],
            'notification' : self.notifier_port,
            }
        self.log.debug("Hub engine addrs: %s", self.engine_info)
        self.log.debug("Hub client addrs: %s", self.client_info)
        # Registrar socket
        q = ZMQStream(ctx.socket(zmq.ROUTER), loop)
        util.set_hwm(q, 0)
        q.bind(self.client_url('registration'))
        self.log.info("Hub listening on %s for registration.", self.client_url('registration'))
        if self.client_ip != self.engine_ip:
            # engines listen on a separate interface; bind it too
            q.bind(self.engine_url('registration'))
            self.log.info("Hub listening on %s for registration.", self.engine_url('registration'))
        ### Engine connections ###
        # heartbeat
        hpub = ctx.socket(zmq.PUB)
        hpub.bind(self.engine_url('hb_ping'))
        hrep = ctx.socket(zmq.ROUTER)
        util.set_hwm(hrep, 0)
        hrep.bind(self.engine_url('hb_pong'))
        self.heartmonitor = HeartMonitor(loop=loop, parent=self, log=self.log,
                                pingstream=ZMQStream(hpub,loop),
                                pongstream=ZMQStream(hrep,loop)
                            )
        ### Client connections ###
        # Notifier socket
        n = ZMQStream(ctx.socket(zmq.PUB), loop)
        n.bind(self.client_url('notification'))
        ### build and launch the queues ###
        # monitor socket
        sub = ctx.socket(zmq.SUB)
        sub.setsockopt(zmq.SUBSCRIBE, b"")
        sub.bind(self.monitor_url)
        sub.bind('inproc://monitor')
        sub = ZMQStream(sub, loop)
        # connect the db
        db_class = _db_shortcuts.get(self.db_class.lower(), self.db_class)
        self.log.info('Hub using DB backend: %r', (db_class.split('.')[-1]))
        self.db = import_item(str(db_class))(session=self.session.session,
                                            parent=self, log=self.log)
        # brief pause, presumably to let the DB backend settle — TODO confirm
        time.sleep(.25)
        # resubmit stream
        r = ZMQStream(ctx.socket(zmq.DEALER), loop)
        url = util.disambiguate_url(self.client_url('task'))
        r.connect(url)
        # convert seconds to msec
        registration_timeout = 1000*self.registration_timeout
        self.hub = Hub(loop=loop, session=self.session, monitor=sub, heartmonitor=self.heartmonitor,
                query=q, notifier=n, resubmit=r, db=self.db,
                engine_info=self.engine_info, client_info=self.client_info,
                log=self.log, registration_timeout=registration_timeout)
class Hub(SessionFactory):
"""The IPython Controller Hub with 0MQ connections
Parameters
==========
loop: zmq IOLoop instance
session: Session object
<removed> context: zmq context for creating new connections (?)
queue: ZMQStream for monitoring the command queue (SUB)
query: ZMQStream for engine registration and client queries requests (ROUTER)
heartbeat: HeartMonitor object checking the pulse of the engines
notifier: ZMQStream for broadcasting engine registration changes (PUB)
db: connection to db for out of memory logging of commands
NotImplemented
engine_info: dict of zmq connection information for engines to connect
to the queues.
client_info: dict of zmq connection information for engines to connect
to the queues.
"""
engine_state_file = Unicode()
# internal data structures:
ids=Set() # engine IDs
keytable=Dict()
by_ident=Dict()
engines=Dict()
clients=Dict()
hearts=Dict()
pending=Set()
queues=Dict() # pending msg_ids keyed by engine_id
tasks=Dict() # pending msg_ids submitted as tasks, keyed by client_id
completed=Dict() # completed msg_ids keyed by engine_id
all_completed=Set() # completed msg_ids keyed by engine_id
dead_engines=Set() # completed msg_ids keyed by engine_id
unassigned=Set() # set of task msg_ds not yet assigned a destination
incoming_registrations=Dict()
registration_timeout=Integer()
_idcounter=Integer(0)
# objects from constructor:
query=Instance(ZMQStream)
monitor=Instance(ZMQStream)
notifier=Instance(ZMQStream)
resubmit=Instance(ZMQStream)
heartmonitor=Instance(HeartMonitor)
db=Instance(object)
client_info=Dict()
engine_info=Dict()
def __init__(self, **kwargs):
"""
# universal:
loop: IOLoop for creating future connections
session: streamsession for sending serialized data
# engine:
queue: ZMQStream for monitoring queue messages
query: ZMQStream for engine+client registration and client requests
heartbeat: HeartMonitor object for tracking engines
# extra:
db: ZMQStream for db connection (NotImplemented)
engine_info: zmq address/protocol dict for engine connections
client_info: zmq address/protocol dict for client connections
"""
super(Hub, self).__init__(**kwargs)
# register our callbacks
self.query.on_recv(self.dispatch_query)
self.monitor.on_recv(self.dispatch_monitor_traffic)
self.heartmonitor.add_heart_failure_handler(self.handle_heart_failure)
self.heartmonitor.add_new_heart_handler(self.handle_new_heart)
self.monitor_handlers = {b'in' : self.save_queue_request,
b'out': self.save_queue_result,
b'intask': self.save_task_request,
b'outtask': self.save_task_result,
b'tracktask': self.save_task_destination,
b'incontrol': _passer,
b'outcontrol': _passer,
b'iopub': self.save_iopub_message,
}
self.query_handlers = {'queue_request': self.queue_status,
'result_request': self.get_results,
'history_request': self.get_history,
'db_request': self.db_query,
'purge_request': self.purge_results,
'load_request': self.check_load,
'resubmit_request': self.resubmit_task,
'shutdown_request': self.shutdown_request,
'registration_request' : self.register_engine,
'unregistration_request' : self.unregister_engine,
'connection_request': self.connection_request,
}
# ignore resubmit replies
self.resubmit.on_recv(lambda msg: None, copy=False)
self.log.info("hub::created hub")
@property
def _next_id(self):
"""gemerate a new ID.
No longer reuse old ids, just count from 0."""
newid = self._idcounter
self._idcounter += 1
return newid
# newid = 0
# incoming = [id[0] for id in itervalues(self.incoming_registrations)]
# # print newid, self.ids, self.incoming_registrations
# while newid in self.ids or newid in incoming:
# newid += 1
# return newid
#-----------------------------------------------------------------------------
# message validation
#-----------------------------------------------------------------------------
def _validate_targets(self, targets):
"""turn any valid targets argument into a list of integer ids"""
if targets is None:
# default to all
return self.ids
if isinstance(targets, (int,str,unicode_type)):
# only one target specified
targets = [targets]
_targets = []
for t in targets:
# map raw identities to ids
if isinstance(t, (str,unicode_type)):
t = self.by_ident.get(cast_bytes(t), t)
_targets.append(t)
targets = _targets
bad_targets = [ t for t in targets if t not in self.ids ]
if bad_targets:
raise IndexError("No Such Engine: %r" % bad_targets)
if not targets:
raise IndexError("No Engines Registered")
return targets
#-----------------------------------------------------------------------------
# dispatch methods (1 per stream)
#-----------------------------------------------------------------------------
@util.log_errors
def dispatch_monitor_traffic(self, msg):
"""all ME and Task queue messages come through here, as well as
IOPub traffic."""
self.log.debug("monitor traffic: %r", msg[0])
switch = msg[0]
try:
idents, msg = self.session.feed_identities(msg[1:])
except ValueError:
idents=[]
if not idents:
self.log.error("Monitor message without topic: %r", msg)
return
handler = self.monitor_handlers.get(switch, None)
if handler is not None:
handler(idents, msg)
else:
self.log.error("Unrecognized monitor topic: %r", switch)
    @util.log_errors
    def dispatch_query(self, msg):
        """Route registration requests and queries from clients."""
        try:
            idents, msg = self.session.feed_identities(msg)
        except ValueError:
            idents = []
        if not idents:
            self.log.error("Bad Query Message: %r", msg)
            return
        client_id = idents[0]
        try:
            msg = self.session.unserialize(msg, content=True)
        except Exception:
            # reply with a wrapped exception so the client isn't left hanging
            content = error.wrap_exception()
            self.log.error("Bad Query Message: %r", msg, exc_info=True)
            self.session.send(self.query, "hub_error", ident=client_id,
                    content=content)
            return
        # print client_id, header, parent, content
        #switch on message type:
        msg_type = msg['header']['msg_type']
        self.log.info("client::client %r requested %r", client_id, msg_type)
        handler = self.query_handlers.get(msg_type, None)
        try:
            # raise inside try so wrap_exception() can capture a real traceback
            assert handler is not None, "Bad Message Type: %r" % msg_type
        except:
            content = error.wrap_exception()
            self.log.error("Bad Message Type: %r", msg_type, exc_info=True)
            self.session.send(self.query, "hub_error", ident=client_id,
                    content=content)
            return
        else:
            handler(idents, msg)
    def dispatch_db(self, msg):
        """Dispatch a message on the database channel (not implemented)."""
        raise NotImplementedError
#---------------------------------------------------------------------------
# handler methods (1 per event)
#---------------------------------------------------------------------------
#----------------------- Heartbeat --------------------------------------
def handle_new_heart(self, heart):
"""handler to attach to heartbeater.
Called when a new heart starts to beat.
Triggers completion of registration."""
self.log.debug("heartbeat::handle_new_heart(%r)", heart)
if heart not in self.incoming_registrations:
self.log.info("heartbeat::ignoring new heart: %r", heart)
else:
self.finish_registration(heart)
def handle_heart_failure(self, heart):
"""handler to attach to heartbeater.
called when a previously registered heart fails to respond to beat request.
triggers unregistration"""
self.log.debug("heartbeat::handle_heart_failure(%r)", heart)
eid = self.hearts.get(heart, None)
uuid = self.engines[eid].uuid
if eid is None or self.keytable[eid] in self.dead_engines:
self.log.info("heartbeat::ignoring heart failure %r (not an engine or already dead)", heart)
else:
self.unregister_engine(heart, dict(content=dict(id=eid, queue=uuid)))
#----------------------- MUX Queue Traffic ------------------------------
    def save_queue_request(self, idents, msg):
        """Save a client request submitted on the MUX (direct) queue."""
        if len(idents) < 2:
            self.log.error("invalid identity prefix: %r", idents)
            return
        queue_id, client_id = idents[:2]
        try:
            msg = self.session.unserialize(msg)
        except Exception:
            self.log.error("queue::client %r sent invalid message to %r: %r", client_id, queue_id, msg, exc_info=True)
            return
        eid = self.by_ident.get(queue_id, None)
        if eid is None:
            self.log.error("queue::target %r not registered", queue_id)
            self.log.debug("queue:: valid are: %r", self.by_ident.keys())
            return
        record = init_record(msg)
        msg_id = record['msg_id']
        self.log.info("queue::client %r submitted request %r to %s", client_id, msg_id, eid)
        # Unicode in records
        record['engine_uuid'] = queue_id.decode('ascii')
        record['client_uuid'] = msg['header']['session']
        record['queue'] = 'mux'
        try:
            # it's posible iopub arrived first:
            existing = self.db.get_record(msg_id)
            # merge with the existing record, preferring already-stored values
            for key,evalue in iteritems(existing):
                rvalue = record.get(key, None)
                if evalue and rvalue and evalue != rvalue:
                    self.log.warn("conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue)
                elif evalue and not rvalue:
                    record[key] = evalue
            try:
                self.db.update_record(msg_id, record)
            except Exception:
                self.log.error("DB Error updating record %r", msg_id, exc_info=True)
        except KeyError:
            # no existing record: this is the first we hear of the request
            try:
                self.db.add_record(msg_id, record)
            except Exception:
                self.log.error("DB Error adding record %r", msg_id, exc_info=True)
        self.pending.add(msg_id)
        self.queues[eid].append(msg_id)
    def save_queue_result(self, idents, msg):
        """Save the reply to a MUX-queue request in the task database."""
        if len(idents) < 2:
            self.log.error("invalid identity prefix: %r", idents)
            return
        client_id, queue_id = idents[:2]
        try:
            msg = self.session.unserialize(msg)
        except Exception:
            self.log.error("queue::engine %r sent invalid message to %r: %r",
                    queue_id, client_id, msg, exc_info=True)
            return
        eid = self.by_ident.get(queue_id, None)
        if eid is None:
            self.log.error("queue::unknown engine %r is sending a reply: ", queue_id)
            return
        parent = msg['parent_header']
        if not parent:
            # a reply with no parent cannot be matched to a request
            return
        msg_id = parent['msg_id']
        if msg_id in self.pending:
            # normal completion: move the request from pending to completed
            self.pending.remove(msg_id)
            self.all_completed.add(msg_id)
            self.queues[eid].remove(msg_id)
            self.completed[eid].append(msg_id)
            self.log.info("queue::request %r completed on %s", msg_id, eid)
        elif msg_id not in self.all_completed:
            # it could be a result from a dead engine that died before delivering the
            # result
            self.log.warn("queue:: unknown msg finished %r", msg_id)
            return
        # update record anyway, because the unregistration could have been premature
        rheader = msg['header']
        md = msg['metadata']
        completed = rheader['date']
        started = extract_dates(md.get('started', None))
        result = {
            'result_header' : rheader,
            'result_metadata': md,
            'result_content': msg['content'],
            'received': datetime.now(),
            'started' : started,
            'completed' : completed
        }
        result['result_buffers'] = msg['buffers']
        try:
            self.db.update_record(msg_id, result)
        except Exception:
            self.log.error("DB Error updating record %r", msg_id, exc_info=True)
#--------------------- Task Queue Traffic ------------------------------
    def save_task_request(self, idents, msg):
        """Save the submission of a task."""
        client_id = idents[0]
        try:
            msg = self.session.unserialize(msg)
        except Exception:
            self.log.error("task::client %r sent invalid task message: %r",
                    client_id, msg, exc_info=True)
            return
        record = init_record(msg)
        record['client_uuid'] = msg['header']['session']
        record['queue'] = 'task'
        header = msg['header']
        msg_id = header['msg_id']
        self.pending.add(msg_id)
        # unassigned until the scheduler reports a destination engine
        self.unassigned.add(msg_id)
        try:
            # it's posible iopub arrived first:
            existing = self.db.get_record(msg_id)
            if existing['resubmitted']:
                for key in ('submitted', 'client_uuid', 'buffers'):
                    # don't clobber these keys on resubmit
                    # submitted and client_uuid should be different
                    # and buffers might be big, and shouldn't have changed
                    record.pop(key)
                    # still check content,header which should not change
                    # but are not expensive to compare as buffers
            # merge with the existing record, preferring stored values
            for key,evalue in iteritems(existing):
                if key.endswith('buffers'):
                    # don't compare buffers
                    continue
                rvalue = record.get(key, None)
                if evalue and rvalue and evalue != rvalue:
                    self.log.warn("conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue)
                elif evalue and not rvalue:
                    record[key] = evalue
            try:
                self.db.update_record(msg_id, record)
            except Exception:
                self.log.error("DB Error updating record %r", msg_id, exc_info=True)
        except KeyError:
            # first time we see this msg_id: create a fresh record
            try:
                self.db.add_record(msg_id, record)
            except Exception:
                self.log.error("DB Error adding record %r", msg_id, exc_info=True)
        except Exception:
            self.log.error("DB Error saving task request %r", msg_id, exc_info=True)
    def save_task_result(self, idents, msg):
        """save the result of a completed task."""
        client_id = idents[0]
        try:
            msg = self.session.unserialize(msg)
        except Exception:
            self.log.error("task::invalid task result message send to %r: %r",
                    client_id, msg, exc_info=True)
            return
        parent = msg['parent_header']
        if not parent:
            # print msg
            self.log.warn("Task %r had no parent!", msg)
            return
        msg_id = parent['msg_id']
        if msg_id in self.unassigned:
            self.unassigned.remove(msg_id)
        header = msg['header']
        md = msg['metadata']
        engine_uuid = md.get('engine', u'')
        eid = self.by_ident.get(cast_bytes(engine_uuid), None)
        status = md.get('status', None)
        if msg_id in self.pending:
            self.log.info("task::task %r finished on %s", msg_id, eid)
            self.pending.remove(msg_id)
            self.all_completed.add(msg_id)
            if eid is not None:
                # aborted tasks don't count toward the engine's completed list
                if status != 'aborted':
                    self.completed[eid].append(msg_id)
                if msg_id in self.tasks[eid]:
                    self.tasks[eid].remove(msg_id)
            completed = header['date']
            started = extract_dates(md.get('started', None))
            result = {
                'result_header' : header,
                'result_metadata': msg['metadata'],
                'result_content': msg['content'],
                'started' : started,
                'completed' : completed,
                'received' : datetime.now(),
                'engine_uuid': engine_uuid,
            }
            result['result_buffers'] = msg['buffers']
            try:
                self.db.update_record(msg_id, result)
            except Exception:
                self.log.error("DB Error saving task request %r", msg_id, exc_info=True)
        else:
            self.log.debug("task::unknown task %r finished", msg_id)
    def save_task_destination(self, idents, msg):
        """Record which engine the scheduler assigned a task to."""
        try:
            msg = self.session.unserialize(msg, content=True)
        except Exception:
            self.log.error("task::invalid task tracking message", exc_info=True)
            return
        content = msg['content']
        # print (content)
        msg_id = content['msg_id']
        engine_uuid = content['engine_id']
        eid = self.by_ident[cast_bytes(engine_uuid)]
        self.log.info("task::task %r arrived on %r", msg_id, eid)
        if msg_id in self.unassigned:
            self.unassigned.remove(msg_id)
        # else:
        # self.log.debug("task::task %r not listed as MIA?!"%(msg_id))
        self.tasks[eid].append(msg_id)
        # self.pending[msg_id][1].update(received=datetime.now(),engine=(eid,engine_uuid))
        try:
            self.db.update_record(msg_id, dict(engine_uuid=engine_uuid))
        except Exception:
            self.log.error("DB Error saving task destination %r", msg_id, exc_info=True)
    def mia_task_request(self, idents, msg):
        """Handle a missing-in-action task query (not implemented)."""
        raise NotImplementedError
        # NOTE: everything below the raise is unreachable dead code left from
        # an earlier implementation sketch.
        client_id = idents[0]
        # content = dict(mia=self.mia,status='ok')
        # self.session.send('mia_reply', content=content, idents=client_id)
#--------------------- IOPub Traffic ------------------------------
    def save_iopub_message(self, topics, msg):
        """save an iopub message into the db"""
        # print (topics)
        try:
            msg = self.session.unserialize(msg, content=True)
        except Exception:
            self.log.error("iopub::invalid IOPub message", exc_info=True)
            return
        parent = msg['parent_header']
        if not parent:
            self.log.warn("iopub::IOPub message lacks parent: %r", msg)
            return
        msg_id = parent['msg_id']
        msg_type = msg['header']['msg_type']
        content = msg['content']
        # ensure msg_id is in db
        try:
            rec = self.db.get_record(msg_id)
        except KeyError:
            # iopub can arrive before the request itself; create a stub record
            rec = empty_record()
            rec['msg_id'] = msg_id
            self.db.add_record(msg_id, rec)
        # stream
        d = {}
        if msg_type == 'stream':
            # append to any previously-stored output on the same stream
            name = content['name']
            s = rec[name] or ''
            d[name] = s + content['data']
        elif msg_type == 'pyerr':
            d['pyerr'] = content
        elif msg_type == 'pyin':
            d['pyin'] = content['code']
        elif msg_type in ('display_data', 'pyout'):
            d[msg_type] = content
        elif msg_type == 'status':
            pass
        elif msg_type == 'data_pub':
            self.log.info("ignored data_pub message for %s" % msg_id)
        else:
            self.log.warn("unhandled iopub msg_type: %r", msg_type)
        if not d:
            return
        try:
            self.db.update_record(msg_id, d)
        except Exception:
            self.log.error("DB Error saving iopub message %r", msg_id, exc_info=True)
#-------------------------------------------------------------------------
# Registration requests
#-------------------------------------------------------------------------
def connection_request(self, client_id, msg):
"""Reply with connection addresses for clients."""
self.log.info("client::client %r connected", client_id)
content = dict(status='ok')
jsonable = {}
for k,v in iteritems(self.keytable):
if v not in self.dead_engines:
jsonable[str(k)] = v
content['engines'] = jsonable
self.session.send(self.query, 'connection_reply', content, parent=msg, ident=client_id)
    def register_engine(self, reg, msg):
        """Register a new engine."""
        content = msg['content']
        try:
            uuid = content['uuid']
        except KeyError:
            self.log.error("registration::queue not specified", exc_info=True)
            return
        eid = self._next_id
        self.log.debug("registration::register_engine(%i, %r)", eid, uuid)
        # assume success; replaced with a wrapped error below on conflict
        content = dict(id=eid,status='ok',hb_period=self.heartmonitor.period)
        # check if requesting available IDs:
        if cast_bytes(uuid) in self.by_ident:
            try:
                raise KeyError("uuid %r in use" % uuid)
            except:
                content = error.wrap_exception()
                self.log.error("uuid %r in use", uuid, exc_info=True)
        else:
            # also reject uuids colliding with registrations still in progress
            for h, ec in iteritems(self.incoming_registrations):
                if uuid == h:
                    try:
                        raise KeyError("heart_id %r in use" % uuid)
                    except:
                        self.log.error("heart_id %r in use", uuid, exc_info=True)
                        content = error.wrap_exception()
                    break
                elif uuid == ec.uuid:
                    try:
                        raise KeyError("uuid %r in use" % uuid)
                    except:
                        self.log.error("uuid %r in use", uuid, exc_info=True)
                        content = error.wrap_exception()
                    break
        msg = self.session.send(self.query, "registration_reply",
                content=content,
                ident=reg)
        heart = cast_bytes(uuid)
        if content['status'] == 'ok':
            if heart in self.heartmonitor.hearts:
                # already beating
                self.incoming_registrations[heart] = EngineConnector(id=eid,uuid=uuid)
                self.finish_registration(heart)
            else:
                # defer completion until the heart beats; purge if it never does
                purge = lambda : self._purge_stalled_registration(heart)
                dc = ioloop.DelayedCallback(purge, self.registration_timeout, self.loop)
                dc.start()
                self.incoming_registrations[heart] = EngineConnector(id=eid,uuid=uuid,stallback=dc)
        else:
            self.log.error("registration::registration %i failed: %r", eid, content['evalue'])
        return eid
    def unregister_engine(self, ident, msg):
        """Unregister an engine that explicitly requested to leave."""
        try:
            eid = msg['content']['id']
        except:
            self.log.error("registration::bad engine id for unregistration: %r", ident, exc_info=True)
            return
        self.log.info("registration::unregister_engine(%r)", eid)
        # print (eid)
        uuid = self.keytable[eid]
        content=dict(id=eid, uuid=uuid)
        # mark the engine dead but keep its table entries for now; stranded
        # messages are resolved after registration_timeout and still need them
        self.dead_engines.add(uuid)
        # self.ids.remove(eid)
        # uuid = self.keytable.pop(eid)
        #
        # ec = self.engines.pop(eid)
        # self.hearts.pop(ec.heartbeat)
        # self.by_ident.pop(ec.queue)
        # self.completed.pop(eid)
        handleit = lambda : self._handle_stranded_msgs(eid, uuid)
        dc = ioloop.DelayedCallback(handleit, self.registration_timeout, self.loop)
        dc.start()
        ############## TODO: HANDLE IT ################
        self._save_engine_state()
        if self.notifier:
            self.session.send(self.notifier, "unregistration_notification", content=content)
    def _handle_stranded_msgs(self, eid, uuid):
        """Handle messages known to be on an engine when the engine unregisters.
        It is possible that this will fire prematurely - that is, an engine will
        go down after completing a result, and the client will be notified
        that the result failed and later receive the actual result.
        """
        outstanding = self.queues[eid]
        for msg_id in outstanding:
            # mark each stranded request completed with an EngineError result
            self.pending.remove(msg_id)
            self.all_completed.add(msg_id)
            try:
                raise error.EngineError("Engine %r died while running task %r" % (eid, msg_id))
            except:
                content = error.wrap_exception()
            # build a fake header:
            header = {}
            header['engine'] = uuid
            header['date'] = datetime.now()
            rec = dict(result_content=content, result_header=header, result_buffers=[])
            rec['completed'] = header['date']
            rec['engine_uuid'] = uuid
            try:
                self.db.update_record(msg_id, rec)
            except Exception:
                self.log.error("DB Error handling stranded msg %r", msg_id, exc_info=True)
    def finish_registration(self, heart):
        """Second half of engine registration, called after our HeartMonitor
        has received a beat from the Engine's Heart."""
        try:
            ec = self.incoming_registrations.pop(heart)
        except KeyError:
            self.log.error("registration::tried to finish nonexistant registration", exc_info=True)
            return
        self.log.info("registration::finished registering engine %i:%s", ec.id, ec.uuid)
        # cancel the stalled-registration purge callback, if one was scheduled
        if ec.stallback is not None:
            ec.stallback.stop()
        eid = ec.id
        # install the engine in all of the hub's lookup tables
        self.ids.add(eid)
        self.keytable[eid] = ec.uuid
        self.engines[eid] = ec
        self.by_ident[cast_bytes(ec.uuid)] = ec.id
        self.queues[eid] = list()
        self.tasks[eid] = list()
        self.completed[eid] = list()
        self.hearts[heart] = eid
        content = dict(id=eid, uuid=self.engines[eid].uuid)
        if self.notifier:
            self.session.send(self.notifier, "registration_notification", content=content)
        self.log.info("engine::Engine Connected: %i", eid)
        self._save_engine_state()
def _purge_stalled_registration(self, heart):
if heart in self.incoming_registrations:
ec = self.incoming_registrations.pop(heart)
self.log.info("registration::purging stalled registration: %i", ec.id)
else:
pass
#-------------------------------------------------------------------------
# Engine State
#-------------------------------------------------------------------------
def _cleanup_engine_state_file(self):
"""cleanup engine state mapping"""
if os.path.exists(self.engine_state_file):
self.log.debug("cleaning up engine state: %s", self.engine_state_file)
try:
os.remove(self.engine_state_file)
except IOError:
self.log.error("Couldn't cleanup file: %s", self.engine_state_file, exc_info=True)
def _save_engine_state(self):
"""save engine mapping to JSON file"""
if not self.engine_state_file:
return
self.log.debug("save engine state to %s" % self.engine_state_file)
state = {}
engines = {}
for eid, ec in iteritems(self.engines):
if ec.uuid not in self.dead_engines:
engines[eid] = ec.uuid
state['engines'] = engines
state['next_id'] = self._idcounter
with open(self.engine_state_file, 'w') as f:
json.dump(state, f)
    def _load_engine_state(self):
        """load engine mapping from JSON file"""
        if not os.path.exists(self.engine_state_file):
            return
        self.log.info("loading engine state from %s" % self.engine_state_file)
        with open(self.engine_state_file) as f:
            state = json.load(f)
        # suppress registration notifications while replaying saved engines
        save_notifier = self.notifier
        self.notifier = None
        for eid, uuid in iteritems(state['engines']):
            heart = uuid.encode('ascii')
            # start with this heart as current and beating:
            self.heartmonitor.responses.add(heart)
            self.heartmonitor.hearts.add(heart)
            self.incoming_registrations[heart] = EngineConnector(id=int(eid), uuid=uuid)
            self.finish_registration(heart)
        self.notifier = save_notifier
        self._idcounter = state['next_id']
#-------------------------------------------------------------------------
# Client Requests
#-------------------------------------------------------------------------
    def shutdown_request(self, client_id, msg):
        """handle shutdown request."""
        self.session.send(self.query, 'shutdown_reply', content={'status': 'ok'}, ident=client_id)
        # also notify other clients of shutdown
        self.session.send(self.notifier, 'shutdown_notice', content={'status': 'ok'})
        # delay actual shutdown so the replies above can be delivered
        dc = ioloop.DelayedCallback(lambda : self._shutdown(), 1000, self.loop)
        dc.start()
    def _shutdown(self):
        """Terminate the hub process, after a brief pause for message delivery."""
        self.log.info("hub::hub shutting down.")
        time.sleep(0.1)
        sys.exit(0)
    def check_load(self, client_id, msg):
        """Reply with the number of outstanding jobs for each requested engine."""
        content = msg['content']
        try:
            targets = content['targets']
            targets = self._validate_targets(targets)
        except:
            content = error.wrap_exception()
            self.session.send(self.query, "hub_error",
                    content=content, ident=client_id)
            return
        content = dict(status='ok')
        # loads = {}
        for t in targets:
            # load = queued MUX jobs + queued task jobs for this engine
            content[bytes(t)] = len(self.queues[t])+len(self.tasks[t])
        self.session.send(self.query, "load_reply", content=content, ident=client_id)
    def queue_status(self, client_id, msg):
        """Return the Queue status of one or more targets.
        If verbose, return the msg_ids, else return len of each type.
        Keys:
        * queue (pending MUX jobs)
        * tasks (pending Task jobs)
        * completed (finished jobs from both queues)
        """
        content = msg['content']
        targets = content['targets']
        try:
            targets = self._validate_targets(targets)
        except:
            content = error.wrap_exception()
            self.session.send(self.query, "hub_error",
                    content=content, ident=client_id)
            return
        verbose = content.get('verbose', False)
        content = dict(status='ok')
        for t in targets:
            queue = self.queues[t]
            completed = self.completed[t]
            tasks = self.tasks[t]
            if not verbose:
                # summarize with counts instead of full msg_id lists
                queue = len(queue)
                completed = len(completed)
                tasks = len(tasks)
            content[str(t)] = {'queue': queue, 'completed': completed , 'tasks': tasks}
        content['unassigned'] = list(self.unassigned) if verbose else len(self.unassigned)
        # print (content)
        self.session.send(self.query, "queue_reply", content=content, ident=client_id)
    def purge_results(self, client_id, msg):
        """Purge results from memory. This method is more valuable before we move
        to a DB based message storage mechanism."""
        content = msg['content']
        self.log.info("Dropping records with %s", content)
        msg_ids = content.get('msg_ids', [])
        reply = dict(status='ok')
        if msg_ids == 'all':
            # drop every completed record
            try:
                self.db.drop_matching_records(dict(completed={'$ne':None}))
            except Exception:
                reply = error.wrap_exception()
                self.log.exception("Error dropping records")
        else:
            # refuse to purge records that are still pending
            pending = [m for m in msg_ids if (m in self.pending)]
            if pending:
                try:
                    raise IndexError("msg pending: %r" % pending[0])
                except:
                    reply = error.wrap_exception()
                    self.log.exception("Error dropping records")
            else:
                try:
                    self.db.drop_matching_records(dict(msg_id={'$in':msg_ids}))
                except Exception:
                    reply = error.wrap_exception()
                    self.log.exception("Error dropping records")
        if reply['status'] == 'ok':
            # also drop completed records per-engine, if engine_ids were given
            eids = content.get('engine_ids', [])
            for eid in eids:
                if eid not in self.engines:
                    try:
                        raise IndexError("No such engine: %i" % eid)
                    except:
                        reply = error.wrap_exception()
                        self.log.exception("Error dropping records")
                    break
                uid = self.engines[eid].uuid
                try:
                    self.db.drop_matching_records(dict(engine_uuid=uid, completed={'$ne':None}))
                except Exception:
                    reply = error.wrap_exception()
                    self.log.exception("Error dropping records")
                    break
        self.session.send(self.query, 'purge_reply', content=reply, ident=client_id)
def resubmit_task(self, client_id, msg):
"""Resubmit one or more tasks."""
def finish(reply):
self.session.send(self.query, 'resubmit_reply', content=reply, ident=client_id)
content = msg['content']
msg_ids = content['msg_ids']
reply = dict(status='ok')
try:
records = self.db.find_records({'msg_id' : {'$in' : msg_ids}}, keys=[
'header', 'content', 'buffers'])
except Exception:
self.log.error('db::db error finding tasks to resubmit', exc_info=True)
return finish(error.wrap_exception())
# validate msg_ids
found_ids = [ rec['msg_id'] for rec in records ]
pending_ids = [ msg_id for msg_id in found_ids if msg_id in self.pending ]
if len(records) > len(msg_ids):
try:
raise RuntimeError("DB appears to be in an inconsistent state."
"More matching records were found than should exist")
except Exception:
self.log.exception("Failed to resubmit task")
return finish(error.wrap_exception())
elif len(records) < len(msg_ids):
missing = [ m for m in msg_ids if m not in found_ids ]
try:
raise KeyError("No such msg(s): %r" % missing)
except KeyError:
self.log.exception("Failed to resubmit task")
return finish(error.wrap_exception())
elif pending_ids:
pass
# no need to raise on resubmit of pending task, now that we
# resubmit under new ID, but do we want to raise anyway?
# msg_id = invalid_ids[0]
# try:
# raise ValueError("Task(s) %r appears to be inflight" % )
# except Exception:
# return finish(error.wrap_exception())
# mapping of original IDs to resubmitted IDs
resubmitted = {}
# send the messages
for rec in records:
header = rec['header']
msg = self.session.msg(header['msg_type'], parent=header)
msg_id = msg['msg_id']
msg['content'] = rec['content']
# use the old header, but update msg_id and timestamp
fresh = msg['header']
header['msg_id'] = fresh['msg_id']
header['date'] = fresh['date']
msg['header'] = header
self.session.send(self.resubmit, msg, buffers=rec['buffers'])
resubmitted[rec['msg_id']] = msg_id
self.pending.add(msg_id)
msg['buffers'] = rec['buffers']
try:
self.db.add_record(msg_id, init_record(msg))
except Exception:
self.log.error("db::DB Error updating record: %s", msg_id, exc_info=True)
return finish(error.wrap_exception())
finish(dict(status='ok', resubmitted=resubmitted))
# store the new IDs in the Task DB
for msg_id, resubmit_id in iteritems(resubmitted):
try:
self.db.update_record(msg_id, {'resubmitted' : resubmit_id})
except Exception:
self.log.error("db::DB Error updating record: %s", msg_id, exc_info=True)
def _extract_record(self, rec):
"""decompose a TaskRecord dict into subsection of reply for get_result"""
io_dict = {}
for key in ('pyin', 'pyout', 'pyerr', 'stdout', 'stderr'):
io_dict[key] = rec[key]
content = {
'header': rec['header'],
'metadata': rec['metadata'],
'result_metadata': rec['result_metadata'],
'result_header' : rec['result_header'],
'result_content': rec['result_content'],
'received' : rec['received'],
'io' : io_dict,
}
if rec['result_buffers']:
buffers = list(map(bytes, rec['result_buffers']))
else:
buffers = []
return content, buffers
def get_results(self, client_id, msg):
"""Get the result of 1 or more messages."""
content = msg['content']
msg_ids = sorted(set(content['msg_ids']))
statusonly = content.get('status_only', False)
pending = []
completed = []
content = dict(status='ok')
content['pending'] = pending
content['completed'] = completed
buffers = []
if not statusonly:
try:
matches = self.db.find_records(dict(msg_id={'$in':msg_ids}))
# turn match list into dict, for faster lookup
records = {}
for rec in matches:
records[rec['msg_id']] = rec
except Exception:
content = error.wrap_exception()
self.log.exception("Failed to get results")
self.session.send(self.query, "result_reply", content=content,
parent=msg, ident=client_id)
return
else:
records = {}
for msg_id in msg_ids:
if msg_id in self.pending:
pending.append(msg_id)
elif msg_id in self.all_completed:
completed.append(msg_id)
if not statusonly:
c,bufs = self._extract_record(records[msg_id])
content[msg_id] = c
buffers.extend(bufs)
elif msg_id in records:
if rec['completed']:
completed.append(msg_id)
c,bufs = self._extract_record(records[msg_id])
content[msg_id] = c
buffers.extend(bufs)
else:
pending.append(msg_id)
else:
try:
raise KeyError('No such message: '+msg_id)
except:
content = error.wrap_exception()
break
self.session.send(self.query, "result_reply", content=content,
parent=msg, ident=client_id,
buffers=buffers)
def get_history(self, client_id, msg):
"""Get a list of all msg_ids in our DB records"""
try:
msg_ids = self.db.get_history()
except Exception as e:
content = error.wrap_exception()
self.log.exception("Failed to get history")
else:
content = dict(status='ok', history=msg_ids)
self.session.send(self.query, "history_reply", content=content,
parent=msg, ident=client_id)
def db_query(self, client_id, msg):
"""Perform a raw query on the task record database."""
content = msg['content']
query = extract_dates(content.get('query', {}))
keys = content.get('keys', None)
buffers = []
empty = list()
try:
records = self.db.find_records(query, keys)
except Exception as e:
content = error.wrap_exception()
self.log.exception("DB query failed")
else:
# extract buffers from reply content:
if keys is not None:
buffer_lens = [] if 'buffers' in keys else None
result_buffer_lens = [] if 'result_buffers' in keys else None
else:
buffer_lens = None
result_buffer_lens = None
for rec in records:
# buffers may be None, so double check
b = rec.pop('buffers', empty) or empty
if buffer_lens is not None:
buffer_lens.append(len(b))
buffers.extend(b)
rb = rec.pop('result_buffers', empty) or empty
if result_buffer_lens is not None:
result_buffer_lens.append(len(rb))
buffers.extend(rb)
content = dict(status='ok', records=records, buffer_lens=buffer_lens,
result_buffer_lens=result_buffer_lens)
# self.log.debug (content)
self.session.send(self.query, "db_reply", content=content,
parent=msg, ident=client_id,
buffers=buffers)
|
{
"content_hash": "5e935a0d8293ead2cc3b3b9d6a47d6a5",
"timestamp": "",
"source": "github",
"line_count": 1449,
"max_line_length": 118,
"avg_line_length": 37.893029675638374,
"alnum_prop": 0.537308539894731,
"repo_name": "Lightmatter/django-inlineformfield",
"id": "ee89133f8db7bc2a8c2e3aa9d92ad0e17cc65314",
"size": "54907",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": ".tox/py27/lib/python2.7/site-packages/IPython/parallel/controller/hub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43622"
},
{
"name": "Groff",
"bytes": "3667"
},
{
"name": "HTML",
"bytes": "108126"
},
{
"name": "JavaScript",
"bytes": "853457"
},
{
"name": "Python",
"bytes": "10506732"
},
{
"name": "Shell",
"bytes": "3801"
},
{
"name": "Smarty",
"bytes": "21023"
}
],
"symlink_target": ""
}
|
from optparse import OptionGroup
import os
import sys
import tarfile
try:
from gppylib import gplog, pgconf
from gppylib.commands import gp
from gppylib.commands.base import Command, ExecutionError
from gppylib.db import dbconn
from gppylib.gparray import GpArray
from gppylib.gpversion import GpVersion
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.mainUtils import addMasterDirectoryOptionForSingleClusterProgram, addStandardLoggingAndHelpOptions, ExceptionNoStackTraceNeeded
from gppylib.operations.package import MigratePackages, InstallPackage, UninstallPackage, QueryPackage, BuildGppkg, UpdatePackage, CleanGppkg, Gppkg, GPPKG_EXTENSION, GPPKG_ARCHIVE_PATH
from gppylib.userinput import ask_yesno
from gppylib.operations.unix import ListFilesByPattern
import platform
except ImportError, ex:
sys.exit('Cannot import modules. Please check that you have sourced greenplum_path.sh. Detail: ' + str(ex))
logger = gplog.get_default_logger()
class GpPkgProgram:
""" This is the CLI entry point to package management code. """
def __init__(self, options, args):
self.master_datadir = options.masterDataDirectory
# TODO: AK: Program logic should not be dictating master, standby, and segment information
# In other words, the fundamental Operations should have APIs that preclude the need for this.
self.master_host = None
self.standby_host = None
self.segment_host_list = None
self.query = options.query
self.build = options.build
self.install = options.install
self.remove = options.remove
self.update = options.update
self.clean = options.clean
self.migrate = options.migrate
self.interactive = options.interactive
self.filename = options.filename
# only one of the following may be provided: --install, --remove, --update, --query, --build, --clean, --migrate
count = sum([1 for opt in ['install', 'remove', 'update', 'query', 'build', 'clean', 'migrate'] if getattr(self, opt)])
if count != 1:
raise ExceptionNoStackTraceNeeded('Exactly one of the following must be provided: --install, --remove, -update, --query, --clean, --migrate')
if self.query:
# gppkg -q can be supplemented with --info, --list, --all
count = sum([1 for opt in ['info', 'list', 'all'] if options.__dict__[opt]])
if count > 1:
raise ExceptionNoStackTraceNeeded('For --query, at most one of the following can be provided: --info, --list, --all')
# for all query options other than --all, a package path must be provided
if not options.all and len(args) != 1:
raise ExceptionNoStackTraceNeeded('A package must be specified for -q, -q --info, and -q --list.')
if options.info:
self.query = (QueryPackage.INFO, args[0])
elif options.list:
self.query = (QueryPackage.LIST, args[0])
elif options.all:
self.query = (QueryPackage.ALL, None)
else:
self.query = (None, args[0])
elif self.migrate:
if len(args) != 2:
raise ExceptionNoStackTraceNeeded('Invalid syntax, expecting "gppkg --migrate <from_gphome> <to_gphome>".')
self.migrate = (args[0], args[1])
# gppkg should check gpexpand status unless in build mode.
#
# Build mode does not use any information from the cluster and does not
# affect its running status, in fact it does not require a cluster
# exists at all.
if not self.build:
check_result, msg = gp.conflict_with_gpexpand("gppkg",
refuse_phase1=True,
refuse_phase2=False)
if not check_result:
raise ExceptionNoStackTraceNeeded(msg)
    @staticmethod
    def create_parser():
        """Build and return the OptParser for the gppkg command line."""
        parser = OptParser(option_class=OptChecker,
                    description="Greenplum Package Manager",
                    version='%prog version $Revision: #1 $')
        parser.setHelp([])

        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)

        # -q and -l are reclaimed below for --query; logging uses long forms
        parser.remove_option('-q')
        parser.remove_option('-l')

        add_to = OptionGroup(parser, 'General Options')
        parser.add_option_group(add_to)

        addMasterDirectoryOptionForSingleClusterProgram(add_to)

        # TODO: AK: Eventually, these options may need to be flexible enough to accept multiple packages
        # in one invocation. If so, the structure of this parser may need to change.
        add_to.add_option('-i', '--install', help='install the given gppkg', metavar='<package>')
        add_to.add_option('-u', '--update', help='update the given gppkg', metavar='<package>')
        add_to.add_option('-r', '--remove', help='remove the given gppkg', metavar='<name>-<version>')
        add_to.add_option('-q', '--query', help='query the gppkg database or a particular gppkg', action='store_true')
        add_to.add_option('-b', '--build', help='build a gppkg', metavar='<directory>')
        add_to.add_option('-c', '--clean', help='clean the cluster of the given gppkg', action='store_true')
        add_to.add_option('--migrate', help='migrate gppkgs from a separate $GPHOME', metavar='<from_gphome> <to_gphome>', action='store_true', default=False)
        add_to.add_option('-f', '--filename', help='set specific package name', metavar='<name>')

        add_to = OptionGroup(parser, 'Query Options')
        parser.add_option_group(add_to)
        add_to.add_option('--info', action='store_true', help='print information about the gppkg including name, version, description')
        add_to.add_option('--list', action='store_true', help='print all the files present in the gppkg')
        add_to.add_option('--all', action='store_true', help='print all the gppkgs installed by gppkg')

        return parser
@staticmethod
def create_program(options, args):
    """Factory used by the simple_main harness to build the program object.

    TODO: AK: This convention may be unnecessary.
    """
    return GpPkgProgram(options, args)
def _get_gpdb_host_list(self):
    """
    TODO: Perhaps the host list should be produced by gparray instead of here.

    Query the catalog (in utility mode) for the host names of every
    segment in the cluster and record them on this instance:

      self.master_host        -- the master's host name
      self.standby_host       -- the standby master's host name (or None)
      self.segment_host_list  -- deduplicated segment host names, excluding
                                 the master and standby hosts
    """
    logger.debug('_get_gpdb_host_list')

    gparr = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port), utility=True)
    master_host = None
    standby_host = None
    segment_host_list = []

    for seg in gparr.getDbList():
        if seg.isSegmentMaster(current_role=True):
            master_host = seg.getSegmentHostName()
        elif seg.isSegmentStandby(current_role=True):
            standby_host = seg.getSegmentHostName()
        else:
            segment_host_list.append(seg.getSegmentHostName())

    # Deduplicate so we don't install multiple times on the same host, and
    # drop the master/standby hosts since they are tracked separately.
    # BUGFIX: the previous code removed items from segment_host_list while
    # iterating over it, which can skip an element (e.g. when the master and
    # standby hosts end up adjacent after dedup). Build a new list instead.
    segment_host_list = [
        host for host in set(segment_host_list)
        if host != master_host and host != standby_host
    ]

    self.master_host = master_host
    self.standby_host = standby_host
    self.segment_host_list = segment_host_list
def _get_master_port(self, datadir):
    """Read the master port from <datadir>/postgresql.conf.

    Falls back to the PGPORT environment variable when the conf file
    yields no usable port value.
    """
    logger.debug('_get_master_port')
    conf = pgconf.readfile(os.path.join(datadir, 'postgresql.conf'))
    return conf.int('port') or os.getenv('PGPORT')
def run(self):
    """Dispatch to the requested gppkg operation (build/install/query/...)."""
    # --build needs no cluster access; handle it first and return early.
    if self.build:
        if self.filename:
            BuildGppkg(self.build, self.filename).run()
        else:
            BuildGppkg(self.build, None).run()
        return
    # Verify the platform packaging tools are available before doing anything.
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8;
    # this module presumably still runs under Python 2 (see `except X, ex`).
    if platform.linux_distribution()[0] == 'Ubuntu':
        try:
            cmd = Command(name='Check for dpkg', cmdStr='dpkg --version')
            cmd.run(validateAfter=True)
            cmd = Command(name='Check for fakeroot', cmdStr='fakeroot --version')
            cmd.run(validateAfter=True)
        except Exception, ex:
            raise ExceptionNoStackTraceNeeded('fakeroot and dpkg are both required by gppkg')
    else:
        try:
            cmd = Command(name = 'Check for rpm', cmdStr = 'rpm --version')
            cmd.run(validateAfter = True)
            results = cmd.get_results().stdout.strip()
            # rpm prints "RPM version X.Y.Z"; take the last token.
            rpm_version_string = results.split(' ')[-1]
            if not rpm_version_string.startswith('4.'):
                raise ExceptionNoStackTraceNeeded('gppkg requires rpm version 4.x')
        except ExecutionError, ex:
            # rpm itself may be missing from PATH; detect that specifically.
            results = ex.cmd.get_results().stderr.strip()
            if len(results) != 0 and 'not found' in results:
                raise ExceptionNoStackTraceNeeded('gppkg requires RPM to be available in PATH')
    # Resolve cluster coordinates needed by every remaining operation.
    if self.master_datadir is None:
        self.master_datadir = gp.get_masterdatadir()
    self.master_port = self._get_master_port(self.master_datadir)
    # Populates self.master_host / self.standby_host / self.segment_host_list.
    self._get_gpdb_host_list()
    # --migrate is exclusive of the install/query/remove/update/clean verbs.
    if self.migrate:
        MigratePackages(from_gphome = self.migrate[0],
                        to_gphome = self.migrate[1],
                        standby_host = self.standby_host,
                        segment_host_list = self.segment_host_list
                        ).run()
        return
    if self.install:
        pkg = Gppkg.from_package_path(self.install)
        InstallPackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
    elif self.query:
        # self.query is a (query_type, package_path) pair built during validation.
        query_type, package_path = self.query
        QueryPackage(query_type, package_path).run()
    elif self.remove:
        # Check for exact match first, then use wildcard for what will be removed.
        pkg_file_list = ListFilesByPattern(GPPKG_ARCHIVE_PATH, self.remove + GPPKG_EXTENSION).run()
        if len(pkg_file_list) == 0:
            # now try wildcard
            pkg_file_list = ListFilesByPattern(GPPKG_ARCHIVE_PATH, self.remove + '*' + GPPKG_EXTENSION).run()
            if len(pkg_file_list) == 0:
                raise ExceptionNoStackTraceNeeded('Package %s has not been installed.' % self.remove)
            # refuse to remove at all if the match is too broad, i.e., > 1
            if len(pkg_file_list) > 1:
                err_msg = "Remove request '%s' too broad. " \
                          "Multiple packages match remove request: ( %s )." % (self.remove, ", ".join(pkg_file_list))
                raise ExceptionNoStackTraceNeeded(err_msg)
        pkg_file = pkg_file_list[0]
        pkg = Gppkg.from_package_path(os.path.join(GPPKG_ARCHIVE_PATH, pkg_file))
        UninstallPackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
    elif self.update:
        # Updating removes every older version of the package's objects, so
        # warn loudly and (interactively) get confirmation before proceeding.
        logger.warning('WARNING: The process of updating a package includes removing all')
        logger.warning('previous versions of the system objects related to the package. For')
        logger.warning('example, previous versions of shared libraries are removed.')
        logger.warning('After the update process, a database function will fail when it is')
        logger.warning('called if the function references a package file that has been removed.')
        if self.interactive:
            if not ask_yesno(None, 'Do you still want to continue ?', 'N'):
                logger.info('Skipping update of gppkg based on user input')
                return
        pkg = Gppkg.from_package_path(self.update)
        UpdatePackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
    elif self.clean:
        CleanGppkg(self.standby_host, self.segment_host_list).run()
def cleanup(self):
    # No teardown required; each sub-operation cleans up after itself.
    pass
|
{
"content_hash": "eb4e2dfceddce31b057240b20d43efd6",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 189,
"avg_line_length": 47.698113207547166,
"alnum_prop": 0.6105221518987342,
"repo_name": "jmcatamney/gpdb",
"id": "427982290b4449774900d848971fff3dd67bd0ea",
"size": "12738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpMgmt/bin/gppylib/programs/gppkg.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3724"
},
{
"name": "Awk",
"bytes": "836"
},
{
"name": "Batchfile",
"bytes": "12854"
},
{
"name": "C",
"bytes": "42498841"
},
{
"name": "C++",
"bytes": "14366259"
},
{
"name": "CMake",
"bytes": "38452"
},
{
"name": "Csound Score",
"bytes": "223"
},
{
"name": "DTrace",
"bytes": "3873"
},
{
"name": "Dockerfile",
"bytes": "11932"
},
{
"name": "Emacs Lisp",
"bytes": "3488"
},
{
"name": "Fortran",
"bytes": "14863"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "335208"
},
{
"name": "HTML",
"bytes": "53484"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "229556"
},
{
"name": "M4",
"bytes": "111147"
},
{
"name": "Makefile",
"bytes": "496239"
},
{
"name": "Objective-C",
"bytes": "38376"
},
{
"name": "PLpgSQL",
"bytes": "8009512"
},
{
"name": "Perl",
"bytes": "798767"
},
{
"name": "PowerShell",
"bytes": "422"
},
{
"name": "Python",
"bytes": "3000118"
},
{
"name": "Raku",
"bytes": "698"
},
{
"name": "Roff",
"bytes": "32437"
},
{
"name": "Ruby",
"bytes": "77585"
},
{
"name": "SCSS",
"bytes": "339"
},
{
"name": "Shell",
"bytes": "451713"
},
{
"name": "XS",
"bytes": "6983"
},
{
"name": "Yacc",
"bytes": "674092"
},
{
"name": "sed",
"bytes": "1231"
}
],
"symlink_target": ""
}
|
"""tplink conftest."""
import pytest
from . import _patch_discovery
from tests.common import mock_device_registry, mock_registry
@pytest.fixture
def mock_discovery():
    """Patch python-kasa discovery so it reports no devices by default."""
    with _patch_discovery() as discover:
        discover.return_value = {}
        yield discover
@pytest.fixture(name="device_reg")
def device_reg_fixture(hass):
    """Return an empty, loaded, device registry."""
    registry = mock_device_registry(hass)
    return registry
@pytest.fixture(name="entity_reg")
def entity_reg_fixture(hass):
    """Return an empty, loaded, entity registry."""
    registry = mock_registry(hass)
    return registry
@pytest.fixture(autouse=True)
def tplink_mock_get_source_ip(mock_get_source_ip):
    """Mock network util's async_get_source_ip."""
    # Intentionally empty: depending on the mock_get_source_ip fixture is the
    # whole effect; autouse applies it to every test in this package.
|
{
"content_hash": "435ee084642d5d041b492b5142813001",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 60,
"avg_line_length": 23.46875,
"alnum_prop": 0.6950732356857523,
"repo_name": "jawilson/home-assistant",
"id": "20ce09b9ec8533fff6b8bf3342ccdd146802289c",
"size": "751",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/components/tplink/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import datetime
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.decorators.cache import cache_control
from django.views.generic import FormView, TemplateView, DetailView
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from constituencies.models import Constituency
from leaflets.models import Leaflet, LeafletImage
from people.models import Person
from uk_political_parties.models import Party
from .forms import ReportAbuseForm
from .helpers import CacheControlMixin
class HomeView(CacheControlMixin, TemplateView):
    """Landing page: total leaflet count plus the most recent leaflets."""

    template_name = "core/home.html"
    cache_timeout = 60 * 5  # cache the rendered page for five minutes

    def get_context_data(self, **kwargs):
        """Add leaflet statistics and the latest uploads to the context."""
        context = super(HomeView, self).get_context_data(**kwargs)
        # COUNT(*) at the database level; the former `.all().count()` did the
        # same work with a redundant queryset clone.
        leaflet_count = Leaflet.objects.count()
        # First 20 leaflets in the model's default ordering (presumably newest
        # first — confirm against Leaflet.Meta); prefetch images to avoid N+1
        # queries in the template.
        latest_leaflets = Leaflet.objects.all().prefetch_related("images")[:20]
        context.update(
            {
                "leaflet_count": leaflet_count,
                "latest_leaflets": latest_leaflets,
            }
        )
        return context
class MaintenanceView(TemplateView):
    """Static page shown while the site is down for maintenance."""
    template_name = "core/maintenance.html"
class ReportView(DetailView, FormView):
    """Display and process the abuse-report form for a single leaflet.

    Combines DetailView (the leaflet being reported) with FormView (the
    abuse form); the MRO order DetailView, FormView matters for get/post.
    """
    model = Leaflet
    form_class = ReportAbuseForm
    template_name = "core/report_abuse.html"

    def get(self, request, *args, **kwargs):
        # Manually combine DetailView and FormView behavior: resolve the
        # leaflet AND put an unbound form into the same context.
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        self.object = self.get_object()
        context = self.get_context_data(form=form, object=self.object,)
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        # FormView.post needs self.object set so form_valid can use it.
        self.object = self.get_object()
        return super(ReportView, self).post(request, *args, **kwargs)

    def form_valid(self, form):
        """Email the report to the site admins, then redirect to the thanks page."""
        domain = Site.objects.get_current().domain
        ctx = {
            # Absolute link back to the reported leaflet for the email body.
            "link": "http://%s%s"
            % (domain, reverse("leaflet", kwargs={"pk": self.object.id}),),
            "name": form.cleaned_data["name"],
            "email": form.cleaned_data["email"],
            "details": form.cleaned_data["details"],
        }
        subject = "{0} – {1}".format(
            settings.REPORT_EMAIL_SUBJECT, self.object.id
        )
        from_email = settings.DEFAULT_FROM_EMAIL
        to = settings.EMAIL_RECIPIENT
        # Multipart message: plain-text body with an HTML alternative.
        text_content = render_to_string("email/abuse_report.txt", ctx)
        html_content = render_to_string("email/abuse_report.html", ctx)
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
        msg.attach_alternative(html_content, "text/html")
        msg.send()
        return HttpResponseRedirect(
            reverse("report_abuse_sent", kwargs={"pk": self.object.pk})
        )
class ReportThanksView(DetailView):
    """Confirmation page shown after an abuse report has been submitted."""
    model = Leaflet
    template_name = "core/report_sent.html"
class TestView(TemplateView):
    """Smoke-test page: one link to every named URL in the site."""

    template_name = "core/test.html"

    def get_context_data(self, **kwargs):
        """Build the full list of site links for the test template."""
        # Grab one random instance of each model so detail URLs can be
        # reversed with a real primary key.
        constituency = Constituency.objects.order_by("?").first()
        leaflet = Leaflet.objects.order_by("?").first()
        image = LeafletImage.objects.order_by("?").first()
        party = Party.objects.order_by("?").first()
        person = Person.objects.order_by("?").first()

        links = [
            {"text": "Home", "url": reverse("home")},
            {"text": "About", "url": reverse("about")},
            {"text": "Analysis", "url": reverse("analysis")},
            {"text": "Analysis Reports", "url": reverse("report_view")},
            {"text": "Analysis in total", "url": reverse("analysis_report")},
            {
                "text": "Analysis per party",
                "url": reverse("analysis_report_per_party"),
            },
            {
                "text": "Analysis per constituency",
                "url": reverse("constituencies_report"),
            },
            {
                "text": "Start Analysis",
                "url": reverse("analysis_start"),
                "help": "should redirect to a random un-analysed leaflet",
            },
            {"text": "Constituencies", "url": reverse("constituencies_report")},
            {
                "text": "Constituency view",
                "url": reverse(
                    "constituency-view", kwargs={"pk": constituency.pk}
                ),
            },
            {
                "text": "Donate",
                "url": "/donate",
                "help": "should redirect to /donate on DC website",
            },
            {"text": "Latest leaflets", "url": reverse("leaflets")},
            {
                "text": "Leaflet view",
                "url": reverse("leaflet", kwargs={"pk": leaflet.pk}),
            },
            {
                "text": "Leaflet images",
                "url": reverse("all_images", kwargs={"pk": leaflet.pk}),
            },
            {
                "text": "Full leaflet image",
                "url": reverse("full_image", kwargs={"pk": image.pk}),
            },
            {"text": "Add leaflet", "url": reverse("upload_leaflet")},
            {
                "text": "Report a leaflet",
                "url": reverse("report_abuse", kwargs={"pk": leaflet.pk}),
            },
            {
                "text": "Report a leaflet - thanks",
                "url": reverse("report_abuse_sent", kwargs={"pk": leaflet.pk}),
            },
            {"text": "Parties", "url": reverse("parties")},
            {
                "text": "Party view",
                "url": reverse("party-view", kwargs={"pk": party.pk}),
            },
            {
                "text": "Person view",
                "url": reverse("person", kwargs={"remote_id": person.pk}),
            },
            {"text": "Press", "url": reverse("press")},
        ]

        context = super(TestView, self).get_context_data(**kwargs)
        context.update({"links": links})
        return context
|
{
"content_hash": "67314d1d801e4939a55cb6351aec52df",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 79,
"avg_line_length": 31.412322274881518,
"alnum_prop": 0.5416415208207604,
"repo_name": "DemocracyClub/electionleaflets",
"id": "c4760e0abd0266f7fca6bdc2c3f4e574c041bd3b",
"size": "6654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electionleaflets/apps/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "7910"
},
{
"name": "HTML",
"bytes": "92760"
},
{
"name": "JavaScript",
"bytes": "5712"
},
{
"name": "Makefile",
"bytes": "2940"
},
{
"name": "Python",
"bytes": "194406"
},
{
"name": "SCSS",
"bytes": "12241"
}
],
"symlink_target": ""
}
|
class ConfTrakException(Exception):
    """Base class for all conftrak errors."""
class ConfTrakNotFoundException(ConfTrakException, RuntimeError):
    """Raised when a requested configuration record does not exist."""
|
{
"content_hash": "2c50a3500e655f2893a9b8c15c6e1699",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 65,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.8032786885245902,
"repo_name": "hhslepicka/conftrak",
"id": "f8954b12be4f40fdc2ef4a23afa39507874a83ea",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conftrak/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "121978"
}
],
"symlink_target": ""
}
|
from .resource import Resource
class BackupResourceVaultConfigResource(Resource):
    """Backup resource vault config details.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id represents the complete path to the resource.
    :vartype id: str
    :ivar name: Resource name associated with the resource.
    :vartype name: str
    :ivar type: Resource type represents the complete path of the form
     Namespace/ResourceType/ResourceType/...
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict
    :param e_tag: Optional ETag.
    :type e_tag: str
    :param properties: BackupResourceVaultConfigResource properties
    :type properties: :class:`BackupResourceVaultConfig
     <azure.mgmt.recoveryservicesbackup.models.BackupResourceVaultConfig>`
    """

    # Server-populated fields may not be set by the client.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'BackupResourceVaultConfig'},
    }

    def __init__(self, location=None, tags=None, e_tag=None, properties=None):
        # Common ARM fields are handled by the Resource base class.
        super(BackupResourceVaultConfigResource, self).__init__(location=location, tags=tags, e_tag=e_tag)
        self.properties = properties
|
{
"content_hash": "766b900811c41e96600f19ea86d004f1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 106,
"avg_line_length": 36.608695652173914,
"alnum_prop": 0.6300475059382423,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "71e6eaaaa35eb95074ff0bbe417591bd1441f207",
"size": "2158",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/backup_resource_vault_config_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from utility.sklearnbasemodel import BaseModel
import numpy as np
from sklearn.ensemble import BaggingRegressor
class BaggingModel(BaseModel):
    """Bagging-regressor model plugged into the project's BaseModel harness."""

    def __init__(self):
        BaseModel.__init__(self)

    def setClf(self):
        """Configure the underlying scikit-learn BaggingRegressor."""
        self.clf = BaggingRegressor(
            n_estimators=100, max_samples=0.5, max_features=0.5, verbose=100
        )

    def getTunedParamterOptions(self):
        """Return the grid-search space: min_samples_split over 2..999."""
        return [{'min_samples_split': np.arange(2, 1000, 1)}]
if __name__ == "__main__":
    # Train/evaluate the bagging model via the BaseModel pipeline.
    model = BaggingModel()
    model.run()
|
{
"content_hash": "14eb64065170a5ee420b8622f163febf",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 107,
"avg_line_length": 28.642857142857142,
"alnum_prop": 0.628428927680798,
"repo_name": "LevinJ/Supply-demand-forecasting",
"id": "d028c891e7b9c5748f06cb64d0b5a7f80f33b072",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "implement/baggingmodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146461"
}
],
"symlink_target": ""
}
|
"""
login-oauth.py
~~~~~~~~~~~~~~
This module is an example of how to harness the Readability API w/ OAuth.
This module expects the following environment variables to be set:
- READABILITY_CONSUMER_KEY
- READABILITY_CONSUMER_SECRET
"""
import sys
import webbrowser
from ext import readability, get_consumer_keys
USAGE = """
Usage:
$ ./login-oauth.py <username> <password>
The following environment variables must be set:
- READABILITY_CONSUMER_KEY
- READABILITY_CONSUMER_SECRET
"""
TEMPLATE = """
To use the other example modules, run the following:
$ export READABILITY_ACCESS_TOKEN=%s
$ export READABILITY_ACCESS_SECRET=%s
"""
RAW_TEMPLATE = 'export READABILITY_ACCESS_TOKEN=%s ; export READABILITY_ACCESS_SECRET=%s'
def get_oauth_pin(url):
    """Grabs credentials from arguments."""
    # Open the OAuth authorization page in the user's browser, then prompt
    # on stdin for the PIN it displays. (raw_input: this is Python 2 code.)
    webbrowser.open(url)
    return raw_input('Authorization PIN? ')
def main():
    # Perform the OAuth dance and print export lines for the resulting
    # access token/secret. (Python 2: `print >> sys.stderr` syntax.)
    try:
        c_key, c_secret = get_consumer_keys()
    except ValueError:
        # get_consumer_keys raises ValueError when the env vars are unset.
        print >> sys.stderr, 'READABILITY_CONSUMER_KEY and READABILITY_CONSUMER_SECRET must be set.'
        sys.exit(1)
    try:
        # get_oauth_pin is used as the out-of-band callback to collect the PIN.
        rdd = readability.oauth(c_key, c_secret, callback=get_oauth_pin)
    except readability.api.AuthenticationError:
        print >> sys.stderr, '\nLogin failed. Invalid credentials.'
        sys.exit(77)
    if c_key and c_secret:
        print 'Login successful!'
    # --raw emits a single shell-ready line; otherwise a human-readable block.
    if '--raw' in sys.argv:
        print RAW_TEMPLATE % (rdd.token_tuple)
    else:
        print TEMPLATE % (rdd.token_tuple)
# Script entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "fadec791ebb0e15cfe8e35ca7d4c2c94",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 100,
"avg_line_length": 19.632911392405063,
"alnum_prop": 0.6673114119922631,
"repo_name": "alexwaters/python-readability-api",
"id": "e99d0913698cdfeb96c4da053459911f85a45538",
"size": "1598",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/login-oauth.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import seq2seq
from seq2seq.models import SimpleSeq2Seq
model = SimpleSeq2Seq(input_dim=5, hidden_dim=10, output_length=8, output_dim=8)
model.compile(loss='mse', optimizer='rmsprop')
|
{
"content_hash": "a7c2eef5205fe8ff2e940a685e4f124b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 80,
"avg_line_length": 36.8,
"alnum_prop": 0.7880434782608695,
"repo_name": "zlpmichelle/crackingtensorflow",
"id": "26556d890c51b4fdbf6bf944fbd582ff89ae4a3d",
"size": "184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seq2seq/keras_context_vector/test_seq2seq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1109569"
},
{
"name": "Python",
"bytes": "583902"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
import urllib.request
from io import BytesIO
import pandas as pd
import ujson
from elasticsearch import Elasticsearch
# Download the published Google Sheet as CSV (raw bytes).
csv = urllib.request.urlopen("https://docs.google.com/spreadsheet/pub?key=1h1udf_H073YaVlZs0fkYUf9dC6KbEZAhF1veeLExyXo&gid=937170620&output=csv").read()
bio = BytesIO(csv)
# NOTE(review): DataFrame.from_csv is deprecated in modern pandas; this
# script presumably targets an older pandas — consider pd.read_csv.
csv_pd = pd.DataFrame.from_csv(bio)
# One JSON object per spreadsheet row.
json_objs = csv_pd.reset_index().to_json(orient='records')
dict_array= ujson.loads(json_objs)
# Edit to point to elasticsearch instance
es = Elasticsearch('ec2-52-10-17-100.us-west-2.compute.amazonaws.com:9200')
# Drop any previous index so the load is a full refresh.
# ignore 404 and 400
es.indices.delete(index='datasets_index', ignore=[400, 404])
# Re-index every row as a 'dataset' document.
for d in dict_array:
    print(d)
    res = es.index(index="datasets_index", doc_type="dataset", body=d)
    # NOTE(review): res['created'] is specific to older Elasticsearch
    # clients; newer versions report res['result'] — TODO confirm version.
    print(res['created'])
|
{
"content_hash": "13f61e92e5d2210e6fd8f9f94b9d944d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 152,
"avg_line_length": 34.90909090909091,
"alnum_prop": 0.7408854166666666,
"repo_name": "bhillmann/dusty_coffin",
"id": "e3f8fcfaa798b44a89b56422805b75cf3c565256",
"size": "768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dusty_coffin/elasticsearch_custom/import_spreadsheet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2940"
},
{
"name": "CoffeeScript",
"bytes": "6158"
},
{
"name": "HTML",
"bytes": "38708"
},
{
"name": "JavaScript",
"bytes": "10349"
},
{
"name": "Nginx",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "2282"
},
{
"name": "Shell",
"bytes": "763"
}
],
"symlink_target": ""
}
|
import SimpleHTTPServer
import SocketServer
import traceback
import os
import sys
import pymysql.cursors
import subprocess
to_shm = os.path.abspath(os.path.dirname(os.path.abspath(__file__ + os.sep + '..')))
sys.path.append(to_shm)
import mysqldb
db = mysqldb.MysqlConn()

# Validate CLI usage. BUGFIX: the original printed the usage message but then
# fell through to sys.argv[1], crashing with IndexError; exit explicitly.
if len(sys.argv) != 2:
    print('Expecting an argument: filename')
    sys.exit(1)

input_path = sys.argv[1]

# Each input line is "<name>\t<text>"; insert one row per line.
# `with` guarantees the file handle is closed (the original leaked it).
with open(input_path, 'r') as input_file:
    for line in input_file:
        try:
            splitted = line.replace('\n', '').split('\t')
            name = splitted[0]
            text = splitted[1]
            sql = 'INSERT INTO `OverBlockTraining`(`name`, `text`) VALUES (%s,%s)'
            values = [name, text]
            db.execute(sql, values)
        except Exception:
            # Best-effort load: skip malformed lines / failed inserts, but no
            # longer swallow KeyboardInterrupt/SystemExit like the bare except did.
            continue
|
{
"content_hash": "1d20d4ec4d05e47c9b5338e8e5394a46",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 84,
"avg_line_length": 22.323529411764707,
"alnum_prop": 0.6310935441370223,
"repo_name": "ke00n/alabno",
"id": "ef7090877540980ee51ef7fd9e147342d32cda54",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/overlapping-block-marker/UploadTrainingToDatabase.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3234"
},
{
"name": "Go",
"bytes": "3664"
},
{
"name": "HTML",
"bytes": "21696"
},
{
"name": "Haskell",
"bytes": "9701"
},
{
"name": "Java",
"bytes": "230912"
},
{
"name": "JavaScript",
"bytes": "30201"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "66943"
},
{
"name": "Scala",
"bytes": "51282"
},
{
"name": "Shell",
"bytes": "1259"
}
],
"symlink_target": ""
}
|
"""Provides device triggers for binary sensors."""
import voluptuous as vol
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.const import (
CONF_TURNED_OFF,
CONF_TURNED_ON,
)
from homeassistant.components.homeassistant.triggers import state as state_trigger
from homeassistant.const import CONF_ENTITY_ID, CONF_FOR, CONF_TYPE
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import get_device_class
from homeassistant.helpers.entity_registry import async_entries_for_device
from . import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_BATTERY_CHARGING,
DEVICE_CLASS_COLD,
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GARAGE_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_MOVING,
DEVICE_CLASS_OCCUPANCY,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PLUG,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESENCE,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DEVICE_CLASS_TAMPER,
DEVICE_CLASS_UPDATE,
DEVICE_CLASS_VIBRATION,
DEVICE_CLASS_WINDOW,
DOMAIN,
)
# mypy: allow-untyped-defs, no-check-untyped-defs
# Pseudo device class for entities that declared no device class.
DEVICE_CLASS_NONE = "none"

# Trigger-type identifiers: one on/off pair per binary-sensor device class.
CONF_BAT_LOW = "bat_low"
CONF_NOT_BAT_LOW = "not_bat_low"
CONF_CHARGING = "charging"
CONF_NOT_CHARGING = "not_charging"
CONF_COLD = "cold"
CONF_NOT_COLD = "not_cold"
CONF_CONNECTED = "connected"
CONF_NOT_CONNECTED = "not_connected"
CONF_GAS = "gas"
CONF_NO_GAS = "no_gas"
CONF_HOT = "hot"
CONF_NOT_HOT = "not_hot"
CONF_LIGHT = "light"
CONF_NO_LIGHT = "no_light"
CONF_LOCKED = "locked"
CONF_NOT_LOCKED = "not_locked"
CONF_MOIST = "moist"
CONF_NOT_MOIST = "not_moist"
CONF_MOTION = "motion"
CONF_NO_MOTION = "no_motion"
CONF_MOVING = "moving"
CONF_NOT_MOVING = "not_moving"
CONF_OCCUPIED = "occupied"
CONF_NOT_OCCUPIED = "not_occupied"
CONF_PLUGGED_IN = "plugged_in"
CONF_NOT_PLUGGED_IN = "not_plugged_in"
CONF_POWERED = "powered"
CONF_NOT_POWERED = "not_powered"
CONF_PRESENT = "present"
CONF_NOT_PRESENT = "not_present"
CONF_PROBLEM = "problem"
CONF_NO_PROBLEM = "no_problem"
CONF_UNSAFE = "unsafe"
CONF_NOT_UNSAFE = "not_unsafe"
CONF_SMOKE = "smoke"
CONF_NO_SMOKE = "no_smoke"
CONF_SOUND = "sound"
CONF_NO_SOUND = "no_sound"
CONF_TAMPERED = "tampered"
CONF_NOT_TAMPERED = "not_tampered"
CONF_UPDATE = "update"
CONF_NO_UPDATE = "no_update"
CONF_VIBRATION = "vibration"
CONF_NO_VIBRATION = "no_vibration"
CONF_OPENED = "opened"
CONF_NOT_OPENED = "not_opened"

# Trigger types that fire when the sensor's state becomes "on"
# (see async_attach_trigger, which maps membership here to to_state="on").
TURNED_ON = [
    CONF_BAT_LOW,
    CONF_COLD,
    CONF_CONNECTED,
    CONF_GAS,
    CONF_HOT,
    CONF_LIGHT,
    CONF_NOT_LOCKED,
    CONF_MOIST,
    CONF_MOTION,
    CONF_MOVING,
    CONF_OCCUPIED,
    CONF_OPENED,
    CONF_PLUGGED_IN,
    CONF_POWERED,
    CONF_PRESENT,
    CONF_PROBLEM,
    CONF_SMOKE,
    CONF_SOUND,
    CONF_UNSAFE,
    CONF_UPDATE,
    CONF_VIBRATION,
    CONF_TAMPERED,
    CONF_TURNED_ON,
]

# Trigger types that fire when the sensor's state becomes "off".
TURNED_OFF = [
    CONF_NOT_BAT_LOW,
    CONF_NOT_COLD,
    CONF_NOT_CONNECTED,
    CONF_NOT_HOT,
    CONF_LOCKED,
    CONF_NOT_MOIST,
    CONF_NOT_MOVING,
    CONF_NOT_OCCUPIED,
    CONF_NOT_OPENED,
    CONF_NOT_PLUGGED_IN,
    CONF_NOT_POWERED,
    CONF_NOT_PRESENT,
    CONF_NOT_TAMPERED,
    CONF_NOT_UNSAFE,
    CONF_NO_GAS,
    CONF_NO_LIGHT,
    CONF_NO_MOTION,
    CONF_NO_PROBLEM,
    CONF_NO_SMOKE,
    CONF_NO_SOUND,
    CONF_NO_VIBRATION,
    CONF_TURNED_OFF,
]

# Trigger templates offered for each device class (used by async_get_triggers).
ENTITY_TRIGGERS = {
    DEVICE_CLASS_BATTERY: [{CONF_TYPE: CONF_BAT_LOW}, {CONF_TYPE: CONF_NOT_BAT_LOW}],
    DEVICE_CLASS_BATTERY_CHARGING: [
        {CONF_TYPE: CONF_CHARGING},
        {CONF_TYPE: CONF_NOT_CHARGING},
    ],
    DEVICE_CLASS_COLD: [{CONF_TYPE: CONF_COLD}, {CONF_TYPE: CONF_NOT_COLD}],
    DEVICE_CLASS_CONNECTIVITY: [
        {CONF_TYPE: CONF_CONNECTED},
        {CONF_TYPE: CONF_NOT_CONNECTED},
    ],
    DEVICE_CLASS_DOOR: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
    DEVICE_CLASS_GARAGE_DOOR: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
    DEVICE_CLASS_GAS: [{CONF_TYPE: CONF_GAS}, {CONF_TYPE: CONF_NO_GAS}],
    DEVICE_CLASS_HEAT: [{CONF_TYPE: CONF_HOT}, {CONF_TYPE: CONF_NOT_HOT}],
    DEVICE_CLASS_LIGHT: [{CONF_TYPE: CONF_LIGHT}, {CONF_TYPE: CONF_NO_LIGHT}],
    DEVICE_CLASS_LOCK: [{CONF_TYPE: CONF_LOCKED}, {CONF_TYPE: CONF_NOT_LOCKED}],
    DEVICE_CLASS_MOISTURE: [{CONF_TYPE: CONF_MOIST}, {CONF_TYPE: CONF_NOT_MOIST}],
    DEVICE_CLASS_MOTION: [{CONF_TYPE: CONF_MOTION}, {CONF_TYPE: CONF_NO_MOTION}],
    DEVICE_CLASS_MOVING: [{CONF_TYPE: CONF_MOVING}, {CONF_TYPE: CONF_NOT_MOVING}],
    DEVICE_CLASS_OCCUPANCY: [
        {CONF_TYPE: CONF_OCCUPIED},
        {CONF_TYPE: CONF_NOT_OCCUPIED},
    ],
    DEVICE_CLASS_OPENING: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
    DEVICE_CLASS_PLUG: [{CONF_TYPE: CONF_PLUGGED_IN}, {CONF_TYPE: CONF_NOT_PLUGGED_IN}],
    DEVICE_CLASS_POWER: [{CONF_TYPE: CONF_POWERED}, {CONF_TYPE: CONF_NOT_POWERED}],
    DEVICE_CLASS_PRESENCE: [{CONF_TYPE: CONF_PRESENT}, {CONF_TYPE: CONF_NOT_PRESENT}],
    DEVICE_CLASS_PROBLEM: [{CONF_TYPE: CONF_PROBLEM}, {CONF_TYPE: CONF_NO_PROBLEM}],
    DEVICE_CLASS_SAFETY: [{CONF_TYPE: CONF_UNSAFE}, {CONF_TYPE: CONF_NOT_UNSAFE}],
    DEVICE_CLASS_SMOKE: [{CONF_TYPE: CONF_SMOKE}, {CONF_TYPE: CONF_NO_SMOKE}],
    DEVICE_CLASS_SOUND: [{CONF_TYPE: CONF_SOUND}, {CONF_TYPE: CONF_NO_SOUND}],
    DEVICE_CLASS_UPDATE: [{CONF_TYPE: CONF_UPDATE}, {CONF_TYPE: CONF_NO_UPDATE}],
    DEVICE_CLASS_TAMPER: [{CONF_TYPE: CONF_TAMPERED}, {CONF_TYPE: CONF_NOT_TAMPERED}],
    DEVICE_CLASS_VIBRATION: [
        {CONF_TYPE: CONF_VIBRATION},
        {CONF_TYPE: CONF_NO_VIBRATION},
    ],
    DEVICE_CLASS_WINDOW: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
    DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_TURNED_ON}, {CONF_TYPE: CONF_TURNED_OFF}],
}

# Config schema for a device trigger of this platform.
TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_TYPE): vol.In(TURNED_OFF + TURNED_ON),
        vol.Optional(CONF_FOR): cv.positive_time_period_dict,
    }
)
async def async_attach_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration."""
    # Map the device-trigger type onto the target binary state.
    to_state = "on" if config[CONF_TYPE] in TURNED_ON else "off"

    state_config = {
        state_trigger.CONF_PLATFORM: "state",
        state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
        state_trigger.CONF_TO: to_state,
    }
    if CONF_FOR in config:
        state_config[CONF_FOR] = config[CONF_FOR]
    state_config = state_trigger.TRIGGER_SCHEMA(state_config)

    # Delegate to the generic state trigger, tagged as a device trigger.
    return await state_trigger.async_attach_trigger(
        hass, state_config, action, automation_info, platform_type="device"
    )
async def async_get_triggers(hass, device_id):
    """List device triggers."""
    entity_registry = await hass.helpers.entity_registry.async_get_registry()
    triggers = []

    for entry in async_entries_for_device(entity_registry, device_id):
        # Only binary_sensor entities belong to this platform.
        if entry.domain != DOMAIN:
            continue

        device_class = get_device_class(hass, entry.entity_id) or DEVICE_CLASS_NONE
        templates = ENTITY_TRIGGERS.get(
            device_class, ENTITY_TRIGGERS[DEVICE_CLASS_NONE]
        )
        # Expand each per-class template into a concrete trigger description.
        for automation in templates:
            triggers.append(
                {
                    **automation,
                    "platform": "device",
                    "device_id": device_id,
                    "entity_id": entry.entity_id,
                    "domain": DOMAIN,
                }
            )

    return triggers
async def async_get_trigger_capabilities(hass, config):
    """List trigger capabilities."""
    # Only optional extra field: a "for" duration on the state trigger.
    extra_fields = vol.Schema(
        {vol.Optional(CONF_FOR): cv.positive_time_period_dict}
    )
    return {"extra_fields": extra_fields}
|
{
"content_hash": "612e9065f7fb1c9d11a206559709076b",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 88,
"avg_line_length": 30.53639846743295,
"alnum_prop": 0.6558343789209535,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "f3eb1851247c4f818be35e4ce823909bddce3fa6",
"size": "7970",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/binary_sensor/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""Tests for the noisy 2D Branin loss function."""
import os
import sys
import unittest
import tensorflow as tf
import numpy as np
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from deepobs.tensorflow import testproblems
class Two_d_BraninTest(unittest.TestCase):
    """Tests for the noisy 2D Branin loss function."""

    def setUp(self):
        """Sets up the 2D dataset for the tests."""
        self.batch_size = 100
        self.two_d_branin = testproblems.two_d_branin(self.batch_size)

    def test_init_ops(self):
        """Tests all three initialization operations."""
        # TF1 graph mode: rebuild the graph from scratch with a fixed seed.
        tf.reset_default_graph()
        tf.set_random_seed(42)
        self.two_d_branin.set_up()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            num_param = [
                np.prod(v.get_shape().as_list())
                for v in tf.trainable_variables()
            ]
            # Check if number of parameters per "layer" is equal to what we expect
            # We will write them in the following form:
            # - Conv layer: [input_filter*output_filter*kernel[0]*kernel[1]]
            # - Batch norm: [input, input] (for beta and gamma)
            # - Fully connected: [input*output]
            # - Bias: [dim]
            # Branin is a 2D test function: two scalar trainable variables.
            self.assertEqual(num_param, [
                1, 1
            ])
            # Each init op (train / test / train-eval) must leave the problem
            # in a state where losses and regularizer can be evaluated.
            for init_op in [
                    self.two_d_branin.train_init_op,
                    self.two_d_branin.test_init_op,
                    self.two_d_branin.train_eval_init_op
            ]:
                sess.run(init_op)
                losses_, regularizer_ = sess.run([
                    self.two_d_branin.losses, self.two_d_branin.regularizer
                ])
                # Per-example losses: one scalar per batch element.
                self.assertEqual(losses_.shape, (self.batch_size, ))
                self.assertIsInstance(regularizer_, np.float32)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "abd568cb592dbe25c9ed1c381c6c6cbe",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 96,
"avg_line_length": 35.142857142857146,
"alnum_prop": 0.5604674796747967,
"repo_name": "fsschneider/DeepOBS",
"id": "14cb8d11219524ac60a4824834022fa4b90f1b99",
"size": "1992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testproblems/test_two_d_branin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "368026"
},
{
"name": "Shell",
"bytes": "8516"
}
],
"symlink_target": ""
}
|
from pyflink.table import DataTypes
from pyflink.table.udf import ScalarFunction, udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, \
PyFlinkBlinkStreamTableTestCase, PyFlinkBlinkBatchTableTestCase
class UserDefinedFunctionTests(object):
    """Shared test body for Python scalar UDFs.

    Mixed into concrete planner test cases below; relies on the hosting
    TestCase providing ``self.t_env`` and ``assert_equals``.
    """
    def test_scalar_function(self):
        """Registers UDFs built from every supported callable form and runs them."""
        # test lambda function
        self.t_env.register_function(
            "add_one", udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT()))
        # test Python ScalarFunction
        self.t_env.register_function(
            "subtract_one", udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT()))
        # test Python function
        self.t_env.register_function("add", add)
        # test callable function
        self.t_env.register_function(
            "add_one_callable", udf(CallablePlus(), DataTypes.BIGINT(), DataTypes.BIGINT()))
        def partial_func(col, param):
            return col + param
        # test partial function
        import functools
        self.t_env.register_function(
            "add_one_partial",
            udf(functools.partial(partial_func, param=1), DataTypes.BIGINT(), DataTypes.BIGINT()))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd', 'e', 'f'],
            [DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(),
             DataTypes.BIGINT(), DataTypes.BIGINT()])
        self.t_env.register_table_sink("Results", table_sink)
        t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
        # The where clause also exercises a UDF in a filter predicate.
        t.where("add_one(b) <= 3") \
            .select("add_one(a), subtract_one(b), add(a, c), add_one_callable(a), "
                    "add_one_partial(a), a") \
            .insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["2,1,4,2,2,1", "4,0,12,4,4,3"])
    def test_chaining_scalar_function(self):
        """Verifies that UDF calls can be nested inside one another."""
        self.t_env.register_function(
            "add_one", udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT()))
        self.t_env.register_function(
            "subtract_one", udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT()))
        self.t_env.register_function("add", add)
        table_sink = source_sink_utils.TestAppendSink(['a'], [DataTypes.BIGINT()])
        self.t_env.register_table_sink("Results", table_sink)
        t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
        t.select("add(add_one(a), subtract_one(b))") \
            .insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["3", "7", "4"])
    def test_udf_in_join_condition(self):
        """Uses a UDF on one side of an equi-join condition."""
        t1 = self.t_env.from_elements([(2, "Hi")], ['a', 'b'])
        t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd'])
        self.t_env.register_function("f", udf(lambda i: i, DataTypes.BIGINT(), DataTypes.BIGINT()))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd'],
            [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()])
        self.t_env.register_table_sink("Results", table_sink)
        t1.join(t2).where("f(a) = c").insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["2,Hi,2,Flink"])
    def test_udf_in_join_condition_2(self):
        """Uses the same UDF on both sides of an equi-join condition."""
        t1 = self.t_env.from_elements([(1, "Hi"), (2, "Hi")], ['a', 'b'])
        t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd'])
        self.t_env.register_function("f", udf(lambda i: i, DataTypes.BIGINT(), DataTypes.BIGINT()))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd'],
            [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()])
        self.t_env.register_table_sink("Results", table_sink)
        t1.join(t2).where("f(a) = f(c)").insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["2,Hi,2,Flink"])
    def test_udf_with_constant_params(self):
        """Passes SQL constants of every supported type into a UDF and checks them."""
        def udf_with_constant_params(p, null_param, tinyint_param, smallint_param, int_param,
                                     bigint_param, decimal_param, float_param, double_param,
                                     boolean_param, str_param,
                                     date_param, time_param, timestamp_param):
            # decide whether two floats are equal
            def float_equal(a, b, rel_tol=1e-09, abs_tol=0.0):
                return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
            from decimal import Decimal
            import datetime
            # Each assert validates that the SQL constant arrived as the
            # expected Python type/value on the worker side.
            assert null_param is None, 'null_param is wrong value %s' % null_param
            assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
                                                   % type(tinyint_param)
            p += tinyint_param
            assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
                                                    % type(smallint_param)
            p += smallint_param
            assert isinstance(int_param, int), 'int_param of wrong type %s !' \
                                               % type(int_param)
            p += int_param
            assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
                                                  % type(bigint_param)
            p += bigint_param
            assert decimal_param == Decimal('1.05'), \
                'decimal_param is wrong value %s ' % decimal_param
            p += int(decimal_param)
            assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-06), \
                'float_param is wrong value %s ' % float_param
            p += int(float_param)
            assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-07), \
                'double_param is wrong value %s ' % double_param
            p += int(double_param)
            assert boolean_param is True, 'boolean_param is wrong value %s' % boolean_param
            assert str_param == 'flink', 'str_param is wrong value %s' % str_param
            assert date_param == datetime.date(year=2014, month=9, day=13), \
                'date_param is wrong value %s' % date_param
            assert time_param == datetime.time(hour=12, minute=0, second=0), \
                'time_param is wrong value %s' % time_param
            assert timestamp_param == datetime.datetime(1999, 9, 10, 5, 20, 10), \
                'timestamp_param is wrong value %s' % timestamp_param
            return p
        self.t_env.register_function("udf_with_constant_params",
                                     udf(udf_with_constant_params,
                                         input_types=[DataTypes.BIGINT(),
                                                      DataTypes.BIGINT(),
                                                      DataTypes.TINYINT(),
                                                      DataTypes.SMALLINT(),
                                                      DataTypes.INT(),
                                                      DataTypes.BIGINT(),
                                                      DataTypes.DECIMAL(20, 10),
                                                      DataTypes.FLOAT(),
                                                      DataTypes.DOUBLE(),
                                                      DataTypes.BOOLEAN(),
                                                      DataTypes.STRING(),
                                                      DataTypes.DATE(),
                                                      DataTypes.TIME(),
                                                      DataTypes.TIMESTAMP()],
                                         result_type=DataTypes.BIGINT()))
        self.t_env.register_function(
            "udf_with_all_constant_params", udf(lambda i, j: i + j,
                                                [DataTypes.BIGINT(), DataTypes.BIGINT()],
                                                DataTypes.BIGINT()))
        table_sink = source_sink_utils.TestAppendSink(['a', 'b'],
                                                      [DataTypes.BIGINT(), DataTypes.BIGINT()])
        self.t_env.register_table_sink("Results", table_sink)
        t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
        self.t_env.register_table("test_table", t)
        self.t_env.sql_query("select udf_with_all_constant_params("
                             "cast (1 as BIGINT),"
                             "cast (2 as BIGINT)), "
                             "udf_with_constant_params(a, "
                             "cast (null as BIGINT),"
                             "cast (1 as TINYINT),"
                             "cast (1 as SMALLINT),"
                             "cast (1 as INT),"
                             "cast (1 as BIGINT),"
                             "cast (1.05 as DECIMAL),"
                             "cast (1.23 as FLOAT),"
                             "cast (1.98932 as DOUBLE),"
                             "true,"
                             "'flink',"
                             "cast ('2014-09-13' as DATE),"
                             "cast ('12:00:00' as TIME),"
                             "cast ('1999-9-10 05:20:10' as TIMESTAMP))"
                             " from test_table").insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["3,8", "3,9", "3,10"])
    def test_overwrite_builtin_function(self):
        """A user-registered UDF named like a builtin must shadow the builtin."""
        self.t_env.register_function(
            "plus", udf(lambda i, j: i + j - 1,
                        [DataTypes.BIGINT(), DataTypes.BIGINT()], DataTypes.BIGINT()))
        table_sink = source_sink_utils.TestAppendSink(['a'], [DataTypes.BIGINT()])
        self.t_env.register_table_sink("Results", table_sink)
        t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
        t.select("plus(a, b)").insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["2", "6", "3"])
    def test_open(self):
        """ScalarFunction.open must run before eval (Subtract sets state in open)."""
        self.t_env.register_function(
            "subtract", udf(Subtract(), DataTypes.BIGINT(), DataTypes.BIGINT()))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
        self.t_env.register_table_sink("Results", table_sink)
        t = self.t_env.from_elements([(1, 2), (2, 5), (3, 4)], ['a', 'b'])
        t.select("a, subtract(b)").insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["1,1", "2,4", "3,3"])
    def test_deterministic(self):
        """Checks the _deterministic flag for each way of constructing a UDF."""
        add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT())
        self.assertTrue(add_one._deterministic)
        add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT(), deterministic=False)
        self.assertFalse(add_one._deterministic)
        subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())
        self.assertTrue(subtract_one._deterministic)
        with self.assertRaises(ValueError, msg="Inconsistent deterministic: False and True"):
            udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT(), deterministic=False)
        self.assertTrue(add._deterministic)
        @udf(input_types=DataTypes.BIGINT(), result_type=DataTypes.BIGINT(), deterministic=False)
        def non_deterministic_udf(i):
            return i
        self.assertFalse(non_deterministic_udf._deterministic)
    def test_name(self):
        """Checks the derived and explicitly provided _name of a UDF."""
        add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT())
        self.assertEqual("<lambda>", add_one._name)
        add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT(), name="add_one")
        self.assertEqual("add_one", add_one._name)
        subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())
        self.assertEqual("SubtractOne", subtract_one._name)
        subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT(),
                           name="subtract_one")
        self.assertEqual("subtract_one", subtract_one._name)
        self.assertEqual("add", add._name)
        @udf(input_types=DataTypes.BIGINT(), result_type=DataTypes.BIGINT(), name="named")
        def named_udf(i):
            return i
        self.assertEqual("named", named_udf._name)
    def test_abc(self):
        """ScalarFunction subclasses without eval must not be instantiable."""
        class UdfWithoutEval(ScalarFunction):
            def open(self, function_context):
                pass
        with self.assertRaises(
                TypeError,
                msg="Can't instantiate abstract class UdfWithoutEval with abstract methods eval"):
            UdfWithoutEval()
    def test_invalid_udf(self):
        """Objects that are neither functions nor callables are rejected."""
        class Plus(object):
            def eval(self, col):
                return col + 1
        with self.assertRaises(
                TypeError,
                msg="Invalid function: not a function or callable (__call__ is not defined)"):
            # test non-callable function
            self.t_env.register_function(
                "non-callable-udf", udf(Plus(), DataTypes.BIGINT(), DataTypes.BIGINT()))
    def test_udf_without_arguments(self):
        """Zero-argument UDFs work both deterministically and not."""
        self.t_env.register_function("one", udf(
            lambda: 1, input_types=[], result_type=DataTypes.BIGINT(), deterministic=True))
        self.t_env.register_function("two", udf(
            lambda: 2, input_types=[], result_type=DataTypes.BIGINT(), deterministic=False))
        table_sink = source_sink_utils.TestAppendSink(['a', 'b'],
                                                      [DataTypes.BIGINT(), DataTypes.BIGINT()])
        self.t_env.register_table_sink("Results", table_sink)
        t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
        t.select("one(), two()").insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["1,2", "1,2", "1,2"])
class PyFlinkStreamUserDefinedFunctionTests(UserDefinedFunctionTests,
                                            PyFlinkStreamTableTestCase):
    """Runs UserDefinedFunctionTests under PyFlinkStreamTableTestCase."""
    pass
class PyFlinkBlinkStreamUserDefinedFunctionTests(UserDefinedFunctionTests,
                                                 PyFlinkBlinkStreamTableTestCase):
    """Runs UserDefinedFunctionTests under PyFlinkBlinkStreamTableTestCase."""
    pass
class PyFlinkBlinkBatchUserDefinedFunctionTests(UserDefinedFunctionTests,
                                                PyFlinkBlinkBatchTableTestCase):
    """Runs UserDefinedFunctionTests under PyFlinkBlinkBatchTableTestCase."""
    pass
# Module-level plain Python function UDF shared by the test cases above.
@udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()], result_type=DataTypes.BIGINT())
def add(i, j):
    return i + j
class SubtractOne(ScalarFunction):
    """ScalarFunction that maps every input to input - 1."""

    def eval(self, i):
        decremented = i - 1
        return decremented
class Subtract(ScalarFunction):
    """ScalarFunction whose subtrahend is configured when the runtime calls open()."""

    def __init__(self):
        # Remains 0 until the framework invokes open().
        self.subtracted_value = 0

    def open(self, function_context):
        # Used by test_open to prove open() runs before eval().
        self.subtracted_value = 1

    def eval(self, i):
        return i - self.subtracted_value
class CallablePlus(object):
    """Plain callable (no ScalarFunction base) used to register a callable UDF."""

    def __call__(self, col):
        incremented = col + 1
        return incremented
if __name__ == '__main__':
    import unittest

    # Prefer the XML test runner (used by CI for report files); fall back to
    # the default text runner when xmlrunner is not installed.
    try:
        import xmlrunner
    except ImportError:
        testRunner = None
    else:
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    unittest.main(testRunner=testRunner, verbosity=2)
|
{
"content_hash": "42231afc050f605ed56121fff6e35d34",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 99,
"avg_line_length": 43.06284153005465,
"alnum_prop": 0.5286466594759216,
"repo_name": "mbode/flink",
"id": "d529681f2e919aa5774b3ecb4efe0cdcd6d52209",
"size": "16719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/table/tests/test_udf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5666"
},
{
"name": "CSS",
"bytes": "18100"
},
{
"name": "Clojure",
"bytes": "80170"
},
{
"name": "CoffeeScript",
"bytes": "91220"
},
{
"name": "Dockerfile",
"bytes": "9756"
},
{
"name": "HTML",
"bytes": "86821"
},
{
"name": "Java",
"bytes": "40396270"
},
{
"name": "JavaScript",
"bytes": "8267"
},
{
"name": "Python",
"bytes": "249644"
},
{
"name": "Scala",
"bytes": "7495458"
},
{
"name": "Shell",
"bytes": "388738"
}
],
"symlink_target": ""
}
|
from sklearn.naive_bayes import MultinomialNB
# from sklearn.naive_bayes import BernoulliNB
from sklearn.feature_extraction.text import TfidfVectorizer
# from sklearn.ensemble import RandomForestClassifier
import pickle
from util import plot_roc
# spacy_tok
class NLPModel(object):
    """Simple NLP pipeline pairing a TFIDF vectorizer with a classifier.

    Attributes:
        clf: sklearn classifier model
        vectorizer: TFIDF vectorizer or similar
    """

    def __init__(self):
        self.clf = MultinomialNB()
        self.vectorizer = TfidfVectorizer()

    def vectorizer_fit(self, X):
        """Fits a TFIDF vectorizer to the text"""
        self.vectorizer.fit(X)

    def vectorizer_transform(self, X):
        """Transform the text data to a sparse TFIDF matrix"""
        return self.vectorizer.transform(X)

    def train(self, X, y):
        """Trains the classifier to associate the label with the sparse matrix"""
        self.clf.fit(X, y)

    def predict_proba(self, X):
        """Returns probability for the binary class '1' in a numpy array"""
        return self.clf.predict_proba(X)[:, 1]

    def predict(self, X):
        """Returns the predicted class in an array"""
        return self.clf.predict(X)

    def pickle_vectorizer(self, path='chalicelib/models/TFIDFVectorizer.pkl'):
        """Saves the trained vectorizer for future use."""
        with open(path, 'wb') as handle:
            pickle.dump(self.vectorizer, handle)
        print("Pickled vectorizer at {}".format(path))

    def pickle_clf(self, path='chalicelib/models/SentimentClassifier.pkl'):
        """Saves the trained classifier for future use."""
        with open(path, 'wb') as handle:
            pickle.dump(self.clf, handle)
        print("Pickled classifier at {}".format(path))

    def plot_roc(self, X, y, size_x, size_y):
        """Plot the ROC curve for X_test and y_test."""
        plot_roc(self.clf, X, y, size_x, size_y)
|
{
"content_hash": "a9517b27451b5c52cdec76e0e7e21184",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.6090909090909091,
"repo_name": "zzsza/TIL",
"id": "c771232cb69d6dd127b1fff14dbaa24953c8821f",
"size": "2213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/machinelearing-with-flask/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "431717"
},
{
"name": "Java",
"bytes": "19334"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Julia",
"bytes": "314"
},
{
"name": "Jupyter Notebook",
"bytes": "15381217"
},
{
"name": "Python",
"bytes": "124497"
},
{
"name": "Shell",
"bytes": "1958"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the ``limit`` and ``threshold`` fields from ``priceestimate``."""

    dependencies = [
        ('cost_tracking', '0025_increase_price_decimal_places'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='priceestimate',
            name='limit',
        ),
        migrations.RemoveField(
            model_name='priceestimate',
            name='threshold',
        ),
    ]
|
{
"content_hash": "8272dbb2c5107517ba01d5aad34104ed",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 21.714285714285715,
"alnum_prop": 0.5789473684210527,
"repo_name": "opennode/nodeconductor",
"id": "466bad4eb1b02bb98dfaa89bef74e40783ef479f",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "waldur_core/cost_tracking/migrations/0026_remove_limit_threshold.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1877"
},
{
"name": "HTML",
"bytes": "17528"
},
{
"name": "JavaScript",
"bytes": "248900"
},
{
"name": "Python",
"bytes": "1254720"
}
],
"symlink_target": ""
}
|
import unittest

# Discover every test module under the current directory and run the whole
# suite with the plain text runner.
suite = unittest.defaultTestLoader.discover('.')
unittest.TextTestRunner().run(suite)
|
{
"content_hash": "0d810ad8d3e107e5d5e378dfd62752d0",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 28.8,
"alnum_prop": 0.7847222222222222,
"repo_name": "mscelnik/besett",
"id": "89ccbc4f7c8a997cf6e87c8216f10a4cb78e83cc",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".conda-recipe/run_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37017"
}
],
"symlink_target": ""
}
|
import csv
import json
import logging
import urllib2
import datetime
import json
import webapp2
import model
import server.seed
class Pollution:
    """Plain record holding one pollution reading and its station context."""

    def __init__(self, id, parameter, tecnic, period, ce01, ce02, ce03, station, timestamp, value):
        # Store every constructor argument as an attribute of the same name.
        self.__dict__.update(
            id=id, parameter=parameter, tecnic=tecnic, period=period,
            ce01=ce01, ce02=ce02, ce03=ce03, station=station,
            timestamp=timestamp, value=value,
        )
class Stations:
    """Downloads Madrid's hourly air-quality CSV feed and stores readings."""

    def __init__(self):
        # Open-data endpoint: one CSV row per (station, parameter) with 24
        # hourly (value, validity-flag) column pairs.
        self.url = "http://www.mambiente.munimadrid.es/opendata/horario.txt"
        # NOTE(review): the repeated "v" field name collapses in DictReader
        # (later columns overwrite earlier ones under the same key); only the
        # distinct "hourN" keys are read below, so the validity flags are
        # effectively discarded — confirm this is intended.
        self.fieldnames = ( "ce01","ce02","ce03","parameter","tecnic","period","year","month","day",
        "hour0","v","hour1","v","hour2","v","hour3","v","hour4","v", "hour5","v",
        "hour6","v","hour7","v","hour8","v","hour9","v","hour10","v","hour11","v",
        "hour12","v","hour13","v","hour14","v","hour15","v","hour16","v", "hour17",
        "v","hour18","v","hour19","v","hour20","v","hour21","v","hour22","v","hour23","v")

    def get(self):
        """Fetch the feed and persist the previous hour's value per known station."""
        response = urllib2.urlopen(self.url)
        rows = csv.DictReader( response, self.fieldnames)
        for row in rows:
            # Previous clock hour. NOTE(review): evaluates to -1 just after
            # midnight, making "hour-1" a missing key — confirm behavior.
            hour = datetime.datetime.now().hour - 1
            # Station code is the numeric concatenation of the three CE fields.
            stcode = int(str(row["ce01"]) + str(row["ce02"]) + str(row["ce03"]))
            station = server.seed.Station.query(server.seed.Station.StationCod==stcode).get()
            if station:
                # First argument builds a unique id from code+parameter+date+hour.
                model.UpdateAire( '' + row["ce01"] + row["ce02"] + row["ce03"] + row["parameter"] + row["year"] + row["month"] + row["day"] + str(hour),
                datetime.datetime(int(row["year"]), int(row["month"]), int(row["day"]), hour, 0),
                int(row["parameter"]),
                int(row["tecnic"]),
                int(row["period"]),
                float(row["hour" + str(hour)]),
                int(row["ce01"]),
                int(row["ce02"]),
                int(row["ce03"]),
                station
                )
# Module-level instance reused by every cron invocation.
stations = Stations()
class RestHandler(webapp2.RequestHandler):
    """Base request handler with a JSON response helper."""

    def dispatch(self):
        #time.sleep(1)
        super(RestHandler, self).dispatch()

    def SendJson(self, r):
        """Write r to the response as JSON (served with a text/plain header)."""
        self.response.headers['content-type'] = 'text/plain'
        self.response.write(json.dumps(r))
class LaunchCron(RestHandler):
    """Cron entry point: a GET triggers a fetch of the pollution feed."""

    def get(self):
        stations.get()
# WSGI application exposing the cron endpoint that refreshes the feed.
STATIONS = webapp2.WSGIApplication([
    ('/tasks/stations', LaunchCron)
    #('/rest/insert', InsertHandler),
    #('/rest/delete', DeleteHandler),
    #('/rest/update', UpdateHandler),
], debug=True)
|
{
"content_hash": "947ebd00b2ee6f866bf69e9f9e08f804",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 149,
"avg_line_length": 33.71604938271605,
"alnum_prop": 0.5327718784328085,
"repo_name": "emarinizquierdo/breathe-better",
"id": "9e3cf0d18603cd3c6093aa4937ec224cc6eab0de",
"size": "2731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/stations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "74378"
},
{
"name": "HTML",
"bytes": "839891"
},
{
"name": "JavaScript",
"bytes": "100158"
},
{
"name": "Python",
"bytes": "15024"
}
],
"symlink_target": ""
}
|
"""Implementation of k-means clustering on top of `Estimator` API.
This module is deprecated. Please use
@{tf.contrib.factorization.KMeansClustering} instead of
@{tf.contrib.learn.KMeansClustering}. It has a similar interface, but uses the
@{tf.estimator.Estimator} API instead of @{tf.contrib.learn.Estimator}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.deprecation import deprecated
# Deprecation message attached to every public entry point of this module.
_USE_TF_CONTRIB_FACTORIZATION = (
    'Please use tf.contrib.factorization.KMeansClustering instead of'
    ' tf.contrib.learn.KMeansClustering. It has a similar interface, but uses'
    ' the tf.estimator.Estimator API instead of tf.contrib.learn.Estimator.')
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
  """Stops training once the relative change in loss drops below a tolerance."""

  def __init__(self, tolerance):
    """Initializes _LossRelativeChangeHook.

    Args:
      tolerance: A relative tolerance of change between iterations.
    """
    self._tolerance = tolerance
    self._prev_loss = None

  def begin(self):
    # Look up the named loss tensor created by the model_fn.
    graph = ops.get_default_graph()
    self._loss_tensor = graph.get_tensor_by_name(
        KMeansClustering.LOSS_OP_NAME + ':0')
    assert self._loss_tensor is not None

  def before_run(self, run_context):
    del run_context
    # Fetch the current loss on every training step.
    return SessionRunArgs(
        fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor})

  def after_run(self, run_context, run_values):
    loss = run_values.results[KMeansClustering.LOSS_OP_NAME]
    assert loss is not None
    if self._prev_loss is not None:
      # Relative change; the +1 in the denominator guards small losses.
      delta = abs(loss - self._prev_loss) / (1 + abs(self._prev_loss))
      if delta < self._tolerance:
        run_context.request_stop()
    self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
  """Initializes clusters or waits for cluster initialization."""

  def __init__(self, init_op, is_initialized_op, is_chief):
    # init_op: op performing cluster initialization (run by the chief only).
    # is_initialized_op: boolean op reporting whether centers are ready.
    # is_chief: whether this task is the one allowed to run init_op.
    self._init_op = init_op
    self._is_chief = is_chief
    self._is_initialized_op = is_initialized_op

  def after_create_session(self, session, _):
    # Both ops must belong to the graph this session runs.
    assert self._init_op.graph == ops.get_default_graph()
    assert self._is_initialized_op.graph == self._init_op.graph
    while True:
      try:
        if session.run(self._is_initialized_op):
          break
        elif self._is_chief:
          # The chief performs the initialization itself.
          session.run(self._init_op)
        else:
          # Non-chief workers poll until the chief finishes.
          time.sleep(1)
      except RuntimeError as e:
        # Log and keep looping until initialization is observed.
        logging.info(e)
def _parse_tensor_or_dict(features):
"""Helper function to parse features."""
if isinstance(features, dict):
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
features = array_ops.concat([features[k] for k in keys], 1)
return features
def _kmeans_clustering_model_fn(features, labels, mode, params, config):
  """Model function for KMeansClustering estimator."""
  # K-means is unsupervised; no labels are expected.
  assert labels is None, labels
  # Build the full k-means training graph from the hyperparameters in params.
  (all_scores, model_predictions, losses,
   is_initialized, init_op, training_op) = clustering_ops.KMeans(
       _parse_tensor_or_dict(features),
       params.get('num_clusters'),
       initial_clusters=params.get('training_initial_clusters'),
       distance_metric=params.get('distance_metric'),
       use_mini_batch=params.get('use_mini_batch'),
       mini_batch_steps_per_iteration=params.get(
           'mini_batch_steps_per_iteration'),
       random_seed=params.get('random_seed'),
       kmeans_plus_plus_num_retries=params.get(
           'kmeans_plus_plus_num_retries')).training_graph()
  incr_step = state_ops.assign_add(variables.get_global_step(), 1)
  # Name the total loss so _LossRelativeChangeHook can find it by name.
  loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
  summary.scalar('loss/raw', loss)
  # Tie the global-step increment to every run of the training op.
  training_op = with_dependencies([training_op, incr_step], loss)
  predictions = {
      KMeansClustering.ALL_SCORES: all_scores[0],
      KMeansClustering.CLUSTER_IDX: model_predictions[0],
  }
  eval_metric_ops = {KMeansClustering.SCORES: loss}
  # Cluster centers must be initialized before any training step runs.
  training_hooks = [_InitializeClustersHook(
      init_op, is_initialized, config.is_chief)]
  relative_tolerance = params.get('relative_tolerance')
  if relative_tolerance is not None:
    training_hooks.append(_LossRelativeChangeHook(relative_tolerance))
  return ModelFnOps(
      mode=mode,
      predictions=predictions,
      eval_metric_ops=eval_metric_ops,
      loss=loss,
      train_op=training_op,
      training_hooks=training_hooks)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
  """An Estimator for K-Means clustering."""
  # Distance metrics and initialization strategies, re-exported from
  # clustering_ops for caller convenience.
  SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
  COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
  RANDOM_INIT = clustering_ops.RANDOM_INIT
  KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
  # Keys used in the predictions/eval dictionaries and the loss op name.
  SCORES = 'scores'
  CLUSTER_IDX = 'cluster_idx'
  CLUSTERS = 'clusters'
  ALL_SCORES = 'all_scores'
  LOSS_OP_NAME = 'kmeans_loss'

  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def __init__(self,
               num_clusters,
               model_dir=None,
               initial_clusters=RANDOM_INIT,
               distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
               random_seed=0,
               use_mini_batch=True,
               mini_batch_steps_per_iteration=1,
               kmeans_plus_plus_num_retries=2,
               relative_tolerance=None,
               config=None):
    """Creates a model for running KMeans training and inference.

    Args:
      num_clusters: number of clusters to train.
      model_dir: the directory to save the model results and log files.
      initial_clusters: specifies how to initialize the clusters for training.
        See clustering_ops.kmeans for the possible values.
      distance_metric: the distance metric used for clustering.
        See clustering_ops.kmeans for the possible values.
      random_seed: Python integer. Seed for PRNG used to initialize centers.
      use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
        full batch.
      mini_batch_steps_per_iteration: number of steps after which the updated
        cluster centers are synced back to a master copy. See clustering_ops.py
        for more details.
      kmeans_plus_plus_num_retries: For each point that is sampled during
        kmeans++ initialization, this parameter specifies the number of
        additional points to draw from the current distribution before selecting
        the best. If a negative value is specified, a heuristic is used to
        sample O(log(num_to_sample)) additional points.
      relative_tolerance: A relative tolerance of change in the loss between
        iterations. Stops learning if the loss changes less than this amount.
        Note that this may not work correctly if use_mini_batch=True.
      config: See Estimator
    """
    # All hyperparameters are forwarded to the model_fn through `params`.
    params = {}
    params['num_clusters'] = num_clusters
    params['training_initial_clusters'] = initial_clusters
    params['distance_metric'] = distance_metric
    params['random_seed'] = random_seed
    params['use_mini_batch'] = use_mini_batch
    params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration
    params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries
    params['relative_tolerance'] = relative_tolerance
    super(KMeansClustering, self).__init__(
        model_fn=_kmeans_clustering_model_fn,
        params=params,
        model_dir=model_dir,
        config=config)

  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def predict_cluster_idx(self, input_fn=None):
    """Yields predicted cluster indices."""
    key = KMeansClustering.CLUSTER_IDX
    results = super(KMeansClustering, self).predict(
        input_fn=input_fn, outputs=[key])
    for result in results:
      yield result[key]

  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def score(self, input_fn=None, steps=None):
    """Predict total sum of distances to nearest clusters.

    Note that this function is different from the corresponding one in sklearn
    which returns the negative of the sum of distances.

    Args:
      input_fn: see predict.
      steps: see predict.

    Returns:
      Total sum of distances to nearest clusters.
    """
    return np.sum(
        self.evaluate(
            input_fn=input_fn, steps=steps)[KMeansClustering.SCORES])

  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def transform(self, input_fn=None, as_iterable=False):
    """Transforms each element to distances to cluster centers.

    Note that this function is different from the corresponding one in sklearn.
    For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
    EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
    distance.

    Args:
      input_fn: see predict.
      as_iterable: see predict

    Returns:
      Array with same number of rows as x, and num_clusters columns, containing
      distances to the cluster centers.
    """
    key = KMeansClustering.ALL_SCORES
    results = super(KMeansClustering, self).predict(
        input_fn=input_fn,
        outputs=[key],
        as_iterable=as_iterable)
    if not as_iterable:
      return results[key]
    else:
      return results

  @deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
  def clusters(self):
    """Returns cluster centers."""
    return super(KMeansClustering, self).get_variable_value(self.CLUSTERS)
|
{
"content_hash": "40da53676d389774fdf3369eb52c9b9f",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 80,
"avg_line_length": 39.00763358778626,
"alnum_prop": 0.7011741682974559,
"repo_name": "alistairlow/tensorflow",
"id": "992b804f59ecd88fedc2fba10d3079f93c4fe83d",
"size": "10909",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/kmeans.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8572"
},
{
"name": "C",
"bytes": "314472"
},
{
"name": "C++",
"bytes": "34078509"
},
{
"name": "CMake",
"bytes": "212405"
},
{
"name": "Go",
"bytes": "1005950"
},
{
"name": "Java",
"bytes": "533607"
},
{
"name": "Jupyter Notebook",
"bytes": "1940739"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "44807"
},
{
"name": "Objective-C",
"bytes": "12460"
},
{
"name": "Objective-C++",
"bytes": "94483"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6186"
},
{
"name": "Perl 6",
"bytes": "1360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "29856457"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "401880"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
import horizon
class SystemPanels(horizon.PanelGroup):
    """Panel group listing the system-level admin panels."""
    slug = "admin"
    name = _("System")
    panels = ('overview', 'metering', 'hypervisors', 'aggregates',
              'instances', 'volumes', 'flavors', 'images',
              'networks', 'routers', 'info')
class IdentityPanels(horizon.PanelGroup):
    """Panel group listing the identity-related admin panels."""
    slug = "identity"
    name = _("Identity")
    panels = ('domains', 'projects', 'users', 'groups', 'roles')
class Admin(horizon.Dashboard):
    """Admin dashboard grouping the System and Identity panel groups."""
    name = _("Admin")
    slug = "admin"
    panels = (SystemPanels, IdentityPanels)
    default_panel = 'overview'
    # Only users holding the OpenStack admin role can access this dashboard.
    permissions = ('openstack.roles.admin',)

horizon.register(Admin)
|
{
"content_hash": "4fe82082464e10301aea51ca17a42416",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 66,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.6335227272727273,
"repo_name": "aaronorosen/horizon-congress",
"id": "5eb41ef7cfeb3bfb302750b2f63d7a7cc216f791",
"size": "1309",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/dashboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "282571"
},
{
"name": "JavaScript",
"bytes": "697632"
},
{
"name": "Python",
"bytes": "3559404"
},
{
"name": "Shell",
"bytes": "15387"
}
],
"symlink_target": ""
}
|
from pyscf import gto
from pyscf import scf
'''
Density fitting method by decorating the scf object with scf.density_fit function.
There is no flag to control the program to do density fitting for 2-electron
integration. The way to call density fitting is to decorate the existed scf
object with scf.density_fit function.
NOTE scf.density_fit function generates a new object, which works exactly the
same way as the regular scf method. The density fitting scf object is an
independent object to the regular scf object which is to be decorated. By
doing so, density fitting can be applied anytime, anywhere in your script
without affecting the exsited scf object.
See also:
examples/df/00-with_df.py
examples/df/01-auxbasis.py
'''
mol = gto.Mole()
mol.build(
verbose = 0,
atom = '''8 0 0. 0
1 0 -0.757 0.587
1 0 0.757 0.587''',
basis = 'ccpvdz',
)
mf = scf.density_fit(scf.RHF(mol))
energy = mf.kernel()
print('E = %.12f, ref = -76.026744737355' % energy)
#
# Stream style: calling .density_fit method to return a DF-SCF object.
#
mf = scf.RHF(mol).density_fit()
energy = mf.kernel()
print('E = %.12f, ref = -76.026744737355' % energy)
#
# By default optimal auxiliary basis (if possible) or even-tempered gaussian
# functions are used fitting basis. You can assign with_df.auxbasis to change
# the change the fitting basis.
#
mol.spin = 1
mol.charge = 1
mol.build(0, 0)
mf = scf.UKS(mol).density_fit()
mf.with_df.auxbasis = 'cc-pvdz-jkfit'
energy = mf.kernel()
print('E = %.12f, ref = -75.390366559552' % energy)
# Switch off density fitting
mf.with_df = False
energy = mf.kernel()
print('E = %.12f, ref = %.12f' % (energy, scf.UKS(mol).kernel()))
|
{
"content_hash": "082ee916cae05940380711484acc6204",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 82,
"avg_line_length": 29.53448275862069,
"alnum_prop": 0.699357851722125,
"repo_name": "sunqm/pyscf",
"id": "e80263884ebd4aa2b03b96dfa6c514202d5acb59",
"size": "1784",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/scf/20-density_fitting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2805171"
},
{
"name": "CMake",
"bytes": "19597"
},
{
"name": "Common Lisp",
"bytes": "40515"
},
{
"name": "Dockerfile",
"bytes": "447"
},
{
"name": "Makefile",
"bytes": "6797"
},
{
"name": "Python",
"bytes": "19630497"
},
{
"name": "Roff",
"bytes": "429"
},
{
"name": "Shell",
"bytes": "6564"
}
],
"symlink_target": ""
}
|
import saga
import os
import sys
import re
import random
import time
import pwd
def submit_remote_job(mod, username, params):
    """
    Fork, drop privileges to *username*, and submit a SLURM job via SAGA.

    The submission runs in a forked child so that os.setuid() does not change
    the identity of the server process; the child writes the job details back
    to the parent over a pipe as "jid,stdout_path,stderr_path".

    Parameters (Python 2 code, so no annotations):
      mod      -- job description object; assumed to provide .name and .cmd
                  attributes -- TODO confirm against the caller.
      username -- local account to run the job as; must exist in the
                  password database (looked up via pwd.getpwnam).
      params   -- extra arguments for the executable; forwarded only when
                  non-empty.

    Returns (jid, jout, jerr): the SLURM job id and the absolute paths of the
    job's stdout and stderr files, all as strings.
    """
    read_pipe, write_pipe = os.pipe()
    pid = os.fork()
    if pid:
        # Parent: wait for the child, then read the comma-separated job
        # details it wrote to the pipe.
        os.close(write_pipe)
        r = os.fdopen(read_pipe)
        os.waitpid(pid,0)
        output = r.read()
        jid, jout, jerr = [ o for o in output.split(',') ]
        return jid, jout, jerr
    else:
        # Child: switch to the target user before talking to SLURM.
        # NOTE(review): os.setuid() only succeeds when the parent runs with
        # sufficient privileges (typically root) -- confirm deployment model.
        print os.getuid()
        os.close(read_pipe)
        w = os.fdopen(write_pipe, 'w')
        euid = int(pwd.getpwnam(username)[2])
        os.setuid(euid)
        print os.getuid()
        ctx = saga.Context("UserPass")
        ctx.user_id = username
        session = saga.Session()
        session.add_context(ctx)
        service = saga.job.Service("slurm://localhost",session=session)
        # job info from database...
        job_id = 'saga.'+str(random.randint(10000,90000))
        job = saga.job.Description()
        job.working_directory = "/lustre/janus_scratch/%s/webapi"%username
        # Look up job information
        job.wall_time_limit = 5 # minutes
        job.total_cpu_count = 12
        job.queue = "debug"
        job.output = mod.name + '.' + job_id + '.out'
        job.error = mod.name + '.' + job_id + '.err'
        job.executable = mod.cmd
        if params != {}:
            job.arguments = params
        touchjob = service.create_job(job)
        touchjob.run()
        # The numeric SLURM id is embedded as "[<number>]" in the SAGA job id.
        jid = re.findall(r'\[([0-9]+)\]', touchjob.get_id())[0]
        jout = os.path.join(job.working_directory,job.output)
        jerr = os.path.join(job.working_directory,job.error)
        service.close()
        print jid,jout,jerr
        w.write("%s,%s,%s"%(jid,jout,jerr))
        w.close()
        # _exit() skips atexit handlers and stdio cleanup inherited from the
        # parent process -- intentional for a forked child.
        os._exit(0)
def get_job_ouput(job):
    """
    Read and return the contents of a job's stdout file.

    NOTE(review): the function name is misspelled ("ouput") but is kept as-is
    because external callers may depend on it.  The commented-out lines are
    the remains of an earlier SFTP-based remote-copy implementation; the
    current code assumes job.output is a locally readable path -- TODO
    confirm against the caller.
    """
    # ctx = saga.Context("UserPass")
    # ctx.user_id = username
    # NOTE(review): this Session is created but never used once the SFTP path
    # was commented out -- presumably safe to delete after confirmation.
    session = saga.Session()
    # session.add_context(ctx)
    print job.output
    # output = 'sftp://localhost' + job.output
    # tmp_ = '/Users/mlunacek/projects/myhat/app/tmp/tmp.output'
    # target = 'file://localhost' + tmp_
    target = job.output
    # out = saga.filesystem.File(output, session=session)
    # if out is None:
    # return '\nFile does not exist yet\n'
    # out.copy(target)
    # while not os.path.exists(tmp_):
    # time.sleep(1)
    with open(target, 'r') as infile:
        data = infile.read()
    # os.remove(tmp_)
    return data
    #return jsonify( {'success': 'True'} )
    # except:
    # print 'fail'
    #return jsonify( {'success': 'False'} )
# print "Job State : %s" % (touchjob.state)
# print "Exitcode : %s" % (touchjob.exit_code)
# # print "output : %s" % (touchjob.output)
# print 'id : %s' % (touchjob.get_id())
# print 'output : %s' % (job.output)
# print 'error : %s' % (job.error)
# print ' '.join(job.arguments)
# print job.executable
# print re.findall(r'\[([0-9]+)\]', touchjob.get_id())[0]
# print os.path.join(job.working_directory,job.output)
# def execute_remote_command(user_id, job_name):
# ctx = saga.Context("ssh")
# ctx.user_id = user_id
# session = saga.Session()
# session.add_context(ctx)
# service = saga.job.Service("ssh://login",session=session)
# job = saga.job.Description()
# job.working_directory = "$HOME"
# # Look up job information
# job.output = user_id + '.' + job_name + '.out'
# job.error = user_id + '.' + job_name + '.err'
# job.executable = 'showq'
# job.arguments = ['-u', ctx.user_id]
# touchjob = service.create_job(job)
# print "Job State : %s" % (touchjob.state)
# print "Exitcode : %s" % (touchjob.exit_code)
# touchjob.run()
# print "Job State : %s" % (touchjob.state)
# print "Exitcode : %s" % (touchjob.exit_code)
# touchjob.wait()
# print "Job State : %s" % (touchjob.state)
# print "Exitcode : %s" % (touchjob.exit_code)
# dir = saga.namespace.Directory("sftp://login/home/molu8455")
# data = dir.open('examplejob.out')
# print data
# print data.get_size()
# data.close()
# dir.close()
# outfilesource = 'sftp://login/home/molu8455/' + job.output
# tmp_ = '/Users/mlunacek/projects/myhat/app/tmp/' + job.output
# outfiletarget = 'file://localhost/' + tmp_
# out = saga.filesystem.File(outfilesource, session=session)
# out.copy(outfiletarget)
# while not os.path.exists(tmp_):
# time.sleep(1)
# with open(tmp_, 'r') as infile:
# data = infile.read()
# print data
# service.close()
# submit_remote_job('molu8455', 'rengers')
# time.sleep(10)
# execute_remote_command('molu8455', 'showq')
|
{
"content_hash": "59be7560275584307502d2200d5a5ce4",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 74,
"avg_line_length": 27.39325842696629,
"alnum_prop": 0.5490155865463495,
"repo_name": "ResearchComputing/myhat",
"id": "ed4b0e27bdc81750b64802ea98125f8dd42d06bf",
"size": "4876",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/remote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1246"
},
{
"name": "Python",
"bytes": "21529"
}
],
"symlink_target": ""
}
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
# Required for ReadTheDocs
from functools import wraps # pylint: disable=unused-import
import json
import os
import sys
#import logging
from flask import Flask, request
from flask_restful import Api, Resource
from dmp import dmp
from reader.hdf5_reader import hdf5_reader
from mg_rest_util.mg_auth import authorized
APP = Flask(__name__)
#logging.basicConfig()
def help_usage(error_message, status_code,
               parameters_required, parameters_provided):
    """
    Usage Help

    Describe the basic usage patterns for the GET functions of the app,
    including any parameters that were provided by the user along with the
    available parameters that are required/optional.

    Parameters
    ----------
    error_message : str | None
        Error message detailing what has gone wrong. If there are no errors
        then None should be passed.
    status_code : int
        HTTP status code.
    parameters_required : list
        List of the text names for each parameter required by the end point.
        An empty list should be provided if there are no parameters required.
    parameters_provided : dict
        Dictionary of the parameters and the matching values provided by the
        user. An empty dictionary should be passed if there were no
        parameters provided by the user.

    Returns
    -------
    dict
        JSON-serialisable status message to display to the user
    """
    # Catalogue of every parameter any endpoint accepts; each entry is
    # [human-readable label, type description, REQUIRED|OPTIONAL].
    parameters = {
        'by_user': ['By User [0|1]', 'int', 'OPTIONAL'],
        'file_id': ['File ID', 'str', 'REQUIRED'],
        'region': ['Chromosome:Start:End', 'str:int:int', 'OPTIONAL'],
        'file_type': ['File type (bb, bw, tsv, fasta, fastq, ...)', 'str', 'OPTIONAL'],
        'data_type': ['Data type (chip-seq, rna-seq, wgbs, ...)', 'str', 'OPTIONAL'],
        'assembly': ['Assembly', 'str', 'REQUIRED'],
        'chrom': ['Chromosome', 'str', 'OPTIONAL'],
        'start': ['Start', 'int', 'OPTIONAL'],
        'end': ['End', 'int', 'OPTIONAL'],
        'type': ['add_meta|remove_meta', 'str', 'OPTIONAL'],
        'output': [
            "Default is None. State 'original' to return the original whole file",
            'str', 'OPTIONAL'],
    }
    # Only describe the parameters this particular endpoint cares about.
    used_param = {k: parameters[k] for k in parameters_required if k in parameters}

    message = {
        'usage': {
            '_links': {
                '_self': request.base_url,
                '_parent': request.url_root + 'mug/api/dmp'
            },
            'parameters': used_param
        },
        'status_code': status_code
    }
    if parameters_provided:
        message['provided_parameters'] = parameters_provided
    if error_message is not None:
        message['error'] = error_message
    return message
def _get_dm_api(user_id=None):
    """
    Create a DM API handle for the given user.

    The literal user id 'test', or a missing mongodb.cnf file next to this
    module, selects the DM API's test backend; otherwise the live MongoDB
    configuration is used.

    Parameters
    ----------
    user_id : str | None
        User identifier; only the literal value 'test' changes behaviour.

    Returns
    -------
    dmp
        Configured DM API instance.
    """
    cnf_loc = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'mongodb.cnf')
    if user_id == 'test':
        print("TEST USER DM API")
        return dmp(cnf_loc, test=True)
    # Idiomatic truth test instead of the original `is True` comparison.
    if os.path.isfile(cnf_loc):
        print("LIVE DM API")
        return dmp(cnf_loc)
    print("TEST DM API")
    return dmp(cnf_loc, test=True)
class EndPoints(Resource):
    """
    Class to handle the http requests for returning information about the end
    points
    """

    def get(self):
        """
        GET list all end points

        List of all of the end points for the current service.

        Example
        -------
        .. code-block:: none
           :linenos:

           curl -X GET http://localhost:5002/mug/api/dmp
        """
        root = request.url_root
        links = {'_self': request.base_url}
        links['_getFile'] = root + 'mug/api/dmp/file'
        links['_getFiles'] = root + 'mug/api/dmp/files'
        links['_getFileHistory'] = root + 'mug/api/dmp/file_history'
        links['_ping'] = root + 'mug/api/dmp/ping'
        links['_parent'] = root + 'mug/api'
        return {'_links': links}
class FileMeta(Resource):
    """
    Class to handle the http requests for retrieving the data from a file.
    This class is able to handle big[Bed|Wig] file and serve back the matching
    region in the relevant format. It is also possible to stream back the whole
    file of any type for use in other tools.
    """

    @authorized
    def get(self, user_id):
        """
        GET List values from the file

        Call to obtain regions from the compressed index files for Bed, Wig
        and TSV based file formats that contain genomic information.
        Other files can be streamed.

        Parameters
        ----------
        file_id : str
            Identifier of the file to retrieve data from

        Returns
        -------
        file
            Returns a file formatted in the relevant file type with any
            genomic features matching the format of the file.

        Examples
        --------
        .. code-block:: none
           :linenos:

           curl -X GET http://localhost:5002/mug/api/dmp/file_meta?file_id=test_file
        """
        file_id = request.args.get('file_id')
        public = request.args.get('public')

        # Display the parameters available when the only required one is
        # missing.  (With a single required parameter the original "all
        # missing" and "some missing" branches collapse into one check.)
        if file_id is None:
            return help_usage(None, 200, ['file_id'], {})

        if user_id is not None:
            selected_user_id = user_id['user_id']
            # A 'public' query switches the lookup to the public namespace.
            if public is not None:
                selected_user_id = user_id['public_id']
            dmp_api = _get_dm_api(selected_user_id)
            return dmp_api.get_file_by_id(selected_user_id, file_id)

        return help_usage('Forbidden', 403, ['file_id'], {})

    @authorized
    def post(self, user_id):
        """
        POST Add a new file to the DM API

        Parameters
        ----------
        This should be passed as the data block with the HTTP request:

        json : dict
            user_id : str
                User identifier
            file_path : str
                Location of the file
            file_type : str
                Tag for the file extension. The valid parameters are defined
                within the DM API documentation (mg-dm-api)
            data_type : str
                What type of experiment is this data from. Options include
                RNA-seq, ChIP-seq, MNase-seq, WGBS, HiC
            taxon_id : int
                Taxonomic identifier for a species (Human = 9606)
            compressed
                Options of the compression level of the file. If file is not
                compressed then do not include this parameter
            source_id : list
                List of file_ids that were used for generating this file.
                If this is the root file then do not include this parameter.
            meta_data : dict
                Hash array describing the relevant metadata for the file,
                including the assembly if relevant

        Returns
        -------
        file_id
            Returns the id of the stored file

        Example
        -------
        .. code-block:: none
           :linenos:

           echo '{
               "data_type": "RNA-seq",
               "file_type": "fastq",
               "meta_data": {
                   "assembly" : "GRCh38"
               },
               "taxon_id": 9606,
               "file_path": "/tmp/test/path/RNA-seq/testing_123.fastq",
               "parent_dir": "/tmp/test/path/RNA-seq/",
               "size": 64000
           }' > data.json

           curl -X POST
               -H "Content-Type: application/json"
               -H "Authorization: Bearer teststring"
               -d @data.json http://localhost:5002/mug/api/dmp/file_meta
        """
        if user_id is not None:
            dmp_api = _get_dm_api(user_id['user_id'])
            # Parse the payload exactly once (the original parsed it twice).
            new_track = json.loads(request.data)

            # Missing keys default to None so the presence check below can
            # report them via the usage message.
            file_path = new_track.get('file_path')
            file_type = new_track.get('file_type')
            size = new_track.get('size')
            parent_dir = new_track.get('parent_dir')
            data_type = new_track.get('data_type')
            taxon_id = new_track.get('taxon_id')
            source_id = new_track.get('source_id')
            meta_data = new_track.get('meta_data')
            compressed = new_track.get('compressed')

            params_required = ['user_id', 'file_path', 'file_type', 'data_type',
                               'taxon_id', 'source_id', 'meta_data']
            params = [user_id, file_path, file_type, data_type, taxon_id,
                      source_id, meta_data]

            # ERROR - one of the required parameters is NoneType
            if sum([x is not None for x in params]) != len(params):
                return help_usage('MissingParameters', 400, params_required,
                                  user_id)

            return dmp_api.set_file(
                user_id['user_id'],
                file_path,
                file_type,
                size,
                parent_dir,
                data_type,
                taxon_id,
                compressed,
                source_id,
                meta_data
            )

        return help_usage('Forbidden', 403, [], {})

    @authorized
    def put(self, user_id):
        """
        PUT Update meta data

        Request to update the meta data for a given file. This allows for the
        adding or removal of key-value pairs from the meta data.

        Parameters
        ----------
        This should be passed as the data block with the HTTP request:

        json : dict
            user_id : str
                User identifier
            file_id : str
                ID of the stored file
            type : str
                Options are 'add_meta', 'remove_meta' or 'modify_column' to
                modify the key-value pairs for the file entry. Minimum sets
                of pairs are defined within the DM API (mg-dm-api)
            meta_data : dict
                Hash array describing the relevant metadata key-value pairs
                that are to be added

        Returns
        -------
        file_id
            Returns the id of the stored file

        Example
        -------
        To add a new key value pair:

        .. code-block:: none
           :linenos:

           echo '{
               "type":"add_meta",
               "file_id":"<file_id>",
               "meta_data":{"citation":"PMID:1234567890"}
           }' > data.json

           curl -X PUT
               -H "Content-Type: application/json"
               -H "Authorization: Bearer teststring"
               -d @data.json http://localhost:5002/mug/api/dmp/file_meta

        To remove a key value pair:

        .. code-block:: none
           :linenos:

           echo '{
               "type":"remove_meta",
               "file_id":"<file_id>",
               "meta_data":["citation"]
           }' > data.json

           curl -X PUT
               -H "Content-Type: application/json"
               -H "Authorization: Bearer teststring"
               -d @data.json http://localhost:5002/mug/api/dmp/file_meta

        To modify a column value (file size):

        .. code-block:: none
           :linenos:

           echo '{
               "type":"modify_column",
               "file_id":"<file_id>",
               "key":"<column_key>",
               "value":"<new_value>"
           }' > data.json

           curl -X PUT
               -H "Content-Type: application/json"
               -H "Authorization: Bearer teststring"
               -d @data.json http://localhost:5002/mug/api/dmp/file_meta
        """
        if user_id is not None:
            dmp_api = _get_dm_api(user_id['user_id'])
            data_put = json.loads(request.data)
            file_id = data_put['file_id']
            params_required = ['user_id', 'file_id', 'type']

            # BUG FIX: `result` was unbound (UnboundLocalError on return)
            # when the add/remove loops received an empty meta_data payload.
            result = None
            if data_put['type'] == 'add_meta':
                for k in data_put['meta_data']:
                    result = dmp_api.add_file_metadata(
                        user_id['user_id'], file_id, k, data_put['meta_data'][k])
            elif data_put['type'] == 'remove_meta':
                for k in data_put['meta_data']:
                    result = dmp_api.remove_file_metadata(
                        user_id['user_id'], file_id, k)
            elif data_put['type'] == 'modify_column':
                result = dmp_api.modify_column(
                    user_id['user_id'], file_id, data_put['key'], data_put['value']
                )
            else:
                return help_usage('MissingMetaDataParameters', 400, params_required,
                                  {'type': ['add_meta', 'remove_meta', 'modify_column']})
            return result
        return help_usage('Forbidden', 403, [], {})

    @authorized
    def delete(self, user_id):
        """
        DELETE Remove a file from the DM API

        Function to remove the file from the DM API. This will result in the
        file being removed from the records and therefore not available within
        the VRE or from the RESTful interface

        Parameters
        ----------
        This should be passed as the data block with the HTTP request:

        json : dict
            file_id : str
                ID of the stored file

        Example
        -------
        .. code-block:: none
           :linenos:

           echo '{
               "file_id":"<file_id>"
           }' > data.json

           curl -X DELETE
               -H "Content-Type: application/json"
               -H "Authorization: Bearer teststring"
               -d @data.json http://localhost:5002/mug/api/dmp/file_meta
        """
        if user_id is not None:
            dmp_api = _get_dm_api(user_id['user_id'])
            params_required = ['user_id', 'file_id']
            data_delete = json.loads(request.data)
            # ROBUSTNESS: .get avoids a KeyError (HTTP 500) when the payload
            # omits file_id entirely; missing and falsy both yield the 400.
            if not data_delete.get('file_id'):
                return help_usage('MissingMetaDataParameters', 400,
                                  params_required, {})
            return dmp_api.remove_file(user_id['user_id'], data_delete['file_id'])
        return help_usage('Forbidden', 403, [], {})
class Files(Resource):
    """
    Class to handle the http requests for retrieving the list of files for a
    given user handle
    """

    @authorized
    def get(self, user_id):
        """
        GET List user tracks

        Function to list the files that are owned by a single user. It is
        possible to filter by assembly, file or data type, or to find track
        files that contain data for a given region

        Parameters
        ----------
        assembly : str
            Genome assembly accession
        region : str
            <chromosome>:<start_pos>:<end_pos>
        file_type : str
        data_type : str

        Example
        -------
        .. code-block:: none
           :linenos:

           curl -X GET http://localhost:5002/mug/api/dmp/files?assembly=<assembly>
        """
        if user_id is not None:
            region = request.args.get('region')
            assembly = request.args.get('assembly')
            file_type = request.args.get('file_type')
            data_type = request.args.get('data_type')
            by_user = request.args.get('by_user')
            public = request.args.get('public')

            # (The original "all parameters missing" check tested only
            # user_id, which is known non-None here - dead branch removed.)
            selected_user_id = user_id['user_id']
            if public is not None:
                selected_user_id = user_id['public_id']

            dmp_api = _get_dm_api(selected_user_id)

            files = []
            if region is not None and assembly is not None:
                files = self._get_all_files_region(
                    dmp_api, selected_user_id, assembly, region)
            elif file_type is not None and assembly is not None:
                # NOTE(review): file_type is never forwarded to the DM API -
                # confirm the expected signature of get_files_by_file_type
                # against mg-dm-api.
                files = dmp_api.get_files_by_file_type(selected_user_id)
            elif data_type is not None and assembly is not None:
                # NOTE(review): data_type is never forwarded either - verify.
                files = dmp_api.get_files_by_data_type(selected_user_id)
            elif assembly is not None:
                files = dmp_api.get_files_by_assembly(selected_user_id, assembly)
            elif by_user is not None and int(by_user) == 1:
                files = dmp_api.get_files_by_user(selected_user_id)
            else:
                return help_usage(
                    None, 200,
                    ['region', 'assembly', 'file_type', 'data_type', 'by_user'], {})

            return {
                '_links': {
                    '_self': request.base_url,
                    '_parent': request.url_root + 'mug/api/dmp'
                },
                'files': files
            }

        return help_usage(
            None, 200,
            ['region', 'assembly', 'file_type', 'data_type', 'by_user'], {})

    def _get_all_files_region(self, dmp_api, user_id, assembly, region):
        """
        List files whose indexed entries overlap the given genomic region.

        Parameters
        ----------
        dmp_api : dmp
            DM API handle.
        user_id : str
            Already-resolved user identifier string (the caller passes
            selected_user_id).
        assembly : str
        region : str
            <chromosome>:<start>:<end>
        """
        files = []
        chrom, start, end = region.split(':')
        # BUG FIX: user_id is already the resolved id string, so the original
        # user_id['user_id'] indexing raised TypeError at runtime.
        h5_idx = hdf5_reader(user_id)
        potential_files = h5_idx.get_regions(assembly, chrom, int(start), int(end))
        # NOTE(review): get_file_by_id is called with a single argument here
        # but with (user, file_id) in FileMeta.get - confirm against
        # mg-dm-api.  Keys 1 and 1000 are presumably the two index
        # resolutions - verify.
        for f_in in potential_files[1]:
            files.append(dmp_api.get_file_by_id(f_in))
        for f_in in potential_files[1000]:
            files.append(dmp_api.get_file_by_id(f_in))
        return files
class FileHistory(Resource):
    """
    Class to handle the http requests for retrieving the list of file history
    of a given file for a given user handle
    """

    @authorized
    def get(self, user_id):
        """
        GET the list of files that were used for generating the defined file

        Example
        -------
        .. code-block:: none
           :linenos:

           curl -X GET http://localhost:5002/mug/api/dmp/file_history?file_id=<file_id>
        """
        if user_id is not None:
            dmp_api = _get_dm_api(user_id['user_id'])
            file_id = request.args.get('file_id')

            # ERROR - the required parameter is NoneType.  (The original
            # "all parameters missing" branch was unreachable because
            # user_id is known non-None here; also pass the parameter name
            # so the usage message actually lists it.)
            if file_id is None:
                return help_usage('MissingParameters', 400, ['file_id'],
                                  {'file_id': file_id})

            files = dmp_api.get_file_history(user_id['user_id'], file_id)
            return {
                '_links': {
                    '_self': request.base_url,
                    '_parent': request.url_root + 'mug/api/dmp'
                },
                'history_files': files
            }
        return help_usage('Forbidden', 403, [], {})
class Ping(Resource):
    """
    Class to handle the http requests to ping a service
    """

    def get(self):
        """
        GET Status

        List the current status of the service along with the relevant
        information about the version.

        Example
        -------
        .. code-block:: none
           :linenos:

           curl -X GET http://localhost:5002/mug/api/dmp/ping
        """
        # Imported lazily so merely loading this module does not require the
        # release metadata module.
        import rest.release as release
        links = {
            '_self': request.base_url,
            '_parent': request.url_root + 'mug/api/dmp'
        }
        return {
            "status": "ready",
            "version": release.__version__,
            "author": release.__author__,
            "license": release.__license__,
            "name": release.__rest_name__,
            "description": release.__description__,
            "_links": links
        }
#
# For the services where there needs to be an extra layer (adjacency lists),
# then there needs to be a way of forwarding for this. But the majority of
# things can be redirected to the raw files for use as a track.
#
# NOTE(review): stashing the auth metadata path on the sys module is a hack -
# presumably mg_rest_util.mg_auth reads sys._auth_meta_json; confirm before
# refactoring.
sys._auth_meta_json = os.path.dirname(os.path.realpath(__file__)) + '/auth_meta.json'
# Define the URIs and their matching methods
REST_API = Api(APP)
# List the available end points for this service
REST_API.add_resource(EndPoints, "/mug/api/dmp", endpoint='dmp_root')
# Get the data for a specific track
REST_API.add_resource(FileMeta, "/mug/api/dmp/file_meta", endpoint='file_meta')
# List the available species for which there are datasets available
REST_API.add_resource(Files, "/mug/api/dmp/files", endpoint='files')
# List file history
REST_API.add_resource(FileHistory, "/mug/api/dmp/file_history", endpoint='file_history')
# Service ping
REST_API.add_resource(Ping, "/mug/api/dmp/ping", endpoint='dmp-ping')
# Initialise the server (use_reloader=False avoids double-registration under
# the Werkzeug reloader)
if __name__ == "__main__":
    APP.run(port=5002, debug=True, use_reloader=False)
|
{
"content_hash": "85f6f53c0fe29661131fcef2a6b9695d",
"timestamp": "",
"source": "github",
"line_count": 675,
"max_line_length": 95,
"avg_line_length": 33.07111111111111,
"alnum_prop": 0.5365318281592976,
"repo_name": "Multiscale-Genomics/mg-rest-dm",
"id": "894c69cd7090330ca2b0901d622320c57b61a41e",
"size": "22323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34913"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.