text stringlengths 4 1.02M | meta dict |
|---|---|
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    resource_group_name: str, azure_dev_ops_connector_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists AzureDevOps org resources under a connector.

    Pops ``headers``, ``params``, ``api_version`` and ``template_url`` out of
    *kwargs*; anything left over is forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # api-version may arrive either as a keyword argument or as a query parameter.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Resolve the URL template, then substitute the validated path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        azureDevOpsConnectorName=_SERIALIZER.url(
            "azure_dev_ops_connector_name", azure_dev_ops_connector_name, "str"
        ),
    )

    # Query parameters and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_get_request(
    resource_group_name: str,
    azure_dev_ops_connector_name: str,
    azure_dev_ops_org_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a single monitored AzureDevOps org resource.

    Pops ``headers``, ``params``, ``api_version`` and ``template_url`` out of
    *kwargs*; anything left over is forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # api-version may arrive either as a keyword argument or as a query parameter.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Resolve the URL template, then substitute the validated path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs/{azureDevOpsOrgName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        azureDevOpsConnectorName=_SERIALIZER.url(
            "azure_dev_ops_connector_name", azure_dev_ops_connector_name, "str"
        ),
        azureDevOpsOrgName=_SERIALIZER.url("azure_dev_ops_org_name", azure_dev_ops_org_name, "str"),
    )

    # Query parameters and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_create_or_update_request(
    resource_group_name: str,
    azure_dev_ops_connector_name: str,
    azure_dev_ops_org_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or updates an AzureDevOps org resource.

    Pops ``headers``, ``params``, ``api_version``, ``content_type`` and
    ``template_url`` out of *kwargs*; anything left over (e.g. ``json`` /
    ``content`` body arguments) is forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # api-version may arrive either as a keyword argument or as a query parameter;
    # likewise content_type may come as a keyword or a pre-set header.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # Resolve the URL template, then substitute the validated path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs/{azureDevOpsOrgName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        azureDevOpsConnectorName=_SERIALIZER.url(
            "azure_dev_ops_connector_name", azure_dev_ops_connector_name, "str"
        ),
        azureDevOpsOrgName=_SERIALIZER.url("azure_dev_ops_org_name", azure_dev_ops_org_name, "str"),
    )

    # Query parameters and headers; Content-Type is only set when a body is expected.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_update_request(
    resource_group_name: str,
    azure_dev_ops_connector_name: str,
    azure_dev_ops_org_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PATCH request that updates an AzureDevOps org resource.

    Pops ``headers``, ``params``, ``api_version``, ``content_type`` and
    ``template_url`` out of *kwargs*; anything left over (e.g. ``json`` /
    ``content`` body arguments) is forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # api-version may arrive either as a keyword argument or as a query parameter;
    # likewise content_type may come as a keyword or a pre-set header.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # Resolve the URL template, then substitute the validated path arguments.
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs/{azureDevOpsOrgName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        azureDevOpsConnectorName=_SERIALIZER.url(
            "azure_dev_ops_connector_name", azure_dev_ops_connector_name, "str"
        ),
        azureDevOpsOrgName=_SERIALIZER.url("azure_dev_ops_org_name", azure_dev_ops_org_name, "str"),
    )

    # Query parameters and headers; Content-Type is only set when a body is expected.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=url, params=params, headers=headers, **kwargs)
class AzureDevOpsOrgOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.securitydevops.MicrosoftSecurityDevOps`'s
        :attr:`azure_dev_ops_org` attribute.
    """

    # Exposed so callers can reach the models namespace through the operations group.
    models = _models

    def __init__(self, *args, **kwargs):
        # The generated client injects (client, config, serializer, deserializer)
        # either positionally or by keyword; accept both forms.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(
        self, resource_group_name: str, azure_dev_ops_connector_name: str, **kwargs: Any
    ) -> Iterable["_models.AzureDevOpsOrg"]:
        """list.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param azure_dev_ops_connector_name: Name of the AzureDevOps Connector. Required.
        :type azure_dev_ops_connector_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AzureDevOpsOrg or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.securitydevops.models.AzureDevOpsOrg]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # NOTE(review): unlike the other operations, headers here are not wrapped in
        # case_insensitive_dict — confirm this is intentional generator output.
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AzureDevOpsOrgListResponse]

        # Map well-known HTTP failures to the corresponding azure.core exceptions;
        # callers may extend/override via the "error_map" keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated request. Subsequent pages: follow
            # the service-provided next_link, re-applying the client api-version.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    azure_dev_ops_connector_name=azure_dev_ops_connector_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                # NOTE(review): redundant with the "GET" constructor argument above,
                # but kept as emitted by the code generator.
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, iterator-of-items).
            deserialized = self._deserialize("AzureDevOpsOrgListResponse", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page, raising a typed error on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs"}  # type: ignore

    @distributed_trace
    def get(
        self, resource_group_name: str, azure_dev_ops_connector_name: str, azure_dev_ops_org_name: str, **kwargs: Any
    ) -> _models.AzureDevOpsOrg:
        """Returns a monitored AzureDevOps Org resource for a given ID.

        Returns a monitored AzureDevOps Org resource for a given ID.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param azure_dev_ops_connector_name: Name of the AzureDevOps Connector. Required.
        :type azure_dev_ops_connector_name: str
        :param azure_dev_ops_org_name: Name of the AzureDevOps Org. Required.
        :type azure_dev_ops_org_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AzureDevOpsOrg or the result of cls(response)
        :rtype: ~azure.mgmt.securitydevops.models.AzureDevOpsOrg
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # NOTE(review): headers not wrapped in case_insensitive_dict here either —
        # confirm intentional, matching list() above.
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AzureDevOpsOrg]

        request = build_get_request(
            resource_group_name=resource_group_name,
            azure_dev_ops_connector_name=azure_dev_ops_connector_name,
            azure_dev_ops_org_name=azure_dev_ops_org_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # Only 200 is a success for GET; anything else becomes a typed error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("AzureDevOpsOrg", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs/{azureDevOpsOrgName}"}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name: str,
        azure_dev_ops_connector_name: str,
        azure_dev_ops_org_name: str,
        azure_dev_ops_org: Union[_models.AzureDevOpsOrg, IO],
        **kwargs: Any
    ) -> _models.AzureDevOpsOrg:
        # Initial PUT call of the create-or-update LRO; begin_create_or_update
        # wraps this with polling. Accepts either a model or a raw IO/bytes body.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AzureDevOpsOrg]

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; models are serialized to JSON.
        if isinstance(azure_dev_ops_org, (IO, bytes)):
            _content = azure_dev_ops_org
        else:
            _json = self._serialize.body(azure_dev_ops_org, "AzureDevOpsOrg")

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            azure_dev_ops_connector_name=azure_dev_ops_connector_name,
            azure_dev_ops_org_name=azure_dev_ops_org_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # 200 = updated, 201 = created; both carry the resource in the body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize("AzureDevOpsOrg", pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize("AzureDevOpsOrg", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs/{azureDevOpsOrgName}"}  # type: ignore

    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        azure_dev_ops_connector_name: str,
        azure_dev_ops_org_name: str,
        azure_dev_ops_org: _models.AzureDevOpsOrg,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.AzureDevOpsOrg]:
        """Creates or updates an Azure DevOps Org.

        Creates or updates an Azure DevOps Org.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param azure_dev_ops_connector_name: Name of the AzureDevOps Connector. Required.
        :type azure_dev_ops_connector_name: str
        :param azure_dev_ops_org_name: Name of the AzureDevOps Org. Required.
        :type azure_dev_ops_org_name: str
        :param azure_dev_ops_org: Azure DevOps Org resource payload. Required.
        :type azure_dev_ops_org: ~azure.mgmt.securitydevops.models.AzureDevOpsOrg
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AzureDevOpsOrg or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.securitydevops.models.AzureDevOpsOrg]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        azure_dev_ops_connector_name: str,
        azure_dev_ops_org_name: str,
        azure_dev_ops_org: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.AzureDevOpsOrg]:
        """Creates or updates an Azure DevOps Org.

        Creates or updates an Azure DevOps Org.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param azure_dev_ops_connector_name: Name of the AzureDevOps Connector. Required.
        :type azure_dev_ops_connector_name: str
        :param azure_dev_ops_org_name: Name of the AzureDevOps Org. Required.
        :type azure_dev_ops_org_name: str
        :param azure_dev_ops_org: Azure DevOps Org resource payload. Required.
        :type azure_dev_ops_org: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AzureDevOpsOrg or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.securitydevops.models.AzureDevOpsOrg]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        azure_dev_ops_connector_name: str,
        azure_dev_ops_org_name: str,
        azure_dev_ops_org: Union[_models.AzureDevOpsOrg, IO],
        **kwargs: Any
    ) -> LROPoller[_models.AzureDevOpsOrg]:
        """Creates or updates an Azure DevOps Org.

        Creates or updates an Azure DevOps Org.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param azure_dev_ops_connector_name: Name of the AzureDevOps Connector. Required.
        :type azure_dev_ops_connector_name: str
        :param azure_dev_ops_org_name: Name of the AzureDevOps Org. Required.
        :type azure_dev_ops_org_name: str
        :param azure_dev_ops_org: Azure DevOps Org resource payload. Is either a model type or a IO
         type. Required.
        :type azure_dev_ops_org: ~azure.mgmt.securitydevops.models.AzureDevOpsOrg or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AzureDevOpsOrg or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.securitydevops.models.AzureDevOpsOrg]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AzureDevOpsOrg]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # With a continuation token the initial call is skipped and the poller is
        # rehydrated below; raw_result is then never needed (hence the type: ignore).
        if cont_token is None:
            raw_result = self._create_or_update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                azure_dev_ops_connector_name=azure_dev_ops_connector_name,
                azure_dev_ops_org_name=azure_dev_ops_org_name,
                azure_dev_ops_org=azure_dev_ops_org,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Final deserialization once the LRO reaches a terminal state.
            deserialized = self._deserialize("AzureDevOpsOrg", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling; polling=False -> no polling;
        # otherwise the caller supplied their own PollingMethod.
        if polling is True:
            polling_method = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs)
            )  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs/{azureDevOpsOrgName}"}  # type: ignore

    def _update_initial(
        self,
        resource_group_name: str,
        azure_dev_ops_connector_name: str,
        azure_dev_ops_org_name: str,
        azure_dev_ops_org: Optional[Union[_models.AzureDevOpsOrg, IO]] = None,
        **kwargs: Any
    ) -> _models.AzureDevOpsOrg:
        # Initial PATCH call of the update LRO; begin_update wraps this with
        # polling. The body is optional for PATCH.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AzureDevOpsOrg]

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; a model is serialized, and an absent
        # body results in an empty JSON payload.
        if isinstance(azure_dev_ops_org, (IO, bytes)):
            _content = azure_dev_ops_org
        else:
            if azure_dev_ops_org is not None:
                _json = self._serialize.body(azure_dev_ops_org, "AzureDevOpsOrg")
            else:
                _json = None

        request = build_update_request(
            resource_group_name=resource_group_name,
            azure_dev_ops_connector_name=azure_dev_ops_connector_name,
            azure_dev_ops_org_name=azure_dev_ops_org_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # PATCH only succeeds with 202 Accepted; the body is deserialized anyway
        # so the poller's initial response carries a model.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("AzureDevOpsOrg", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs/{azureDevOpsOrgName}"}  # type: ignore

    @overload
    def begin_update(
        self,
        resource_group_name: str,
        azure_dev_ops_connector_name: str,
        azure_dev_ops_org_name: str,
        azure_dev_ops_org: Optional[_models.AzureDevOpsOrg] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.AzureDevOpsOrg]:
        """Update monitored AzureDevOps Org details.

        Update monitored AzureDevOps Org details.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param azure_dev_ops_connector_name: Name of the AzureDevOps Connector. Required.
        :type azure_dev_ops_connector_name: str
        :param azure_dev_ops_org_name: Name of the AzureDevOps Org. Required.
        :type azure_dev_ops_org_name: str
        :param azure_dev_ops_org: Azure DevOps Org resource payload. Default value is None.
        :type azure_dev_ops_org: ~azure.mgmt.securitydevops.models.AzureDevOpsOrg
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AzureDevOpsOrg or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.securitydevops.models.AzureDevOpsOrg]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def begin_update(
        self,
        resource_group_name: str,
        azure_dev_ops_connector_name: str,
        azure_dev_ops_org_name: str,
        azure_dev_ops_org: Optional[IO] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.AzureDevOpsOrg]:
        """Update monitored AzureDevOps Org details.

        Update monitored AzureDevOps Org details.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param azure_dev_ops_connector_name: Name of the AzureDevOps Connector. Required.
        :type azure_dev_ops_connector_name: str
        :param azure_dev_ops_org_name: Name of the AzureDevOps Org. Required.
        :type azure_dev_ops_org_name: str
        :param azure_dev_ops_org: Azure DevOps Org resource payload. Default value is None.
        :type azure_dev_ops_org: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AzureDevOpsOrg or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.securitydevops.models.AzureDevOpsOrg]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        azure_dev_ops_connector_name: str,
        azure_dev_ops_org_name: str,
        azure_dev_ops_org: Optional[Union[_models.AzureDevOpsOrg, IO]] = None,
        **kwargs: Any
    ) -> LROPoller[_models.AzureDevOpsOrg]:
        """Update monitored AzureDevOps Org details.

        Update monitored AzureDevOps Org details.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param azure_dev_ops_connector_name: Name of the AzureDevOps Connector. Required.
        :type azure_dev_ops_connector_name: str
        :param azure_dev_ops_org_name: Name of the AzureDevOps Org. Required.
        :type azure_dev_ops_org_name: str
        :param azure_dev_ops_org: Azure DevOps Org resource payload. Is either a model type or a IO
         type. Default value is None.
        :type azure_dev_ops_org: ~azure.mgmt.securitydevops.models.AzureDevOpsOrg or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AzureDevOpsOrg or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.securitydevops.models.AzureDevOpsOrg]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AzureDevOpsOrg]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # With a continuation token the initial call is skipped and the poller is
        # rehydrated below; raw_result is then never needed (hence the type: ignore).
        if cont_token is None:
            raw_result = self._update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                azure_dev_ops_connector_name=azure_dev_ops_connector_name,
                azure_dev_ops_org_name=azure_dev_ops_org_name,
                azure_dev_ops_org=azure_dev_ops_org,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Final deserialization once the LRO reaches a terminal state.
            deserialized = self._deserialize("AzureDevOpsOrg", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling; polling=False -> no polling;
        # otherwise the caller supplied their own PollingMethod.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SecurityDevOps/azureDevOpsConnectors/{azureDevOpsConnectorName}/orgs/{azureDevOpsOrgName}"}  # type: ignore
| {
"content_hash": "d56ca77cbd5fc4f7ada4221b46b8021d",
"timestamp": "",
"source": "github",
"line_count": 846,
"max_line_length": 244,
"avg_line_length": 47.42316784869976,
"alnum_prop": 0.6501994017946161,
"repo_name": "Azure/azure-sdk-for-python",
"id": "7e2959515d5bc86009e7c4833ad02c97928a59a1",
"size": "40620",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/securitydevops/azure-mgmt-securitydevops/azure/mgmt/securitydevops/operations/_azure_dev_ops_org_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
watchLogPage
"""
__author__ = 'Rnd495'
import re
import urllib
import tornado.web
import tornado.gen
from UI.Manager import mapping
from UI.Page import PageBase, NoticeAndRedirectInterruption
from Pages.APIPage import get_ref_status
# Validates a Tenhou log reference: 10 digits, literal "gm-", a 4-char
# word group, a 4-5 digit group, and an 8-char word group, captured as "ref".
TENHOU_REG = re.compile(r"^(?P<ref>\d{10}gm-\w{4}-\d{4,5}-\w{8})$")
@mapping(r'/watch/log')
class WatchLogPage(PageBase):
    """
    WatchLogPage

    Handler for "/watch/log": validates the "ref" query argument against
    TENHOU_REG, confirms the log exists via get_ref_status, then renders
    watchLog.html with a tenhou.net viewer URL built from the request
    parameters.

    NOTE(review): Python 2 code (`unicode`, `urllib.urlencode`).
    """
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        # Reject a missing or malformed log reference; the interruption is
        # raised (not returned) and produces a notice/redirect page.
        ref = self.get_argument("ref", None)
        if ref is None or not TENHOU_REG.match(ref):
            raise NoticeAndRedirectInterruption(u'无效的索引值"%s"' % ref, title=u'参数错误')
        # check log existence
        status = yield get_ref_status(ref)
        if status['status'] != 'ok':
            raise NoticeAndRedirectInterruption(u'无效的索引值"%s",%s' % (ref, status['status']), title=u'参数错误')
        params = {'log': ref}
        # Optional arguments UN0..UN3; stop at the first missing one so only
        # a contiguous prefix is forwarded.
        for i in range(4):
            key = "UN%d" % i
            value = self.get_argument(key, None)
            if value is not None:
                params[key] = value
            else:
                break
        # Optional integer "tw" argument; silently dropped when absent or
        # non-numeric (presumably the viewpoint seat -- TODO confirm).
        try:
            params['tw'] = int(self.get_argument("tw", None))
        except (ValueError, TypeError):
            pass
        # encode unicode to utf-8 (Py2 urlencode cannot handle unicode values)
        for key in params:
            value = params[key]
            if isinstance(value, unicode):
                params[key] = value.encode("utf-8")
        tenhou_url = 'http://tenhou.net/5/?' + urllib.urlencode(params)
        self.render('watchLog.html', tenhou_url=tenhou_url)
"content_hash": "348fbdfe6a228a0edda413592fa0bf55",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 106,
"avg_line_length": 27.155172413793103,
"alnum_prop": 0.5765079365079365,
"repo_name": "SakuraSa/TenhouLoggerX",
"id": "23543007c6f3a087a47a63d615c1a44e3005d6db",
"size": "1653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pages/watchLogPage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1412"
},
{
"name": "HTML",
"bytes": "19738"
},
{
"name": "JavaScript",
"bytes": "11684"
},
{
"name": "Python",
"bytes": "57478"
}
],
"symlink_target": ""
} |
'''Generates convos from pcap files ##(srcip,destip,proto,timestamp)'''
from operator import itemgetter
import time
from convo_util import *
# Module-level state shared with the functions below ("global" at module
# scope is a no-op, kept only to document the shared names).
global sample
global n_max
global n_input_pcaps
global label

import sys  # NOTE(review): `sys` was previously only in scope via `from convo_util import *`

# Substrings of the input file name used to derive the traffic label.
clean_names = ['p2pbox']
# BUG FIX: the list was ['zeus', 'waledac, gtisc-winobot'] -- a missing quote
# fused 'waledac' and 'gtisc-winobot' into one string, so neither name could
# ever match an input path on its own.
mal_names = ['zeus', 'waledac', 'gtisc-winobot']

print(sys.argv[1])

TIMEOUT = 2000 #standard timeout of 600ms  # NOTE(review): comment says 600ms but value is 2000 -- confirm units

# Truncate (create/empty) the output file up front; it is written later.
out_file = open(sys.argv[1] + '_output_'+ str(TIMEOUT)+'_convos','w')
out_file.close()

# Derive the label from whichever known name appears in the input path;
# default to 'botnet' when nothing matches.
label = ''
for line in clean_names:
    if line in sys.argv[1]:
        label = line
for line in mal_names:
    if line in sys.argv[1]:
        label = line
if label == '':
    label = 'botnet'

n_max = 0  # longest sample seen so far (updated by generateConvos)
inp_file = open(sys.argv[1], 'r')
n_input_pcaps = []  # parsed packet records, one list per input line
def generateConvoAttributes(flow):
    """Emit attributes for one flow via getPacketLevelInfo.

    Flow fields: (SourceIp, DestIP, TimeStamp, Proto, packet_len,
    SourcePort, DestPort, reconnects); number of reconnects omitted.
    Flows with fewer than two packets are ignored, and any exception from
    the extractor is swallowed (best-effort processing).
    """
    try:
        if len(flow) >= 2:
            getPacketLevelInfo(flow, label)
    except Exception:
        pass
def getDistinctFlags():
    """Debug helper: print the distinct tuples of fields 6..11 for TCP packets.

    Scans the global n_input_pcaps for records whose protocol field is '6'
    (TCP) and collects their flag fields into a set, printed at the end.
    """
    global n_input_pcaps
    n_set = set()
    for line in n_input_pcaps:
        if line[2] == '6':
            # NOTE(review): fields come from str.split(','), so line[7] is a
            # string; comparing to the int 1 can never be True -- probably
            # intended line[7] == '1'.  Confirm against the input format.
            if line[7] == 1:
                print "Syn found"
            n_set.add(tuple(line[6:12]))
    print n_set
def getDestPackets(i, dest_ip, source_ip):
    """Gather the reverse-direction packets (dest_ip -> source_ip) of a convo.

    Starting after index i in the global n_input_pcaps, finds the first
    unmarked packet flowing dest_ip -> source_ip, then appends consecutive
    unmarked records, setting each consumed record's trailing flag to
    'marked' so it is not reused.  Returns the collected records.
    """
    global n_input_pcaps
    #print i, dest_ip, source_ip
    index = i + 1
    prev = []
    # Locate the first unmarked reverse-direction packet.
    for index in range(i+1, len(n_input_pcaps)):
        prev = n_input_pcaps[index]
        if prev[0].strip() == dest_ip and prev[1].strip() == source_ip and prev[-1] == 'unmarked':
            break #found first index
    curr = []
    data = []
    data.append(prev)
    for k in range(index, len(n_input_pcaps)):
        curr = n_input_pcaps[k]
        if curr[-1] != 'unmarked':
            continue
        # NOTE(review): this re-tests `prev`, which matched above, so the
        # condition holds on the first pass; it looks like it was meant to
        # test `curr` to detect the end of the run -- confirm intent.
        if prev[0].strip() == dest_ip and prev[1].strip() == source_ip:
            pass
        else:
            return data
        prev[-1] = 'marked'
        data.append(prev)
        prev = curr
    return data
def generateConvos():
    """Split the global `sample` into conversations by inter-packet time gap.

    A gap in field 3 larger than TIMEOUT starts a new conversation.  Only
    the final conversation is forwarded to generateConvoAttributes -- the
    branch for completed ones is a no-op (see the commented-out write).
    Also tracks the largest sample size seen in the global n_max.
    """
    global sample
    global n_max
    if n_max < len(sample):
        n_max = len(sample)
    if sample == []:
        return []
    curr_flow = []
    curr_flow.append(sample[0])
    i = 1
    for i in range(1, len(sample)):
        prev = sample[i-1]
        curr = sample[i]
        if ((float(curr[3]) - float(prev[3])) <= TIMEOUT):
            curr_flow.append(curr)
        else:
            if len(curr_flow) > 0:
                pass
                #out_file_udp.write(','.join(curr_flow[0]) + '\n')
            curr_flow = []
            curr_flow.append(curr)
        prev = curr  # NOTE(review): dead assignment -- prev is rebound each iteration
    # Strip the trailing bookkeeping fields (e.g. the mark flag) before emitting.
    flow_to_send = []
    for item in curr_flow:
        flow_to_send.append(item[0:-2])
    #print flow_to_send
    generateConvoAttributes(flow_to_send)
# ---- Load and tag the input records ---------------------------------------
for line in inp_file:
    #print line
    n_input_pcaps.append(line.strip().split(','))#.extend(['unmarked']))
    # Each record carries a trailing mark flag so packets are consumed once.
    n_input_pcaps[-1].append('unmarked')
inp_file.close()
#n_input_pcaps.sort(key = itemgetter(0,1,2)) #Not needed as files are already sorted
'''Generate Conversations'''
# ---- Walk the sorted records, grouping same src/dst runs into samples ------
prev = []
sample = []
index = 0
prev = n_input_pcaps[index]
for i in range(index + 1,len(n_input_pcaps)):
    curr = n_input_pcaps[i]
    if curr[-1] != 'unmarked':
        continue
    sample.append(prev)
    prev[-1] = 'marked'
    if prev[0].strip() == curr[0].strip() and prev[1].strip() == curr[1].strip():#and ((float(curr[3]) - float(prev[3])) <= UDPTIMEOUT): #Fix for unidirectional and bidirectional flows
        pass
    else:
        #Time to generate a convo
        #Fix for Bidirecftional bidirectional
        # Merge in the reverse-direction packets so the conversation is
        # bidirectional, then sort by timestamp (field 3) before emitting.
        ext_sample = getDestPackets(i, prev[1].strip(), prev[0].strip())
        if len(ext_sample[0]) > 0:
            sample.extend(ext_sample)
        sample = sorted(sample, key = itemgetter(3))
        generateConvos()
        sample = []
    prev = curr
sample.append(prev)
sample = sorted(sample, key = itemgetter(3))
generateConvos() #for last flow
print n_max
| {
"content_hash": "56eae02384d4cfd43efabd1328227690",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 185,
"avg_line_length": 28.71014492753623,
"alnum_prop": 0.5875820292781424,
"repo_name": "vansh21k/SMADES",
"id": "ca3ec77b33faa8ad47c619565fb79dd6c77950ed",
"size": "3962",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "FeatureExtraction/convo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55386"
},
{
"name": "R",
"bytes": "1346"
},
{
"name": "Shell",
"bytes": "1598"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post authored by a site user.

    A post starts as a draft (``published_date`` unset) and becomes public
    once :meth:`publish` stamps it with the current time.
    """

    # BUG FIX: on_delete is a required argument as of Django 2.0; CASCADE
    # (deleting a user's posts with the account) matches the pre-2.0 default.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(
        default=timezone.now)  # callable default: evaluated per save, not at import
    published_date = models.DateTimeField(
        blank=True, null=True)

    def publish(self):
        """Mark the post as published now and persist the change."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title
| {
"content_hash": "8e78b0192e47707550de4702d628b1bb",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 44,
"avg_line_length": 25.15,
"alnum_prop": 0.6481113320079522,
"repo_name": "artopping/nyu-python",
"id": "d8eed4f71b48bd5482f40299cbfdf488d6372d96",
"size": "503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course3/assignments/django_blog/djangogirls/blog/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "77"
},
{
"name": "CSS",
"bytes": "4488"
},
{
"name": "HTML",
"bytes": "24199"
},
{
"name": "Python",
"bytes": "180205"
},
{
"name": "Shell",
"bytes": "31730"
},
{
"name": "Vim script",
"bytes": "170"
}
],
"symlink_target": ""
} |
# Django settings (Django ~1.5-era layout, per the doc links below).
# SECURITY NOTE(review): DEBUG=True must never ship to production -- it
# exposes stack traces and settings values to visitors.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE':
        'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'utdcs-db', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST':
        '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# SECURITY NOTE(review): the trailing '*' disables Host-header validation
# entirely -- acceptable for a throwaway instance, never for production.
ALLOWED_HOSTS = ['utdcs.joshcai.repl.co', 'localhost', '0.0.0.0', '*.repl.co', '*']

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this SECRET_KEY is committed to version control;
# rotate it and load it from an environment variable before deploying.
SECRET_KEY = '&z(1tsn5ob-cejxq-0-&*!afujy-(u&hz41me7ou92pyhuf=2&'
# List of callables that know how to import templates from various sources.
# NOTE(review): this file mixes 1.5-era settings (TEMPLATE_LOADERS,
# TEMPLATE_DEBUG) with newer ones (MIDDLEWARE, TEMPLATES) -- confirm which
# Django version is actually installed.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'website.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'website.wsgi.application'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            # ... some options here ...
        },
    },
]

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'processing',
    # 'south', #database migration
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| {
"content_hash": "496aff44b4cdf442046f047db1b83791",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 99,
"avg_line_length": 33.11656441717791,
"alnum_prop": 0.6780288995924416,
"repo_name": "joshcai/utdcs",
"id": "9a4e6e8ca572ca06d565dba85a4fc84270d8f9a1",
"size": "5438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "42668"
},
{
"name": "C++",
"bytes": "48490"
},
{
"name": "CSS",
"bytes": "41109"
},
{
"name": "Cython",
"bytes": "133874"
},
{
"name": "Fortran",
"bytes": "298"
},
{
"name": "HTML",
"bytes": "8407"
},
{
"name": "JavaScript",
"bytes": "440875"
},
{
"name": "Nix",
"bytes": "382"
},
{
"name": "Nu",
"bytes": "3265"
},
{
"name": "PowerShell",
"bytes": "8834"
},
{
"name": "Python",
"bytes": "8302122"
},
{
"name": "Shell",
"bytes": "3828"
}
],
"symlink_target": ""
} |
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CartRuleRestrictionGroupItem(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    One item of a cart-rule restriction group: a (type, id_item) pair
    restricting the rule to a product, subscription, category, actor or
    director.
    """
    def __init__(self, type=None, id_item=None):
        """
        CartRuleRestrictionGroupItem - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'type': 'str',
            'id_item': 'int'
        }

        self.attribute_map = {
            'type': 'type',
            'id_item': 'id_item'
        }

        self._type = type
        self._id_item = id_item

    @property
    def type(self):
        """
        Gets the type of this CartRuleRestrictionGroupItem.
        Can be: product, subscription, category, actor or director

        :return: The type of this CartRuleRestrictionGroupItem.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this CartRuleRestrictionGroupItem.
        Can be: product, subscription, category, actor or director

        :param type: The type of this CartRuleRestrictionGroupItem.
        :type: str
        """
        self._type = type

    @property
    def id_item(self):
        """
        Gets the id_item of this CartRuleRestrictionGroupItem.
        Item ID to restrict

        :return: The id_item of this CartRuleRestrictionGroupItem.
        :rtype: int
        """
        return self._id_item

    @id_item.setter
    def id_item(self, id_item):
        """
        Sets the id_item of this CartRuleRestrictionGroupItem.
        Item ID to restrict

        :param id_item: The id_item of this CartRuleRestrictionGroupItem.
        :type: int
        """
        self._id_item = id_item

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # BUG FIX: comparing against an arbitrary object used to dereference
        # other.__dict__ and raise AttributeError (e.g. `item == 5`); unrelated
        # types now simply compare unequal.
        if not isinstance(other, CartRuleRestrictionGroupItem):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| {
"content_hash": "c75206833ab4385432dcd93730689e82",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 77,
"avg_line_length": 26.268115942028984,
"alnum_prop": 0.5282758620689655,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "c731304ccdd536f13a6af9ad9597e765f4bbb882",
"size": "3642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kinow_client/models/cart_rule_restriction_group_item.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
} |
import asyncio
import discord
from datetime import datetime
from discord.ext import commands
from shutil import copyfile
import time
import json
import os
import copy
import subprocess
try:
import pymongo
except ImportError:
# I mean, it doesn't really matter as it can still revert to JSON
print("pymongo not installed, preparing to use JSON")
pass
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
    """Extension entry point: register the Settings cog on the bot."""
    # Add the cog
    bot.add_cog(Settings(bot))
class MemberRole:
    """One queued role change: a member plus the roles to add and remove."""

    def __init__(self, **kwargs):
        # member: the discord.Member whose roles will change (may be None)
        self.member = kwargs.get("member", None)
        # roles to grant / revoke when this entry is processed
        self.add_roles = kwargs.get("add_roles", [])
        self.rem_roles = kwargs.get("rem_roles", [])
        # Only an actual discord.Member carries guild information.
        self.guild = self.member.guild if type(self.member) == discord.Member else None
class RoleManager:
    """Serializes role add/remove operations through an asyncio queue.

    Queued MemberRole entries are applied one at a time by a background
    task, with a short delay between the add and remove phases, so role
    changes are rate-limited and ordered.
    """
    # Init with the bot reference
    def __init__(self, bot):
        self.bot = bot
        self.sleep = 1  # NOTE(review): not read in this class -- confirm external use
        self.delay = 0.2  # pause between the add phase and the remove phase
        self.next_member_delay = 1  # NOTE(review): not read in this class -- confirm external use
        self.running = True
        self.q = asyncio.Queue()
        # Background worker draining the queue for the bot's lifetime.
        self.loop_list = [self.bot.loop.create_task(self.check_roles())]

    def clean_up(self):
        """Stop the worker loop and cancel all outstanding tasks."""
        self.running = False
        for task in self.loop_list:
            task.cancel()

    async def check_roles(self):
        """Worker coroutine: apply queued MemberRole entries one at a time."""
        while self.running:
            # Try with a queue I suppose
            current_role = await self.q.get()
            await self.check_member_role(current_role)
            self.q.task_done()

    async def check_member_role(self, r):
        """Apply one MemberRole: add its roles first, then remove, with a delay between."""
        if r.guild == None or r.member == None:
            # Not applicable
            return
        if not r.guild.me.guild_permissions.manage_roles:
            # Missing permissions to manage roles
            return
        # Let's add roles
        if len(r.add_roles):
            try:
                await r.member.add_roles(*r.add_roles)
            except Exception as e:
                # Forbidden is expected (role hierarchy); anything else is
                # printed for diagnosis but never re-raised.
                if not type(e) is discord.Forbidden:
                    try:
                        print(e)
                    except:
                        pass
                pass
        if len(r.add_roles) and len(r.rem_roles):
            # Pause for a sec before continuing
            await asyncio.sleep(self.delay)
        if len(r.rem_roles):
            try:
                await r.member.remove_roles(*r.rem_roles)
            except Exception as e:
                if not type(e) is discord.Forbidden:
                    try:
                        print(e)
                    except:
                        pass
                pass

    def _update(self, member, *, add_roles = [], rem_roles = []):
        # Updates an existing record - or adds a new one
        """Queue a role change, dropping roles at or above the bot's top role.

        NOTE(review): the [] default arguments are never mutated here, so
        they are harmless despite being the classic mutable-default footgun.
        """
        if not type(member) == discord.Member:
            # Can't change roles without a guild
            return
        # Check first if any of the add_roles are above our own
        top_index = member.guild.me.top_role.position
        new_add = []
        new_rem = []
        for a in add_roles:
            if not a:
                continue
            if a.position < top_index:
                # Can add this one
                new_add.append(a)
        for r in rem_roles:
            if not r:
                continue
            if r.position < top_index:
                # Can remove this one
                new_rem.append(r)
        if len(new_add) == 0 and len(new_rem) == 0:
            # Nothing to do here
            return
        self.q.put_nowait(MemberRole(member=member, add_roles=new_add, rem_roles=new_rem))

    def add_roles(self, member, role_list):
        # Adds the member and roles as a MemberRole object to the heap
        self._update(member, add_roles=role_list)

    def rem_roles(self, member, role_list):
        # Adds the member and roles as a MemberRole object to the heap
        self._update(member, rem_roles=role_list)

    def change_roles(self, member, *, add_roles = [], rem_roles = []):
        # Adds the member and both role types as a MemberRole object to the heap
        self._update(member, add_roles=add_roles, rem_roles=rem_roles)
# This is the settings module - it allows the other modules to work with
# a global settings variable and to make changes
class Settings(commands.Cog):
"""The Doorway To The Server Settings"""
# Let's initialize with a file location
    def __init__(self, bot, prefix = "$", file : str = None):
        """Create the Settings cog.

        :param bot: the discord bot instance this cog serves
        :param prefix: default command prefix
        :param file: path to the JSON settings file; defaults to Settings.json

        Tries to connect to a local MongoDB first; when unavailable, falls
        back to the JSON file and tightens the flush interval.
        """
        if file == None:
            # We weren't given a file, default to ./Settings.json
            file = "Settings.json"
        self.file = file
        self.backupDir = "Settings-Backup"
        self.backupMax = 100
        self.backupTime = 7200 # runs every 2 hours
        self.backupWait = 10 # initial wait time before first backup
        self.settingsDump = 3600 # runs every hour
        self.databaseDump = 300 # runs every 5 minutes
        self.jsonOnlyDump = 600 # runs every 10 minutes if no database
        self.bot = bot
        self.flush_lock = False # locked when flushing settings - so we can't flush multiple times
        self.prefix = prefix
        self.loop_list = []
        self.role = RoleManager(bot)
        global Utils, DisplayName
        Utils = self.bot.get_cog("Utils")
        DisplayName = self.bot.get_cog("DisplayName")
        # Template applied to each newly-seen server; one key per feature.
        self.defaultServer = { # Negates Name and ID - those are added dynamically to each new server
            "DefaultRole" : "", # Auto-assigned role position
            "TempRole" : None, # Assign a default temporary role
            "TempRoleTime" : 2, # Number of minutes before temp role expires
            "TempRoleList" : [], # List of temporary roles
            "TempRolePM" : False, # Do we pm when a user is given a temp role?
            "DefaultXP" : 0, # Default xp given to each new member on join
            "DefaultXPReserve" : 10, # Default xp reserve given to new members on join
            "AdminLock" : False, # Does the bot *only* answer to admins?
            "TableFlipMute" : False, # Do we mute people who flip tables?
            "IgnoreDeath" : True, # Does the bot keep talking post-mortem?
            "DJArray" : [], # List of roles that can use music
            "FilteredWords" : [], # List of words to filter out of user messages
            "UserRoles" : [], # List of roles users can self-select
            "UserRoleBlock" : [], # List of users blocked from UserRoles
            "OnlyOneUserRole" : True, # Limits user role selection to one at a time
            "YTMultiple" : False, # Shows a list of 5 videos per yt search with play
            "RequiredXPRole" : "", # ID or blank for Everyone
            "RequiredLinkRole" : "", # ID or blank for Admin-Only
            "RequiredTagRole" : "", # ID or blank for Admin-Only
            "RequiredHackRole" : "", # ID or blank for Admin-Only
            "RequiredKillRole" : "", # ID or blank for Admin-Only
            "RequiredStopRole" : "", # ID or blank for Admin-Only
            "TeleChannel" : "", # ID or blank for disabled
            "TeleConnected" : False, # Disconnect any lingering calls
            "LastCallHidden" : False, # Was the last call with *67?
            "TeleNumber" : None, # The 7-digit number of the server
            "TeleBlock" : [], # List of blocked numbers
            "MadLibsChannel" : "", # ID or blank for any channel
            "ChatChannel" : "", # ID or blank for no channel
            "HardwareChannel" : "", # ID or blank for no channel
            "DefaultChannel" : "", # ID or blank for no channel
            "WelcomeChannel" : None, # ID or None for no channel
            "LastChat" : 0, # UTC Timestamp of last chat message
            "PlayingMadLibs" : False, # Yes if currently playing MadLibs
            "LastAnswer" : "", # URL to last {prefix}question post
            "StrikeOut" : 3, # Number of strikes needed for consequence
            "KickList" : [], # List of id's that have been kicked
            "BanList" : [], # List of id's that have been banned
            "Prefix" : None, # Custom Prefix
            "AutoPCPP" : None, # Auto-format pcpartpicker links?
            "XP Count" : 10, # Default number of xp transactions to log
            "XP Array" : [], # Holds the xp transaction list
            "XPLimit" : None, # The maximum xp a member can get
            "XPReserveLimit" : None, # The maximum xp reserve a member can get
            "XpBlockArray" : [], # List of roles/users blocked from xp
            "HourlyXP" : 3, # How much xp reserve per hour
            "HourlyXPReal" : 0, # How much xp per hour (typically 0)
            "XPPerMessage" : 0, # How much xp per message (typically 0)
            "XPRPerMessage" : 0, # How much xp reserve per message (typically 0)
            "RequireOnline" : True, # Must be online for xp?
            "AdminUnlimited" : True, # Do admins have unlimited xp to give?
            "BotAdminAsAdmin" : False, # Do bot-admins count as admins with xp?
            "RemindOffline" : False, # Let users know when they ping offline members
            "JoinPM" : False, # Do we pm new users with rules?
            "XPPromote" : True, # Can xp raise your rank?
            "XPDemote" : False, # Can xp lower your rank?
            "SuppressPromotions" : False, # Do we suppress the promotion message?
            "SuppressDemotions" : False, # Do we suppress the demotion message?
            "TotalMessages" : 0, # The total number of messages the bot has witnessed
            "Killed" : False, # Is the bot dead?
            "KilledBy" : "", # Who killed the bot?
            "LastShrug" : "", # Who shrugged last?
            "LastLenny" : "", # Who Lenny'ed last?
            "VerificationTime" : 0, # Time to wait (in minutes) before assigning default role
            "LastPicture" : 0, # UTC Timestamp of last picture uploaded
            "PictureThreshold" : 10, # Number of seconds to wait before allowing pictures
            "Rules" : "Be nice to each other.",
            "Welcome" : "Welcome *[[user]]* to *[[server]]!*",
            "Goodbye" : "Goodbye *[[user]]*, *[[server]]* will miss you!",
            "Info" : "", # This is where you can say a bit about your server
            "PromotionArray" : [], # An array of roles for promotions
            "OnlyOneRole" : False, # Only allow one role from the promo array at a time
            "Hunger" : 0, # The bot's hunger % 0-100 (can also go negative)
            "HungerLock" : False, # Will the bot stop answering at 100% hunger?
            "SuppressMentions" : True, # Will the bot suppress @here and @everyone in its own output?
            "Volume" : "", # Float volume for music player
            "DefaultVolume" : 0.6, # Default volume for music player
            "Playlisting" : None, # Not adding a playlist
            "PlaylistRequestor" : None, # No one requested a playlist
            "IgnoredUsers" : [], # List of users that are ignored by the bot
            "LastComic" : [], # List of julian dates for last comic
            "Hacks" : [], # List of hack tips
            "Links" : [], # List of links
            "Tags" : [], # List of tags
            "Members" : {}, # List of members
            "AdminArray" : [], # List of admin roles
            "GifArray" : [], # List of roles that can use giphy
            "LogChannel" : "", # ID or blank for no logging
            "LogVars" : [], # List of options to log
            "DisabledCommands" : [], # List of disabled command names
            "AdminDisabledAccess" : True, # Can admins access disabled commands?
            "BAdminDisabledAccess" : True, # Can bot-admins access disabled commands?
            "DisabledReactions" : True, # Does the bot react to disabled commands?
            "VoteKickChannel" : None, # ID or none if not setup
            "VoteKickMention" : None, # ID of role to mention - or none for no mention
            "VotesToMute" : 0, # Positive number - or 0 for disabled
            "VotesToMention" : 0, # Positive number - or 0 for disabled
            "VotesMuteTime" : 0, # Number of seconds to mute - or 0 for disabled
            "VotesResetTime" : 0, # Number of seconds to roll off - or 0 for disabled
            "VoteKickArray" : [], # Contains a list of users who were voted to kick - and who voted against them
            "VoteKickAnon" : False, # Are vk messages deleted after sending?
            "QuoteReaction" : None, # Trigger reaction for quoting messages
            "QuoteChannel" : None, # Channel id for quotes
            "QuoteAdminOnly" : True, # Only admins/bot-admins can quote?
            "StreamChannel" : None, # None or channel id
            "StreamList" : [], # List of user id's to watch for
            "StreamMessage" : "Hey everyone! *[[user]]* started streaming *[[game]]!* Check it out here: [[url]]",
            "MuteList" : []} # List of muted members
            # Removed for spam
            # "ChannelMOTD" : {}} # List of channel messages of the day
        self.serverDict = {
            "Servers" : {}
        }
        self.ip = "localhost"
        self.port = 27017
        try:
            # Will likely fail if we don't have pymongo
            print("Connecting to database on {}:{}...".format(self.ip, self.port))
            client = pymongo.MongoClient(self.ip, self.port, serverSelectionTimeoutMS=100)
        except:
            client = None
        # See whether we actually connected to the database, this will throw an exception if not and if it does let's fall back on the JSON
        try:
            client.server_info()
            print("Established connection!")
            self.using_db = True
        except Exception:
            print("Connection failed, trying JSON")
            self.using_db = False
            pass
        self.migrated = False
        if self.using_db:
            self.db = client['pooter']
            # Check if we need to migrate some things
            self.migrate(file)
            # Load the database into the serverDict variable
            self.load_local()
        else:
            # Fix the flush time to the jsonOnlyDump
            self.settingsDump = self.jsonOnlyDump
            self.load_json(file)
def load_json(self, file):
if os.path.exists(file):
print("Since no mongoDB instance was running, I'm reverting back to the Settings.json")
self.serverDict = json.load(open(file))
else:
self.serverDict = {}
def migrate(self, _file):
if os.path.exists(_file):
try:
settings_json = json.load(open(_file))
if "mongodb_migrated" not in settings_json:
print("Settings.json file found, migrating it to database....")
self.serverDict = settings_json
self.migrated = True
self.flushSettings(both=True)
else:
print("Settings.json file found, not migrating, because it has already been done!")
except Exception:
print("Migrating failed... Rip")
self.serverDict = {}
    def load_local(self):
        # Load the database to the serverDict dictionary
        """Mirror the mongo database into the in-RAM self.serverDict cache.

        Only collections matching a guild the bot can currently see are
        loaded (poor-man's sharding); the special "Global" collection is
        merged into the top level of serverDict.
        """
        print("Loading database to RAM...")
        # For some sharding I guess?
        server_ids = [str(guild.id) for guild in self.bot.guilds]
        # NOTE(review): collection_names() is deprecated in modern pymongo
        # (list_collection_names() replaces it) -- confirm the pinned version.
        for collection_name in self.db.collection_names():
            if collection_name == "Global":
                global_collection = self.db.get_collection("Global").find_one()
                if global_collection:
                    for key, value in global_collection.items():
                        self.serverDict[key] = value
                continue
            # Sharding... only if the guild is accessible append it.
            if collection_name in server_ids:
                collection = self.db.get_collection(collection_name).find_one()
                self.serverDict["Servers"][collection_name] = collection
        print("Loaded database to RAM.")
def suppressed(self, guild, msg):
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(guild, "SuppressMentions"):
return Nullify.clean(msg)
else:
return msg
    async def onjoin(self, member, server):
        # Welcome - and initialize timers
        """Record the new member's verification deadline (now + the server's
        VerificationTime in minutes) and, for non-bots, schedule the
        default-role assignment task."""
        try:
            vt = time.time() + int(self.getServerStat(server,"VerificationTime",0)) * 60
        except:
            vt = 0
        self.setUserStat(member,server,"VerificationTime",vt)
        if not member.bot:
            self.bot.loop.create_task(self.giveRole(member, server))
# Proof of concept stuff for reloading cog/extension
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
    @commands.Cog.listener()
    async def on_unloaded_extension(self, ext):
        """Flush settings and cancel background tasks when this extension
        (or one of its submodules) is unloaded."""
        # Called to shut things down
        if not self._is_submodule(ext.__name__, self.__module__):
            return
        # Flush settings
        self.flushSettings(self.file, True)
        # Shutdown role manager loop
        self.role.clean_up()
        for task in self.loop_list:
            task.cancel()
    @commands.Cog.listener()
    async def on_loaded_extension(self, ext):
        """Start the verification, backup, settings-flush and database-flush
        background loops once this extension finishes loading."""
        # See if we were loaded
        if not self._is_submodule(ext.__name__, self.__module__):
            return
        # Check all verifications - and start timers if needed
        self.loop_list.append(self.bot.loop.create_task(self.checkAll()))
        # Start the backup loop
        self.loop_list.append(self.bot.loop.create_task(self.backup()))
        # Start the settings loop
        self.loop_list.append(self.bot.loop.create_task(self.flushLoop()))
        # Start the database loop
        self.loop_list.append(self.bot.loop.create_task(self.flushLoopDB()))
async def checkAll(self):
    """Scan every guild and queue a verification timer for each non-bot
    member who is still missing the guild's default role."""
    for guild in self.bot.guilds:
        # Skip guilds where we can't manage roles anyway
        if not guild.me.guild_permissions.manage_roles:
            continue
        default_role = DisplayName.roleForID(self.getServerStat(guild, "DefaultRole"), guild)
        if not default_role:
            continue
        for member in guild.members:
            if member.bot:
                # Bots never need verification
                continue
            has_role = any(role == default_role for role in member.roles)
            if not has_role:
                # Missing the default role - start its countdown task
                self.loop_list.append(self.bot.loop.create_task(self.giveRole(member, guild)))
async def giveRole(self, member, server):
    """Wait out the member's verification timer, then grant the server's
    default role if they still lack it.

    Fix: the original referenced an undefined name ``task`` when removing
    itself from ``loop_list`` (guaranteed NameError); it now uses
    ``asyncio.current_task()``.  The bare excepts are also narrowed.
    """
    verifiedAt = self.getUserStat(member, server, "VerificationTime")
    try:
        verifiedAt = int(verifiedAt)
    except (ValueError, TypeError):
        # Missing or non-numeric timestamp - treat as already verified
        verifiedAt = 0
    timeRemain = verifiedAt - int(time.time())
    if timeRemain > 0:
        # We have to wait for verification still
        await asyncio.sleep(timeRemain)
    # Verified - make sure the member actually has the default role
    defRole = self.getServerStat(server, "DefaultRole")
    defRole = DisplayName.roleForID(defRole, server)
    if defRole and defRole not in member.roles:
        try:
            self.role.add_roles(member, [defRole])
        except Exception:
            # Best effort - role may be gone or perms revoked
            pass
    # Bug fix: remove *this* task from the tracked loop list
    task = asyncio.current_task()
    if task in self.loop_list:
        self.loop_list.remove(task)
async def backup(self):
    """Periodically dump the settings JSON into backupDir, pruning the
    oldest files once more than backupMax backups exist.

    Works only for JSON files, not for the database yet.
    """
    # Wait initial time - then start loop
    await asyncio.sleep(self.backupWait)
    while not self.bot.is_closed():
        # Initial backup - then wait
        if not os.path.exists(self.backupDir):
            # Create it
            os.makedirs(self.backupDir)
        # Flush a timestamped backup (True also flushes the DB if in use)
        timeStamp = datetime.today().strftime("%Y-%m-%d %H.%M")
        self.flushSettings("./{}/Backup-{}.json".format(self.backupDir, timeStamp), True)
        # Get curr dir and change curr dir
        retval = os.getcwd()
        os.chdir(self.backupDir)
        # Oldest-first by modification time so we prune from the front
        backups = sorted(os.listdir(os.getcwd()), key=os.path.getmtime)
        numberToRemove = None
        if len(backups) > self.backupMax:
            # Over the cap - prune the oldest backups
            numberToRemove = len(backups)-self.backupMax
            for i in range(0, numberToRemove):
                os.remove(backups[i])
        # Restore curr dir
        os.chdir(retval)
        if numberToRemove:
            print("Settings Backed Up ({} removed): {}".format(numberToRemove, timeStamp))
        else:
            print("Settings Backed Up: {}".format(timeStamp))
        await asyncio.sleep(self.backupTime)
def getOwners(self):
    """Return the list of owner ids, pruned to members the bot can still
    see, persisting the pruned list back to serverDict when it changed.

    Improvements: ``== None`` replaced with ``is None`` and explicit
    ``len()`` truthiness replaced with idiomatic checks; behavior is
    unchanged.
    """
    ownerList = self.serverDict.get("Owner", [])
    ownerList = [] if ownerList is None else ownerList
    if isinstance(ownerList, list) and not ownerList:
        return ownerList
    if not isinstance(ownerList, list):
        # Legacy single-owner value - normalize to a list
        ownerList = [int(ownerList)]
    # Keep only owners that still resolve to a visible member (deduped)
    owners = list(set(x.id for x in self.bot.get_all_members() if x.id in ownerList))
    # Update the setting if there were changes
    if len(owners) != len(ownerList):
        self.serverDict['Owner'] = owners
    return owners
def isOwner(self, member):
    """True/False for claimed bots; None when nobody owns the bot yet."""
    current_owners = self.getOwners()
    return (member.id in current_owners) if current_owners else None
def getServerDict(self):
    """Expose the raw in-memory settings dictionary."""
    return self.serverDict
# Let's make sure the server is in our list
def checkServer(self, server):
    """Ensure *server* has an entry in serverDict["Servers"] containing
    every key from self.defaultServer.

    Existing values are never overwritten.  Missing keys are filled from
    the defaults: dict-typed defaults become fresh empty dicts (historical
    behavior), list defaults are deep-copied so servers never share state.

    Improvement: the original duplicated the identical fill loop for the
    "server exists" and "server is new" branches; a single setdefault +
    one fill pass covers both cases with the same results.
    """
    servers = self.serverDict.setdefault("Servers", {})
    entry = servers.setdefault(str(server.id), {})
    for key, default in self.defaultServer.items():
        if key in entry:
            continue
        if type(default) == dict:
            # Historical quirk preserved: dict defaults start out empty
            entry[key] = {}
        elif type(default) == list:
            # Copy lists so a server never aliases the shared default
            entry[key] = copy.deepcopy(default)
        else:
            entry[key] = default
# Drop a server's settings entry entirely
def removeServer(self, server):
    """Remove *server*'s settings entry (if present) and prune global users."""
    key = str(server.id)
    self.serverDict["Servers"].pop(key, None)
    self.checkGlobalUsers()
def removeServerID(self, id):
    """Remove the settings entry for the raw server *id* and prune global users."""
    servers = self.serverDict["Servers"]
    servers.pop(str(id), None)
    self.checkGlobalUsers()
#"""""""""""""""""""""""""#
#""" NEEDS TO BE FIXED """#
#"""""""""""""""""""""""""#
def removeChannel(self, channel):
motdArray = self.settings.getServerStat(channel.guild, "ChannelMOTD")
for a in motdArray:
# Get the channel that corresponds to the id
if str(a['ID']) == str(channel.id):
# We found it - throw an error message and return
motdArray.remove(a)
self.setServerStat(server, "ChannelMOTD", motdArray)
def removeChannelID(self, id, server):
    """Remove the ChannelMOTD entry whose ID matches *id* for *server*.

    Fix: the original iterated serverDict["Servers"] (which yields string
    keys) and indexed those strings like dicts, so it could never work.
    Look the server entry up directly and filter its ChannelMOTD list.
    """
    entry = self.serverDict.get("Servers", {}).get(str(server.id))
    if not entry:
        return
    motd = entry.get("ChannelMOTD") or []
    # String-compare ids so int/str callers both match
    entry["ChannelMOTD"] = [y for y in motd if str(y.get("ID")) != str(id)]
###
# TODO: Work through this method to make things more efficient
# Maybe don't set default values for each user - but make sure
# they have an empty dict in the Members dict, then keep a
# member_defaults dict with default values that could be set like:
#
# return self.serverDict["Servers"].get(str(server.id),{"Members":{}})["Members"].get(str(user.id),{}).get(stat, member_defaults.get(stat, None))
#
# As that would fall back on the default stat if the passed stat didn't exist
# and fall back on None if the stat itself isn't in the defaults
#
# This may also be a useful technique for adding servers, although
# that happens way less frequently.
###
# Let's make sure the user is in the specified server
def checkUser(self, user, server):
    """Ensure *user* has a Members entry for *server*.

    Existing entries are backfilled with any missing per-member keys;
    brand-new users get a minimal default entry (later calls backfill the
    rest).

    NOTE(review): ``needsUpdate`` is computed but never consumed anywhere
    in this method - it appears vestigial.
    """
    # Make sure our server exists in the list
    self.checkServer(server)
    if str(user.id) in self.serverDict["Servers"][str(server.id)]["Members"]:
        y = self.serverDict["Servers"][str(server.id)]["Members"][str(user.id)]
        needsUpdate = False
        if not "XP" in y:
            y["XP"] = int(self.getServerStat(server, "DefaultXP"))
            needsUpdate = True
        # XP needs to be an int - and uh... got messed up once so we check it here
        if type(y["XP"]) is float:
            y["XP"] = int(y["XP"])
        if not "XPLeftover" in y:
            y["XPLeftover"] = 0
            needsUpdate = True
        if not "XPRealLeftover" in y:
            y["XPRealLeftover"] = 0
            needsUpdate = True
        if not "XPReserve" in y:
            y["XPReserve"] = int(self.getServerStat(server, "DefaultXPReserve"))
            needsUpdate = True
        if not "Parts" in y:
            y["Parts"] = ""
            needsUpdate = True
        if not "Muted" in y:
            y["Muted"] = False
            needsUpdate = True
        if not "LastOnline" in y:
            y["LastOnline"] = None
            needsUpdate = True
        if not "Cooldown" in y:
            y["Cooldown"] = None
            needsUpdate = True
        if not "Reminders" in y:
            y["Reminders"] = []
            needsUpdate = True
        if not "Strikes" in y:
            y["Strikes"] = []
            needsUpdate = True
        if not "StrikeLevel" in y:
            y["StrikeLevel"] = 0
            needsUpdate = True
        if not "Profiles" in y:
            y["Profiles"] = []
            needsUpdate = True
        if not "TempRoles" in y:
            y["TempRoles"] = []
            needsUpdate = True
        if not "UTCOffset" in y:
            y["UTCOffset"] = None
            needsUpdate = True
        if not "LastCommand" in y:
            y["LastCommand"] = 0
        if not "Hardware" in y:
            y["Hardware"] = []
        if not "VerificationTime" in y:
            # Start a fresh verification countdown from "now"
            currentTime = int(time.time())
            waitTime = int(self.getServerStat(server, "VerificationTime"))
            y["VerificationTime"] = currentTime + (waitTime * 60)
    else:
        needsUpdate = True
        # We didn't locate our user - add them with a minimal default entry
        newUser = { "XP" : int(self.getServerStat(server, "DefaultXP")),
            "XPReserve" : (self.getServerStat(server, "DefaultXPReserve")),
            "Parts" : "",
            "Muted" : False,
            "LastOnline" : "Unknown",
            "Reminders" : [],
            "Profiles" : [] }
        # Guard against falsy/None defaults
        if not newUser["XP"]:
            newUser["XP"] = 0
        if not newUser["XPReserve"]:
            newUser["XPReserve"] = 0
        self.serverDict["Servers"][str(server.id)]["Members"][str(user.id)] = newUser
# Global (top-level, non-per-server) stat accessors
def getGlobalStat(self, stat, default = None):
    """Look up a top-level (global) setting, with *default* as fallback."""
    value = self.serverDict.get(stat, default)
    return value
def setGlobalStat(self, stat, value):
    """Store *value* as a top-level (global) setting."""
    self.serverDict[stat] = value
def delGlobalStat(self, stat):
    """Delete a top-level setting, returning its old value (None if absent)."""
    return self.serverDict.pop(stat, None)
# Drop a member's per-server settings
def removeUser(self, user, server):
    """Remove *user*'s entry for *server* (if any), then prune global users."""
    self.checkServer(server)
    members = self.serverDict["Servers"][str(server.id)]["Members"]
    members.pop(str(user.id), None)
    self.checkGlobalUsers()
def checkGlobalUsers(self):
    """Prune GlobalMembers entries for users the bot can no longer see.

    Currently disabled: the early ``return 0`` below short-circuits the
    method because the pruning may have been erroneously dropping users'
    settings.  Everything after the return is dead code kept for
    reference.
    """
    # Just return from this method - may be erroneously dropping users' settings
    return 0
    # This whole method should be reworked to not require
    # a couple loops to remove users - but since it's not
    # something that's run all the time, it's probably not
    # a big issue for now
    try:
        userList = self.serverDict['GlobalMembers']
    except:
        userList = {}
    remove_users = []
    check_list = [str(x.id) for x in self.bot.get_all_members()]
    for u in userList:
        if u in check_list:
            continue
        # Can't find... delete!
        remove_users.append(u)
    for u in remove_users:
        userList.pop(u, None)
    self.serverDict['GlobalMembers'] = userList
    return len(remove_users)
# Drop a member's per-server settings by raw id
def removeUserID(self, id, server):
    """Remove the member entry keyed by raw *id* for *server*, then prune global users."""
    self.checkServer(server)
    members = self.serverDict["Servers"][str(server.id)]["Members"]
    members.pop(str(id), None)
    self.checkGlobalUsers()
# Return the requested member-level stat
def getUserStat(self, user, server, stat, default = None):
    """Fetch a member-level stat, creating user/server entries first."""
    self.checkUser(user, server)
    servers = self.serverDict["Servers"]
    member_entry = servers.get(str(server.id), {}).get("Members", {}).get(str(user.id), {})
    return member_entry.get(stat, default)
def getGlobalUserStat(self, user, stat, default = None):
    """Fetch a cross-server (global) stat for *user*.

    Returns None (not *default*) when no GlobalMembers table exists yet -
    matching the original behavior - otherwise falls back to *default*.
    Improvement: the bare try/except lookup is replaced with dict.get.
    """
    userList = self.serverDict.get('GlobalMembers')
    if userList is None:
        # No global member table at all
        return None
    return userList.get(str(user.id), {}).get(stat, default)
# Set the provided member-level stat
def setUserStat(self, user, server, stat, value):
    """Set a member-level stat, creating user/server entries first."""
    self.checkUser(user, server)
    members = self.serverDict["Servers"][str(server.id)]["Members"]
    members[str(user.id)][stat] = value
# Set a provided global stat
def setGlobalUserStat(self, user, stat, value):
    """Set a cross-server (global) stat for *user*, creating the
    GlobalMembers table and the user's entry as needed.

    Improvement: the bare try/except and manual existence checks are
    replaced with dict.setdefault; behavior is unchanged.
    """
    userList = self.serverDict.setdefault('GlobalMembers', {})
    userList.setdefault(str(user.id), {})[stat] = value
# Increment a member stat by a provided amount; missing stats start at 0.
# Returns the stat post-increment.
def incrementStat(self, user, server, stat, incrementAmount):
    """Add *incrementAmount* to a member stat and return the new value."""
    self.checkUser(user, server)
    member_entry = self.serverDict["Servers"][str(server.id)]["Members"][str(user.id)]
    updated = member_entry.get(stat, 0) + incrementAmount
    member_entry[stat] = updated
    return updated
# Get the requested per-server stat
def getServerStat(self, server, stat, default = None):
    """Fetch a per-server setting, creating the server entry first."""
    self.checkServer(server)
    entry = self.serverDict["Servers"].get(str(server.id), {})
    return entry.get(stat, default)
# Set the provided per-server stat
def setServerStat(self, server, stat, value):
    """Set a per-server setting, creating the server entry first."""
    self.checkServer(server)
    entry = self.serverDict["Servers"][str(server.id)]
    entry[stat] = value
@commands.command(pass_context=True)
async def dumpsettings(self, ctx):
    """Sends the Settings.json file to the owner."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    # Only allow owner
    isOwner = self.isOwner(ctx.author)
    if isOwner == None:
        # Bot is unclaimed - nobody may use this yet
        msg = 'I have not been claimed, *yet*.'
        await ctx.channel.send(msg)
        return
    elif isOwner == False:
        msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
        await ctx.channel.send(msg)
        return
    # DM the settings file to the invoking owner
    message = await ctx.message.author.send('Uploading *Settings.json*...')
    await ctx.message.author.send(file=discord.File('Settings.json'))
    await message.edit(content='Uploaded *Settings.json!*')
@commands.command(pass_context=True)
async def ownerlock(self, ctx):
    """Locks/unlocks the bot to only respond to the owner."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    # Only allow owner
    isOwner = self.isOwner(ctx.author)
    if isOwner == None:
        msg = 'I have not been claimed, *yet*.'
        await ctx.channel.send(msg)
        return
    elif isOwner == False:
        msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
        await ctx.channel.send(msg)
        return
    # We have an owner - and the owner is talking to us
    # Let's try and get the OwnerLock setting and toggle it
    try:
        ownerLock = self.serverDict['OwnerLock']
    except KeyError:
        ownerLock = False
    # OwnerLock defaults to "No"
    if not ownerLock:
        self.serverDict['OwnerLock'] = True
        msg = 'Owner lock **Enabled**.'
        # Advertise the lock via the bot's presence
        await self.bot.change_presence(activity=discord.Activity(name="OwnerLocked", type=0))
        # await self.bot.change_presence(game=discord.Game(name="OwnerLocked"))
    else:
        self.serverDict['OwnerLock'] = False
        msg = 'Owner lock **Disabled**.'
        '''if self.serverDict["Game"]:
            # Reset the game if there was one
            await self.bot.change_presence(game=discord.Game(name=self.serverDict["Game"]))
        else:
            # Set to nothing - no game prior
            await self.bot.change_presence(game=None)'''
        # Restore the previously configured presence from global settings
        await self.bot.change_presence(activity=discord.Activity(status=self.serverDict.get("Status", None), name=self.serverDict.get("Game", None), url=self.serverDict.get("Stream", None), type=self.serverDict.get("Type", 0)))
    await channel.send(msg)
@commands.command(pass_context=True)
async def owners(self, ctx):
    """Lists the bot's current owners."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    ownerList = self.getOwners()
    if not len(ownerList):
        # No owners.
        msg = 'I have not been claimed, *yet*.'
    else:
        msg = 'I am owned by '
        userList = []
        for owner in ownerList:
            # Get the owner's name
            user = self.bot.get_user(int(owner))
            if not user:
                # Owner id no longer resolves to a cached user
                userString = "*Unknown User ({})*".format(owner)
            else:
                userString = "*{}#{}*".format(user.name, user.discriminator)
            userList.append(userString)
        msg += ', '.join(userList)
    await channel.send(msg)
@commands.command(pass_context=True)
async def claim(self, ctx):
    """Claims the bot if disowned - once set, can only be changed by the current owner."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    member = author
    # isOwner semantics: True = caller owns, False = owned by someone else, None = unclaimed
    owned = self.isOwner(ctx.author)
    if owned:
        # We're an owner
        msg = "You're already one of my owners."
    elif owned == False:
        # We're not an owner
        msg = "I've already been claimed."
    else:
        # Unclaimed - caller becomes the sole owner
        self.serverDict['Owner'] = [ctx.author.id]
        msg = 'I have been claimed by *{}!*'.format(DisplayName.name(member))
    await channel.send(msg)
@commands.command(pass_context=True)
async def addowner(self, ctx, *, member : str = None):
    """Adds an owner to the owner list. Can only be done by a current owner."""
    owned = self.isOwner(ctx.author)
    if owned == False:
        msg = "Only an existing owner can add more owners."
        await ctx.channel.send(msg)
        return
    if member == None:
        member = ctx.author
    if type(member) is str:
        # Resolve the name/mention string to an actual Member
        memberCheck = DisplayName.memberForName(member, ctx.guild)
        if memberCheck:
            member = memberCheck
        else:
            msg = 'I couldn\'t find that user...'
            await ctx.channel.send(msg)
            return
    if member.bot:
        # Bots cannot own the bot
        msg = "I can't be owned by other bots. I don't roll that way."
        await ctx.channel.send(msg)
        return
    owners = self.getOwners()
    if member.id in owners:
        # Already an owner
        msg = "Don't get greedy now - *{}* is already an owner.".format(DisplayName.name(member))
    else:
        owners.append(member.id)
        self.serverDict['Owner'] = owners
        msg = '*{}* has been added to my owner list!'.format(DisplayName.name(member))
    await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def remowner(self, ctx, *, member : str = None):
    """Removes an owner from the owner list. Can only be done by a current owner."""
    owned = self.isOwner(ctx.author)
    if owned == False:
        msg = "Only an existing owner can remove owners."
        await ctx.channel.send(msg)
        return
    if member == None:
        member = ctx.author
    if type(member) is str:
        # Resolve the name/mention string to an actual Member
        memberCheck = DisplayName.memberForName(member, ctx.guild)
        if memberCheck:
            member = memberCheck
        else:
            msg = 'I couldn\'t find that user...'
            await ctx.channel.send(msg)
            return
    owners = self.getOwners()
    if member.id in owners:
        # Found - drop them from the owner list
        msg = "*{}* is no longer an owner.".format(DisplayName.name(member))
        owners.remove(member.id)
        self.serverDict['Owner'] = owners
    else:
        msg = "*{}* can't be removed because they're not one of my owners.".format(DisplayName.name(member))
    if not len(self.serverDict['Owner']):
        # No more owners - bot is now unclaimed
        msg += " I have been disowned!"
    await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def disown(self, ctx):
    """Revokes all ownership of the bot."""
    owned = self.isOwner(ctx.author)
    if owned == False:
        msg = "Only an existing owner can revoke ownership."
        await ctx.channel.send(msg)
        return
    elif owned == None:
        # No owners
        msg = 'I have already been disowned...'
        await ctx.channel.send(msg)
        return
    # Caller is an owner - clear the entire owner list
    self.serverDict['Owner'] = []
    msg = 'I have been disowned!'
    await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def getstat(self, ctx, stat : str = None, member : discord.Member = None):
    """Gets the value for a specific stat for the listed member (case-sensitive)."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    if member == None:
        member = author
    # Bug fix: the original tested ``if str == None`` - comparing the
    # *builtin* str type to None, which is always False - so a missing
    # stat argument was never caught.  Test the actual argument.
    if stat is None:
        msg = 'Usage: `{}getstat [stat] [member]`'.format(ctx.prefix)
        await channel.send(msg)
        return
    if type(member) is str:
        # Converter fell through to a raw name - resolve it ourselves
        try:
            member = discord.utils.get(server.members, name=member)
        except Exception:
            print("That member does not exist")
            return
    if member is None:
        msg = 'Usage: `{}getstat [stat] [member]`'.format(ctx.prefix)
        await channel.send(msg)
        return
    try:
        newStat = self.getUserStat(member, server, stat)
    except KeyError:
        msg = '"{}" is not a valid stat for *{}*'.format(stat, DisplayName.name(member))
        await channel.send(msg)
        return
    msg = '**{}** for *{}* is *{}!*'.format(stat, DisplayName.name(member), newStat)
    await channel.send(msg)
@commands.command(pass_context=True)
async def setsstat(self, ctx, stat : str = None, value : str = None):
    """Sets a server stat (admin only)."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    isAdmin = Utils.is_admin(ctx)
    # Only allow admins to change server stats
    if not isAdmin:
        await channel.send('You do not have sufficient privileges to access this command.')
        return
    # Both a stat name and a value are required
    if stat == None or value == None:
        msg = 'Usage: `{}setsstat Stat Value`'.format(ctx.prefix)
        await channel.send(msg)
        return
    self.setServerStat(server, stat, value)
    msg = '**{}** set to *{}!*'.format(stat, value)
    await channel.send(msg)
@commands.command(pass_context=True)
async def getsstat(self, ctx, stat : str = None):
    """Gets a server stat (admin only)."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    isAdmin = Utils.is_admin(ctx)
    # Only allow admins to read server stats
    if not isAdmin:
        await ctx.channel.send('You do not have sufficient privileges to access this command.')
        return
    if stat == None:
        msg = 'Usage: `{}getsstat [stat]`'.format(ctx.prefix)
        await ctx.channel.send(msg)
        return
    value = self.getServerStat(server, stat)
    msg = '**{}** is currently *{}!*'.format(stat, value)
    await channel.send(msg)
@commands.command(pass_context=True)
async def flush(self, ctx):
    """Flush the bot settings to disk (admin only)."""
    # Only allow owner
    isOwner = self.isOwner(ctx.author)
    if isOwner == None:
        msg = 'I have not been claimed, *yet*.'
        await ctx.channel.send(msg)
        return
    elif isOwner == False:
        msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
        await ctx.channel.send(msg)
        return
    # Flush settings
    message = await ctx.send("Flushing settings to disk...")
    # Run the blocking flush off the event loop.  (Removed the unused
    # ``l = asyncio.get_event_loop()`` local from the original.)
    await self.bot.loop.run_in_executor(None, self.flushSettings, self.file, True)
    msg = 'Flushed settings to disk.'
    await message.edit(content=msg)
# Periodic database flush loop
async def flushLoopDB(self):
    """Every ``databaseDump`` seconds, flush settings to the database.

    No-op when the bot isn't backed by a database.  (Removed the unused
    ``l = asyncio.get_event_loop()`` local from the original.)
    """
    if not self.using_db:
        return
    print('Starting flush loop for database - runs every {} seconds.'.format(self.databaseDump))
    while not self.bot.is_closed():
        await asyncio.sleep(self.databaseDump)
        # Blocking flush runs in an executor so the event loop stays free
        await self.bot.loop.run_in_executor(None, self.flushSettings)
# Periodic JSON flush loop
async def flushLoop(self):
    """Every ``settingsDump`` seconds, flush settings to the JSON file.

    (Removed the unused ``l = asyncio.get_event_loop()`` local from the
    original.)
    """
    print('Starting flush loop - runs every {} seconds.'.format(self.settingsDump))
    while not self.bot.is_closed():
        await asyncio.sleep(self.settingsDump)
        # Blocking flush runs in an executor so the event loop stays free
        await self.bot.loop.run_in_executor(None, self.flushSettings, self.file)
# Flush settings to disk
def flushSettings(self, _file = None, both = False):
    """Persist serverDict to the JSON file and/or the Mongo database.

    _file: JSON path to write (None = DB only, when using a database).
    both:  when True and a database is in use, write both targets.

    A crude spinlock (``flush_lock``) serializes concurrent flushes; a
    caller that finds the lock held waits up to ~300s for it to clear.
    """
    if self.flush_lock:
        print("Flush locked")
        l = 0
        while True:
            l += 1
            # Let's loop (up to 100 times) until the settings have been flushed
            # to ensure that all commands calling this return
            # properly when flushing settings
            if not self.flush_lock:
                # unlocked - return
                return
            # Still locked - make sure we're not over 100, and then sleep for 3 seconds
            if l > 100:
                # Give up and force-release the lock
                self.flush_lock = False
                return
            time.sleep(3)
    try:
        # Lock the settings
        self.flush_lock = True
        def flush_db():
            # Write the "Global" (non-server) keys, then one collection per server
            global_collection = self.db.get_collection("Global").find_one()
            old_data = copy.deepcopy(global_collection)
            for key, value in self.serverDict.items():
                if key == "Servers":
                    continue
                if not global_collection:
                    # No Global doc yet - seed it and bail (first-run path)
                    self.db["Global"].insert_one({key:value})
                    self.flush_lock = False
                    return
                global_collection[key] = value
            self.db["Global"].replace_one(old_data, global_collection)
            for key, value in self.serverDict["Servers"].items():
                collection = self.db.get_collection(key).find_one()
                if not collection:
                    self.db[key].insert_one(value)
                else:
                    # Replace the whole collection with the current snapshot
                    new_data = self.serverDict["Servers"][key]
                    self.db[key].delete_many({})
                    self.db[key].insert_one(new_data)
        if not _file:
            if not self.using_db:
                # Not using a database, so we can't flush ;)
                self.flush_lock = False
                return
            # We *are* using a database, let's flush
            flush_db()
            print("Flushed to DB!")
        elif (both or not self.using_db) and _file:
            if os.path.exists(_file):
                # Delete file - then flush new settings
                os.remove(_file)
            # Get a pymongo object out of the dict
            json_ready = self.serverDict
            json_ready.pop("_id", None)
            json_ready["mongodb_migrated"] = True
            json.dump(json_ready, open(_file, 'w'), indent=2)
            # Not using a database, so we can't flush ;)
            if not self.using_db:
                print("Flushed to {}!".format(_file))
                self.flush_lock = False
                return
            # We *are* using a database, let's flush!
            flush_db()
            print("Flushed to DB and {}!".format(_file))
    except Exception as e:
        # Something terrible happened - let's make sure our file is unlocked
        print("Error flushing settings:\n"+str(e))
        pass
    self.flush_lock = False
@commands.command(pass_context=True)
async def prunelocalsettings(self, ctx):
    """Compares the current server's settings to the default list and removes any non-standard settings (owner only)."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    # Only allow owner
    isOwner = self.isOwner(ctx.author)
    if isOwner == None:
        msg = 'I have not been claimed, *yet*.'
        await ctx.channel.send(msg)
        return
    elif isOwner == False:
        msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
        await ctx.channel.send(msg)
        return
    message = await ctx.send("Pruning local settings...")
    removedSettings = 0
    settingsWord = "settings"
    if str(server.id) in self.serverDict["Servers"]:
        removeKeys = []
        # Collect stale keys first so we don't mutate while iterating
        for key in self.serverDict["Servers"][str(server.id)]:
            if not key in self.defaultServer:
                # Key isn't in default list - clear it
                removeKeys.append(key)
                removedSettings += 1
        for key in removeKeys:
            self.serverDict["Servers"][str(server.id)].pop(key, None)
    if removedSettings == 1:
        settingsWord = "setting"
    await message.edit(content="Flushing settings to disk...", embed=None)
    # Actually flush settings asynchronously here
    l = asyncio.get_event_loop()
    await self.bot.loop.run_in_executor(None, self.flushSettings, self.file, True)
    msg = 'Pruned *{} {}*.'.format(removedSettings, settingsWord)
    await message.edit(content=msg, embed=None)
def _prune_servers(self):
# Remove any orphaned servers
removed = 0
servers = []
for server in self.serverDict["Servers"]:
# Check if the bot is still connected to the server
g_check = self.bot.get_guild(int(server))
if not g_check:
servers.append(server)
for server in servers:
self.serverDict["Servers"].pop(server, None)
removed += 1
return removed
def _prune_users(self):
# Remove any orphaned servers
removed = 0
for server in self.serverDict["Servers"]:
# Check if the bot is still connected to the server
g_check = self.bot.get_guild(int(server))
if not g_check:
# Skip
continue
mems = []
for mem in self.serverDict["Servers"][server]["Members"]:
m_check = g_check.get_member(int(mem))
if not m_check:
mems.append(mem)
for mem in mems:
self.serverDict["Servers"][server]["Members"].pop(mem, None)
removed += 1
return removed
'''def _prune_channels(self):
# Remove orphaned MOTD settings
removed = 0
for server in self.serverDict["Servers"]:
# Check if the bot is still connected to the server
g_check = self.bot.get_guild(int(server))
if not g_check:
# Skip
continue
chans = []
for chan in self.serverDict["Servers"][server]["ChannelMOTD"]:
c_check = g_check.get_channel(int(chan))
if not c_check:
chans.append(chan)
for chan in chans:
self.serverDict["Servers"][server]["ChannelMOTD"].pop(chan, None)
removed += 1
return removed'''
def _prune_settings(self):
# Remove orphaned settings
removed = 0
for server in self.serverDict["Servers"]:
# Check if the bot is still connected to the server
g_check = self.bot.get_guild(int(server))
if not g_check:
# Skip
continue
keys = []
for key in self.serverDict["Servers"][server]:
if not key in self.defaultServer:
keys.append(key)
for key in keys:
self.serverDict["Servers"][server].pop(key, None)
removed += 1
return removed
@commands.command(pass_context=True)
async def prunesettings(self, ctx):
    """Compares all connected servers' settings to the default list and removes any non-standard settings (owner only)."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    # Only allow owner
    isOwner = self.isOwner(ctx.author)
    if isOwner == None:
        msg = 'I have not been claimed, *yet*.'
        await ctx.channel.send(msg)
        return
    elif isOwner == False:
        msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
        await ctx.channel.send(msg)
        return
    removedSettings = 0
    settingsWord = "settings"
    message = await ctx.send("Pruning settings...")
    for serv in self.serverDict["Servers"]:
        # Found it - let's check settings
        removeKeys = []
        # Collect stale keys first so we don't mutate while iterating
        for key in self.serverDict["Servers"][serv]:
            if not key in self.defaultServer:
                # Name/ID are kept even though they're not in the defaults
                if key == "Name" or key == "ID":
                    continue
                # Key isn't in default list - clear it
                removeKeys.append(key)
                removedSettings += 1
        for key in removeKeys:
            self.serverDict["Servers"][serv].pop(key, None)
    if removedSettings == 1:
        settingsWord = "setting"
    await message.edit(content="Flushing settings to disk...")
    # Actually flush settings asynchronously here
    l = asyncio.get_event_loop()
    await self.bot.loop.run_in_executor(None, self.flushSettings, self.file, True)
    msg = 'Pruned *{} {}*.'.format(removedSettings, settingsWord)
    await message.edit(content=msg)
@commands.command(pass_context=True)
async def prune(self, ctx):
    """Iterate through all members on all connected servers and remove orphaned settings (owner only)."""
    author = ctx.message.author
    server = ctx.message.guild
    channel = ctx.message.channel
    # Only allow owner
    isOwner = self.isOwner(ctx.author)
    if isOwner == None:
        msg = 'I have not been claimed, *yet*.'
        await ctx.channel.send(msg)
        return
    elif isOwner == False:
        msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
        await ctx.channel.send(msg)
        return
    message = await ctx.send("Pruning all orphaned members and settings...")
    # Run every pruning pass and collect counts for the summary
    ser = self._prune_servers()
    sst = self._prune_settings()
    mem = self._prune_users()
    #cha = self._prune_channels()
    glo = self.checkGlobalUsers()
    # Singular/plural wording for each count
    ser_str = "servers"
    sst_str = "settings"
    mem_str = "members"
    #cha_str = "channels"
    glo_str = "global users"
    if ser == 1:
        ser_str = "server"
    if sst == 1:
        sst_str = "setting"
    if mem == 1:
        mem_str = "member"
    #if cha == 1:
    #    cha_str = "channel"
    if glo == 1:
        glo_str = "global user"
    await message.edit(content="Flushing settings to disk...")
    # Actually flush settings asynchronously here
    l = asyncio.get_event_loop()
    await self.bot.loop.run_in_executor(None, self.flushSettings, self.file, True)
    msg = 'Pruned *{} {}*, *{} {}*, *{} {}*, and *{} {}*.'.format(ser, ser_str, sst, sst_str, mem, mem_str, glo, glo_str)
    await message.edit(content=msg)
| {
"content_hash": "e6833ff809d748c73d4790b0d10a1c08",
"timestamp": "",
"source": "github",
"line_count": 1435,
"max_line_length": 222,
"avg_line_length": 34.44250871080139,
"alnum_prop": 0.6445928174001012,
"repo_name": "corpnewt/CorpBot.py",
"id": "32fd72fd130a2d3cf9f6ed8fa14a1b1b6f69b2c1",
"size": "49425",
"binary": false,
"copies": "1",
"ref": "refs/heads/rewrite",
"path": "Cogs/Settings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "21938"
},
{
"name": "Python",
"bytes": "1371709"
},
{
"name": "Shell",
"bytes": "1598"
}
],
"symlink_target": ""
} |
import numpy as np
# from autograd import grad
# from numpy import linalg as la
from sklearn import linear_model
import scipy as sp
import scipy.optimize
import os
from nltk.tokenize import RegexpTokenizer
import re
from collections import Counter
from gensim.models import word2vec
import sys
w2vDir = '/fs/clip-scratch/shing/output/sgWordPhrase'
class SentimentRetrofit(object):
def __init__(self, vectors=None, vocab=None, dim=50, lambDa=0.05):
    """Sentiment-retrofitting model state.

    vectors: pre-trained word embeddings (word -> vector) or None to
             start from random vectors.
    vocab:   vocabulary restricting which tokens are used, or None.
    dim:     embedding dimensionality.
    lambDa:  regularization weight for the retrofitting objective.
    """
    self.vectors = vectors
    self.vocab = vocab
    self.dim = dim
    self.lambDa = lambDa
    # Bag-of-words counts ({word_index: freq}) accumulated per polarity
    self.documentDictPos = Counter()
    self.documentDictNeg = Counter()
    self.lin_clf = linear_model.LogisticRegression()
    # Per-document averaged vectors for positive/negative reviews
    self.posX = []
    self.negX = []
    # Vocabulary bookkeeping: known tokens and token -> index mapping
    self.wordSet = set()
    self.word2indx = {}
    self.wordNum = 0
    # Tokenizer keeps word characters plus apostrophes and hyphens
    self.tokenizer = RegexpTokenizer(r"[\w'-]+")
def loadVocab(self, fname):
    """
    Load the IMDB vocabulary file (one token per line) into wordSet.
    Tokens are kept only if non-empty, not already seen, and - when
    pre-trained vectors were supplied - present in their vocab.
    """
    print 'loading imdb vocab...'
    with open(fname, 'r') as vocabFile:
        for line in vocabFile:
            token = line.strip(' \n')
            if len(token) > 0 and token not in self.wordSet and (self.vocab is None or token in self.vocab):
                self.wordSet.add(token)
def buildVocab(self):
    """Build self.originalVec, an (n_words x dim) matrix whose rows line
    up with word2indx indices: random vectors when no pre-trained
    embeddings exist, otherwise each word's pre-trained vector.

    NOTE(review): makeRandVector is defined outside this chunk - confirm
    its distribution before relying on scale.
    """
    if self.vectors is None:
        # self.originalVec = np.zeros((len(self.word2indx), self.dim))
        self.originalVec = []
        for indx in range(len(self.word2indx)):
            vec = self.makeRandVector(self.dim)
            self.originalVec = np.append(self.originalVec, vec)
        self.originalVec = self.originalVec.reshape((len(self.word2indx), self.dim))
    else:
        # Invert word2indx so row i holds the vector for word index i
        indx2word = {self.word2indx[word]: word for word in self.word2indx}
        self.originalVec = np.zeros(0)
        for indx in range(len(self.word2indx)):
            word = indx2word[indx]
            vec = self.vectors[word]
            self.originalVec = np.append(self.originalVec, vec)
        self.originalVec = self.originalVec.reshape((len(self.word2indx), self.dim))
    print 'original vec is of dimension:', self.originalVec.shape
def loadDocument(self, directory, polarity):
print 'loading document at ' + directory
for idx, filename in enumerate(os.listdir(directory)):
if idx > 100:
break
if filename.split('.')[-1] == "txt":
# {word_index: freq}
with open(directory + filename, 'r') as file:
line = file.read()
bow = self.convertDocument2Bow(line)
if self.vectors:
vec = self.convertDocument2Vec(line)
if polarity == 'pos':
self.documentDictPos.update(bow)
if self.vectors:
self.posX.append(vec)
else:
self.documentDictNeg.update(bow)
if self.vectors:
self.negX.append(vec)
def convertDocument2Bow(self, line):
tokenList = self.tokenizer.tokenize(line.lower())
bow = Counter()
for token in tokenList:
if token in self.word2indx:
bow[self.word2indx[token]] += 1
elif token in self.wordSet:
self.word2indx[token] = self.wordNum
self.wordNum += 1
return bow
def convertDocument2Vec(self, line):
tokenList = self.tokenizer.tokenize(line.lower())
bow = Counter()
for token in tokenList:
if token in self.vocab:
bow[token] += 1.0
vec = np.zeros(self.dim)
for word in bow:
vec += self.vectors[word] * bow[word]
vec = self.normalize(vec)
return vec
def generateSample(self):
print 'generating samples...'
self.x = []
self.y = []
for pos, neg in zip(self.posX, self.negX):
self.y.append(1)
self.x.append(pos)
self.y.append(0)
self.x.append(neg)
def train(self):
print 'training...'
self.lin_clf.fit(self.x, self.y)
def regresserParam(self):
self.generateSample()
self.train()
return np.append(self.lin_clf.coef_[0], [1.0])
def initalVal(self):
smallRand = []
for indx in range(len(self.word2indx)):
vec = self.makeSmallRandVector(self.dim)
smallRand = np.append(smallRand, vec)
if not self.vectors:
initialVec = self.makeRandVector(self.dim + 1)
# for indx in range(len(self.word2indx)):
# vec = self.makeRandVector(self.dim)
# initialVec = np.append(initialVec, vec)
initialVec = np.append(initialVec, self.originalVec.reshape(len(self.word2indx)*self.dim)+smallRand)
else:
initialVec = self.regresserParam()
vec = self.originalVec.reshape(len(self.word2indx)*self.dim) + smallRand
initialVec = np.append(initialVec, vec)
return initialVec
def makeRandVector(self, dims):
mu, sigma = 0, 1
vec = np.random.normal(mu, sigma, dims)
return self.normalize(vec)
def makeSmallRandVector(self, dims):
mu, sigma = 0, 1
vec = np.random.normal(mu, sigma, dims)
return self.normalize(vec) * 0.1
def normalize(self, v):
norm = np.linalg.norm(v)
if norm == 0:
return v
return v/norm
def objectiveSentimentRetrofit(self, param):
phi = param[:self.dim + 1]
retroVec = param[self.dim + 1:].reshape((len(self.word2indx), self.dim))
# {name: (pos or neg, {word_index: freq)}
score = 0.0
bow = self.documentDictPos
for wordId in bow:
score += np.log(1.0 + np.exp(-np.dot(phi[:-1], retroVec[wordId]) - phi[-1])) * bow[wordId]
bow = self.documentDictNeg
for wordId in bow:
score += np.log(1.0 + np.exp(np.dot(phi[:-1], retroVec[wordId]) + phi[-1])) * bow[wordId]
score += self.lambDa * np.linalg.norm(retroVec - self.originalVec)**2
return score
def word2grad(self, size, phi, vec, wordId):
grad = np.zeros(size)
np.put(grad, np.arange(vec.size), vec)
np.put(grad, [vec.size], [1.0])
start = self.dim + 1 + wordId * self.dim
np.put(grad, np.arange(start, start + self.dim), phi[:-1])
return grad
def gradient(self, param):
phi = param[:self.dim + 1]
retroVec = param[self.dim + 1:].reshape((len(self.word2indx), self.dim))
grad = np.zeros(param.size)
bow = self.documentDictPos
for wordId in bow:
grad += -self.word2grad(param.size, phi, retroVec[wordId], wordId) / (1.0 + np.exp(np.dot(phi[:-1], retroVec[wordId]) + phi[-1])) * bow[wordId]
bow = self.documentDictNeg
for wordId in bow:
grad += self.word2grad(param.size, phi, retroVec[wordId], wordId) / (1.0 + np.exp(-np.dot(phi[:-1], retroVec[wordId]) - phi[-1])) * bow[wordId]
grad += 2 * self.lambDa * np.append(np.zeros(self.dim + 1), (retroVec - self.originalVec).reshape(len(self.word2indx)*self.dim))
return grad
def minimize(self):
print 'Start minimization...'
self.optimLBFGS = sp.optimize.fmin_l_bfgs_b(self.objectiveSentimentRetrofit,
x0=self.initalVal(),
fprime=self.gradient,
pgtol=5e-2, disp=True, maxiter=1000)
print 'minimization done.'
newVec = self.optimLBFGS[0][self.dim + 1:].reshape((len(self.word2indx), self.dim))
self.newVectors = {}
indx2word = {self.word2indx[word]: word for word in self.word2indx}
for indx in range(len(self.word2indx)):
word = indx2word[indx]
if word not in self.newVectors:
self.newVectors[word] = newVec[indx]
print self.optimLBFGS[1:]
def writeWordVectors(self, outputFileOld, outputFileNew):
print 'writing to file...'
indx2word = {self.word2indx[word]: word for word in self.word2indx}
with open(outputFileNew, 'w') as output:
for index in range(len(indx2word)):
vocab = indx2word[index]
output.write(vocab)
npVec = self.newVectors[vocab]
vecStr = np.array2string(npVec, max_line_width='infty', precision=8)
vecStr = vecStr.replace('[', ' ')
vecStr = re.sub(r' +', ' ', vecStr)
output.write(vecStr[:-1])
output.write('\n')
with open(outputFileOld, 'w') as output:
for index in range(len(indx2word)):
vocab = indx2word[index]
output.write(vocab)
npVec = self.originalVec[index]
vecStr = np.array2string(npVec, max_line_width='infty', precision=8)
vecStr = vecStr.replace('[', ' ')
vecStr = re.sub(r' +', ' ', vecStr)
output.write(vecStr[:-1])
output.write('\n')
def debug(self):
for word in self.word2indx:
print word
def checkGrad(self):
print 'start checking'
initialVec = self.initalVal()
print 'initialized', initialVec
print scipy.optimize.check_grad(func=self.objectiveSentimentRetrofit, grad=self.gradient, x0=initialVec)
if __name__ == '__main__':
    # Load the pre-trained skip-gram model and derive its vocab/dimension.
    sys.stderr.write('Reading vectors from file...\n')
    model = word2vec.Word2Vec.load(w2vDir)
    # Python 2 / pre-1.0 gensim API: probe any vocab entry for vector length.
    vectorDim = len(model[model.vocab.iterkeys().next()])
    wordVectors = model
    sys.stderr.write('Loaded vectors from file...\n')
    vocab = {word: model.vocab[word].index for word in model.vocab}
    sys.stderr.write('Finished reading vectors.\n')
    retrofitter = SentimentRetrofit(vectors=wordVectors, vocab=vocab, dim=vectorDim)
    # retrofitter = SentimentRetrofit()
    retrofitter.loadVocab('./aclImdb/imdb.vocab')
    retrofitter.loadDocument('./aclImdb/train/pos/', 'pos')
    retrofitter.loadDocument('./aclImdb/train/neg/', 'neg')
    retrofitter.buildVocab()
    # retrofitter.debug()
    # retrofitter.checkGrad()
    retrofitter.minimize()
    retrofitter.writeWordVectors('./output/sgOld.txt', './output/sgNew.txt')
    # retrofitter.loadVocab('./aclImdb/imdbTest.vocab')
    # retrofitter.loadDocument('./aclImdb/train/testRunPos/', 'pos')
    # retrofitter.loadDocument('./aclImdb/train/testRunNeg/', 'neg')
    # retrofitter.minimize()
    # retrofitter.writeWordVectors('./output/sentimentVec.txt')
| {
"content_hash": "11d576eeb46948e0f2f880b15d378d24",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 155,
"avg_line_length": 39.35379061371841,
"alnum_prop": 0.5697642418126777,
"repo_name": "sidenver/ConstituentRetrofit",
"id": "ba88d14ae4fa8fb5d8a93190c460f391e1ed27c2",
"size": "10927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentiment_retrofit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "153273"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
} |
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """Compare a workbook produced by XlsxWriter against one saved by Excel."""

    def setUp(self):
        # Show full diffs when the comparison assertion fails.
        self.maxDiff = None

        base_dir = 'xlsxwriter/test/comparison/'
        name = 'comment10.xlsx'
        self.got_filename = base_dir + '_test_' + name
        self.exp_filename = base_dir + 'xlsx_files/' + name

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Write a colored, authored cell comment and compare the output."""
        workbook = Workbook(self.got_filename)
        sheet = workbook.add_worksheet()

        sheet.write('A1', 'Foo')
        sheet.write_comment('B2', 'Some text', {'color': '#98fe97'})
        sheet.set_comments_author('John')

        workbook.close()
        self.assertExcelEqual()
| {
"content_hash": "003916d2c8d23810e683ca79604e9465",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 26.243243243243242,
"alnum_prop": 0.6292481977342945,
"repo_name": "seize-the-dave/XlsxWriter",
"id": "3147c24078f4a82f249ae9b947f6825ac6e71cfe",
"size": "1144",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/comparison/test_comment10.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7453"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2343625"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
} |
from .mixins import ResourceMixin
class ProductStatus(object):
    """Lightweight wrapper around a product-status API payload.

    Every payload field is attached verbatim as an instance attribute.
    """

    def __init__(self, **kwargs):
        # Mirror all payload fields onto the instance.
        self.__dict__.update(**kwargs)

    def __repr__(self):
        return '<ProductStatus %s: %s>' % (self.productId, self.title)

    def get_internal_id(self):
        """Return the bare id: the last colon-separated piece of productId."""
        return self.productId.split(':')[-1]

    def has_issue_level(self, level):
        """True when any data-quality issue carries the given severity."""
        issues = getattr(self, 'dataQualityIssues', [])
        return any(issue['severity'] == level for issue in issues)

    @property
    def should_be_removed(self):
        """Products with at least one critical issue should be dropped."""
        return self.has_issue_level('critical')
class ProductStatusManager(ResourceMixin):
    """Resource manager for Merchant Center product statuses."""

    scope = 'productstatuses'
    resource_class = ProductStatus
    single_resource_id = 'productId'

    def remove_critical_issues(self):
        """Delete every listed product that carries a critical-severity issue."""
        flagged = (status for status in self.list() if status.should_be_removed)
        for status in flagged:
            self.manager.products.delete(status.get_internal_id())
| {
"content_hash": "f87ae6df9a52a967004cdb06d98e6c8f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 57,
"avg_line_length": 26.906976744186046,
"alnum_prop": 0.5773552290406223,
"repo_name": "geeknam/python-google-shopping",
"id": "6cf6cb814df8a3989bc85088e35a69f93e44698b",
"size": "1157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google_shopping/product_statuses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9382"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import os
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from google.analytics.admin_v1alpha.services.analytics_admin_service import pagers
from google.analytics.admin_v1alpha.types import access_report, analytics_admin
from google.analytics.admin_v1alpha.types import audience
from google.analytics.admin_v1alpha.types import audience as gaa_audience
from google.analytics.admin_v1alpha.types import resources
from .transports.base import DEFAULT_CLIENT_INFO, AnalyticsAdminServiceTransport
from .transports.grpc import AnalyticsAdminServiceGrpcTransport
from .transports.grpc_asyncio import AnalyticsAdminServiceGrpcAsyncIOTransport
from .transports.rest import AnalyticsAdminServiceRestTransport
class AnalyticsAdminServiceClientMeta(type):
    """Metaclass for the AnalyticsAdminService client.

    Provides class-level helpers for selecting a transport class without
    polluting instances of the client itself.
    """

    # Registry of transport implementations, keyed by label; insertion
    # order determines the default transport.
    _transport_registry = OrderedDict()  # type: Dict[str, Type[AnalyticsAdminServiceTransport]]
    _transport_registry["grpc"] = AnalyticsAdminServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = AnalyticsAdminServiceGrpcAsyncIOTransport
    _transport_registry["rest"] = AnalyticsAdminServiceRestTransport

    def get_transport_class(
        cls,
        label: Optional[str] = None,
    ) -> Type[AnalyticsAdminServiceTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: The name of the desired transport.  When no label is
                given, the first transport in the registry is returned.

        Returns:
            The transport class to use.
        """
        if label:
            return cls._transport_registry[label]
        # No label: fall back to the first entry of the ordered registry.
        return next(iter(cls._transport_registry.values()))
class AnalyticsAdminServiceClient(metaclass=AnalyticsAdminServiceClientMeta):
"""Service Interface for the Analytics Admin API (GA4)."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "analyticsadmin.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AnalyticsAdminServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AnalyticsAdminServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AnalyticsAdminServiceTransport:
"""Returns the transport used by the client instance.
Returns:
AnalyticsAdminServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def account_path(
account: str,
) -> str:
"""Returns a fully-qualified account string."""
return "accounts/{account}".format(
account=account,
)
@staticmethod
def parse_account_path(path: str) -> Dict[str, str]:
"""Parses a account path into its component segments."""
m = re.match(r"^accounts/(?P<account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def account_summary_path(
account_summary: str,
) -> str:
"""Returns a fully-qualified account_summary string."""
return "accountSummaries/{account_summary}".format(
account_summary=account_summary,
)
@staticmethod
def parse_account_summary_path(path: str) -> Dict[str, str]:
"""Parses a account_summary path into its component segments."""
m = re.match(r"^accountSummaries/(?P<account_summary>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def attribution_settings_path(
property: str,
) -> str:
"""Returns a fully-qualified attribution_settings string."""
return "properties/{property}/attributionSettings".format(
property=property,
)
@staticmethod
def parse_attribution_settings_path(path: str) -> Dict[str, str]:
"""Parses a attribution_settings path into its component segments."""
m = re.match(r"^properties/(?P<property>.+?)/attributionSettings$", path)
return m.groupdict() if m else {}
@staticmethod
def audience_path(
property: str,
audience: str,
) -> str:
"""Returns a fully-qualified audience string."""
return "properties/{property}/audiences/{audience}".format(
property=property,
audience=audience,
)
@staticmethod
def parse_audience_path(path: str) -> Dict[str, str]:
"""Parses a audience path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/audiences/(?P<audience>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def conversion_event_path(
property: str,
conversion_event: str,
) -> str:
"""Returns a fully-qualified conversion_event string."""
return "properties/{property}/conversionEvents/{conversion_event}".format(
property=property,
conversion_event=conversion_event,
)
@staticmethod
def parse_conversion_event_path(path: str) -> Dict[str, str]:
"""Parses a conversion_event path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/conversionEvents/(?P<conversion_event>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def custom_dimension_path(
property: str,
custom_dimension: str,
) -> str:
"""Returns a fully-qualified custom_dimension string."""
return "properties/{property}/customDimensions/{custom_dimension}".format(
property=property,
custom_dimension=custom_dimension,
)
@staticmethod
def parse_custom_dimension_path(path: str) -> Dict[str, str]:
"""Parses a custom_dimension path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/customDimensions/(?P<custom_dimension>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def custom_metric_path(
property: str,
custom_metric: str,
) -> str:
"""Returns a fully-qualified custom_metric string."""
return "properties/{property}/customMetrics/{custom_metric}".format(
property=property,
custom_metric=custom_metric,
)
@staticmethod
def parse_custom_metric_path(path: str) -> Dict[str, str]:
"""Parses a custom_metric path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/customMetrics/(?P<custom_metric>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def data_retention_settings_path(
property: str,
) -> str:
"""Returns a fully-qualified data_retention_settings string."""
return "properties/{property}/dataRetentionSettings".format(
property=property,
)
@staticmethod
def parse_data_retention_settings_path(path: str) -> Dict[str, str]:
"""Parses a data_retention_settings path into its component segments."""
m = re.match(r"^properties/(?P<property>.+?)/dataRetentionSettings$", path)
return m.groupdict() if m else {}
@staticmethod
def data_sharing_settings_path(
account: str,
) -> str:
"""Returns a fully-qualified data_sharing_settings string."""
return "accounts/{account}/dataSharingSettings".format(
account=account,
)
@staticmethod
def parse_data_sharing_settings_path(path: str) -> Dict[str, str]:
"""Parses a data_sharing_settings path into its component segments."""
m = re.match(r"^accounts/(?P<account>.+?)/dataSharingSettings$", path)
return m.groupdict() if m else {}
@staticmethod
def data_stream_path(
property: str,
data_stream: str,
) -> str:
"""Returns a fully-qualified data_stream string."""
return "properties/{property}/dataStreams/{data_stream}".format(
property=property,
data_stream=data_stream,
)
@staticmethod
def parse_data_stream_path(path: str) -> Dict[str, str]:
"""Parses a data_stream path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/dataStreams/(?P<data_stream>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def display_video360_advertiser_link_path(
property: str,
display_video_360_advertiser_link: str,
) -> str:
"""Returns a fully-qualified display_video360_advertiser_link string."""
return "properties/{property}/displayVideo360AdvertiserLinks/{display_video_360_advertiser_link}".format(
property=property,
display_video_360_advertiser_link=display_video_360_advertiser_link,
)
@staticmethod
def parse_display_video360_advertiser_link_path(path: str) -> Dict[str, str]:
"""Parses a display_video360_advertiser_link path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/displayVideo360AdvertiserLinks/(?P<display_video_360_advertiser_link>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def display_video360_advertiser_link_proposal_path(
property: str,
display_video_360_advertiser_link_proposal: str,
) -> str:
"""Returns a fully-qualified display_video360_advertiser_link_proposal string."""
return "properties/{property}/displayVideo360AdvertiserLinkProposals/{display_video_360_advertiser_link_proposal}".format(
property=property,
display_video_360_advertiser_link_proposal=display_video_360_advertiser_link_proposal,
)
@staticmethod
def parse_display_video360_advertiser_link_proposal_path(
path: str,
) -> Dict[str, str]:
"""Parses a display_video360_advertiser_link_proposal path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/displayVideo360AdvertiserLinkProposals/(?P<display_video_360_advertiser_link_proposal>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def firebase_link_path(
property: str,
firebase_link: str,
) -> str:
"""Returns a fully-qualified firebase_link string."""
return "properties/{property}/firebaseLinks/{firebase_link}".format(
property=property,
firebase_link=firebase_link,
)
@staticmethod
def parse_firebase_link_path(path: str) -> Dict[str, str]:
"""Parses a firebase_link path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/firebaseLinks/(?P<firebase_link>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def global_site_tag_path(
property: str,
data_stream: str,
) -> str:
"""Returns a fully-qualified global_site_tag string."""
return "properties/{property}/dataStreams/{data_stream}/globalSiteTag".format(
property=property,
data_stream=data_stream,
)
@staticmethod
def parse_global_site_tag_path(path: str) -> Dict[str, str]:
"""Parses a global_site_tag path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/dataStreams/(?P<data_stream>.+?)/globalSiteTag$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def google_ads_link_path(
property: str,
google_ads_link: str,
) -> str:
"""Returns a fully-qualified google_ads_link string."""
return "properties/{property}/googleAdsLinks/{google_ads_link}".format(
property=property,
google_ads_link=google_ads_link,
)
@staticmethod
def parse_google_ads_link_path(path: str) -> Dict[str, str]:
"""Parses a google_ads_link path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/googleAdsLinks/(?P<google_ads_link>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def google_signals_settings_path(
property: str,
) -> str:
"""Returns a fully-qualified google_signals_settings string."""
return "properties/{property}/googleSignalsSettings".format(
property=property,
)
@staticmethod
def parse_google_signals_settings_path(path: str) -> Dict[str, str]:
"""Parses a google_signals_settings path into its component segments."""
m = re.match(r"^properties/(?P<property>.+?)/googleSignalsSettings$", path)
return m.groupdict() if m else {}
@staticmethod
def measurement_protocol_secret_path(
property: str,
data_stream: str,
measurement_protocol_secret: str,
) -> str:
"""Returns a fully-qualified measurement_protocol_secret string."""
return "properties/{property}/dataStreams/{data_stream}/measurementProtocolSecrets/{measurement_protocol_secret}".format(
property=property,
data_stream=data_stream,
measurement_protocol_secret=measurement_protocol_secret,
)
@staticmethod
def parse_measurement_protocol_secret_path(path: str) -> Dict[str, str]:
"""Parses a measurement_protocol_secret path into its component segments."""
m = re.match(
r"^properties/(?P<property>.+?)/dataStreams/(?P<data_stream>.+?)/measurementProtocolSecrets/(?P<measurement_protocol_secret>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def property_path(
property: str,
) -> str:
"""Returns a fully-qualified property string."""
return "properties/{property}".format(
property=property,
)
@staticmethod
def parse_property_path(path: str) -> Dict[str, str]:
"""Parses a property path into its component segments."""
m = re.match(r"^properties/(?P<property>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def user_link_path(
account: str,
user_link: str,
) -> str:
"""Returns a fully-qualified user_link string."""
return "accounts/{account}/userLinks/{user_link}".format(
account=account,
user_link=user_link,
)
@staticmethod
def parse_user_link_path(path: str) -> Dict[str, str]:
"""Parses a user_link path into its component segments."""
m = re.match(r"^accounts/(?P<account>.+?)/userLinks/(?P<user_link>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
    self,
    *,
    credentials: Optional[ga_credentials.Credentials] = None,
    transport: Optional[Union[str, AnalyticsAdminServiceTransport]] = None,
    client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiates the analytics admin service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, AnalyticsAdminServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
            NOTE: "rest" transport functionality is currently in a
            beta state (preview). We welcome your feedback via an
            issue in this library's source repository.
        client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
            client. It won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # Normalize client_options: accept a plain dict, fall back to defaults
    # when nothing was supplied, and narrow the type for the checks below.
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()
    client_options = cast(client_options_lib.ClientOptions, client_options)

    # Resolve the endpoint and client-cert source from the options plus the
    # GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE env vars.
    api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
        client_options
    )

    # An API key and explicit credentials cannot both be used; check this
    # before any transport is built.
    api_key_value = getattr(client_options, "api_key", None)
    if api_key_value and credentials:
        raise ValueError(
            "client_options.api_key and credentials are mutually exclusive"
        )

    # Save or instantiate the transport.
    # Ordinarily, we provide the transport, but allowing a custom transport
    # instance provides an extensibility point for unusual situations.
    if isinstance(transport, AnalyticsAdminServiceTransport):
        # transport is a AnalyticsAdminServiceTransport instance.
        # A pre-built transport already carries its own credentials and
        # scopes, so conflicting per-client settings are rejected.
        if credentials or client_options.credentials_file or api_key_value:
            raise ValueError(
                "When providing a transport instance, "
                "provide its credentials directly."
            )
        if client_options.scopes:
            raise ValueError(
                "When providing a transport instance, provide its scopes "
                "directly."
            )
        self._transport = transport
    else:
        import google.auth._default  # type: ignore

        # Exchange the API key for credentials when the installed google-auth
        # version supports it (the helper only exists in newer releases).
        if api_key_value and hasattr(
            google.auth._default, "get_api_key_credentials"
        ):
            credentials = google.auth._default.get_api_key_credentials(
                api_key_value
            )

        # `transport` here is a string name (or None); resolve it to a class
        # and build the transport with the settings gathered above.
        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials,
            credentials_file=client_options.credentials_file,
            host=api_endpoint,
            scopes=client_options.scopes,
            client_cert_source_for_mtls=client_cert_source_func,
            quota_project_id=client_options.quota_project_id,
            client_info=client_info,
            always_use_jwt_access=True,
            api_audience=client_options.api_audience,
        )
def get_account(
    self,
    request: Optional[Union[analytics_admin.GetAccountRequest, dict]] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Account:
    r"""Lookup for a single Account.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.GetAccountRequest, dict]):
            The request object. Request message for GetAccount RPC.
        name (str):
            Required. The name of the account to lookup.
            Format: accounts/{account} Example: "accounts/100"
            This corresponds to the ``name`` field on the ``request``
            instance; if ``request`` is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.Account:
            A resource message representing a
            Google Analytics account.
    """
    # A pre-built request and the flattened `name` argument are mutually
    # exclusive; reject the call when both were supplied.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into the proto request type; a request that is
    # already the right proto is used as-is, avoiding a copy.
    if not isinstance(request, analytics_admin.GetAccountRequest):
        request = analytics_admin.GetAccountRequest(request)
    if name is not None:
        request.name = name

    # The transport's wrapped method adds retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.get_account]

    # Route the call by resource name via the request-params header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )

    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def list_accounts(
    self,
    request: Optional[Union[analytics_admin.ListAccountsRequest, dict]] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAccountsPager:
    r"""Returns all accounts accessible by the caller.

    Note that these accounts might not currently have GA4
    properties. Soft-deleted (ie: "trashed") accounts are
    excluded by default. Returns an empty list if no
    relevant accounts are found.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.ListAccountsRequest, dict]):
            The request object. Request message for ListAccounts RPC.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListAccountsPager:
            Iterating over this object will yield results and resolve
            additional pages automatically.
    """
    # Coerce a dict (or None) into the proto request type without copying
    # an already-correct proto.
    if not isinstance(request, analytics_admin.ListAccountsRequest):
        request = analytics_admin.ListAccountsRequest(request)

    # The transport's wrapped method adds retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.list_accounts]

    # Fetch the first page, then hand the RPC to a pager so iteration
    # resolves subsequent pages transparently.
    first_page = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return pagers.ListAccountsPager(
        method=wrapped_rpc,
        request=request,
        response=first_page,
        metadata=metadata,
    )
def delete_account(
    self,
    request: Optional[Union[analytics_admin.DeleteAccountRequest, dict]] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Marks target Account as soft-deleted (ie: "trashed") and returns it.

    This API does not have a method to restore soft-deleted accounts.
    However, they can be restored using the Trash Can UI.
    If the accounts are not restored before the expiration time, the
    account and all child resources (eg: Properties, GoogleAdsLinks,
    Streams, UserLinks) will be permanently purged.
    https://support.google.com/analytics/answer/6154772

    Returns an error if the target is not found.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.DeleteAccountRequest, dict]):
            The request object. Request message for DeleteAccount RPC.
        name (str):
            Required. The name of the Account to soft-delete.
            Format: accounts/{account} Example: "accounts/100"
            This corresponds to the ``name`` field on the ``request``
            instance; if ``request`` is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    """
    # A pre-built request and the flattened `name` argument are mutually
    # exclusive; reject the call when both were supplied.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into the proto request type without copying
    # an already-correct proto.
    if not isinstance(request, analytics_admin.DeleteAccountRequest):
        request = analytics_admin.DeleteAccountRequest(request)
    if name is not None:
        request.name = name

    # The transport's wrapped method adds retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.delete_account]

    # Route the call by resource name via the request-params header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )

    # Fire the RPC; this method intentionally returns nothing.
    wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def update_account(
    self,
    request: Optional[Union[analytics_admin.UpdateAccountRequest, dict]] = None,
    *,
    account: Optional[resources.Account] = None,
    update_mask: Optional[field_mask_pb2.FieldMask] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Account:
    r"""Updates an account.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.UpdateAccountRequest, dict]):
            The request object. Request message for UpdateAccount RPC.
        account (google.analytics.admin_v1alpha.types.Account):
            Required. The account to update. The account's ``name``
            field is used to identify the account.
            This corresponds to the ``account`` field on the ``request``
            instance; if ``request`` is provided, this should not be set.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The list of fields to be updated. Field names
            must be in snake case (e.g., "field_to_update"). Omitted
            fields will not be updated. To replace the entire entity,
            use one path with the string "*" to match all fields.
            This corresponds to the ``update_mask`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.Account:
            A resource message representing a
            Google Analytics account.
    """
    # A pre-built request and the flattened arguments are mutually
    # exclusive; reject the call when both were supplied.
    if request is not None and any([account, update_mask]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into the proto request type without copying
    # an already-correct proto, then apply the flattened fields.
    if not isinstance(request, analytics_admin.UpdateAccountRequest):
        request = analytics_admin.UpdateAccountRequest(request)
    if account is not None:
        request.account = account
    if update_mask is not None:
        request.update_mask = update_mask

    # The transport's wrapped method adds retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.update_account]

    # Route the call by the nested account name via the request-params header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("account.name", request.account.name),)
        ),
    )

    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def provision_account_ticket(
    self,
    request: Optional[
        Union[analytics_admin.ProvisionAccountTicketRequest, dict]
    ] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> analytics_admin.ProvisionAccountTicketResponse:
    r"""Requests a ticket for creating an account.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.ProvisionAccountTicketRequest, dict]):
            The request object. Request message for
            ProvisionAccountTicket RPC.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.ProvisionAccountTicketResponse:
            Response message for ProvisionAccountTicket RPC.
    """
    # Coerce a dict (or None) into the proto request type without copying
    # an already-correct proto.
    if not isinstance(request, analytics_admin.ProvisionAccountTicketRequest):
        request = analytics_admin.ProvisionAccountTicketRequest(request)

    # The transport's wrapped method adds retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[
        self._transport.provision_account_ticket
    ]

    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def list_account_summaries(
    self,
    request: Optional[
        Union[analytics_admin.ListAccountSummariesRequest, dict]
    ] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAccountSummariesPager:
    r"""Returns summaries of all accounts accessible by the caller.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.ListAccountSummariesRequest, dict]):
            The request object. Request message for
            ListAccountSummaries RPC.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListAccountSummariesPager:
            Iterating over this object will yield results and resolve
            additional pages automatically.
    """
    # Coerce a dict (or None) into the proto request type without copying
    # an already-correct proto.
    if not isinstance(request, analytics_admin.ListAccountSummariesRequest):
        request = analytics_admin.ListAccountSummariesRequest(request)

    # The transport's wrapped method adds retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[
        self._transport.list_account_summaries
    ]

    # Fetch the first page, then hand the RPC to a pager so iteration
    # resolves subsequent pages transparently.
    first_page = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return pagers.ListAccountSummariesPager(
        method=wrapped_rpc,
        request=request,
        response=first_page,
        metadata=metadata,
    )
def get_property(
    self,
    request: Optional[Union[analytics_admin.GetPropertyRequest, dict]] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Property:
    r"""Lookup for a single "GA4" Property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.GetPropertyRequest, dict]):
            The request object. Request message for GetProperty RPC.
        name (str):
            Required. The name of the property to lookup. Format:
            properties/{property_id} Example: "properties/1000"
            This corresponds to the ``name`` field on the ``request``
            instance; if ``request`` is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.Property:
            A resource message representing a
            Google Analytics GA4 property.
    """
    # A pre-built request and the flattened `name` argument are mutually
    # exclusive; reject the call when both were supplied.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into the proto request type without copying
    # an already-correct proto.
    if not isinstance(request, analytics_admin.GetPropertyRequest):
        request = analytics_admin.GetPropertyRequest(request)
    if name is not None:
        request.name = name

    # The transport's wrapped method adds retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.get_property]

    # Route the call by resource name via the request-params header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )

    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def list_properties(
    self,
    request: Optional[Union[analytics_admin.ListPropertiesRequest, dict]] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPropertiesPager:
    r"""Returns child Properties under the specified parent Account.

    Only "GA4" properties will be returned.
    Properties will be excluded if the caller does not have
    access. Soft-deleted (ie: "trashed") properties are
    excluded by default. Returns an empty list if no
    relevant properties are found.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.ListPropertiesRequest, dict]):
            The request object. Request message for ListProperties RPC.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListPropertiesPager:
            Iterating over this object will yield results and resolve
            additional pages automatically.
    """
    # Coerce a dict (or None) into the proto request type without copying
    # an already-correct proto.
    if not isinstance(request, analytics_admin.ListPropertiesRequest):
        request = analytics_admin.ListPropertiesRequest(request)

    # The transport's wrapped method adds retry/timeout defaults and
    # friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.list_properties]

    # Fetch the first page, then hand the RPC to a pager so iteration
    # resolves subsequent pages transparently.
    first_page = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return pagers.ListPropertiesPager(
        method=wrapped_rpc,
        request=request,
        response=first_page,
        metadata=metadata,
    )
def create_property(
    self,
    request: Optional[Union[analytics_admin.CreatePropertyRequest, dict]] = None,
    *,
    property: Optional[resources.Property] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Property:
    r"""Creates an "GA4" property with the specified location and attributes.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.CreatePropertyRequest, dict]):
            The request object. Request message for CreateProperty RPC.
        property (google.analytics.admin_v1alpha.types.Property):
            Required. The property to create.
            Note: the supplied property must specify its parent.
            This corresponds to the ``property`` field on the ``request``
            instance; if ``request`` is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.Property:
            A resource message representing a
            Google Analytics GA4 property.
    """
    # A pre-built request and the flattened `property` argument are mutually
    # exclusive; reject the call when both were supplied.
    # NOTE: `property` shadows the builtin, but the name is part of the
    # public API and cannot be changed.
    if request is not None and any([property]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce a dict (or None) into the proto request type without copying
    # an already-correct proto.
    if not isinstance(request, analytics_admin.CreatePropertyRequest):
        request = analytics_admin.CreatePropertyRequest(request)
    if property is not None:
        request.property = property

    # The transport's wrapped method adds retry/timeout defaults and
    # friendly error handling. No routing header is needed for creation.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.create_property]

    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def delete_property(
    self,
    request: Optional[Union[analytics_admin.DeletePropertyRequest, dict]] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Property:
    r"""Marks the target Property as soft-deleted (i.e. "trashed") and
    returns it.

    There is no API method to restore a soft-deleted property; it can
    only be restored from the Trash Can UI. Properties not restored
    before the expiration time are permanently purged along with all
    child resources (e.g. GoogleAdsLinks, Streams, UserLinks).
    https://support.google.com/analytics/answer/6154772

    Returns an error if the target is not found, or is not a GA4
    Property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.DeletePropertyRequest, dict]):
            The request object for the DeleteProperty RPC.
        name (str):
            Required. The name of the Property to soft-delete.
            Format: properties/{property_id} Example: "properties/1000".
            Corresponds to ``request.name``; must not be set together
            with ``request``.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.types.Property:
            The soft-deleted GA4 property resource.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy.
    if not isinstance(request, analytics_admin.DeletePropertyRequest):
        request = analytics_admin.DeletePropertyRequest(request)
    if name is not None:
        request.name = name
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.delete_property]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    metadata = (*tuple(metadata), routing_md)
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def update_property(
    self,
    request: Optional[Union[analytics_admin.UpdatePropertyRequest, dict]] = None,
    *,
    property: Optional[resources.Property] = None,
    update_mask: Optional[field_mask_pb2.FieldMask] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.Property:
    r"""Updates a property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.UpdatePropertyRequest, dict]):
            The request object for the UpdateProperty RPC.
        property (google.analytics.admin_v1alpha.types.Property):
            Required. The property to update. Its ``name`` field
            identifies the property to be updated. Corresponds to
            ``request.property``; must not be set together with
            ``request``.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The list of fields to update, in snake case
            (e.g., "field_to_update"). Omitted fields are left
            unchanged; a single "*" path replaces the entire entity.
            Corresponds to ``request.update_mask``; must not be set
            together with ``request``.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.types.Property:
            The updated GA4 property resource.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([property, update_mask]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy.
    if not isinstance(request, analytics_admin.UpdatePropertyRequest):
        request = analytics_admin.UpdatePropertyRequest(request)
    if property is not None:
        request.property = property
    if update_mask is not None:
        request.update_mask = update_mask
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.update_property]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("property.name", request.property.name),)
    )
    metadata = (*tuple(metadata), routing_md)
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def get_user_link(
    self,
    request: Optional[Union[analytics_admin.GetUserLinkRequest, dict]] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.UserLink:
    r"""Gets information about a user's link to an account or property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.GetUserLinkRequest, dict]):
            The request object for the GetUserLink RPC.
        name (str):
            Required. Example format: accounts/1234/userLinks/5678.
            Corresponds to ``request.name``; must not be set together
            with ``request``.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.types.UserLink:
            The user's permissions on an Account or Property resource.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy.
    if not isinstance(request, analytics_admin.GetUserLinkRequest):
        request = analytics_admin.GetUserLinkRequest(request)
    if name is not None:
        request.name = name
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.get_user_link]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    metadata = (*tuple(metadata), routing_md)
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def batch_get_user_links(
    self,
    request: Optional[Union[analytics_admin.BatchGetUserLinksRequest, dict]] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> analytics_admin.BatchGetUserLinksResponse:
    r"""Gets information about multiple users' links to an account or
    property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.BatchGetUserLinksRequest, dict]):
            The request object for the BatchGetUserLinks RPC.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.types.BatchGetUserLinksResponse:
            Response message for the BatchGetUserLinks RPC.
    """
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy. This RPC has no
    # flattened field arguments.
    if not isinstance(request, analytics_admin.BatchGetUserLinksRequest):
        request = analytics_admin.BatchGetUserLinksRequest(request)
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.batch_get_user_links]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),))
    metadata = (*tuple(metadata), routing_md)
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def list_user_links(
    self,
    request: Optional[Union[analytics_admin.ListUserLinksRequest, dict]] = None,
    *,
    parent: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListUserLinksPager:
    r"""Lists all user links on an account or property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.ListUserLinksRequest, dict]):
            The request object for the ListUserLinks RPC.
        parent (str):
            Required. Example format: accounts/1234. Corresponds to
            ``request.parent``; must not be set together with
            ``request``.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListUserLinksPager:
            Response for the ListUserLinks RPC. Iterating over this
            object yields results and resolves additional pages
            automatically.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([parent]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy.
    if not isinstance(request, analytics_admin.ListUserLinksRequest):
        request = analytics_admin.ListUserLinksRequest(request)
    if parent is not None:
        request.parent = parent
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.list_user_links]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),))
    metadata = (*tuple(metadata), routing_md)
    first_page = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    # Wrap the paged response in a pager that provides `__iter__`.
    return pagers.ListUserLinksPager(
        method=wrapped_rpc,
        request=request,
        response=first_page,
        metadata=metadata,
    )
def audit_user_links(
    self,
    request: Optional[Union[analytics_admin.AuditUserLinksRequest, dict]] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.AuditUserLinksPager:
    r"""Lists all user links on an account or property, including
    implicit ones that come from effective permissions granted by
    groups or organization admin roles.

    If a returned user link does not have direct permissions, it
    cannot be removed from the account or property directly with the
    DeleteUserLink command. It has to be removed from the group/etc
    that grants the permissions, which is currently only
    usable/discoverable in the GA or GMP UIs.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.AuditUserLinksRequest, dict]):
            The request object for the AuditUserLinks RPC.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.AuditUserLinksPager:
            Response for the AuditUserLinks RPC. Iterating over this
            object yields results and resolves additional pages
            automatically.
    """
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy. This RPC has no
    # flattened field arguments.
    if not isinstance(request, analytics_admin.AuditUserLinksRequest):
        request = analytics_admin.AuditUserLinksRequest(request)
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.audit_user_links]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),))
    metadata = (*tuple(metadata), routing_md)
    first_page = wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    # Wrap the paged response in a pager that provides `__iter__`.
    return pagers.AuditUserLinksPager(
        method=wrapped_rpc,
        request=request,
        response=first_page,
        metadata=metadata,
    )
def create_user_link(
    self,
    request: Optional[Union[analytics_admin.CreateUserLinkRequest, dict]] = None,
    *,
    parent: Optional[str] = None,
    user_link: Optional[resources.UserLink] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.UserLink:
    r"""Creates a user link on an account or property.

    If the user with the specified email already has permissions on
    the account or property, the user's existing permissions are
    unioned with the permissions specified in the new UserLink.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.CreateUserLinkRequest, dict]):
            The request object for the CreateUserLink RPC.

            Users can have multiple email addresses associated with
            their Google account, one of which is the "primary" email
            address. Any of the associated addresses may be used for a
            new UserLink, but the returned UserLink will always
            contain the "primary" address, so the input and output
            email for this request may differ.
        parent (str):
            Required. Example format: accounts/1234. Corresponds to
            ``request.parent``; must not be set together with
            ``request``.
        user_link (google.analytics.admin_v1alpha.types.UserLink):
            Required. The user link to create. Corresponds to
            ``request.user_link``; must not be set together with
            ``request``.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.types.UserLink:
            The user's permissions on an Account or Property resource.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([parent, user_link]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy.
    if not isinstance(request, analytics_admin.CreateUserLinkRequest):
        request = analytics_admin.CreateUserLinkRequest(request)
    if parent is not None:
        request.parent = parent
    if user_link is not None:
        request.user_link = user_link
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.create_user_link]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),))
    metadata = (*tuple(metadata), routing_md)
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def batch_create_user_links(
    self,
    request: Optional[
        Union[analytics_admin.BatchCreateUserLinksRequest, dict]
    ] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> analytics_admin.BatchCreateUserLinksResponse:
    r"""Creates information about multiple users' links to an account
    or property.

    This method is transactional: if any UserLink cannot be created,
    none of the UserLinks will be created.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.BatchCreateUserLinksRequest, dict]):
            The request object for the BatchCreateUserLinks RPC.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.types.BatchCreateUserLinksResponse:
            Response message for the BatchCreateUserLinks RPC.
    """
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy. This RPC has no
    # flattened field arguments.
    if not isinstance(request, analytics_admin.BatchCreateUserLinksRequest):
        request = analytics_admin.BatchCreateUserLinksRequest(request)
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[
        self._transport.batch_create_user_links
    ]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),))
    metadata = (*tuple(metadata), routing_md)
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def update_user_link(
    self,
    request: Optional[Union[analytics_admin.UpdateUserLinkRequest, dict]] = None,
    *,
    user_link: Optional[resources.UserLink] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.UserLink:
    r"""Updates a user link on an account or property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.UpdateUserLinkRequest, dict]):
            The request object for the UpdateUserLink RPC.
        user_link (google.analytics.admin_v1alpha.types.UserLink):
            Required. The user link to update. Corresponds to
            ``request.user_link``; must not be set together with
            ``request``.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.types.UserLink:
            The user's permissions on an Account or Property resource.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([user_link]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy.
    if not isinstance(request, analytics_admin.UpdateUserLinkRequest):
        request = analytics_admin.UpdateUserLinkRequest(request)
    if user_link is not None:
        request.user_link = user_link
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.update_user_link]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("user_link.name", request.user_link.name),)
    )
    metadata = (*tuple(metadata), routing_md)
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def batch_update_user_links(
    self,
    request: Optional[
        Union[analytics_admin.BatchUpdateUserLinksRequest, dict]
    ] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> analytics_admin.BatchUpdateUserLinksResponse:
    r"""Updates information about multiple users' links to an account
    or property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.BatchUpdateUserLinksRequest, dict]):
            The request object for the BatchUpdateUserLinks RPC.
        retry (google.api_core.retry.Retry): Which errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Additional metadata sent
            along with the request.

    Returns:
        google.analytics.admin_v1alpha.types.BatchUpdateUserLinksResponse:
            Response message for the BatchUpdateUserLinks RPC.
    """
    # Coerce dicts (or None) into a proto request; a request already of
    # the right type is used as-is, avoiding a copy. This RPC has no
    # flattened field arguments.
    if not isinstance(request, analytics_admin.BatchUpdateUserLinksRequest):
        request = analytics_admin.BatchUpdateUserLinksRequest(request)
    # The transport wrapper adds retry/timeout handling and friendly
    # error mapping.
    wrapped_rpc = self._transport._wrapped_methods[
        self._transport.batch_update_user_links
    ]
    # Resource-name routing is carried in the metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),))
    metadata = (*tuple(metadata), routing_md)
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def delete_user_link(
self,
request: Optional[Union[analytics_admin.DeleteUserLinkRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a user link on an account or property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_delete_user_link():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.DeleteUserLinkRequest(
name="name_value",
)
# Make the request
client.delete_user_link(request=request)
Args:
request (Union[google.analytics.admin_v1alpha.types.DeleteUserLinkRequest, dict]):
The request object. Request message for DeleteUserLink
RPC.
name (str):
Required. Example format:
accounts/1234/userLinks/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.DeleteUserLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.DeleteUserLinkRequest):
request = analytics_admin.DeleteUserLinkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_user_link]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def batch_delete_user_links(
self,
request: Optional[
Union[analytics_admin.BatchDeleteUserLinksRequest, dict]
] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes information about multiple users' links to an
account or property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_batch_delete_user_links():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
requests = admin_v1alpha.DeleteUserLinkRequest()
requests.name = "name_value"
request = admin_v1alpha.BatchDeleteUserLinksRequest(
parent="parent_value",
requests=requests,
)
# Make the request
client.batch_delete_user_links(request=request)
Args:
request (Union[google.analytics.admin_v1alpha.types.BatchDeleteUserLinksRequest, dict]):
The request object. Request message for
BatchDeleteUserLinks RPC.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.BatchDeleteUserLinksRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.BatchDeleteUserLinksRequest):
request = analytics_admin.BatchDeleteUserLinksRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_delete_user_links]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def create_firebase_link(
self,
request: Optional[
Union[analytics_admin.CreateFirebaseLinkRequest, dict]
] = None,
*,
parent: Optional[str] = None,
firebase_link: Optional[resources.FirebaseLink] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.FirebaseLink:
r"""Creates a FirebaseLink.
Properties can have at most one FirebaseLink.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_create_firebase_link():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.CreateFirebaseLinkRequest(
parent="parent_value",
)
# Make the request
response = client.create_firebase_link(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.CreateFirebaseLinkRequest, dict]):
The request object. Request message for
CreateFirebaseLink RPC
parent (str):
Required. Format: properties/{property_id} Example:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
firebase_link (google.analytics.admin_v1alpha.types.FirebaseLink):
Required. The Firebase link to
create.
This corresponds to the ``firebase_link`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.FirebaseLink:
A link between a GA4 property and a
Firebase project.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, firebase_link])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.CreateFirebaseLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.CreateFirebaseLinkRequest):
request = analytics_admin.CreateFirebaseLinkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if firebase_link is not None:
request.firebase_link = firebase_link
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_firebase_link]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_firebase_link(
self,
request: Optional[
Union[analytics_admin.DeleteFirebaseLinkRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a FirebaseLink on a property
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_delete_firebase_link():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.DeleteFirebaseLinkRequest(
name="name_value",
)
# Make the request
client.delete_firebase_link(request=request)
Args:
request (Union[google.analytics.admin_v1alpha.types.DeleteFirebaseLinkRequest, dict]):
The request object. Request message for
DeleteFirebaseLink RPC
name (str):
Required. Format:
properties/{property_id}/firebaseLinks/{firebase_link_id}
Example: properties/1234/firebaseLinks/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.DeleteFirebaseLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.DeleteFirebaseLinkRequest):
request = analytics_admin.DeleteFirebaseLinkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_firebase_link]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def list_firebase_links(
self,
request: Optional[Union[analytics_admin.ListFirebaseLinksRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListFirebaseLinksPager:
r"""Lists FirebaseLinks on a property.
Properties can have at most one FirebaseLink.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_list_firebase_links():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ListFirebaseLinksRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_firebase_links(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.ListFirebaseLinksRequest, dict]):
The request object. Request message for
ListFirebaseLinks RPC
parent (str):
Required. Format: properties/{property_id} Example:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListFirebaseLinksPager:
Response message for
ListFirebaseLinks RPC
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ListFirebaseLinksRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.ListFirebaseLinksRequest):
request = analytics_admin.ListFirebaseLinksRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_firebase_links]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListFirebaseLinksPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_global_site_tag(
self,
request: Optional[Union[analytics_admin.GetGlobalSiteTagRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.GlobalSiteTag:
r"""Returns the Site Tag for the specified web stream.
Site Tags are immutable singletons.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_global_site_tag():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetGlobalSiteTagRequest(
name="name_value",
)
# Make the request
response = client.get_global_site_tag(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetGlobalSiteTagRequest, dict]):
The request object. Request message for GetGlobalSiteTag
RPC.
name (str):
Required. The name of the site tag to lookup. Note that
site tags are singletons and do not have unique IDs.
Format:
properties/{property_id}/dataStreams/{stream_id}/globalSiteTag
Example: "properties/123/dataStreams/456/globalSiteTag"
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.GlobalSiteTag:
Read-only resource with the tag for
sending data from a website to a
DataStream. Only present for web
DataStream resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetGlobalSiteTagRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.GetGlobalSiteTagRequest):
request = analytics_admin.GetGlobalSiteTagRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_global_site_tag]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_google_ads_link(
self,
request: Optional[
Union[analytics_admin.CreateGoogleAdsLinkRequest, dict]
] = None,
*,
parent: Optional[str] = None,
google_ads_link: Optional[resources.GoogleAdsLink] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.GoogleAdsLink:
r"""Creates a GoogleAdsLink.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_create_google_ads_link():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.CreateGoogleAdsLinkRequest(
parent="parent_value",
)
# Make the request
response = client.create_google_ads_link(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.CreateGoogleAdsLinkRequest, dict]):
The request object. Request message for
CreateGoogleAdsLink RPC
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
google_ads_link (google.analytics.admin_v1alpha.types.GoogleAdsLink):
Required. The GoogleAdsLink to
create.
This corresponds to the ``google_ads_link`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.GoogleAdsLink:
A link between a GA4 property and a
Google Ads account.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, google_ads_link])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.CreateGoogleAdsLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.CreateGoogleAdsLinkRequest):
request = analytics_admin.CreateGoogleAdsLinkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if google_ads_link is not None:
request.google_ads_link = google_ads_link
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_google_ads_link]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_google_ads_link(
self,
request: Optional[
Union[analytics_admin.UpdateGoogleAdsLinkRequest, dict]
] = None,
*,
google_ads_link: Optional[resources.GoogleAdsLink] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.GoogleAdsLink:
r"""Updates a GoogleAdsLink on a property
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_update_google_ads_link():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.UpdateGoogleAdsLinkRequest(
)
# Make the request
response = client.update_google_ads_link(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.UpdateGoogleAdsLinkRequest, dict]):
The request object. Request message for
UpdateGoogleAdsLink RPC
google_ads_link (google.analytics.admin_v1alpha.types.GoogleAdsLink):
The GoogleAdsLink to update
This corresponds to the ``google_ads_link`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The list of fields to be updated. Field names
must be in snake case (e.g., "field_to_update"). Omitted
fields will not be updated. To replace the entire
entity, use one path with the string "*" to match all
fields.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.GoogleAdsLink:
A link between a GA4 property and a
Google Ads account.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([google_ads_link, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.UpdateGoogleAdsLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.UpdateGoogleAdsLinkRequest):
request = analytics_admin.UpdateGoogleAdsLinkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if google_ads_link is not None:
request.google_ads_link = google_ads_link
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_google_ads_link]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("google_ads_link.name", request.google_ads_link.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_google_ads_link(
self,
request: Optional[
Union[analytics_admin.DeleteGoogleAdsLinkRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a GoogleAdsLink on a property
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_delete_google_ads_link():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.DeleteGoogleAdsLinkRequest(
name="name_value",
)
# Make the request
client.delete_google_ads_link(request=request)
Args:
request (Union[google.analytics.admin_v1alpha.types.DeleteGoogleAdsLinkRequest, dict]):
The request object. Request message for
DeleteGoogleAdsLink RPC.
name (str):
Required. Example format:
properties/1234/googleAdsLinks/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.DeleteGoogleAdsLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.DeleteGoogleAdsLinkRequest):
request = analytics_admin.DeleteGoogleAdsLinkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_google_ads_link]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def list_google_ads_links(
self,
request: Optional[
Union[analytics_admin.ListGoogleAdsLinksRequest, dict]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListGoogleAdsLinksPager:
r"""Lists GoogleAdsLinks on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_list_google_ads_links():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ListGoogleAdsLinksRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_google_ads_links(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.ListGoogleAdsLinksRequest, dict]):
The request object. Request message for
ListGoogleAdsLinks RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListGoogleAdsLinksPager:
Response message for
ListGoogleAdsLinks RPC.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ListGoogleAdsLinksRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.ListGoogleAdsLinksRequest):
request = analytics_admin.ListGoogleAdsLinksRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_google_ads_links]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListGoogleAdsLinksPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_data_sharing_settings(
self,
request: Optional[
Union[analytics_admin.GetDataSharingSettingsRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DataSharingSettings:
r"""Get data sharing settings on an account.
Data sharing settings are singletons.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_data_sharing_settings():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetDataSharingSettingsRequest(
name="name_value",
)
# Make the request
response = client.get_data_sharing_settings(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetDataSharingSettingsRequest, dict]):
The request object. Request message for
GetDataSharingSettings RPC.
name (str):
Required. The name of the settings to
lookup. Format:
accounts/{account}/dataSharingSettings
Example:
"accounts/1000/dataSharingSettings"
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DataSharingSettings:
A resource message representing data
sharing settings of a Google Analytics
account.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetDataSharingSettingsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.GetDataSharingSettingsRequest):
request = analytics_admin.GetDataSharingSettingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_data_sharing_settings
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_measurement_protocol_secret(
self,
request: Optional[
Union[analytics_admin.GetMeasurementProtocolSecretRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.MeasurementProtocolSecret:
r"""Lookup for a single "GA4" MeasurementProtocolSecret.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_measurement_protocol_secret():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetMeasurementProtocolSecretRequest(
name="name_value",
)
# Make the request
response = client.get_measurement_protocol_secret(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetMeasurementProtocolSecretRequest, dict]):
The request object. Request message for
GetMeasurementProtocolSecret RPC.
name (str):
Required. The name of the measurement
protocol secret to lookup. Format:
properties/{property}/dataStreams/{dataStream}/measurementProtocolSecrets/{measurementProtocolSecret}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.MeasurementProtocolSecret:
A secret value used for sending hits
to Measurement Protocol.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetMeasurementProtocolSecretRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.GetMeasurementProtocolSecretRequest):
request = analytics_admin.GetMeasurementProtocolSecretRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_measurement_protocol_secret
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_measurement_protocol_secrets(
self,
request: Optional[
Union[analytics_admin.ListMeasurementProtocolSecretsRequest, dict]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMeasurementProtocolSecretsPager:
r"""Returns child MeasurementProtocolSecrets under the
specified parent Property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_list_measurement_protocol_secrets():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ListMeasurementProtocolSecretsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_measurement_protocol_secrets(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.ListMeasurementProtocolSecretsRequest, dict]):
The request object. Request message for
ListMeasurementProtocolSecret RPC
parent (str):
Required. The resource name of the
parent stream. Format:
properties/{property}/dataStreams/{dataStream}/measurementProtocolSecrets
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListMeasurementProtocolSecretsPager:
Response message for
ListMeasurementProtocolSecret RPC
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ListMeasurementProtocolSecretsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.ListMeasurementProtocolSecretsRequest
):
request = analytics_admin.ListMeasurementProtocolSecretsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_measurement_protocol_secrets
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListMeasurementProtocolSecretsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def create_measurement_protocol_secret(
self,
request: Optional[
Union[analytics_admin.CreateMeasurementProtocolSecretRequest, dict]
] = None,
*,
parent: Optional[str] = None,
measurement_protocol_secret: Optional[
resources.MeasurementProtocolSecret
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.MeasurementProtocolSecret:
r"""Creates a measurement protocol secret.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_create_measurement_protocol_secret():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
measurement_protocol_secret = admin_v1alpha.MeasurementProtocolSecret()
measurement_protocol_secret.display_name = "display_name_value"
request = admin_v1alpha.CreateMeasurementProtocolSecretRequest(
parent="parent_value",
measurement_protocol_secret=measurement_protocol_secret,
)
# Make the request
response = client.create_measurement_protocol_secret(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.CreateMeasurementProtocolSecretRequest, dict]):
The request object. Request message for
CreateMeasurementProtocolSecret RPC
parent (str):
Required. The parent resource where
this secret will be created. Format:
properties/{property}/dataStreams/{dataStream}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
measurement_protocol_secret (google.analytics.admin_v1alpha.types.MeasurementProtocolSecret):
Required. The measurement protocol
secret to create.
This corresponds to the ``measurement_protocol_secret`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.MeasurementProtocolSecret:
A secret value used for sending hits
to Measurement Protocol.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, measurement_protocol_secret])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.CreateMeasurementProtocolSecretRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.CreateMeasurementProtocolSecretRequest
):
request = analytics_admin.CreateMeasurementProtocolSecretRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if measurement_protocol_secret is not None:
request.measurement_protocol_secret = measurement_protocol_secret
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_measurement_protocol_secret
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_measurement_protocol_secret(
self,
request: Optional[
Union[analytics_admin.DeleteMeasurementProtocolSecretRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes target MeasurementProtocolSecret.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_delete_measurement_protocol_secret():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.DeleteMeasurementProtocolSecretRequest(
name="name_value",
)
# Make the request
client.delete_measurement_protocol_secret(request=request)
Args:
request (Union[google.analytics.admin_v1alpha.types.DeleteMeasurementProtocolSecretRequest, dict]):
The request object. Request message for
DeleteMeasurementProtocolSecret RPC
name (str):
Required. The name of the
MeasurementProtocolSecret to delete.
Format:
properties/{property}/dataStreams/{dataStream}/measurementProtocolSecrets/{measurementProtocolSecret}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.DeleteMeasurementProtocolSecretRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.DeleteMeasurementProtocolSecretRequest
):
request = analytics_admin.DeleteMeasurementProtocolSecretRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_measurement_protocol_secret
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def update_measurement_protocol_secret(
self,
request: Optional[
Union[analytics_admin.UpdateMeasurementProtocolSecretRequest, dict]
] = None,
*,
measurement_protocol_secret: Optional[
resources.MeasurementProtocolSecret
] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.MeasurementProtocolSecret:
r"""Updates a measurement protocol secret.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_update_measurement_protocol_secret():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
measurement_protocol_secret = admin_v1alpha.MeasurementProtocolSecret()
measurement_protocol_secret.display_name = "display_name_value"
request = admin_v1alpha.UpdateMeasurementProtocolSecretRequest(
measurement_protocol_secret=measurement_protocol_secret,
)
# Make the request
response = client.update_measurement_protocol_secret(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.UpdateMeasurementProtocolSecretRequest, dict]):
The request object. Request message for
UpdateMeasurementProtocolSecret RPC
measurement_protocol_secret (google.analytics.admin_v1alpha.types.MeasurementProtocolSecret):
Required. The measurement protocol
secret to update.
This corresponds to the ``measurement_protocol_secret`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The list of fields to be updated.
Omitted fields will not be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.MeasurementProtocolSecret:
A secret value used for sending hits
to Measurement Protocol.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([measurement_protocol_secret, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.UpdateMeasurementProtocolSecretRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.UpdateMeasurementProtocolSecretRequest
):
request = analytics_admin.UpdateMeasurementProtocolSecretRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if measurement_protocol_secret is not None:
request.measurement_protocol_secret = measurement_protocol_secret
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_measurement_protocol_secret
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
(
"measurement_protocol_secret.name",
request.measurement_protocol_secret.name,
),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def acknowledge_user_data_collection(
self,
request: Optional[
Union[analytics_admin.AcknowledgeUserDataCollectionRequest, dict]
] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> analytics_admin.AcknowledgeUserDataCollectionResponse:
r"""Acknowledges the terms of user data collection for
the specified property.
This acknowledgement must be completed (either in the
Google Analytics UI or via this API) before
MeasurementProtocolSecret resources may be created.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_acknowledge_user_data_collection():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.AcknowledgeUserDataCollectionRequest(
property="property_value",
acknowledgement="acknowledgement_value",
)
# Make the request
response = client.acknowledge_user_data_collection(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.AcknowledgeUserDataCollectionRequest, dict]):
The request object. Request message for
AcknowledgeUserDataCollection RPC.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.AcknowledgeUserDataCollectionResponse:
Response message for
AcknowledgeUserDataCollection RPC.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.AcknowledgeUserDataCollectionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.AcknowledgeUserDataCollectionRequest
):
request = analytics_admin.AcknowledgeUserDataCollectionRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.acknowledge_user_data_collection
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("property", request.property),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def search_change_history_events(
self,
request: Optional[
Union[analytics_admin.SearchChangeHistoryEventsRequest, dict]
] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchChangeHistoryEventsPager:
r"""Searches through all changes to an account or its
children given the specified set of filters.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_search_change_history_events():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.SearchChangeHistoryEventsRequest(
account="account_value",
)
# Make the request
page_result = client.search_change_history_events(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.SearchChangeHistoryEventsRequest, dict]):
The request object. Request message for
SearchChangeHistoryEvents RPC.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.SearchChangeHistoryEventsPager:
Response message for SearchAccounts
RPC.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.SearchChangeHistoryEventsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.SearchChangeHistoryEventsRequest):
request = analytics_admin.SearchChangeHistoryEventsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.search_change_history_events
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("account", request.account),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.SearchChangeHistoryEventsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_google_signals_settings(
self,
request: Optional[
Union[analytics_admin.GetGoogleSignalsSettingsRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.GoogleSignalsSettings:
r"""Lookup for Google Signals settings for a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_google_signals_settings():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetGoogleSignalsSettingsRequest(
name="name_value",
)
# Make the request
response = client.get_google_signals_settings(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetGoogleSignalsSettingsRequest, dict]):
The request object. Request message for
GetGoogleSignalsSettings RPC
name (str):
Required. The name of the google
signals settings to retrieve. Format:
properties/{property}/googleSignalsSettings
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.GoogleSignalsSettings:
Settings values for Google Signals.
This is a singleton resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetGoogleSignalsSettingsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.GetGoogleSignalsSettingsRequest):
request = analytics_admin.GetGoogleSignalsSettingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_google_signals_settings
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_google_signals_settings(
    self,
    request: Optional[
        Union[analytics_admin.UpdateGoogleSignalsSettingsRequest, dict]
    ] = None,
    *,
    google_signals_settings: Optional[resources.GoogleSignalsSettings] = None,
    update_mask: Optional[field_mask_pb2.FieldMask] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.GoogleSignalsSettings:
    r"""Updates Google Signals settings for a property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.UpdateGoogleSignalsSettingsRequest, dict]):
            The request object. Request message for the
            UpdateGoogleSignalsSettings RPC.
        google_signals_settings (google.analytics.admin_v1alpha.types.GoogleSignalsSettings):
            Required. The settings to update; the ``name`` field
            identifies which settings are updated. Mutually
            exclusive with ``request``.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The list of fields to update. Field names must
            be in snake case (e.g., "field_to_update"). Omitted
            fields are not updated; a single "*" path replaces the
            entire entity. Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.GoogleSignalsSettings:
            Settings values for Google Signals.
            This is a singleton resource.
    """
    # A full request object and flattened field arguments are two
    # mutually exclusive ways of specifying the call.
    flattened = any((google_signals_settings, update_mask))
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into a proto request; an existing proto
    # is reused as-is since no flattened fields accompany it.
    if not isinstance(request, analytics_admin.UpdateGoogleSignalsSettingsRequest):
        request = analytics_admin.UpdateGoogleSignalsSettingsRequest(request)
    # Copy any flattened keyword arguments onto the request proto.
    if google_signals_settings is not None:
        request.google_signals_settings = google_signals_settings
    if update_mask is not None:
        request.update_mask = update_mask
    # The wrapped transport method carries retry/timeout behavior and
    # friendly error handling.
    rpc = self._transport._wrapped_methods[
        self._transport.update_google_signals_settings
    ]
    # The backend routes this call by the settings' resource name, which
    # must travel in the gRPC metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (
            (
                "google_signals_settings.name",
                request.google_signals_settings.name,
            ),
        )
    )
    metadata = tuple(metadata) + (routing_md,)
    # Issue the RPC and hand its result straight back.
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def create_conversion_event(
    self,
    request: Optional[
        Union[analytics_admin.CreateConversionEventRequest, dict]
    ] = None,
    *,
    parent: Optional[str] = None,
    conversion_event: Optional[resources.ConversionEvent] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.ConversionEvent:
    r"""Creates a conversion event with the specified attributes.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.CreateConversionEventRequest, dict]):
            The request object. Request message for the
            CreateConversionEvent RPC.
        parent (str):
            Required. The resource name of the parent property where
            this conversion event will be created. Format:
            properties/123. Mutually exclusive with ``request``.
        conversion_event (google.analytics.admin_v1alpha.types.ConversionEvent):
            Required. The conversion event to create. Mutually
            exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.ConversionEvent:
            A conversion event in a Google Analytics property.
    """
    # Reject mixing a full request object with flattened field args.
    flattened = any((parent, conversion_event))
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into a proto request; reuse a proto
    # instance untouched since no flattened fields accompany it.
    if not isinstance(request, analytics_admin.CreateConversionEventRequest):
        request = analytics_admin.CreateConversionEventRequest(request)
    # Apply flattened keyword arguments, if given, onto the request.
    if parent is not None:
        request.parent = parent
    if conversion_event is not None:
        request.conversion_event = conversion_event
    # Fetch the retry/timeout-wrapped transport method.
    rpc = self._transport._wrapped_methods[self._transport.create_conversion_event]
    # Route the call by parent property via the gRPC metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )
    metadata = tuple(metadata) + (routing_md,)
    # Issue the RPC and return its response.
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def get_conversion_event(
    self,
    request: Optional[
        Union[analytics_admin.GetConversionEventRequest, dict]
    ] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.ConversionEvent:
    r"""Retrieve a single conversion event.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.GetConversionEventRequest, dict]):
            The request object. Request message for the
            GetConversionEvent RPC.
        name (str):
            Required. The resource name of the conversion event to
            retrieve. Format:
            properties/{property}/conversionEvents/{conversion_event}
            Example: "properties/123/conversionEvents/456".
            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.ConversionEvent:
            A conversion event in a Google Analytics property.
    """
    # Reject mixing a full request object with the flattened `name`.
    flattened = any((name,))
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into a proto request; a proto instance
    # passes through unchanged.
    if not isinstance(request, analytics_admin.GetConversionEventRequest):
        request = analytics_admin.GetConversionEventRequest(request)
    # Apply the flattened keyword argument, if given.
    if name is not None:
        request.name = name
    # Fetch the retry/timeout-wrapped transport method.
    rpc = self._transport._wrapped_methods[self._transport.get_conversion_event]
    # Route the call by resource name via the gRPC metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    metadata = tuple(metadata) + (routing_md,)
    # Issue the RPC and return its response.
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def delete_conversion_event(
    self,
    request: Optional[
        Union[analytics_admin.DeleteConversionEventRequest, dict]
    ] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Deletes a conversion event in a property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.DeleteConversionEventRequest, dict]):
            The request object. Request message for the
            DeleteConversionEvent RPC.
        name (str):
            Required. The resource name of the conversion event to
            delete. Format:
            properties/{property}/conversionEvents/{conversion_event}
            Example: "properties/123/conversionEvents/456".
            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    """
    # Reject mixing a full request object with the flattened `name`.
    flattened = any((name,))
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into a proto request; a proto instance
    # passes through unchanged.
    if not isinstance(request, analytics_admin.DeleteConversionEventRequest):
        request = analytics_admin.DeleteConversionEventRequest(request)
    # Apply the flattened keyword argument, if given.
    if name is not None:
        request.name = name
    # Fetch the retry/timeout-wrapped transport method.
    rpc = self._transport._wrapped_methods[self._transport.delete_conversion_event]
    # Route the call by resource name via the gRPC metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    metadata = tuple(metadata) + (routing_md,)
    # Fire the RPC; deletion returns no payload.
    rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def list_conversion_events(
    self,
    request: Optional[
        Union[analytics_admin.ListConversionEventsRequest, dict]
    ] = None,
    *,
    parent: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListConversionEventsPager:
    r"""Returns a list of conversion events in the specified parent
    property.

    Returns an empty list if no conversion events are found.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.ListConversionEventsRequest, dict]):
            The request object. Request message for the
            ListConversionEvents RPC.
        parent (str):
            Required. The resource name of the parent property.
            Example: 'properties/123'. Mutually exclusive with
            ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListConversionEventsPager:
            Response message for the ListConversionEvents RPC.
            Iterating over this object yields results and resolves
            additional pages automatically.
    """
    # Reject mixing a full request object with the flattened `parent`.
    flattened = any((parent,))
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into a proto request; a proto instance
    # passes through unchanged.
    if not isinstance(request, analytics_admin.ListConversionEventsRequest):
        request = analytics_admin.ListConversionEventsRequest(request)
    # Apply the flattened keyword argument, if given.
    if parent is not None:
        request.parent = parent
    # Fetch the retry/timeout-wrapped transport method.
    rpc = self._transport._wrapped_methods[self._transport.list_conversion_events]
    # Route the call by parent property via the gRPC metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )
    metadata = tuple(metadata) + (routing_md,)
    # Issue the first page request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    # Wrap in a pager so iteration transparently fetches further pages.
    return pagers.ListConversionEventsPager(
        method=rpc,
        request=request,
        response=response,
        metadata=metadata,
    )
def get_display_video360_advertiser_link(
    self,
    request: Optional[
        Union[analytics_admin.GetDisplayVideo360AdvertiserLinkRequest, dict]
    ] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DisplayVideo360AdvertiserLink:
    r"""Look up a single DisplayVideo360AdvertiserLink.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.GetDisplayVideo360AdvertiserLinkRequest, dict]):
            The request object. Request message for the
            GetDisplayVideo360AdvertiserLink RPC.
        name (str):
            Required. The name of the DisplayVideo360AdvertiserLink
            to get. Example format:
            properties/1234/displayVideo360AdvertiserLink/5678.
            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink:
            A link between a GA4 property and a Display & Video 360
            advertiser.
    """
    # Reject mixing a full request object with the flattened `name`.
    flattened = any((name,))
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into a proto request; a proto instance
    # passes through unchanged.
    if not isinstance(
        request, analytics_admin.GetDisplayVideo360AdvertiserLinkRequest
    ):
        request = analytics_admin.GetDisplayVideo360AdvertiserLinkRequest(request)
    # Apply the flattened keyword argument, if given.
    if name is not None:
        request.name = name
    # Fetch the retry/timeout-wrapped transport method.
    rpc = self._transport._wrapped_methods[
        self._transport.get_display_video360_advertiser_link
    ]
    # Route the call by resource name via the gRPC metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    metadata = tuple(metadata) + (routing_md,)
    # Issue the RPC and return its response.
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def list_display_video360_advertiser_links(
    self,
    request: Optional[
        Union[analytics_admin.ListDisplayVideo360AdvertiserLinksRequest, dict]
    ] = None,
    *,
    parent: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDisplayVideo360AdvertiserLinksPager:
    r"""Lists all DisplayVideo360AdvertiserLinks on a property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.ListDisplayVideo360AdvertiserLinksRequest, dict]):
            The request object. Request message for the
            ListDisplayVideo360AdvertiserLinks RPC.
        parent (str):
            Required. Example format: properties/1234. Mutually
            exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListDisplayVideo360AdvertiserLinksPager:
            Response message for the ListDisplayVideo360AdvertiserLinks
            RPC. Iterating over this object yields results and
            resolves additional pages automatically.
    """
    # Reject mixing a full request object with the flattened `parent`.
    flattened = any((parent,))
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into a proto request; a proto instance
    # passes through unchanged.
    if not isinstance(
        request, analytics_admin.ListDisplayVideo360AdvertiserLinksRequest
    ):
        request = analytics_admin.ListDisplayVideo360AdvertiserLinksRequest(request)
    # Apply the flattened keyword argument, if given.
    if parent is not None:
        request.parent = parent
    # Fetch the retry/timeout-wrapped transport method.
    rpc = self._transport._wrapped_methods[
        self._transport.list_display_video360_advertiser_links
    ]
    # Route the call by parent property via the gRPC metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )
    metadata = tuple(metadata) + (routing_md,)
    # Issue the first page request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    # Wrap in a pager so iteration transparently fetches further pages.
    return pagers.ListDisplayVideo360AdvertiserLinksPager(
        method=rpc,
        request=request,
        response=response,
        metadata=metadata,
    )
def create_display_video360_advertiser_link(
    self,
    request: Optional[
        Union[analytics_admin.CreateDisplayVideo360AdvertiserLinkRequest, dict]
    ] = None,
    *,
    parent: Optional[str] = None,
    display_video_360_advertiser_link: Optional[
        resources.DisplayVideo360AdvertiserLink
    ] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DisplayVideo360AdvertiserLink:
    r"""Creates a DisplayVideo360AdvertiserLink.

    This can only be utilized by users who have proper authorization
    both on the Google Analytics property and on the Display & Video
    360 advertiser. Users who do not have access to the Display &
    Video 360 advertiser should instead seek to create a
    DisplayVideo360LinkProposal.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.CreateDisplayVideo360AdvertiserLinkRequest, dict]):
            The request object. Request message for the
            CreateDisplayVideo360AdvertiserLink RPC.
        parent (str):
            Required. Example format: properties/1234. Mutually
            exclusive with ``request``.
        display_video_360_advertiser_link (google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink):
            Required. The DisplayVideo360AdvertiserLink to create.
            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink:
            A link between a GA4 property and a Display & Video 360
            advertiser.
    """
    # Reject mixing a full request object with flattened field args.
    flattened = any((parent, display_video_360_advertiser_link))
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into a proto request; a proto instance
    # passes through unchanged.
    if not isinstance(
        request, analytics_admin.CreateDisplayVideo360AdvertiserLinkRequest
    ):
        request = analytics_admin.CreateDisplayVideo360AdvertiserLinkRequest(request)
    # Apply flattened keyword arguments, if given, onto the request.
    if parent is not None:
        request.parent = parent
    if display_video_360_advertiser_link is not None:
        request.display_video_360_advertiser_link = (
            display_video_360_advertiser_link
        )
    # Fetch the retry/timeout-wrapped transport method.
    rpc = self._transport._wrapped_methods[
        self._transport.create_display_video360_advertiser_link
    ]
    # Route the call by parent property via the gRPC metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )
    metadata = tuple(metadata) + (routing_md,)
    # Issue the RPC and return its response.
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def delete_display_video360_advertiser_link(
    self,
    request: Optional[
        Union[analytics_admin.DeleteDisplayVideo360AdvertiserLinkRequest, dict]
    ] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Deletes a DisplayVideo360AdvertiserLink on a property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.DeleteDisplayVideo360AdvertiserLinkRequest, dict]):
            The request object. Request message for the
            DeleteDisplayVideo360AdvertiserLink RPC.
        name (str):
            Required. The name of the DisplayVideo360AdvertiserLink
            to delete. Example format:
            properties/1234/displayVideo360AdvertiserLinks/5678.
            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    """
    # Reject mixing a full request object with the flattened `name`.
    flattened = any((name,))
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce a dict (or None) into a proto request; a proto instance
    # passes through unchanged.
    if not isinstance(
        request, analytics_admin.DeleteDisplayVideo360AdvertiserLinkRequest
    ):
        request = analytics_admin.DeleteDisplayVideo360AdvertiserLinkRequest(request)
    # Apply the flattened keyword argument, if given.
    if name is not None:
        request.name = name
    # Fetch the retry/timeout-wrapped transport method.
    rpc = self._transport._wrapped_methods[
        self._transport.delete_display_video360_advertiser_link
    ]
    # Route the call by resource name via the gRPC metadata header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    metadata = tuple(metadata) + (routing_md,)
    # Fire the RPC; deletion returns no payload.
    rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def update_display_video360_advertiser_link(
self,
request: Optional[
Union[analytics_admin.UpdateDisplayVideo360AdvertiserLinkRequest, dict]
] = None,
*,
display_video_360_advertiser_link: Optional[
resources.DisplayVideo360AdvertiserLink
] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DisplayVideo360AdvertiserLink:
r"""Updates a DisplayVideo360AdvertiserLink on a
property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_update_display_video360_advertiser_link():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.UpdateDisplayVideo360AdvertiserLinkRequest(
)
# Make the request
response = client.update_display_video360_advertiser_link(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.UpdateDisplayVideo360AdvertiserLinkRequest, dict]):
The request object. Request message for
UpdateDisplayVideo360AdvertiserLink RPC.
display_video_360_advertiser_link (google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink):
The DisplayVideo360AdvertiserLink to
update
This corresponds to the ``display_video_360_advertiser_link`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The list of fields to be updated. Omitted
fields will not be updated. To replace the entire
entity, use one path with the string "*" to match all
fields.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink:
A link between a GA4 property and a
Display & Video 360 advertiser.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([display_video_360_advertiser_link, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.UpdateDisplayVideo360AdvertiserLinkRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.UpdateDisplayVideo360AdvertiserLinkRequest
):
request = analytics_admin.UpdateDisplayVideo360AdvertiserLinkRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if display_video_360_advertiser_link is not None:
request.display_video_360_advertiser_link = (
display_video_360_advertiser_link
)
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_display_video360_advertiser_link
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
(
"display_video_360_advertiser_link.name",
request.display_video_360_advertiser_link.name,
),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_display_video360_advertiser_link_proposal(
self,
request: Optional[
Union[analytics_admin.GetDisplayVideo360AdvertiserLinkProposalRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DisplayVideo360AdvertiserLinkProposal:
r"""Lookup for a single
DisplayVideo360AdvertiserLinkProposal.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_display_video360_advertiser_link_proposal():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetDisplayVideo360AdvertiserLinkProposalRequest(
name="name_value",
)
# Make the request
response = client.get_display_video360_advertiser_link_proposal(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetDisplayVideo360AdvertiserLinkProposalRequest, dict]):
The request object. Request message for
GetDisplayVideo360AdvertiserLinkProposal RPC.
name (str):
Required. The name of the
DisplayVideo360AdvertiserLinkProposal to
get. Example format:
properties/1234/displayVideo360AdvertiserLinkProposals/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal:
A proposal for a link between a GA4
property and a Display & Video 360
advertiser.
A proposal is converted to a
DisplayVideo360AdvertiserLink once
approved. Google Analytics admins
approve inbound proposals while Display
& Video 360 admins approve outbound
proposals.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetDisplayVideo360AdvertiserLinkProposalRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.GetDisplayVideo360AdvertiserLinkProposalRequest
):
request = analytics_admin.GetDisplayVideo360AdvertiserLinkProposalRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_display_video360_advertiser_link_proposal
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_display_video360_advertiser_link_proposals(
self,
request: Optional[
Union[
analytics_admin.ListDisplayVideo360AdvertiserLinkProposalsRequest, dict
]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDisplayVideo360AdvertiserLinkProposalsPager:
r"""Lists DisplayVideo360AdvertiserLinkProposals on a
property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_list_display_video360_advertiser_link_proposals():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ListDisplayVideo360AdvertiserLinkProposalsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_display_video360_advertiser_link_proposals(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.ListDisplayVideo360AdvertiserLinkProposalsRequest, dict]):
The request object. Request message for
ListDisplayVideo360AdvertiserLinkProposals RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListDisplayVideo360AdvertiserLinkProposalsPager:
Response message for
ListDisplayVideo360AdvertiserLinkProposals
RPC. Iterating over this object will
yield results and resolve additional
pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ListDisplayVideo360AdvertiserLinkProposalsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.ListDisplayVideo360AdvertiserLinkProposalsRequest
):
request = analytics_admin.ListDisplayVideo360AdvertiserLinkProposalsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_display_video360_advertiser_link_proposals
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDisplayVideo360AdvertiserLinkProposalsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def create_display_video360_advertiser_link_proposal(
self,
request: Optional[
Union[
analytics_admin.CreateDisplayVideo360AdvertiserLinkProposalRequest, dict
]
] = None,
*,
parent: Optional[str] = None,
display_video_360_advertiser_link_proposal: Optional[
resources.DisplayVideo360AdvertiserLinkProposal
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DisplayVideo360AdvertiserLinkProposal:
r"""Creates a DisplayVideo360AdvertiserLinkProposal.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_create_display_video360_advertiser_link_proposal():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.CreateDisplayVideo360AdvertiserLinkProposalRequest(
parent="parent_value",
)
# Make the request
response = client.create_display_video360_advertiser_link_proposal(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.CreateDisplayVideo360AdvertiserLinkProposalRequest, dict]):
The request object. Request message for
CreateDisplayVideo360AdvertiserLinkProposal RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
display_video_360_advertiser_link_proposal (google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal):
Required. The
DisplayVideo360AdvertiserLinkProposal to
create.
This corresponds to the ``display_video_360_advertiser_link_proposal`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal:
A proposal for a link between a GA4
property and a Display & Video 360
advertiser.
A proposal is converted to a
DisplayVideo360AdvertiserLink once
approved. Google Analytics admins
approve inbound proposals while Display
& Video 360 admins approve outbound
proposals.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, display_video_360_advertiser_link_proposal])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.CreateDisplayVideo360AdvertiserLinkProposalRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.CreateDisplayVideo360AdvertiserLinkProposalRequest
):
request = (
analytics_admin.CreateDisplayVideo360AdvertiserLinkProposalRequest(
request
)
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if display_video_360_advertiser_link_proposal is not None:
request.display_video_360_advertiser_link_proposal = (
display_video_360_advertiser_link_proposal
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_display_video360_advertiser_link_proposal
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_display_video360_advertiser_link_proposal(
self,
request: Optional[
Union[
analytics_admin.DeleteDisplayVideo360AdvertiserLinkProposalRequest, dict
]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a DisplayVideo360AdvertiserLinkProposal on a
property. This can only be used on cancelled proposals.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_delete_display_video360_advertiser_link_proposal():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.DeleteDisplayVideo360AdvertiserLinkProposalRequest(
name="name_value",
)
# Make the request
client.delete_display_video360_advertiser_link_proposal(request=request)
Args:
request (Union[google.analytics.admin_v1alpha.types.DeleteDisplayVideo360AdvertiserLinkProposalRequest, dict]):
The request object. Request message for
DeleteDisplayVideo360AdvertiserLinkProposal RPC.
name (str):
Required. The name of the
DisplayVideo360AdvertiserLinkProposal to
delete. Example format:
properties/1234/displayVideo360AdvertiserLinkProposals/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.DeleteDisplayVideo360AdvertiserLinkProposalRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.DeleteDisplayVideo360AdvertiserLinkProposalRequest
):
request = (
analytics_admin.DeleteDisplayVideo360AdvertiserLinkProposalRequest(
request
)
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_display_video360_advertiser_link_proposal
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def approve_display_video360_advertiser_link_proposal(
self,
request: Optional[
Union[
analytics_admin.ApproveDisplayVideo360AdvertiserLinkProposalRequest,
dict,
]
] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> analytics_admin.ApproveDisplayVideo360AdvertiserLinkProposalResponse:
r"""Approves a DisplayVideo360AdvertiserLinkProposal.
The DisplayVideo360AdvertiserLinkProposal will be
deleted and a new DisplayVideo360AdvertiserLink will be
created.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_approve_display_video360_advertiser_link_proposal():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ApproveDisplayVideo360AdvertiserLinkProposalRequest(
name="name_value",
)
# Make the request
response = client.approve_display_video360_advertiser_link_proposal(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.ApproveDisplayVideo360AdvertiserLinkProposalRequest, dict]):
The request object. Request message for
ApproveDisplayVideo360AdvertiserLinkProposal RPC.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.ApproveDisplayVideo360AdvertiserLinkProposalResponse:
Response message for
ApproveDisplayVideo360AdvertiserLinkProposal
RPC.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ApproveDisplayVideo360AdvertiserLinkProposalRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.ApproveDisplayVideo360AdvertiserLinkProposalRequest
):
request = (
analytics_admin.ApproveDisplayVideo360AdvertiserLinkProposalRequest(
request
)
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.approve_display_video360_advertiser_link_proposal
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def cancel_display_video360_advertiser_link_proposal(
self,
request: Optional[
Union[
analytics_admin.CancelDisplayVideo360AdvertiserLinkProposalRequest, dict
]
] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DisplayVideo360AdvertiserLinkProposal:
r"""Cancels a DisplayVideo360AdvertiserLinkProposal.
Cancelling can mean either:
- Declining a proposal initiated from Display & Video
360 - Withdrawing a proposal initiated from Google
Analytics After being cancelled, a proposal will
eventually be deleted automatically.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_cancel_display_video360_advertiser_link_proposal():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.CancelDisplayVideo360AdvertiserLinkProposalRequest(
name="name_value",
)
# Make the request
response = client.cancel_display_video360_advertiser_link_proposal(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.CancelDisplayVideo360AdvertiserLinkProposalRequest, dict]):
The request object. Request message for
CancelDisplayVideo360AdvertiserLinkProposal RPC.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal:
A proposal for a link between a GA4
property and a Display & Video 360
advertiser.
A proposal is converted to a
DisplayVideo360AdvertiserLink once
approved. Google Analytics admins
approve inbound proposals while Display
& Video 360 admins approve outbound
proposals.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.CancelDisplayVideo360AdvertiserLinkProposalRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, analytics_admin.CancelDisplayVideo360AdvertiserLinkProposalRequest
):
request = (
analytics_admin.CancelDisplayVideo360AdvertiserLinkProposalRequest(
request
)
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.cancel_display_video360_advertiser_link_proposal
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_custom_dimension(
self,
request: Optional[
Union[analytics_admin.CreateCustomDimensionRequest, dict]
] = None,
*,
parent: Optional[str] = None,
custom_dimension: Optional[resources.CustomDimension] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.CustomDimension:
r"""Creates a CustomDimension.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_create_custom_dimension():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
custom_dimension = admin_v1alpha.CustomDimension()
custom_dimension.parameter_name = "parameter_name_value"
custom_dimension.display_name = "display_name_value"
custom_dimension.scope = "USER"
request = admin_v1alpha.CreateCustomDimensionRequest(
parent="parent_value",
custom_dimension=custom_dimension,
)
# Make the request
response = client.create_custom_dimension(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.CreateCustomDimensionRequest, dict]):
The request object. Request message for
CreateCustomDimension RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
custom_dimension (google.analytics.admin_v1alpha.types.CustomDimension):
Required. The CustomDimension to
create.
This corresponds to the ``custom_dimension`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.CustomDimension:
A definition for a CustomDimension.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, custom_dimension])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.CreateCustomDimensionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.CreateCustomDimensionRequest):
request = analytics_admin.CreateCustomDimensionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if custom_dimension is not None:
request.custom_dimension = custom_dimension
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_custom_dimension]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_custom_dimension(
self,
request: Optional[
Union[analytics_admin.UpdateCustomDimensionRequest, dict]
] = None,
*,
custom_dimension: Optional[resources.CustomDimension] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.CustomDimension:
r"""Updates a CustomDimension on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_update_custom_dimension():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.UpdateCustomDimensionRequest(
)
# Make the request
response = client.update_custom_dimension(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.UpdateCustomDimensionRequest, dict]):
The request object. Request message for
UpdateCustomDimension RPC.
custom_dimension (google.analytics.admin_v1alpha.types.CustomDimension):
The CustomDimension to update
This corresponds to the ``custom_dimension`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The list of fields to be updated. Omitted
fields will not be updated. To replace the entire
entity, use one path with the string "*" to match all
fields.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.CustomDimension:
A definition for a CustomDimension.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([custom_dimension, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.UpdateCustomDimensionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.UpdateCustomDimensionRequest):
request = analytics_admin.UpdateCustomDimensionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if custom_dimension is not None:
request.custom_dimension = custom_dimension
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_custom_dimension]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("custom_dimension.name", request.custom_dimension.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_custom_dimensions(
self,
request: Optional[
Union[analytics_admin.ListCustomDimensionsRequest, dict]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListCustomDimensionsPager:
r"""Lists CustomDimensions on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_list_custom_dimensions():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ListCustomDimensionsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_custom_dimensions(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.ListCustomDimensionsRequest, dict]):
The request object. Request message for
ListCustomDimensions RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListCustomDimensionsPager:
Response message for
ListCustomDimensions RPC.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ListCustomDimensionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.ListCustomDimensionsRequest):
request = analytics_admin.ListCustomDimensionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_custom_dimensions]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListCustomDimensionsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def archive_custom_dimension(
self,
request: Optional[
Union[analytics_admin.ArchiveCustomDimensionRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Archives a CustomDimension on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_archive_custom_dimension():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ArchiveCustomDimensionRequest(
name="name_value",
)
# Make the request
client.archive_custom_dimension(request=request)
Args:
request (Union[google.analytics.admin_v1alpha.types.ArchiveCustomDimensionRequest, dict]):
The request object. Request message for
ArchiveCustomDimension RPC.
name (str):
Required. The name of the
CustomDimension to archive. Example
format:
properties/1234/customDimensions/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ArchiveCustomDimensionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.ArchiveCustomDimensionRequest):
request = analytics_admin.ArchiveCustomDimensionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.archive_custom_dimension]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_custom_dimension(
self,
request: Optional[
Union[analytics_admin.GetCustomDimensionRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.CustomDimension:
r"""Lookup for a single CustomDimension.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_custom_dimension():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetCustomDimensionRequest(
name="name_value",
)
# Make the request
response = client.get_custom_dimension(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetCustomDimensionRequest, dict]):
The request object. Request message for
GetCustomDimension RPC.
name (str):
Required. The name of the
CustomDimension to get. Example format:
properties/1234/customDimensions/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.CustomDimension:
A definition for a CustomDimension.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetCustomDimensionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.GetCustomDimensionRequest):
request = analytics_admin.GetCustomDimensionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_custom_dimension]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_custom_metric(
self,
request: Optional[
Union[analytics_admin.CreateCustomMetricRequest, dict]
] = None,
*,
parent: Optional[str] = None,
custom_metric: Optional[resources.CustomMetric] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.CustomMetric:
r"""Creates a CustomMetric.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_create_custom_metric():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
custom_metric = admin_v1alpha.CustomMetric()
custom_metric.parameter_name = "parameter_name_value"
custom_metric.display_name = "display_name_value"
custom_metric.measurement_unit = "HOURS"
custom_metric.scope = "EVENT"
request = admin_v1alpha.CreateCustomMetricRequest(
parent="parent_value",
custom_metric=custom_metric,
)
# Make the request
response = client.create_custom_metric(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.CreateCustomMetricRequest, dict]):
The request object. Request message for
CreateCustomMetric RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
custom_metric (google.analytics.admin_v1alpha.types.CustomMetric):
Required. The CustomMetric to create.
This corresponds to the ``custom_metric`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.CustomMetric:
A definition for a custom metric.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, custom_metric])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.CreateCustomMetricRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.CreateCustomMetricRequest):
request = analytics_admin.CreateCustomMetricRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if custom_metric is not None:
request.custom_metric = custom_metric
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_custom_metric]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_custom_metric(
self,
request: Optional[
Union[analytics_admin.UpdateCustomMetricRequest, dict]
] = None,
*,
custom_metric: Optional[resources.CustomMetric] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.CustomMetric:
r"""Updates a CustomMetric on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_update_custom_metric():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.UpdateCustomMetricRequest(
)
# Make the request
response = client.update_custom_metric(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.UpdateCustomMetricRequest, dict]):
The request object. Request message for
UpdateCustomMetric RPC.
custom_metric (google.analytics.admin_v1alpha.types.CustomMetric):
The CustomMetric to update
This corresponds to the ``custom_metric`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The list of fields to be updated. Omitted
fields will not be updated. To replace the entire
entity, use one path with the string "*" to match all
fields.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.CustomMetric:
A definition for a custom metric.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([custom_metric, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.UpdateCustomMetricRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.UpdateCustomMetricRequest):
request = analytics_admin.UpdateCustomMetricRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if custom_metric is not None:
request.custom_metric = custom_metric
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_custom_metric]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("custom_metric.name", request.custom_metric.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_custom_metrics(
self,
request: Optional[Union[analytics_admin.ListCustomMetricsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListCustomMetricsPager:
r"""Lists CustomMetrics on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_list_custom_metrics():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ListCustomMetricsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_custom_metrics(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.ListCustomMetricsRequest, dict]):
The request object. Request message for
ListCustomMetrics RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListCustomMetricsPager:
Response message for
ListCustomMetrics RPC.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ListCustomMetricsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.ListCustomMetricsRequest):
request = analytics_admin.ListCustomMetricsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_custom_metrics]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListCustomMetricsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def archive_custom_metric(
self,
request: Optional[
Union[analytics_admin.ArchiveCustomMetricRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Archives a CustomMetric on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_archive_custom_metric():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ArchiveCustomMetricRequest(
name="name_value",
)
# Make the request
client.archive_custom_metric(request=request)
Args:
request (Union[google.analytics.admin_v1alpha.types.ArchiveCustomMetricRequest, dict]):
The request object. Request message for
ArchiveCustomMetric RPC.
name (str):
Required. The name of the
CustomMetric to archive. Example format:
properties/1234/customMetrics/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ArchiveCustomMetricRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.ArchiveCustomMetricRequest):
request = analytics_admin.ArchiveCustomMetricRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.archive_custom_metric]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_custom_metric(
self,
request: Optional[Union[analytics_admin.GetCustomMetricRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.CustomMetric:
r"""Lookup for a single CustomMetric.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_custom_metric():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetCustomMetricRequest(
name="name_value",
)
# Make the request
response = client.get_custom_metric(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetCustomMetricRequest, dict]):
The request object. Request message for GetCustomMetric
RPC.
name (str):
Required. The name of the
CustomMetric to get. Example format:
properties/1234/customMetrics/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.CustomMetric:
A definition for a custom metric.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetCustomMetricRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.GetCustomMetricRequest):
request = analytics_admin.GetCustomMetricRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_custom_metric]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_data_retention_settings(
self,
request: Optional[
Union[analytics_admin.GetDataRetentionSettingsRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DataRetentionSettings:
r"""Returns the singleton data retention settings for
this property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_data_retention_settings():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetDataRetentionSettingsRequest(
name="name_value",
)
# Make the request
response = client.get_data_retention_settings(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetDataRetentionSettingsRequest, dict]):
The request object. Request message for
GetDataRetentionSettings RPC.
name (str):
Required. The name of the settings to
lookup. Format:
properties/{property}/dataRetentionSettings
Example:
"properties/1000/dataRetentionSettings"
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DataRetentionSettings:
Settings values for data retention.
This is a singleton resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetDataRetentionSettingsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.GetDataRetentionSettingsRequest):
request = analytics_admin.GetDataRetentionSettingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_data_retention_settings
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_data_retention_settings(
self,
request: Optional[
Union[analytics_admin.UpdateDataRetentionSettingsRequest, dict]
] = None,
*,
data_retention_settings: Optional[resources.DataRetentionSettings] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DataRetentionSettings:
r"""Updates the singleton data retention settings for
this property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_update_data_retention_settings():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.UpdateDataRetentionSettingsRequest(
)
# Make the request
response = client.update_data_retention_settings(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.UpdateDataRetentionSettingsRequest, dict]):
The request object. Request message for
UpdateDataRetentionSettings RPC.
data_retention_settings (google.analytics.admin_v1alpha.types.DataRetentionSettings):
Required. The settings to update. The ``name`` field is
used to identify the settings to be updated.
This corresponds to the ``data_retention_settings`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The list of fields to be updated. Field names
must be in snake case (e.g., "field_to_update"). Omitted
fields will not be updated. To replace the entire
entity, use one path with the string "*" to match all
fields.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DataRetentionSettings:
Settings values for data retention.
This is a singleton resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([data_retention_settings, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.UpdateDataRetentionSettingsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.UpdateDataRetentionSettingsRequest):
request = analytics_admin.UpdateDataRetentionSettingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if data_retention_settings is not None:
request.data_retention_settings = data_retention_settings
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_data_retention_settings
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
(
"data_retention_settings.name",
request.data_retention_settings.name,
),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_data_stream(
self,
request: Optional[Union[analytics_admin.CreateDataStreamRequest, dict]] = None,
*,
parent: Optional[str] = None,
data_stream: Optional[resources.DataStream] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DataStream:
r"""Creates a DataStream.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_create_data_stream():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
data_stream = admin_v1alpha.DataStream()
data_stream.type_ = "IOS_APP_DATA_STREAM"
request = admin_v1alpha.CreateDataStreamRequest(
parent="parent_value",
data_stream=data_stream,
)
# Make the request
response = client.create_data_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.CreateDataStreamRequest, dict]):
The request object. Request message for CreateDataStream
RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
data_stream (google.analytics.admin_v1alpha.types.DataStream):
Required. The DataStream to create.
This corresponds to the ``data_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DataStream:
A resource message representing a
data stream.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, data_stream])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.CreateDataStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.CreateDataStreamRequest):
request = analytics_admin.CreateDataStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if data_stream is not None:
request.data_stream = data_stream
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_data_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_data_stream(
self,
request: Optional[Union[analytics_admin.DeleteDataStreamRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a DataStream on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_delete_data_stream():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.DeleteDataStreamRequest(
name="name_value",
)
# Make the request
client.delete_data_stream(request=request)
Args:
request (Union[google.analytics.admin_v1alpha.types.DeleteDataStreamRequest, dict]):
The request object. Request message for DeleteDataStream
RPC.
name (str):
Required. The name of the DataStream
to delete. Example format:
properties/1234/dataStreams/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.DeleteDataStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.DeleteDataStreamRequest):
request = analytics_admin.DeleteDataStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_data_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def update_data_stream(
self,
request: Optional[Union[analytics_admin.UpdateDataStreamRequest, dict]] = None,
*,
data_stream: Optional[resources.DataStream] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DataStream:
r"""Updates a DataStream on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_update_data_stream():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.UpdateDataStreamRequest(
)
# Make the request
response = client.update_data_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.UpdateDataStreamRequest, dict]):
The request object. Request message for UpdateDataStream
RPC.
data_stream (google.analytics.admin_v1alpha.types.DataStream):
The DataStream to update
This corresponds to the ``data_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The list of fields to be updated. Omitted
fields will not be updated. To replace the entire
entity, use one path with the string "*" to match all
fields.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DataStream:
A resource message representing a
data stream.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([data_stream, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.UpdateDataStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.UpdateDataStreamRequest):
request = analytics_admin.UpdateDataStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if data_stream is not None:
request.data_stream = data_stream
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_data_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("data_stream.name", request.data_stream.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_data_streams(
self,
request: Optional[Union[analytics_admin.ListDataStreamsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDataStreamsPager:
r"""Lists DataStreams on a property.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_list_data_streams():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ListDataStreamsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_data_streams(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.ListDataStreamsRequest, dict]):
The request object. Request message for ListDataStreams
RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListDataStreamsPager:
Response message for ListDataStreams
RPC.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ListDataStreamsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.ListDataStreamsRequest):
request = analytics_admin.ListDataStreamsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_data_streams]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDataStreamsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_data_stream(
self,
request: Optional[Union[analytics_admin.GetDataStreamRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DataStream:
r"""Lookup for a single DataStream.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_data_stream():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetDataStreamRequest(
name="name_value",
)
# Make the request
response = client.get_data_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetDataStreamRequest, dict]):
The request object. Request message for GetDataStream
RPC.
name (str):
Required. The name of the DataStream
to get. Example format:
properties/1234/dataStreams/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DataStream:
A resource message representing a
data stream.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetDataStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.GetDataStreamRequest):
request = analytics_admin.GetDataStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_data_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_audience(
self,
request: Optional[Union[analytics_admin.GetAudienceRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> audience.Audience:
r"""Lookup for a single Audience.
Audiences created before 2020 may not be supported.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_get_audience():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.GetAudienceRequest(
name="name_value",
)
# Make the request
response = client.get_audience(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.GetAudienceRequest, dict]):
The request object. Request message for GetAudience RPC.
name (str):
Required. The name of the Audience to
get. Example format:
properties/1234/audiences/5678
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.Audience:
A resource message representing a GA4
Audience.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.GetAudienceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.GetAudienceRequest):
request = analytics_admin.GetAudienceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_audience]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_audiences(
self,
request: Optional[Union[analytics_admin.ListAudiencesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAudiencesPager:
r"""Lists Audiences on a property.
Audiences created before 2020 may not be supported.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_list_audiences():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1alpha.ListAudiencesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_audiences(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.ListAudiencesRequest, dict]):
The request object. Request message for ListAudiences
RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListAudiencesPager:
Response message for ListAudiences
RPC.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.ListAudiencesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.ListAudiencesRequest):
request = analytics_admin.ListAudiencesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_audiences]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListAudiencesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def create_audience(
self,
request: Optional[Union[analytics_admin.CreateAudienceRequest, dict]] = None,
*,
parent: Optional[str] = None,
audience: Optional[gaa_audience.Audience] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gaa_audience.Audience:
r"""Creates an Audience.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.analytics import admin_v1alpha
def sample_create_audience():
# Create a client
client = admin_v1alpha.AnalyticsAdminServiceClient()
# Initialize request argument(s)
audience = admin_v1alpha.Audience()
audience.display_name = "display_name_value"
audience.description = "description_value"
audience.membership_duration_days = 2561
audience.filter_clauses.simple_filter.scope = "AUDIENCE_FILTER_SCOPE_ACROSS_ALL_SESSIONS"
audience.filter_clauses.clause_type = "EXCLUDE"
request = admin_v1alpha.CreateAudienceRequest(
parent="parent_value",
audience=audience,
)
# Make the request
response = client.create_audience(request=request)
# Handle the response
print(response)
Args:
request (Union[google.analytics.admin_v1alpha.types.CreateAudienceRequest, dict]):
The request object. Request message for CreateAudience
RPC.
parent (str):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
audience (google.analytics.admin_v1alpha.types.Audience):
Required. The audience to create.
This corresponds to the ``audience`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.Audience:
A resource message representing a GA4
Audience.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, audience])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a analytics_admin.CreateAudienceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, analytics_admin.CreateAudienceRequest):
request = analytics_admin.CreateAudienceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if audience is not None:
request.audience = audience
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_audience]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_audience(
    self,
    request: Optional[Union[analytics_admin.UpdateAudienceRequest, dict]] = None,
    *,
    audience: Optional[gaa_audience.Audience] = None,
    update_mask: Optional[field_mask_pb2.FieldMask] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> gaa_audience.Audience:
    r"""Updates an Audience on a property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.UpdateAudienceRequest, dict]):
            The request object. Request message for UpdateAudience RPC.
        audience (google.analytics.admin_v1alpha.types.Audience):
            Required. The audience to update. The audience's ``name``
            field is used to identify the audience to be updated.
            This corresponds to the ``audience`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The list of fields to be updated. Field names
            must be in snake case (e.g., "field_to_update"). Omitted
            fields will not be updated. To replace the entire entity,
            use one path with the string "*" to match all fields.
            This corresponds to the ``update_mask`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.Audience:
            A resource message representing a GA4 Audience.
    """
    # A request object and flattened field arguments are mutually
    # exclusive ways of specifying the call.
    if request is not None and any([audience, update_mask]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proper request message; skip the
    # copy when the caller already handed us the right type.
    if not isinstance(request, analytics_admin.UpdateAudienceRequest):
        request = analytics_admin.UpdateAudienceRequest(request)
    # Fold any flattened keyword arguments into the request message.
    if audience is not None:
        request.audience = audience
    if update_mask is not None:
        request.update_mask = update_mask
    # The transport-level wrapper supplies retry, timeout and friendly
    # error handling.
    rpc = self._transport._wrapped_methods[self._transport.update_audience]
    # Routing header so the backend can dispatch on the resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("audience.name", request.audience.name),)
        ),
    )
    # Issue the RPC and hand the response straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def archive_audience(
    self,
    request: Optional[Union[analytics_admin.ArchiveAudienceRequest, dict]] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Archives an Audience on a property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.ArchiveAudienceRequest, dict]):
            The request object. Request message for ArchiveAudience RPC.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    """
    # Coerce dicts (or None) into a proper request message; no
    # flattened fields exist for this RPC, so no exclusivity check.
    if not isinstance(request, analytics_admin.ArchiveAudienceRequest):
        request = analytics_admin.ArchiveAudienceRequest(request)
    # The transport-level wrapper supplies retry, timeout and friendly
    # error handling.
    rpc = self._transport._wrapped_methods[self._transport.archive_audience]
    # Routing header so the backend can dispatch on the resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )
    # Fire the RPC; this method intentionally returns nothing.
    rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def get_attribution_settings(
    self,
    request: Optional[
        Union[analytics_admin.GetAttributionSettingsRequest, dict]
    ] = None,
    *,
    name: Optional[str] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.AttributionSettings:
    r"""Lookup for a AttributionSettings singleton.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.GetAttributionSettingsRequest, dict]):
            The request object. Request message for
            GetAttributionSettings RPC.
        name (str):
            Required. The name of the attribution settings to
            retrieve. Format:
            properties/{property}/attributionSettings
            This corresponds to the ``name`` field on the ``request``
            instance; if ``request`` is provided, this should not be
            set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.AttributionSettings:
            The attribution settings used for a given property.
            This is a singleton resource.
    """
    # A request object and flattened field arguments are mutually
    # exclusive ways of specifying the call.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proper request message; skip the
    # copy when the caller already handed us the right type.
    if not isinstance(request, analytics_admin.GetAttributionSettingsRequest):
        request = analytics_admin.GetAttributionSettingsRequest(request)
    # Fold the flattened keyword argument into the request message.
    if name is not None:
        request.name = name
    # The transport-level wrapper supplies retry, timeout and friendly
    # error handling.
    rpc = self._transport._wrapped_methods[
        self._transport.get_attribution_settings
    ]
    # Routing header so the backend can dispatch on the resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )
    # Issue the RPC and hand the response straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def update_attribution_settings(
    self,
    request: Optional[
        Union[analytics_admin.UpdateAttributionSettingsRequest, dict]
    ] = None,
    *,
    attribution_settings: Optional[resources.AttributionSettings] = None,
    update_mask: Optional[field_mask_pb2.FieldMask] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> resources.AttributionSettings:
    r"""Updates attribution settings on a property.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.UpdateAttributionSettingsRequest, dict]):
            The request object. Request message for
            UpdateAttributionSettings RPC
        attribution_settings (google.analytics.admin_v1alpha.types.AttributionSettings):
            Required. The attribution settings to update. The ``name``
            field is used to identify the settings to be updated.
            This corresponds to the ``attribution_settings`` field on
            the ``request`` instance; if ``request`` is provided, this
            should not be set.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The list of fields to be updated. Field names
            must be in snake case (e.g., "field_to_update"). Omitted
            fields will not be updated. To replace the entire entity,
            use one path with the string "*" to match all fields.
            This corresponds to the ``update_mask`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.AttributionSettings:
            The attribution settings used for a given property.
            This is a singleton resource.
    """
    # A request object and flattened field arguments are mutually
    # exclusive ways of specifying the call.
    if request is not None and any([attribution_settings, update_mask]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (or None) into a proper request message; skip the
    # copy when the caller already handed us the right type.
    if not isinstance(request, analytics_admin.UpdateAttributionSettingsRequest):
        request = analytics_admin.UpdateAttributionSettingsRequest(request)
    # Fold any flattened keyword arguments into the request message.
    if attribution_settings is not None:
        request.attribution_settings = attribution_settings
    if update_mask is not None:
        request.update_mask = update_mask
    # The transport-level wrapper supplies retry, timeout and friendly
    # error handling.
    rpc = self._transport._wrapped_methods[
        self._transport.update_attribution_settings
    ]
    # Routing header so the backend can dispatch on the resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("attribution_settings.name", request.attribution_settings.name),)
        ),
    )
    # Issue the RPC and hand the response straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def run_access_report(
    self,
    request: Optional[Union[analytics_admin.RunAccessReportRequest, dict]] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> analytics_admin.RunAccessReportResponse:
    r"""Returns a customized report of data access records.

    The report provides records of each time a user reads Google
    Analytics reporting data. Access records are retained for up to 2
    years. Data Access Reports can be requested for a property. The
    property must be in Google Analytics 360. This method is only
    available to Administrators.

    These data access records include GA4 UI Reporting, GA4 UI
    Explorations, GA4 Data API, and other products like Firebase &
    Admob that can retrieve data from Google Analytics through a
    linkage. These records don't include property configuration
    changes like adding a stream or changing a property's time zone.
    For configuration change history, see
    `searchChangeHistoryEvents <https://developers.google.com/analytics/devguides/config/admin/v1/rest/v1alpha/accounts/searchChangeHistoryEvents>`__.

    Args:
        request (Union[google.analytics.admin_v1alpha.types.RunAccessReportRequest, dict]):
            The request object. The request for a Data Access Record
            Report.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.RunAccessReportResponse:
            The customized Data Access Record Report response.
    """
    # Coerce dicts (or None) into a proper request message; no
    # flattened fields exist for this RPC, so no exclusivity check.
    if not isinstance(request, analytics_admin.RunAccessReportRequest):
        request = analytics_admin.RunAccessReportRequest(request)
    # The transport-level wrapper supplies retry, timeout and friendly
    # error handling.
    rpc = self._transport._wrapped_methods[self._transport.run_access_report]
    # Routing header so the backend can dispatch on the target entity.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("entity", request.entity),)),
    )
    # Issue the RPC and hand the response straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def __enter__(self):
    # Context-manager entry: yield the client itself so callers can
    # write `with AnalyticsAdminServiceClient() as client:`; the paired
    # __exit__ closes the transport.
    return self
def __exit__(self, type, value, traceback):
    """Releases underlying transport's resources.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    # The exception arguments are ignored and nothing truthy is
    # returned, so any in-flight exception propagates to the caller.
    self.transport.close()
# Advertise the installed package version in the x-goog-api-client
# header via ClientInfo.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-analytics-admin",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source
    # checkout): fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("AnalyticsAdminServiceClient",)
| {
"content_hash": "4dbca1e58ad4db7c5cb755150635b04c",
"timestamp": "",
"source": "github",
"line_count": 9291,
"max_line_length": 154,
"avg_line_length": 41.466150037670864,
"alnum_prop": 0.597967097715321,
"repo_name": "googleapis/python-analytics-admin",
"id": "fbb273ea811732e65491f8d1a90009333b62dc7c",
"size": "385862",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/analytics/admin_v1alpha/services/analytics_admin_service/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "5576405"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
} |
"""Script that updates the language lists in Wikimedia family files."""
#
# (C) xqt, 2009-2014
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import re
import sys
from xml.etree import cElementTree
import pywikibot
from pywikibot.family import Family
if sys.version_info[0] > 2:
from urllib.request import urlopen
else:
from urllib import urlopen
# wikistats dump endpoint; %s is filled with a table name taken from
# familiesDict below.
URL = 'https://wikistats.wmflabs.org/api.php?action=dump&table=%s&format=xml'
# Maps a pywikibot family name to the wikistats table listing its wikis.
familiesDict = {
    'anarchopedia': 'anarchopedias',
    'wikibooks': 'wikibooks',
    'wikinews': 'wikinews',
    'wikipedia': 'wikipedias',
    'wikiquote': 'wikiquotes',
    'wikisource': 'wikisources',
    'wikiversity': 'wikiversity',
    'wikivoyage': 'wikivoyage',
    'wiktionary': 'wiktionaries',
}
# Language codes to skip even when wikistats reports them.
exceptions = ['-']
def update_family(families):
    """Update family files.

    For each requested family, fetch the current list of wiki language
    codes from wikistats, compare it with the family's
    ``languages_by_size`` list, and rewrite that list inside the family
    file when the two differ.  Codes present locally but missing from
    wikistats are kept at their original positions.

    @param families: family names to process; a falsy value means all
        families in familiesDict.
    """
    for family in families or familiesDict.keys():
        pywikibot.output('\nChecking family %s:' % family)
        original = Family.load(family).languages_by_size
        obsolete = Family.load(family).obsolete
        # Pull the current list of wiki prefixes from wikistats.
        feed = urlopen(URL % familiesDict[family])
        tree = cElementTree.parse(feed)
        new = []
        for field in tree.findall('row/field'):
            if field.get('name') == 'prefix':
                code = field.text
                if not (code in obsolete or code in exceptions):
                    new.append(code)
                continue
        # put the missing languages to the right place
        missing = original != new and set(original) - set(new)
        if missing:
            pywikibot.output(u"WARNING: ['%s'] not listed at wikistats."
                             % "', '".join(missing))
            index = {}
            for code in missing:
                index[original.index(code)] = code
            i = len(index) - 1
            for key in sorted(index.keys(), reverse=True):
                new.insert(key - i, index[key])
                i -= 1
        if original == new:
            pywikibot.output(u'The lists match!')
        else:
            pywikibot.output(u"The lists don't match, the new list is:")
            # Rebuild the source snippet exactly as it appears inside a
            # family file (8-space indent, 11-space continuation lines).
            text = u'        self.languages_by_size = [\r\n'
            line = ' ' * 11
            for code in new:
                if len(line) + len(code) <= 76:
                    line += u" '%s'," % code
                else:
                    text += u'%s\r\n' % line
                    line = ' ' * 11
                    line += u" '%s'," % code
            text += u'%s\r\n' % line
            text += u'        ]'
            pywikibot.output(text)
            family_file_name = 'pywikibot/families/%s_family.py' % family
            # Use context managers so both handles are closed promptly;
            # the previous code never closed the read handle before
            # reopening the same path for writing.
            with codecs.open(family_file_name, 'r', 'utf8') as family_file:
                family_text = family_file.read()
            old = re.findall(r'(?msu)^ {8}self.languages_by_size.+?\]',
                             family_text)[0]
            family_text = family_text.replace(old, text)
            with codecs.open(family_file_name, 'w', 'utf8') as family_file:
                family_file.write(family_text)
if __name__ == '__main__':
    # Collect family names given on the command line; pywikibot's own
    # global options are consumed by handleArgs() before we see them.
    fam = []
    for arg in pywikibot.handleArgs():
        if arg in familiesDict.keys() and arg not in fam:
            fam.append(arg)
    # An empty list means "update every known family".
    update_family(fam)
| {
"content_hash": "c347e94e2bd4afb847a5a8f2638a4ea8",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 77,
"avg_line_length": 31.422018348623855,
"alnum_prop": 0.538978102189781,
"repo_name": "h4ck3rm1k3/pywikibot-core",
"id": "3c56c2847c29418186473f733bd50427867af0d3",
"size": "3468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/maintenance/wikimedia_sites.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "4210758"
},
{
"name": "Shell",
"bytes": "659"
}
],
"symlink_target": ""
} |
"""Utility functions for handling MIDI data in an easy to read/manipulate
format
"""
import midi
import numpy as np
import warnings
import collections
import copy
from .instrument import Instrument
from .containers import KeySignature, TimeSignature
from .containers import Note, PitchBend, ControlChange
from .utilities import mode_accidentals_to_key_number
from .utilities import key_number_to_mode_accidentals
from .utilities import qpm_to_bpm
# The largest we'd ever expect a tick to be.  Files whose maximum tick
# exceeds this are treated as corrupt, since allocating the
# tick-to-time array for them would thrash memory.
MAX_TICK = 1e7
class PrettyMIDI(object):
    """A container for MIDI data in an easily-manipulable format.

    Parameters
    ----------
    midi_file : str or file
        Path or file pointer to a MIDI file.
        Default None which means create an empty class with the supplied
        values for resolution and initial tempo.
    resolution : int
        Resolution of the MIDI data, when no file is provided.
    initial_tempo : float
        Initial tempo for the MIDI data, when no file is provided.

    Attributes
    ----------
    instruments : list
        List of pretty_midi.Instrument objects.
    """
def __init__(self, midi_file=None, resolution=220, initial_tempo=120.):
    """Initialize the PrettyMIDI container, either by populating it with
    MIDI data from a file or from scratch with no data.

    Raises
    ------
    ValueError
        If the file's largest tick exceeds MAX_TICK, which suggests the
        file is corrupt.
    """
    if midi_file is not None:
        # Load in the MIDI data using the midi module
        midi_data = midi.read_midifile(midi_file)
        # Convert tick values in midi_data to absolute, a useful thing.
        midi_data.make_ticks_abs()
        # Store the resolution for later use
        self.resolution = midi_data.resolution
        # Populate the list of tempo changes (tick scales)
        self._load_tempo_changes(midi_data)
        # Update the array which maps ticks to time
        max_tick = max([max([e.tick for e in t]) for t in midi_data]) + 1
        # If max_tick is huge, the MIDI file is probably corrupt
        # and creating the __tick_to_time array will thrash memory
        if max_tick > MAX_TICK:
            raise ValueError(('MIDI file has a largest tick of {},'
                              ' it is likely corrupt'.format(max_tick)))
        # Create list that maps ticks to time in seconds
        self._update_tick_to_time(max_tick)
        # Populate the list of key and time signature changes
        # (must happen after _update_tick_to_time, which it relies on)
        self._load_metadata(midi_data)
        # Check that there are tempo, key and time change events
        # only on track 0
        if sum([sum([isinstance(event, (midi.events.SetTempoEvent,
                                        midi.events.KeySignatureEvent,
                                        midi.events.TimeSignatureEvent))
                     for event in track]) for track in midi_data[1:]]):
            warnings.warn(("Tempo, Key or Time signature change events"
                           " found on non-zero tracks."
                           " This is not a valid type 0 or type 1 MIDI"
                           " file. Tempo, Key or Time Signature"
                           " may be wrong."),
                          RuntimeWarning)
        # Populate the list of instruments
        self._load_instruments(midi_data)
    else:
        self.resolution = resolution
        # Compute the tick scale for the provided initial tempo
        # and let the tick scale start from 0
        self.__tick_scales = [(0, 60.0/(initial_tempo*self.resolution))]
        # Only need to convert one tick to time
        self.__tick_to_time = [0]
        # Empty instruments list
        self.instruments = []
        # Empty key signature changes list
        self.key_signature_changes = []
        # Empty time signatures changes list
        self.time_signature_changes = []
def _load_tempo_changes(self, midi_data):
    """Populate ``self.__tick_scales`` with (tick, tick_scale) tuples.

    A tick scale is the number of seconds per tick, derived from the
    BPM of each tempo event.

    Parameters
    ----------
    midi_data : midi.FileReader
        MIDI object from which data will be read
    """
    # Start from the MIDI default of 120bpm at tick 0; this applies
    # until (and unless) a tempo event overrides it.
    self.__tick_scales = [(0, 60.0/(120.0*midi_data.resolution))]
    # For SMF type 0 every event is on track 0; for type 1 the tempo
    # map lives on the first track; type 2 is universally ignored.
    # Either way, only track 0 needs to be scanned.
    for event in midi_data[0]:
        if not isinstance(event, midi.events.SetTempoEvent):
            continue
        scale = 60.0/(event.get_bpm()*midi_data.resolution)
        if event.tick == 0:
            # A tempo event at tick 0 replaces the default outright.
            self.__tick_scales = [(0, scale)]
        else:
            # Drop events that merely restate the current tempo,
            # which happens often in real files.
            _, current_scale = self.__tick_scales[-1]
            if scale != current_scale:
                self.__tick_scales.append((event.tick, scale))
def _load_metadata(self, midi_data):
    """Populate the key- and time-signature change lists from track 0.

    Fills ``self.key_signature_changes`` with KeySignature objects and
    ``self.time_signature_changes`` with TimeSignature objects.

    Parameters
    ----------
    midi_data : midi.FileReader
        MIDI object from which data will be read
    """
    self.key_signature_changes = []
    self.time_signature_changes = []
    # Signature events are expected on track 0 only (see __init__'s
    # warning for other tracks).
    for event in midi_data[0]:
        if isinstance(event, midi.events.KeySignatureEvent):
            key_number = mode_accidentals_to_key_number(
                event.data[1], event.get_alternatives())
            self.key_signature_changes.append(
                KeySignature(key_number, self.__tick_to_time[event.tick]))
        elif isinstance(event, midi.events.TimeSignatureEvent):
            self.time_signature_changes.append(
                TimeSignature(event.get_numerator(),
                              event.get_denominator(),
                              self.__tick_to_time[event.tick]))
def _update_tick_to_time(self, max_tick):
"""Creates __tick_to_time, a class member array which maps ticks to
time starting from tick 0 and ending at max_tick
Parameters
----------
max_tick : int
last tick to compute time for
"""
# Allocate tick to time array - indexed by tick from 0 to max_tick
self.__tick_to_time = np.zeros(max_tick + 1)
# Keep track of the end time of the last tick in the previous interval
last_end_time = 0
# Cycle through intervals of different tempi
for (start_tick, tick_scale), (end_tick, _) in \
zip(self.__tick_scales[:-1], self.__tick_scales[1:]):
# Convert ticks in this interval to times
ticks = np.arange(end_tick - start_tick + 1)
self.__tick_to_time[start_tick:end_tick + 1] = (last_end_time +
tick_scale*ticks)
# Update the time of the last tick in this interval
last_end_time = self.__tick_to_time[end_tick]
# For the final interval, use the final tempo setting
# and ticks from the final tempo setting until max_tick
start_tick, tick_scale = self.__tick_scales[-1]
ticks = np.arange(max_tick + 1 - start_tick)
self.__tick_to_time[start_tick:] = (last_end_time +
tick_scale*ticks)
def _load_instruments(self, midi_data):
    """Populates the list of instruments in midi_data.

    Note-ons are remembered per (program, is_drum, pitch) key until a
    matching note-off (or zero-velocity note-on) arrives, at which
    point Note objects are created on the appropriate Instrument.

    Parameters
    ----------
    midi_data : midi.FileReader
        MIDI object from which data will be read
    """
    # Initialize empty list of instruments
    self.instruments = []
    for track in midi_data:
        # Keep track of last note on location:
        # key = (instrument, is_drum, note),
        # value = (note on time, velocity)
        last_note_on = collections.defaultdict(list)
        # Keep track of which instrument is playing in each channel
        # initialize to program 0 for all channels
        # NOTE(review): np.int is deprecated and removed in NumPy >= 1.20;
        # plain `int` would be needed on modern NumPy — confirm target
        # NumPy version.
        current_instrument = np.zeros(16, dtype=np.int)
        for event in track:
            # Look for program change events
            if event.name == 'Program Change':
                # Update the instrument for this channel
                current_instrument[event.channel] = event.data[0]
            # Note ons are note on events with velocity > 0
            elif event.name == 'Note On' and event.velocity > 0:
                # Check whether this event is for the drum channel
                # (MIDI channel 10, zero-indexed as 9, is drums)
                is_drum = (event.channel == 9)
                # Store this as the last note-on location
                note_on_index = (current_instrument[event.channel],
                                 is_drum, event.pitch)
                last_note_on[note_on_index].append((
                    self.__tick_to_time[event.tick],
                    event.velocity))
            # Note offs can also be note on events with 0 velocity
            elif event.name == 'Note Off' or (event.name == 'Note On' and
                                              event.velocity == 0):
                # Get the instrument's drum type
                is_drum = (event.channel == 9)
                # Check that a note-on exists (ignore spurious note-offs)
                if (current_instrument[event.channel],
                        is_drum, event.pitch) in last_note_on:
                    # Get the start/stop times and velocity of every note
                    # which was turned on with this instrument/drum/pitch
                    for start, velocity in last_note_on[
                            (current_instrument[event.channel],
                             is_drum, event.pitch)]:
                        end = self.__tick_to_time[event.tick]
                        # Create the note event
                        note = Note(velocity, event.pitch, start, end)
                        # Get the program and drum type for the current
                        # instrument
                        program = current_instrument[event.channel]
                        # Retrieve the Instrument instance for the current
                        # instrument
                        instrument = self.__get_instrument(program,
                                                           is_drum)
                        # Add the note event
                        instrument.notes.append(note)
                    # Remove the last note on for this instrument
                    del last_note_on[(current_instrument[event.channel],
                                      is_drum, event.pitch)]
            # Store pitch bends
            elif event.name == 'Pitch Wheel':
                # Create pitch bend class instance
                bend = PitchBend(event.pitch,
                                 self.__tick_to_time[event.tick])
                # Get the program and drum type for the current inst
                program = current_instrument[event.channel]
                is_drum = (event.channel == 9)
                # Retrieve the Instrument instance for the current inst
                instrument = self.__get_instrument(program, is_drum)
                # Add the pitch bend event
                instrument.pitch_bends.append(bend)
            # Store control changes
            elif event.name == 'Control Change':
                control_change = ControlChange(
                    event.data[0], event.data[1],
                    self.__tick_to_time[event.tick])
                # Get the program and drum type for the current inst
                program = current_instrument[event.channel]
                is_drum = (event.channel == 9)
                # Retrieve the Instrument instance for the current inst
                instrument = self.__get_instrument(program, is_drum)
                # Add the control change event
                instrument.control_changes.append(control_change)
def __get_instrument(self, program, is_drum):
    """Gets the Instrument corresponding to the given program number and
    drum/non-drum type. If no such instrument exists, one is created.
    """
    # Linear scan is fine here: a MIDI file only ever yields a handful
    # of distinct (program, is_drum) combinations.
    for existing in self.instruments:
        if existing.program == program and existing.is_drum == is_drum:
            return existing
    # No match — create, register and return a fresh Instrument.
    created = Instrument(program, is_drum)
    self.instruments.append(created)
    return created
def get_tempo_changes(self):
"""Return arrays of tempo changes and their times.
This is direct from the MIDI file.
Returns
-------
tempo_change_times : np.ndarray
Times, in seconds, where the tempo changes.
tempi : np.ndarray
What the tempo is at each point in time in tempo_change_times
"""
# Pre-allocate return arrays
tempo_change_times = np.zeros(len(self.__tick_scales))
tempi = np.zeros(len(self.__tick_scales))
for n, (tick, tick_scale) in enumerate(self.__tick_scales):
# Convert tick of this tempo change to time in seconds
tempo_change_times[n] = self.__tick_to_time[tick]
# Convert tick scale to a tempo
tempi[n] = 60.0/(tick_scale*self.resolution)
return tempo_change_times, tempi
def get_end_time(self):
"""Returns the time of the end of this MIDI file (latest note-off event).
Returns
-------
end_time : float
Time, in seconds, where this MIDI file ends
"""
# Cycle through all notes from all instruments and find the largest
events = ([n.end for i in self.instruments for n in i.notes] +
[b.time for i in self.instruments for b in i.pitch_bends])
# If there are no events, return 0
if len(events) == 0:
return 0.
else:
return max(events)
def estimate_tempi(self):
"""Return an empirical estimate of tempos in the piece and each tempo's
probability.
Based on "Automatic Extraction of Tempo and Beat from Expressive
Performance", Dixon 2001
Returns
-------
tempos : np.ndarray
Array of estimated tempos, in bpm
probabilities : np.ndarray
Array of the probability of each tempo estimate
"""
# Grab the list of onsets
onsets = self.get_onsets()
# Compute inner-onset intervals
ioi = np.diff(onsets)
# "Rhythmic information is provided by IOIs in the range of
# approximately 50ms to 2s (Handel, 1989)"
ioi = ioi[ioi > .05]
ioi = ioi[ioi < 2]
# Normalize all iois into the range 30...300bpm
for n in xrange(ioi.shape[0]):
while ioi[n] < .2:
ioi[n] *= 2
# Array of inner onset interval cluster means
clusters = np.array([])
# Number of iois in each cluster
cluster_counts = np.array([])
for interval in ioi:
# If this ioi falls within a cluster (threshold is 25ms)
if (np.abs(clusters - interval) < .025).any():
k = np.argmin(clusters - interval)
# Update cluster mean
clusters[k] = (cluster_counts[k]*clusters[k] +
interval)/(cluster_counts[k] + 1)
# Update number of elements in cluster
cluster_counts[k] += 1
# No cluster is close, make a new one
else:
clusters = np.append(clusters, interval)
cluster_counts = np.append(cluster_counts, 1.)
# Sort the cluster list by count
cluster_sort = np.argsort(cluster_counts)[::-1]
clusters = clusters[cluster_sort]
cluster_counts = cluster_counts[cluster_sort]
# Normalize the cluster scores
cluster_counts /= cluster_counts.sum()
return 60./clusters, cluster_counts
def estimate_tempo(self):
"""Returns the best tempo estimate from estimate_tempi(), for
convenience
Returns
-------
tempo : float
Estimated tempo, in bpm
"""
return self.estimate_tempi()[0][0]
def get_beats(self, start_time=0.):
"""Return a list of beat locations, according to MIDI tempo changes.
Will not be correct if the MIDI data has been modified without changing
tempo information.
Parameters
----------
start_time : float
Location of the first beat, in seconds.
Returns
-------
beats : np.ndarray
Beat locations, in seconds.
"""
# Get tempo changes and tempos
tempo_change_times, tempi = self.get_tempo_changes()
# Create beat list; first beat is at first onset
beats = [start_time]
# Index of the tempo we're using
tempo_idx = 0
# Move past all the tempo changes up to the supplied start time
while (tempo_idx < tempo_change_times.shape[0] - 1 and
beats[-1] > tempo_change_times[tempo_idx]):
tempo_idx += 1
# Index of the time signature change we're using
ts_idx = 0
# Move past all time signature changes up to the supplied start time
while (ts_idx < len(self.time_signature_changes) - 1 and
beats[-1] > self.time_signature_changes[ts_idx]):
ts_idx += 1
# Get track end time
end_time = self.get_end_time()
# Add beats in
while beats[-1] < end_time:
# Compute expected beat location, one period later
bpm = qpm_to_bpm(tempi[tempo_idx],
self.time_signature_changes[ts_idx].numerator,
self.time_signature_changes[ts_idx].denominator)
next_beat = beats[-1] + 60.0/bpm
# If the next beat location passes a time signature change boundary
if ts_idx < len(self.time_signature_changes) - 1:
# Time of the next time signature change
next_ts_time = self.time_signature_changes[ts_idx + 1].time
if (next_beat > next_ts_time or
np.isclose(next_beat, next_ts_time)):
# Set the next beat to the time signature change time
next_beat = self.time_signature_changes[ts_idx + 1].time
# Update the time signature index
ts_idx += 1
bpm = qpm_to_bpm(
tempi[tempo_idx],
self.time_signature_changes[ts_idx].numerator,
self.time_signature_changes[ts_idx].denominator)
# If the beat location passes a tempo change boundary...
if (tempo_idx < tempo_change_times.shape[0] - 1 and
next_beat > tempo_change_times[tempo_idx + 1]):
# Start by setting the beat location to the current beat...
next_beat = beats[-1]
# with the entire beat remaining
beat_remaining = 1.0
# While a beat with the current tempo would pass a tempo
# change boundary...
while (tempo_idx < tempo_change_times.shape[0] - 1 and
next_beat + beat_remaining*60.0/bpm >=
tempo_change_times[tempo_idx + 1]):
# Compute bpm adjusted for time signature
bpm = qpm_to_bpm(
tempi[tempo_idx],
self.time_signature_changes[ts_idx].numerator,
self.time_signature_changes[ts_idx].denominator)
# Compute the amount the beat location overshoots
overshot_ratio = (tempo_change_times[tempo_idx + 1] -
next_beat)/(60.0/bpm)
# Add in the amount of the beat during this tempo
next_beat += overshot_ratio*60.0/bpm
# Less of the beat remains now
beat_remaining -= overshot_ratio
# Increment the tempo index
tempo_idx = tempo_idx + 1
# Compute bpm adjusted for time signature
bpm = qpm_to_bpm(
tempi[tempo_idx],
self.time_signature_changes[ts_idx].numerator,
self.time_signature_changes[ts_idx].denominator)
next_beat += beat_remaining*60./bpm
beats.append(next_beat)
# The last beat will pass the end_time barrier, so don't include it
beats = np.array(beats[:-1])
return beats
    def estimate_beat_start(self, candidates=10, tolerance=.025):
        """Estimate the location of the first beat based on which of the first
        few onsets results in the best correlation with the onset spike train.

        Parameters
        ----------
        candidates : int
            Number of candidate onsets to try
        tolerance : float
            The tolerance in seconds around which onsets will be used to
            treat a beat as correct

        Returns
        -------
        beat_start : float
            The offset which is chosen as the beat start location
        """
        # Get a sorted list of all notes from all instruments
        note_list = [n for i in self.instruments for n in i.notes]
        note_list.sort(key=lambda note: note.start)
        # List of possible beat trackings
        beat_candidates = []
        # List of start times for each beat candidate
        start_times = []
        onset_index = 0
        # Try the first 10 (unique) onsets as beat tracking start locations
        while (len(beat_candidates) <= candidates and
               len(beat_candidates) <= len(note_list) and
               onset_index < len(note_list)):
            # Make sure we are using a new start location; onsets closer
            # than 1ms to the previous onset are treated as duplicates
            if onset_index == 0 or np.abs(note_list[onset_index - 1].start -
                                          note_list[onset_index].start) > .001:
                beat_candidates.append(
                    self.get_beats(note_list[onset_index].start))
                start_times.append(note_list[onset_index].start)
            onset_index += 1
        # Compute onset scores
        onset_scores = np.zeros(len(beat_candidates))
        # Synthesize note onset signal, with velocity-valued spikes at onsets,
        # sampled at 1kHz (1ms resolution); one extra second of padding
        fs = 1000
        onset_signal = np.zeros(int(fs*(self.get_end_time() + 1)))
        for note in note_list:
            onset_signal[int(note.start*fs)] += note.velocity
        for n, beats in enumerate(beat_candidates):
            # Create a synthetic beat signal with 25ms windows
            beat_signal = np.zeros(int(fs*(self.get_end_time() + 1)))
            # Prepend a beat at time 0 so early onsets can be matched
            for beat in np.append(0, beats):
                if beat - tolerance < 0:
                    # Window would start before t=0; truncate it at the left
                    beat_window = np.ones(
                        int(fs*2*tolerance + (beat - tolerance)*fs))
                    beat_signal[:int((beat + tolerance)*fs)] = beat_window
                else:
                    # Full window of width 2*tolerance centered on the beat
                    beat_start = int((beat - tolerance)*fs)
                    beat_end = beat_start + int(fs*tolerance*2)
                    beat_window = np.ones(int(fs*tolerance*2))
                    beat_signal[beat_start:beat_end] = beat_window
            # Compute their dot product and normalize to get score;
            # dividing by the beat count avoids favoring denser trackings
            onset_scores[n] = np.dot(beat_signal, onset_signal)/beats.shape[0]
        # Return the best-scoring beat start
        return start_times[np.argmax(onset_scores)]
def get_onsets(self):
"""Return a sorted list of the times of all onsets of all notes from
all instruments. May have duplicate entries.
Returns
-------
onsets : np.ndarray
Onset locations, in seconds
"""
onsets = np.array([])
# Just concatenate onsets from all the instruments
for instrument in self.instruments:
onsets = np.append(onsets, instrument.get_onsets())
# Return them sorted (because why not?)
return np.sort(onsets)
def get_piano_roll(self, fs=100, times=None):
"""Get the MIDI data in piano roll notation.
Parameters
----------
fs : int
Sampling frequency of the columns, i.e. each column is spaced apart
by 1./fs seconds
times : np.ndarray
Times of the start of each column in the piano roll.
Default None which is np.arange(0, get_end_time(), 1./fs)
Returns
-------
piano_roll : np.ndarray, shape=(128,times.shape[0])
Piano roll of MIDI data, flattened across instruments
"""
# If there are no instruments, return an empty array
if len(self.instruments) == 0:
return np.zeros((128, 0))
# Get piano rolls for each instrument
piano_rolls = [i.get_piano_roll(fs=fs, times=times)
for i in self.instruments]
# Allocate piano roll,
# number of columns is max of # of columns in all piano rolls
piano_roll = np.zeros((128, np.max([p.shape[1] for p in piano_rolls])),
dtype=np.int16)
# Sum each piano roll into the aggregate piano roll
for roll in piano_rolls:
piano_roll[:, :roll.shape[1]] += roll
return piano_roll
def get_pitch_class_histogram(self, use_duration=False,
use_velocity=False, normalize=True):
"""Computes the histogram of pitch classes given all tracks
Parameters
----------
use_duration : bool
Weight frequency by note duration
use_velocity : bool
Weight frequency by note velocity
normalize : bool
Normalizes the histogram such that the sum of bin values is 1.
Returns
-------
histogram : np.ndarray, shape=(12,)
Histogram of pitch classes given all tracks, optionally weighted
by their durations or velocities
"""
# Sum up all histograms from all instruments defaulting to np.zeros(12)
histogram = sum([
i.get_pitch_class_histogram(use_duration, use_velocity)
for i in self.instruments], np.zeros(12))
# Normalize accordingly
if normalize:
histogram /= (histogram.sum() + (histogram.sum() == 0))
return histogram
def get_pitch_class_transition_matrix(self, normalize=False,
time_thresh=0.05):
"""Computes the transition matrix of pitch classes given all tracks
Parameters
----------
use_duration : bool
Increase frequency by transition duration (current and
next note)
normalize : bool
Normalize transition matrix such that matrix sum equals is 1.
time_thresh : float
Maximum temporal threshold, in seconds, between the start of a note
and end time of any other note for a transition to be added.
Returns
-------
pitch_class_transition_matrix : np.ndarray, shape=(12,12)
Pitch class transition matrix given all tracks
"""
# Sum up all matrices from all instruments defaulting zeros matrix
pc_trans_mat = sum([i.get_pitch_class_transition_matrix(normalize,
time_thresh)
for i in self.instruments], np.zeros((12, 12)))
# Normalize accordingly
if normalize:
pc_trans_mat /= (pc_trans_mat.sum() + (pc_trans_mat.sum() == 0))
return pc_trans_mat
def get_chroma(self, fs=100, times=None):
"""Get the MIDI data as a sequence of chroma vectors.
Parameters
----------
fs : int
Sampling frequency of the columns, i.e. each column is spaced apart
by 1./fs seconds
times : np.ndarray
Times of the start of each column in the piano roll.
Default None which is np.arange(0, get_end_time(), 1./fs)
Returns
-------
piano_roll : np.ndarray, shape=(12,times.shape[0])
Chromagram of MIDI data, flattened across instruments
"""
# First, get the piano roll
piano_roll = self.get_piano_roll(fs=fs, times=times)
# Fold into one octave
chroma_matrix = np.zeros((12, piano_roll.shape[1]))
for note in range(12):
chroma_matrix[note, :] = np.sum(piano_roll[note::12], axis=0)
return chroma_matrix
def synthesize(self, fs=44100, wave=np.sin):
"""Synthesize the pattern using some waveshape. Ignores drum track.
Parameters
----------
fs : int
Sampling rate of the synthesized audio signal, default 44100
wave : function
Function which returns a periodic waveform,
e.g. np.sin, scipy.signal.square, etc. Default np.sin
Returns
-------
synthesized : np.ndarray
Waveform of the MIDI data, synthesized at fs
"""
# If there are no instruments, return an empty array
if len(self.instruments) == 0:
return np.array([])
# Get synthesized waveform for each instrument
waveforms = [i.synthesize(fs=fs, wave=wave) for i in self.instruments]
# Allocate output waveform, with #sample = max length of all waveforms
synthesized = np.zeros(np.max([w.shape[0] for w in waveforms]))
# Sum all waveforms in
for waveform in waveforms:
synthesized[:waveform.shape[0]] += waveform
# Normalize
synthesized /= np.abs(synthesized).max()
return synthesized
def fluidsynth(self, fs=44100, sf2_path=None):
"""Synthesize using fluidsynth.
Parameters
----------
fs : int
Sampling rate to synthesize
sf2_path : str
Path to a .sf2 file.
Default None, which uses the TimGM6mb.sf2 file included with
pretty_midi.
Returns
-------
synthesized : np.ndarray
Waveform of the MIDI data, synthesized at fs
"""
# If there are no instruments, or all instruments have no notes, return
# an empty array
if len(self.instruments) == 0 or all(len(i.notes) == 0
for i in self.instruments):
return np.array([])
# Get synthesized waveform for each instrument
waveforms = [i.fluidsynth(fs=fs,
sf2_path=sf2_path) for i in self.instruments]
# Allocate output waveform, with #sample = max length of all waveforms
synthesized = np.zeros(np.max([w.shape[0] for w in waveforms]))
# Sum all waveforms in
for waveform in waveforms:
synthesized[:waveform.shape[0]] += waveform
# Normalize
synthesized /= np.abs(synthesized).max()
return synthesized
def tick_to_time(self, tick):
"""Converts from an absolute tick to time in seconds using
self.__tick_to_time
Parameters
----------
tick : int
absolute tick to convert
Returns
-------
time : float
time in seconds of tick
"""
# Check that the tick isn't too big
if tick >= MAX_TICK:
raise IndexError('Supplied tick is too large.')
# If we haven't compute the mapping for a tick this large, compute it
if tick >= len(self.__tick_to_time):
self._update_tick_to_time(tick)
# Ticks should be integers
if type(tick) != int:
warnings.warn('tick should be an int.')
# Otherwise just return the time
return self.__tick_to_time[int(tick)]
def time_to_tick(self, time):
"""Converts from a time in seconds to absolute tick using
self.__tick_scales
Parameters
----------
time : float
Time, in seconds
Returns
-------
tick : int
Absolute tick corresponding to the supplied time
"""
# Ticks will be accumulated over tick scale changes
tick = 0
# Iterate through all the tempo changes (tick scale changes!)
for change_tick, tick_scale in reversed(self.__tick_scales):
change_time = self.tick_to_time(change_tick)
if time > change_time:
tick += (time - change_time)/tick_scale
time = change_time
return int(tick)
    def adjust_times(self, original_times, new_times):
        """Adjusts the timing of the events in the MIDI object.

        The parameters `original_times` and `new_times` define a mapping, so
        that if an event originally occurs at time `original_times[n]`, it
        will be moved so that it occurs at `new_times[n]`. If events don't
        occur exactly on a time in `original_times`, their timing will be
        linearly interpolated.

        Parameters
        ----------
        original_times : np.ndarray
            Times to map from
        new_times : np.ndarray
            New times to map to
        """
        # Only include notes within start/end time of the provided times;
        # notes are deep-copied so the filtering doesn't alias the originals
        for instrument in self.instruments:
            valid_notes = []
            for note in instrument.notes:
                if note.start >= original_times[0] and \
                        note.end <= original_times[-1]:
                    valid_notes.append(copy.deepcopy(note))
            instrument.notes = valid_notes
        # Get array of note-on locations and correct them.
        # NOTE: the flattened order here (instruments, then notes) must match
        # the iteration order of the correction loops below
        note_ons = np.array([note.start for instrument in self.instruments
                             for note in instrument.notes])
        aligned_note_ons = np.interp(note_ons, original_times, new_times)
        # Same for note-offs
        note_offs = np.array([note.end for instrument in self.instruments
                              for note in instrument.notes])
        aligned_note_offs = np.interp(note_offs, original_times, new_times)
        # Same for pitch bends
        pitch_bends = np.array([bend.time for instrument in self.instruments
                                for bend in instrument.pitch_bends])
        aligned_pitch_bends = np.interp(pitch_bends, original_times, new_times)
        # Same for control changes
        ccs = np.array([cc.time for instrument in self.instruments
                        for cc in instrument.control_changes])
        aligned_ccs = np.interp(ccs, original_times, new_times)
        # Correct notes; the (x > 0)*x expression clamps negative times to 0
        for n, note in enumerate([note for instrument in self.instruments
                                  for note in instrument.notes]):
            note.start = (aligned_note_ons[n] > 0)*aligned_note_ons[n]
            note.end = (aligned_note_offs[n] > 0)*aligned_note_offs[n]
        # After performing alignment, some notes may have an end time which is
        # on or before the start time. Remove these!
        self.remove_invalid_notes()
        # Correct pitch changes, clamping negative times to 0 as above
        for n, bend in enumerate([bend for instrument in self.instruments
                                  for bend in instrument.pitch_bends]):
            bend.time = (aligned_pitch_bends[n] > 0)*aligned_pitch_bends[n]
        # Correct control changes, clamping negative times to 0 as above
        for n, cc in enumerate([cc for instrument in self.instruments
                                for cc in instrument.control_changes]):
            cc.time = (aligned_ccs[n] > 0)*aligned_ccs[n]
def remove_invalid_notes(self):
"""Removes any notes which have an end time <= start time.
"""
# Simply call the child method on all instruments
for instrument in self.instruments:
instrument.remove_invalid_notes()
def write(self, filename):
"""Write the PrettyMIDI object out to a .mid file
Parameters
----------
filename : str
Path to write .mid file to
"""
# Initialize list of tracks to output
tracks = []
# Create track 0 with timing information
timing_track = midi.Track(tick_relative=False)
# Not sure if time signature is actually necessary
timing_track += [midi.TimeSignatureEvent(tick=0, data=[4, 2, 24, 8])]
# Add in each tempo change event
for (tick, tick_scale) in self.__tick_scales:
tempo_event = midi.SetTempoEvent(tick=tick)
# Compute the BPM
tempo_event.set_bpm(60.0/(tick_scale*self.resolution))
timing_track += [tempo_event]
# Add in each time signature
for ts in self.time_signature_changes:
midi_ts = midi.events.TimeSignatureEvent()
midi_ts.set_numerator(ts.numerator)
midi_ts.set_denominator(ts.denominator)
midi_ts.tick = self.time_to_tick(ts.time)
timing_track += [midi_ts]
# Add in each key signature
for ks in self.key_signature_changes:
midi_ks = midi.events.KeySignatureEvent()
mode, num_accidentals = key_number_to_mode_accidentals(
ks.key_number)
midi_ks.set_alternatives(num_accidentals)
midi_ks.set_minor(mode)
midi_ks.tick = self.time_to_tick(ks.time)
timing_track += [midi_ks]
# Sort the (absolute-tick-timed) events.
timing_track.sort(key=lambda event: event.tick)
# Add in an end of track event
timing_track += [midi.EndOfTrackEvent(tick=timing_track[-1].tick + 1)]
tracks += [timing_track]
# Create a list of possible channels to assign - this seems to matter
# for some synths.
channels = range(16)
# Don't assign the drum channel by mistake!
channels.remove(9)
for n, instrument in enumerate(self.instruments):
# Initialize track for this instrument
track = midi.Track(tick_relative=False)
# If it's a drum event, we need to set channel to 9
if instrument.is_drum:
channel = 9
# Otherwise, choose a channel from the possible channel list
else:
channel = channels[n % len(channels)]
# Set the program number
program_change = midi.ProgramChangeEvent(tick=0)
program_change.set_value(instrument.program)
program_change.channel = channel
track += [program_change]
# Add all note events
for note in instrument.notes:
# Construct the note-on event
note_on = midi.NoteOnEvent(tick=self.time_to_tick(note.start))
note_on.set_pitch(note.pitch)
note_on.set_velocity(note.velocity)
note_on.channel = channel
# Also need a note-off event (note on with velocity 0)
note_off = midi.NoteOnEvent(tick=self.time_to_tick(note.end))
note_off.set_pitch(note.pitch)
note_off.set_velocity(0)
note_off.channel = channel
# Add notes to track
track += [note_on, note_off]
# Add all pitch bend events
for bend in instrument.pitch_bends:
tick = self.time_to_tick(bend.time)
bend_event = midi.PitchWheelEvent(tick=tick)
bend_event.set_pitch(bend.pitch)
bend_event.channel = channel
track += [bend_event]
# Add all control change events
for control_change in instrument.control_changes:
tick = self.time_to_tick(control_change.time)
control_event = midi.ControlChangeEvent(tick=tick)
control_event.set_control(control_change.number)
control_event.set_value(control_change.value)
control_event.channel = channel
track += [control_event]
# Sort all the events by tick time before converting to relative
tick_sort = np.argsort([event.tick for event in track])
track = midi.Track([track[n] for n in tick_sort],
tick_relative=False)
# If there's a note off event and a note on event with the same
# tick and pitch, put the note off event first
for n, (event1, event2) in enumerate(zip(track[:-1], track[1:])):
if (event1.tick == event2.tick and
event1.name == 'Note On' and
event2.name == 'Note On' and
event1.pitch == event2.pitch and
event1.velocity != 0 and
event2.velocity == 0):
track[n] = event2
track[n + 1] = event1
# Finally, add in an end of track event
track += [midi.EndOfTrackEvent(tick=track[-1].tick + 1)]
# Add to the list of output tracks
tracks += [track]
# Construct an output pattern with the currently stored resolution
output_pattern = midi.Pattern(resolution=self.resolution,
tracks=tracks,
tick_relative=False)
# Turn ticks to relative, it doesn't work otherwise
output_pattern.make_ticks_rel()
# Write it out
midi.write_midifile(filename, output_pattern)
| {
"content_hash": "8625744d163ff55065f99db5cbc02bc6",
"timestamp": "",
"source": "github",
"line_count": 995,
"max_line_length": 81,
"avg_line_length": 43.210050251256284,
"alnum_prop": 0.5523794017769922,
"repo_name": "rafaelvalle/pretty-midi",
"id": "c71ed822cc1310a3811390ca7cc2941393d8ab80",
"size": "42994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pretty_midi/pretty_midi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89925"
}
],
"symlink_target": ""
} |
from flask.ext.wtf import Form
from wtforms import TextField, BooleanField, TextAreaField
from wtforms.validators import Required, Length
class LoginForm(Form):
    """Sign-in form: an OpenID identifier plus a remember-me toggle."""
    openid = TextField('openid', validators=[Required()])
    remember_me = BooleanField('remember_me', default=False)
class EditForm(Form):
    """Profile-editing form with a uniqueness check on the nickname.

    Parameters
    ----------
    original_nickname : str
        The user's current nickname; validation allows keeping it
        unchanged without triggering the uniqueness check.
    """
    nickname = TextField('nickname', validators=[Required()])
    about_me = TextAreaField('about_me', validators=[Length(min=0, max=140)])

    def __init__(self, original_nickname, *args, **kwargs):
        Form.__init__(self, *args, **kwargs)
        self.original_nickname = original_nickname

    def validate(self):
        """Run the standard validators, then enforce nickname uniqueness."""
        if not Form.validate(self):
            return False
        # Keeping the current nickname is always allowed.
        if self.nickname.data == self.original_nickname:
            return True
        # NOTE(review): `User` is not imported in this module's visible
        # imports — confirm it is in scope (e.g. from the app's models).
        user = User.query.filter_by(nickname=self.nickname.data).first()
        # Fix: compare against None with identity, not equality.
        if user is not None:
            self.nickname.errors.append('This nickname is already in use. Please choose another one.')
            return False
        return True
class PostForm(Form):
    """Single-field form for submitting a new post."""
    post = TextField('post', validators=[Required()])
| {
"content_hash": "518ff42357211474a52a5fa788ad93fa",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 102,
"avg_line_length": 37.733333333333334,
"alnum_prop": 0.6413427561837456,
"repo_name": "mrzacarias/zaca_shortener",
"id": "a0f2eb5cf56fa0a43a8cc0134548364e2c047d6a",
"size": "1132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131061"
}
],
"symlink_target": ""
} |
__version__ = '1.0.0'
__author__ = 'Christian Brickhouse'
import re
from bs4 import BeautifulSoup as soup
from nltk.parse.corenlp import CoreNLPDependencyParser
class Game:
    """An object that represents a Jeopardy game and structures related data.

    The Game class creates an object that structures and contains data on a
    given Jeopardy game from the j-archive. It contains data related to the
    game as a whole and contains all of the clues (represented as Clue
    objects) within it.

    Attributes:
        id_         The game id used in the j-archive url.
        title       The title of the game, including the game number and date.
        game_number The number of the game in sequence from the first,
                    different from the id_. Game ID is j-archive specific,
                    but game_number is numbered from the start of Jeopardy.
        date        The date the game aired in the format '(D)D Mon YYYY'.
        day         The day of the month on which the game aired.
        month       The month in which the game aired.
        year        The year in which the game aired.
        raw_clues   The bs4 tag elements for each of the clues.
        categories  A dictionary, with round names as keys, containing lists
                    of category names.
        clues       A dictionary, with round names as keys, containing lists
                    of all the Clue objects for that round.

    TO ADD:
        * Various objects related to score statistics and team
          batting average.

    Methods:
        __init__    Initializes the game object.
        score_graph Return and plot data on score by clue.
        conll       Dependency parse the clues.
    """
    # Extracts (game number, weekday, month, day, year) from the page title.
    title_regex = re.compile(r'#(\d+).*?([a-zA-Z]+), ([a-zA-Z]+) (\d+), (\d+)')
    rounds = ['jeopardy_round', 'double_jeopardy_round', 'final_jeopardy_round']

    def __init__(self,
                 page_source=None,
                 url=None,
                 load=False,
                 **kwargs
                 ):
        """Initialize important meta-data on the game.

        Either rebuilds the object from saved JSON (load=True, data in
        kwargs) or parses a freshly scraped j-archive page.
        """
        if load:
            self.loaded = True
            self._load(**kwargs)
            return
        self.loaded = False
        # The j-archive game id is the final URL query value.
        self.id_ = url.split('=')[-1]
        self._page_source = page_source
        self._parsed_html = soup(self._page_source, "html.parser")
        self.title = self._parsed_html.body.find(
            'div',
            attrs={'id': 'game_title'}
        ).text
        num, dow, mon, day, year = re.search(
            self.title_regex, self.title).groups()
        self.game_number = num
        self.weekday = dow
        self.month = mon
        self.day = day
        self.date = ' '.join([day, mon, year])
        self.year = year
        self._set_contestants()
        self.score_data = None
        notEmpty = self._set_raw_clues()
        if notEmpty:
            self.empty = False
            self._set_categories()
            self._parse_clues()
        else:
            # No clues on the page; install empty structures so the rest
            # of the API still works.
            self.empty = True
            self.clues = {}
            for round_ in self.rounds:
                self.clues[round_] = []
            self.categories = {
                'jeopardy_round': [],
                'double_jeopardy_round': [],
                'final_jeopardy_round': []
            }

    def score_graph(self, plt=None):
        """
        If given a matplotlib instance, will plot each player's
        score by clue, otherwise returns the data as a dictionary
        where keys are contestant names and values are lists of
        the data used to plot.
        """
        # Fix: identity comparisons for the None/False sentinels.
        if self.score_data is None:
            self.score_data = self._make_score_data()
        if self.score_data is False:
            raise ValueError('Unable to get score data.')
        if plt is None:
            return self.score_data
        for cont in self.contestants:
            plt.plot(cont.score_series)
        plt.show()
        return ()

    def conll(self, parser, txt='all', jeopardy_round='all', style=10):
        """Dependency-parse each clue's text and responses into CoNLL form.

        Arguments:
            parser          A CoreNLP dependency parser instance.
            jeopardy_round  'all', a single round name, or a list of round
                            names.
            style           CoNLL style forwarded to Clue.conll.
        """
        clues = self.clues
        if jeopardy_round == 'all':
            rounds = [
                'jeopardy_round',
                'double_jeopardy_round',
                'final_jeopardy_round'
            ]
        elif isinstance(jeopardy_round, list):
            # Fix: a list argument previously left `rounds` unbound.
            rounds = jeopardy_round
        else:
            rounds = [jeopardy_round]
        try:
            for round_ in rounds:
                for clue in clues[round_]:
                    clue.conll(parser, txt='text', style=style)
                    clue.conll(parser, txt='responses', style=style)
        except KeyError:
            # Fix: an unknown round name surfaces as KeyError on the clues
            # dict; the old `except NameError` never caught it.
            print(f'ValueError: {round_} is not a name of a jeopardy round.')

    def _make_score_data(self):
        """Build per-contestant score series across all three rounds.

        Returns a dict mapping contestant name to score series, or False
        when a contestant update fails in the first round.
        """
        max_i = 0
        for clue in self.clues['jeopardy_round']:
            if clue.order_num is None:
                continue
            i = clue.order_num
            resp = clue.correct('all')
            for contestant in self.contestants:
                x = contestant._update_series(clue, resp, i)
                # NOTE(review): `== False` also matches 0; confirm
                # _update_series' failure sentinel before tightening to `is`.
                if x == False:
                    return False
            if i > max_i:
                max_i = i
        # Double-Jeopardy clue numbers restart at 1, so offset them by the
        # highest clue number seen in the first round.
        offset = max_i
        for clue in self.clues['double_jeopardy_round']:
            if clue.order_num is None:
                continue
            i = clue.order_num + offset
            resp = clue.correct('all')
            for contestant in self.contestants:
                contestant._update_series(clue, resp, i)
            if i > max_i:
                max_i = i
        for clue in self.clues['final_jeopardy_round']:
            i = max_i + 1
            resp = clue.correct('all')
            for contestant in self.contestants:
                contestant._update_series(clue, resp, i, fj=True)
                contestant.score_series = contestant.score_series[:max_i+2]
        for cont in self.contestants:
            cont._make_series()
        ret_dict = {}
        for cont in self.contestants:
            ret_dict[cont.name] = cont.score_series
        return ret_dict

    def _load(self, **kwargs):
        """Set attributes based on given data.

        Called by Scraper.load() via Game.__init__(), it takes in JSON
        formatted data and sets the public attributes. Private attributes
        (page source and bs4 trees notably) are not loaded as they are not
        saved.
        """
        self.id_ = kwargs['id_']
        self.title = kwargs['title']
        self.game_number = kwargs['game_number']
        self.weekday = kwargs['weekday']
        self.month = kwargs['month']
        self.day = kwargs['day']
        self.date = kwargs['date']
        self.year = kwargs['year']
        self.categories = kwargs['categories']
        self.clues = {
            'jeopardy_round': [],
            'double_jeopardy_round': [],
            'final_jeopardy_round': []
        }
        for clue in kwargs['clues']:
            rnd = clue['round_']
            if rnd == 'final_jeopardy_round':
                self.clues[rnd].append(FinalJeopardyClue(
                    game=self,
                    load=True,
                    **clue
                ))
            else:
                self.clues[rnd].append(Clue(game=self, load=True, **clue))

    def _set_raw_clues(self):
        """Add all bs4 Tag objects for clues to a list, self.raw_clues.

        Returns True when the page contained clues, False otherwise.
        """
        self.raw_clues = self._parsed_html.body.find_all(
            'td',
            attrs={'class': 'clue'}
        )
        if self.raw_clues is None or len(self.raw_clues) == 0:
            print('Game has no clues, moving on...')
            self.raw_clues = []
            return False
        return True

    def _set_categories(self):
        """Create data structure of categories used in the game.

        A list of categories used in each round is stored in a dictionary
        whose keys are the various round names. This structure is stored as
        self.categories.
        """
        catsByRound = {
            'jeopardy_round': [],
            'double_jeopardy_round': [],
            'final_jeopardy_round': []
        }
        for round_ in self.rounds:
            round_tag = self._parsed_html.body.find('div', attrs={'id': round_})
            category_cells = round_tag.find_all(
                'td',
                attrs={'class': 'category_name'}
            )
            for category in category_cells:
                catsByRound[round_].append(category.text.lower())
        self.categories = catsByRound

    def _parse_clues(self):
        """Create a Clue object for each clue in self.raw_clues."""
        if len(self.raw_clues) == 0:
            raise ValueError('Game has no clues yet _parse_clues was called?')
        self.clues = {
            'jeopardy_round': [],
            'double_jeopardy_round': [],
            'final_jeopardy_round': []
        }
        for clue in self.raw_clues:
            if len(clue.contents) == 1:
                continue  # Skip clues that went unrevealed.
            # The enclosing div's id names the round this clue belongs to.
            for parent in clue.parents:
                if 'id' in parent.attrs:
                    round_ = parent['id']
                    break
            if round_ == 'final_jeopardy_round':
                self.clues[round_].append(FinalJeopardyClue(clue, self))
            elif round_ in ['jeopardy_round', 'double_jeopardy_round']:
                self.clues[round_].append(Clue(clue, self, round_))
            else:
                raise ValueError(f"Unknown round: {round_}")

    def _set_contestants(self):
        """Populate self.contestants with Contestant objects."""
        self.contestants = []
        raw_contestants = self._parsed_html.body.find_all(
            'p',
            attrs={'class': 'contestants'}
        )
        for cont in raw_contestants:
            flavor = cont.text
            name = cont.a.text
            link = cont.a['href']
            self.contestants.append(Contestant(name, link, flavor))

    def __dict__(self):
        """Return a dictionary of public attributes.

        NOTE(review): defining a *method* named __dict__ shadows the default
        instance-dict descriptor (e.g. vars(game) returns this method).
        Kept as-is for backward compatibility with existing callers.
        """
        clues = []
        for round_ in self.clues.keys():
            for clue in self.clues[round_]:
                clues.append(clue.__dict__())
        dictionary = {
            'id_': self.id_,
            'title': self.title,
            'game_number': self.game_number,
            'weekday': self.weekday,
            'month': self.month,
            'day': self.day,
            'date': self.date,
            'year': self.year,
            'categories': self.categories,
            'clues': clues,
        }
        return dictionary
class Clue:
    """An object representing and containing data on a particular Jeopardy clue.

    This class and its associated methods compile and structure data on a
    Jeopardy clue in relation to the game object it is associated with. It is
    called by Game._parse_clues but can be constructed individually if given a
    proper bs4 tag object and a game object.

    Attributes:
        tag_obj         The bs4 tag object from parsing the j-archive data which
                        contains the associated data for the clue.
        game            The Game object this clue is associated with.
        round_          The round this clue is from.
        category        The name of the category for the clue.
        value           How much is won (or lost) given an (in)correct response.
                        This is usually the facial value of the clue but for
                        Daily Doubles (ie, daily_double == True), it is the
                        amount the contestant wagered.
        row             The row the clue is found on, indexed from 1, not 0.
        column          The column the clue is found on, indexed from 1.
        daily_double    A boolean stating whether the clue was a Daily Double.
        text            The text of the clue.
        annotation      Any associated commentary with the clue other than the
                        correct response.
        target          The correct response.
        loaded          Boolean that is True if instance was loaded from JSON and
                        False otherwise.
        order_num       An integer representing the clue number, that is, 1 is the
                        first revealed clue, 10 is the tenth revealed clue, etc.
                        Numbers are only for the current round.
        text_conll      Tab separated string of the dependency tree in CoNLL form.
        responses_conll List of tuples where the first tuple item is the speaker
                        and the second item is a tab separated string of the
                        dependency tree in CoNLL format.
        *correct        (Made method in 0.6.0)

    Methods:
        __init__        Initializes the Clue object and calls the various
                        functions to set the attributes.
        correct         Reports whether (and by whom) the clue was answered
                        correctly.
        conll           Returns a dependency parse of the clue text or responses
                        in CoNLL format.
    """

    # Captures the commentary portion of the mouseover annotation text.
    response_regex = re.compile(r"stuck', '(.*)<em")
    # Captures ('right'|'wrong', contestant name) pairs from response cells.
    wasCorrect_regex = re.compile(r'<td class="(right|wrong)">(.*?)<\/td>')
    # Captures the correct response (inside <em>) from the annotation text.
    target_regex = re.compile(r"correct_response.+?>(.*)</em>")

    def __init__(
            self,
            bs4_tag=None,
            game=None,
            round_=None,
            load=False,
            **kwargs
    ):
        """Initialize the Clue from a bs4 tag or from saved JSON data.

        Arguments:
            bs4_tag  bs4 tag object containing the clue's markup.
            game     Game object the clue belongs to.
            round_   Name of the round the clue is from.
            load     When True (and game is given), populate attributes from
                     the JSON-derived kwargs instead of parsing bs4_tag.
            kwargs   Saved attribute values used when loading from JSON.
        """
        self.round_ = round_
        if game and load:
            # Rebuild from serialized data; no HTML parsing needed.
            self.loaded = True
            self._load(game, **kwargs)
            return
        self.loaded = False
        self.tag_obj = bs4_tag
        self.game = game
        self.text_conll = None
        self.responses_conll = None
        self._set_text()
        self._set_responses()
        if round_ != 'final_jeopardy_round':
            self._set_value()
            try:
                self.order_num = int(
                    self.tag_obj.find(
                        'td',
                        attrs={'class': 'clue_order_number'}
                    ).text
                )
            except AttributeError:
                # No clue_order_number cell was found for this clue.
                print(f'Unknown clue order in game {self.game.title}, {round_}')
                self.order_num = None

    def correct(self, method='any'):
        """Return, among other options, whether the clue was answered correctly.

        By default, returns True if any response to the clue was correct.
        Specifying a different method argument changes that functionality and
        can provide whether there were any wrong responses, the truth value of
        all given responses, and the number of responses among others, see
        list of options.

        Arguments:
            method  How the function should determine what value to return,
                    take one of the following:
                        a   any             Return True if any response was
                                            correct.
                        af  any-false       Return True if any response was not
                                            correct. Opposite of 'any'.
                        nc  no-correct      Return True iff no responses were
                                            correct.
                        fr  first-response  Return True if the first response
                                            was correct, False if incorrect
                                            or not answered.
                            all             Return a list of tuples with
                                            contestant names as the first
                                            tuple value and the truth values
                                            for the response as the second.
                                            True is a correct response and
                                            False is not a correct response.
                        l   length          Return the number of responses.

        Raises:
            ValueError  If method is not one of the options above.
        """
        truth_list = [x[1] for x in self._correct_]
        if method in ('any', 'a'):
            return True in truth_list
        elif method in ('any-false', 'af'):
            return False in truth_list
        elif method in ('no-correct', 'nc'):
            return True not in truth_list
        elif method in ('first-response', 'fr'):
            return self._correct_[0][1]
        elif method == 'all':
            return self._correct_
        elif method in ('length', 'l'):
            return len(self._correct_)
        else:
            raise ValueError(f"Unknown method argument {method}")

    def conll(self, parser, txt='text', style=10):
        """Return a CoNLL-format dependency parse of the clue or its responses.

        Arguments:
            parser  A CoreNLPDependencyParser used to produce the parse.
            txt     'text' to parse the clue text (cached on self.text_conll)
                    or 'responses' to parse each contestant response (cached
                    on self.responses_conll).
            style   CoNLL style value passed through to to_conll.

        Raises:
            TypeError   If parser is not a CoreNLP dependency parser.
            ValueError  If txt is neither 'text' nor 'responses'.
        """
        # isinstance (rather than an exact type comparison) also accepts
        # subclasses of the CoreNLP parser.
        if not isinstance(parser, CoreNLPDependencyParser):
            raise TypeError('Parser must be a CoreNLP Dependency Parser.')
        if txt == 'text':
            # CoNLL format for clue text.
            parse, = parser.raw_parse(self.text)
            self.text_conll = parse.to_conll(style)
            return self.text_conll
        elif txt == 'responses':
            responses_conll = []
            for speaker, speech in self.responses:
                if speech == '':
                    # Nothing was said, so there is nothing to parse.
                    continue
                parse, = parser.raw_parse(speech)
                responses_conll.append((speaker, parse.to_conll(style)))
            self.responses_conll = responses_conll
            return responses_conll
        # Previously an unknown txt silently returned None; fail loudly,
        # consistent with correct().
        raise ValueError(f"Unknown txt argument {txt}")

    def _load(self, game, rnd=None, **kwargs):
        """Set public attributes from JSON input."""
        self.game = game
        self.text = kwargs['text']
        self.category = kwargs['category']
        self.row = kwargs['row']
        self.column = kwargs['column']
        self.target = kwargs['target']
        self.annotation = kwargs['annotation']
        self.text_conll = kwargs['text_conll']
        self.responses_conll = kwargs['responses_conll']
        if self.round_ != 'final_jeopardy_round':
            # These fields are only serialized for non-Final Jeopardy clues
            # (see __dict__).
            self.order_num = kwargs['order_num']
            self.daily_double = kwargs['daily_double']
            self.value = kwargs['value']
            self._correct_ = kwargs['correct']
            self.responses = kwargs['responses']

    def _set_text(self):
        """Set the text of the clue."""
        clue_tag = self.tag_obj.find('td', attrs={'class': 'clue_text'})
        self.text = clue_tag.text
        # The td's id encodes the round and board coordinates.
        self._set_category(clue_tag['id'])

    def _set_category(self, id_str):
        """Set the category of the clue and its coordinates on the board."""
        if id_str == 'clue_FJ':
            rnd = 'FJ'
        else:
            rnd, col, row = id_str.split('_')[1:]
        if (
                (rnd == 'J' and self.round_ != 'jeopardy_round') or
                (rnd == 'DJ' and self.round_ != 'double_jeopardy_round') or
                (rnd == 'FJ' and self.round_ != 'final_jeopardy_round')
        ):
            # The round embedded in the markup id is treated as
            # authoritative over the round passed in by the caller.
            print('Rounds do not match for %s,\n'
                  'defaulting to round used in coordinates.' % id_str)
            if rnd == 'J':
                self.round_ = 'jeopardy_round'
            elif rnd == 'DJ':
                self.round_ = 'double_jeopardy_round'
            elif rnd == 'FJ':
                self.round_ = 'final_jeopardy_round'
        cats = self.game.categories[self.round_]
        if rnd == 'FJ':
            # Final Jeopardy has a single category and no board position.
            self.row = None
            self.column = None
            self.category = cats[0]
            return
        self.row = int(row)
        self.column = int(col)
        # Categories are stored left to right, so the 1-indexed column
        # selects the category.
        self.category = cats[self.column - 1]

    def _set_responses(self):
        """Parse the response text and set various response variables."""
        annotation = None
        tag = self.tag_obj
        if self.round_ == 'final_jeopardy_round':
            # The Final Jeopardy annotation lives on the round's enclosing
            # element rather than on the clue's own tag.
            for parent in self.tag_obj.parents:
                if parent.has_attr('id'):
                    if parent['id'] == 'final_jeopardy_round':
                        tag = parent
        for div in tag.find_all('div'):
            if div.has_attr('onmouseover'):
                annotation = div['onmouseover']
                break
        if annotation is None:
            raise ValueError('Clue has no response?')
        try:
            self.target = re.search(self.target_regex, annotation).group(1)
        except AttributeError:
            raise ValueError('Clue has no correct response?')
        self.annotation = re.search(self.response_regex, annotation).group(1)
        if self.round_ == 'final_jeopardy_round':
            # FinalJeopardyClue parses its own responses from the annotation.
            return
        self._correct_ = []
        responses = re.findall(self.wasCorrect_regex, annotation)
        if responses == []:
            # Fixed: the original built this message by concatenating
            # literals so the % formatting only applied to the final piece
            # (TypeError: 5 args for 4 placeholders) and it referenced a
            # nonexistent self.col attribute.
            print('Unknown whether response was correct or not.\n'
                  "Here's diagnostic info:\n"
                  f'\tGame id: {self.game.id_}\n'
                  f'\tDate: {self.game.date}\n'
                  f'\tRound: {self.round_}\n'
                  f'\tClue coords (row,col): {self.row}, {self.column}')
            return
        for status, player in responses:
            if status == 'right':
                self._correct_.append((player, True))
            elif status == 'wrong':
                self._correct_.append((player, False))
        # Spoken responses appear parenthetically as '(Speaker: words)'.
        # NOTE(review): quotations are pulled from self.annotation (the
        # commentary slice), not the raw annotation -- confirm intended.
        quotation = re.findall(r'\((.*?)\)', self.annotation)
        self.responses = []
        for match in quotation:
            speaker, _, speech = match.partition(':')
            if '[*]' in speech:
                # '[*]' is shorthand for the correct response.
                speech = speech.replace('[*]', self.target)
            self.responses.append((speaker.strip(), speech.strip()))

    def _set_value(self):
        """Set the dollar amount the clue was worth.

        Value is stored as an int in self.value and represents the dollar amount
        won or lost by a correct or incorrect response. It determines whether
        the clue is a daily double and sets self.daily_double as a boolean.

        Raises:
            ValueError  If no value cell exists on the clue's tag.
        """
        if self.round_ == 'final_jeopardy_round':
            # Final Jeopardy values are wagers handled by FinalJeopardyClue.
            return
        val = self.tag_obj.find(
            'td',
            attrs={'class': 'clue_value'}
        )
        if val is None:
            val = self.tag_obj.find(
                'td',
                attrs={'class': 'clue_value_daily_double'}
            )
            if val is None:
                raise ValueError('Clue has no value?')
            self.daily_double = True
            # remove the 'DD: $' that precedes DD values and any commas.
            self.value = int(val.text[5:].replace(',', ''))
        else:
            self.daily_double = False
            self.value = int(val.text.strip().strip('$'))

    def __str__(self):
        """Return the clue text."""
        return self.text

    def __dict__(self):
        """Return a dictionary of public attributes.

        NOTE: defining __dict__ as a method shadows the normal instance
        attribute dictionary (vars() will not work on Clue objects);
        kept as-is since _load presumably round-trips this mapping.
        """
        dictionary = {
            'round_': self.round_,
            'text': self.text,
            'category': self.category,
            'row': self.row,
            'column': self.column,
            'target': self.target,
            'annotation': self.annotation,
        }
        if self.round_ != 'final_jeopardy_round':
            dictionary.update({
                'order_num': self.order_num,
                'daily_double': self.daily_double,
                'value': self.value,
                'correct': self._correct_,
                'responses': self.responses,
            })
        dictionary['text_conll'] = self.text_conll
        dictionary['responses_conll'] = self.responses_conll
        return dictionary
class FinalJeopardyClue(Clue):
    """An extension of the Clue class to handle Final Jeopardy data.

    Attributes:
        Defined here:
            wagers       A list of the amount each contestant wagered.
            contestants  A list of the first names of each contestant who
                         participated in Final Jeopardy.
            responses    A list of the responses each contestant gave.
        Inherited from Clue (see documentation there):
            category, text, annotation, target, round_, row, column
    """

    # Captures (cell class, cell text) pairs from the annotation markup.
    fj_regex = re.compile(r'<td(?: class=\"(.*?)\"|.*?)>(.*?)</td>')

    def __init__(self, bs4_tag=None, game=None, load=False, **kwargs):
        """Build from a bs4 tag via Clue, or restore from JSON kwargs."""
        self.round_ = 'final_jeopardy_round'
        if game and load:
            self.loaded = True
            self._load(game, **kwargs)
            return
        self.loaded = False
        super().__init__(bs4_tag, game, 'final_jeopardy_round')
        # Consecutive cell triples describe one contestant:
        # (name/verdict cell, response cell, wager cell). Zipping a shared
        # iterator three ways groups the matches in threes and, like the
        # original counter-based loop, discards any trailing partial group.
        cells = iter(re.findall(self.fj_regex, self.annotation))
        fj_data = [list(triple) for triple in zip(cells, cells, cells)]
        self.contestants = [entry[0][1] for entry in fj_data]
        self.responses = [(entry[0][1], entry[1][1]) for entry in fj_data]
        self.wagers = [int(entry[2][1].replace(',', '').strip('$')) for entry in fj_data]
        verdicts = []
        for cell_class in [entry[0][0] for entry in fj_data]:
            if cell_class == 'wrong':
                verdicts.append(False)
            elif cell_class == 'right':
                verdicts.append(True)
            else:
                raise ValueError('Response neither right nor wrong?')
        # One (contestant, correct?, wager) triple per contestant.
        self._correct_ = list(zip(self.contestants, verdicts, self.wagers))

    def correct(self, method='any', contestant=None):
        """Extends Clue.correct() to retrieve particular contestant responses.

        With no contestant given, method is forwarded to Clue.correct()
        (except 'all', which returns the full (name, correct?, wager) list).
        An int contestant indexes _correct_ directly; a str is looked up by
        contestant name; a list or tuple is evaluated element-wise and the
        results returned as a list.

        Raises:
            TypeError  If contestant is of an unsupported type.
        """
        if contestant is None:
            if method == 'all':
                return self._correct_
            return super().correct(method)
        kind = type(contestant)
        if kind is int:
            return self._correct_[contestant]
        if kind is str:
            return self._correct_[self.contestants.index(contestant)]
        if kind in (list, tuple):
            return [self.correct(item) for item in contestant]
        raise TypeError(f'Type {kind.__name__} not supported')

    def _load(self, game, **kwargs):
        """Set public attributes from JSON input."""
        super()._load(game, **kwargs)
        self.contestants = kwargs['contestants']
        self.responses = kwargs['responses']
        self.wagers = kwargs['wagers']
        self._correct_ = kwargs['correct']

    def __dict__(self):
        """Return dictionary of public attributes."""
        attrs = super().__dict__()
        attrs['correct'] = self._correct_
        attrs['wagers'] = self.wagers
        attrs['responses'] = self.responses
        attrs['contestants'] = self.contestants
        return attrs
class Contestant():
    """Track a contestant's identity and running score across a game.

    Attributes:
        name          The contestant's full name.
        first_name    First token of the name; responses identify
                      contestants by first name.
        link          Link associated with the contestant.
        flavor        Flavor/bio text for the contestant.
        score_series  Score for each clue index; holds per-clue score
                      changes until _make_series converts it in place to a
                      cumulative series. Pre-sized to 70 entries.
    """

    def __init__(self, name, link, flavor=''):
        self.name = name
        self.first_name = name.split(" ")[0]
        self.link = link
        self.flavor = flavor
        self.score_series = [0] * 70

    def _update_series(self, clue, resp, i, fj=False):
        """Record this contestant's score change for clue index ``i``.

        Arguments:
            clue  Clue object whose ``value`` supplies the stake
                  (ignored when fj is True).
            resp  Response tuples of (first name, was_correct) plus, for
                  Final Jeopardy, a third element holding the wager.
            i     Index into score_series for this clue.
            fj    True when resp holds Final Jeopardy responses.

        Returns False if ``i`` falls outside score_series, otherwise None.
        """
        guessers = [entry[0] for entry in resp]
        if self.first_name not in guessers:
            # This contestant did not respond; score unchanged.
            return
        was_right = [entry[1] for entry in resp if entry[0] == self.first_name][0]
        if fj:
            # NOTE(review): assumes the wager arrives as a '$1,000'-style
            # string here, while FinalJeopardyClue._correct_ stores ints --
            # confirm which caller feeds this path.
            wager = [entry[2] for entry in resp if entry[0] == self.first_name][0]
            val = int(wager.strip()[1:].replace(',', ''))
        else:
            val = clue.value
        if was_right == False:
            # Incorrect responses lose the stake.
            val = -1 * val
        try:
            self.score_series[i] = val
        except IndexError:
            # Narrowed from a bare "except:"; only an out-of-range clue
            # index is expected here.
            print(i)
            return False

    def _make_series(self):
        """Convert per-clue score changes into cumulative scores in place."""
        for i in range(1, len(self.score_series)):
            self.score_series[i] = self.score_series[i - 1] + self.score_series[i]
| {
"content_hash": "d6afe404b33b6ec33768a768105d68c5",
"timestamp": "",
"source": "github",
"line_count": 777,
"max_line_length": 115,
"avg_line_length": 38.85199485199485,
"alnum_prop": 0.5131177951503909,
"repo_name": "chrisbrickhouse/jeopardy",
"id": "517f688b964a715b3fcc80f697f582cafdd91aff",
"size": "30188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Game.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "40270"
}
],
"symlink_target": ""
} |
from spack.package import *
class Arbor(CMakePackage, CudaPackage):
    """Arbor is a high-performance library for computational neuroscience
    simulations."""
    homepage = "https://arbor-sim.org"
    git = "https://github.com/arbor-sim/arbor.git"
    url = "https://github.com/arbor-sim/arbor/releases/download/v0.7/arbor-v0.7-full.tar.gz"
    maintainers = ["bcumming", "brenthuisman", "haampie", "schmitts"]
    # Release tarballs are the "-full" bundles; master needs submodules.
    version("master", branch="master", submodules=True)
    version(
        "0.7",
        sha256="c3a6b7193946aee882bb85f9c38beac74209842ee94e80840968997ba3b84543",
        url="https://github.com/arbor-sim/arbor/releases/download/v0.7/arbor-v0.7-full.tar.gz",
    )
    version(
        "0.6",
        sha256="4cd333b18effc8833428ddc0b99e7dc976804771bc85da90034c272c7019e1e8",
        url="https://github.com/arbor-sim/arbor/releases/download/v0.6/arbor-v0.6-full.tar.gz",
    )
    version(
        "0.5.2",
        sha256="290e2ad8ca8050db1791cabb6b431e7c0409c305af31b559e397e26b300a115d",
        url="https://github.com/arbor-sim/arbor/releases/download/v0.5.2/arbor-v0.5.2-full.tar.gz",
    )
    version(
        "0.5",
        sha256="d0c8a4c7f97565d7c30493c66249be794d1dc424de266fc79cecbbf0e313df59",
        url="https://github.com/arbor-sim/arbor/releases/download/v0.5/arbor-v0.5-full.tar.gz",
    )
    # Build-time feature switches; each maps to a CMake option in cmake_args.
    variant(
        "assertions",
        default=False,
        description="Enable arb_assert() assertions in code.",
    )
    variant("doc", default=False, description="Build documentation.")
    variant("mpi", default=False, description="Enable MPI support")
    variant("neuroml", default=True, description="Build NeuroML support library.")
    variant("python", default=True, description="Enable Python frontend support")
    variant(
        "vectorize",
        default=False,
        description="Enable vectorization of computational kernels",
    )
    # Minimum compiler requirements, per upstream:
    # https://docs.arbor-sim.org/en/latest/install/build_install.html#compilers
    conflicts("%gcc@:8.3")
    conflicts("%clang@:7")
    # Cray compiler v9.2 and later is Clang-based.
    conflicts("%cce@:9.1")
    conflicts("%intel")
    depends_on("cmake@3.12:", type="build")
    # misc dependencies
    depends_on("fmt@7.1:", when="@0.5.3:")  # required by the modcc compiler
    depends_on("nlohmann-json")
    depends_on("random123")
    depends_on("cuda@10:", when="+cuda")
    depends_on("libxml2", when="+neuroml")
    # mpi
    depends_on("mpi", when="+mpi")
    depends_on("py-mpi4py", when="+mpi+python", type=("build", "run"))
    # python (bindings)
    extends("python", when="+python")
    depends_on("python@3.7:", when="+python", type=("build", "run"))
    depends_on("py-numpy", when="+python", type=("build", "run"))
    with when("+python"):
        depends_on("py-pybind11@2.6:", type=("build"))
        # Releases from 0.5.3 on require a newer pybind11.
        depends_on("py-pybind11@2.8.1:", when="@0.5.3:", type=("build"))
    # sphinx based documentation
    depends_on("python@3.7:", when="+doc", type="build")
    depends_on("py-sphinx", when="+doc", type="build")
    depends_on("py-svgwrite", when="+doc", type="build")
    @property
    def build_targets(self):
        # The extra "html" target builds the Sphinx documentation
        # alongside the library when +doc is enabled.
        return ["all", "html"] if "+doc" in self.spec else ["all"]
    def cmake_args(self):
        """Translate the enabled variants into Arbor's CMake options."""
        args = [
            self.define_from_variant("ARB_WITH_ASSERTIONS", "assertions"),
            self.define_from_variant("ARB_WITH_MPI", "mpi"),
            self.define_from_variant("ARB_WITH_NEUROML", "neuroml"),
            self.define_from_variant("ARB_WITH_PYTHON", "python"),
            self.define_from_variant("ARB_VECTORIZE", "vectorize"),
        ]
        if "+cuda" in self.spec:
            args.append("-DARB_GPU=cuda")
        # query spack for the architecture-specific compiler flags set by its wrapper
        args.append("-DARB_ARCH=none")
        opt_flags = self.spec.target.optimization_flags(
            self.spec.compiler.name, self.spec.compiler.version
        )
        args.append("-DARB_CXX_FLAGS_TARGET=" + opt_flags)
        return args
| {
"content_hash": "cf1d3e7907122dab9efc2e8c44033ff5",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 99,
"avg_line_length": 37.867924528301884,
"alnum_prop": 0.6337817638266069,
"repo_name": "eth-cscs/nestmc-proto",
"id": "56791d87b3bfa8f2bfc8addb4174eeda5f9564ce",
"size": "4212",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spack/package.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AMPL",
"bytes": "9796"
},
{
"name": "C++",
"bytes": "3223191"
},
{
"name": "CMake",
"bytes": "69102"
},
{
"name": "Cuda",
"bytes": "70752"
},
{
"name": "Julia",
"bytes": "15582"
},
{
"name": "Makefile",
"bytes": "577"
},
{
"name": "Python",
"bytes": "39436"
},
{
"name": "Shell",
"bytes": "2582"
}
],
"symlink_target": ""
} |
import sys
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_dna
from Bio import SeqIO
import numpy
from scipy import misc
import itertools as it
import os
import time
import random
import multiprocessing
from multiprocessing import Value
import Queue
MATERNALFASTAFILENAMELISTFILENAME = sys.argv[1]
PATERNALFASTAFILENAMELISTFILENAME = sys.argv[2]
PEAKHEIGHTFILENAME = sys.argv[3]
SEQUENCELENGTH = int(sys.argv[4])
PNGFILENAMEPATH = sys.argv[5]
VALUEFILENAMEPATH = sys.argv[6]
OUTPUTFILENAMEPREFIX = sys.argv[7]
NSIMS = 10000
NPROC = 24
class ProcessSafeOPStream(object):
    """Wrap a writeable object so multiple processes can write it safely.

    A multiprocessing.Lock serializes each write+flush pair so output from
    concurrent workers is never interleaved or buffered-and-lost.

    Attributes:
        writeable_obj  The wrapped writeable object (e.g. an open file).
        lock           Lock held for the duration of each write.
        name           Mirrors the wrapped object's name attribute.
    """
    def __init__(self, writeable_obj):
        self.writeable_obj = writeable_obj
        self.lock = multiprocessing.Lock()
        self.name = self.writeable_obj.name

    def write(self, data):
        # "with" releases the lock even if the underlying write raises;
        # the original acquire()/release() pair would leave the lock held
        # on an exception and deadlock every other writer.
        with self.lock:
            self.writeable_obj.write(data)
            self.writeable_obj.flush()

    def close(self):
        self.writeable_obj.close()
def makeSequenceDict():
    """Map every 4-base string over {A, C, G, T} to a unique index 0-255.

    Index order matches nested iteration with the last base varying
    fastest: 'AAAA' -> 0, 'AAAC' -> 1, ..., 'TTTT' -> 255.
    """
    bases = ["A", "C", "G", "T"]
    sequenceDict = {}
    index = 0
    # itertools.product varies the rightmost base fastest, matching the
    # original four nested loops.
    for combination in it.product(bases, bases, bases, bases):
        sequenceDict["".join(combination)] = index
        index = index + 1
    return sequenceDict
def initializeFilePrefixLists(maternalFastaFileNameList):
    """Build per-pair file-name prefixes and open the per-pair output files.

    For every unordered pair (i, j) of individuals, creates prefixes for the
    image files, the mean-value files, and both directions of fold-change
    value files, plus a process-safe output file for the pair's sample list.
    Returns [pngPrefixes, meanPrefixes, fcAPrefixes, fcBPrefixes, outputFiles].
    """
    pngFileNamePrefixList = []
    valueFileNamePrefixMeanList = []
    valueFileNamePrefixFCAList = []
    valueFileNamePrefixFCBList = []
    outputFileList = []
    def fileStem(fastaFileName):
        # Basename of the path with its extension removed.
        return fastaFileName.split("/")[-1].split(".")[0]
    numIndividuals = len(maternalFastaFileNameList)
    for i in range(numIndividuals - 1):
        # First individual of the pair.
        stemA = fileStem(maternalFastaFileNameList[i])
        idA = stemA.split("_")[-1]
        for j in range(i + 1, numIndividuals):
            # Second individual of the pair.
            stemB = fileStem(maternalFastaFileNameList[j])
            idB = stemB.split("_")[-1]
            pairTag = stemA + "_" + idB
            pngFileNamePrefixList.append(PNGFILENAMEPATH + "/" + pairTag)
            valueFileNamePrefixMeanList.append(
                VALUEFILENAMEPATH + "/" + pairTag + "_" + "Mean")
            valueFileNamePrefixFCAList.append(
                VALUEFILENAMEPATH + "/" + pairTag + "_" + "FC")
            valueFileNamePrefixFCBList.append(
                VALUEFILENAMEPATH + "/" + stemB + "_" + idA + "_" + "FC")
            outputFileList.append(ProcessSafeOPStream(
                open(OUTPUTFILENAMEPREFIX + "_" + pairTag, 'w+')))
    return [pngFileNamePrefixList,
            valueFileNamePrefixMeanList,
            valueFileNamePrefixFCAList,
            valueFileNamePrefixFCBList,
            outputFileList]
def getFastaList(fastaFileList):
    """Read the next record from each open fasta handle.

    Each file is read as alternating name line / sequence line. Returns a
    list of SeqRecord objects, or None as soon as any file is exhausted.
    """
    records = []
    for fastaFile in fastaFileList:
        header = fastaFile.readline().strip()
        if header == "":
            # End of file: no more sequences to process anywhere.
            return None
        sequence = fastaFile.readline().strip()
        records.append(SeqRecord(Seq(sequence, generic_dna), name=header))
    return records
def makeValueFiles(valueFileNamePrefixMean, valueFileNamePrefixFCA, valueFileNamePrefixFCB, peakHeightLineElements, peakHeightColA, peakHeightColB, sampleCount):
    """Write the regression target files for one pair of individuals.

    Writes three single-value files: the mean of the two peak heights
    (shared by samples sampleCount..sampleCount+15), the fold change A - B
    (samples sampleCount..sampleCount+7), and the fold change B - A
    (samples sampleCount+8..sampleCount+15).

    Arguments:
        valueFileNamePrefix*     Prefixes for the three value files.
        peakHeightLineElements   One row of the signal matrix as strings.
        peakHeightColA/B         Columns for the two individuals.
        sampleCount              Base sample id encoded into the file names.

    Returns [meanFileName, fcAFileName, fcBFileName].
    """
    valueFileNameMean = valueFileNamePrefixMean + "_" + str(sampleCount) + "-" + str(sampleCount + 15) + ".txt"
    valueFileNameFCA = valueFileNamePrefixFCA + "_" + str(sampleCount) + "-" + str(sampleCount + 7) + ".txt"
    valueFileNameFCB = valueFileNamePrefixFCB + "_" + str(sampleCount + 8) + "-" + str(sampleCount + 15) + ".txt"
    peakHeightA = float(peakHeightLineElements[peakHeightColA])
    peakHeightB = float(peakHeightLineElements[peakHeightColB])
    # "with" guarantees each handle is flushed and closed even on error;
    # the original left handles open if a write raised.
    with open(valueFileNameMean, 'w+') as valueFileMean:
        valueFileMean.write(str(numpy.mean([peakHeightA, peakHeightB])) + "\n")
    with open(valueFileNameFCA, 'w+') as valueFileFCA:
        valueFileFCA.write(str(peakHeightA - peakHeightB) + "\n")
    with open(valueFileNameFCB, 'w+') as valueFileFCB:
        valueFileFCB.write(str(peakHeightB - peakHeightA) + "\n")
    return [valueFileNameMean, valueFileNameFCA, valueFileNameFCB]
def makeReverseComplements(seqRecordMaternalA, seqRecordPaternalA, seqRecordMaternalB, seqRecordPaternalB):
    """Return the upper-cased reverse complement of each record's sequence.

    Order matches the inputs: [maternalA, paternalA, maternalB, paternalB].
    """
    records = (seqRecordMaternalA, seqRecordPaternalA,
               seqRecordMaternalB, seqRecordPaternalB)
    return [record.seq.reverse_complement().upper() for record in records]
def makeSampleImageMeanFC(sequenceOneA, sequenceTwoA, sequenceOneB, sequenceTwoB, pngFileNamePrefix, sampleCount, sequenceDict):
    """Encode four aligned sequences as a 256 x SEQUENCELENGTH image.

    Each column i gets a single 1 in the row indexed (via sequenceDict) by
    the 4-base combination (sequenceOneA[i], sequenceTwoA[i],
    sequenceOneB[i], sequenceTwoB[i]); columns containing a wildcard base
    stay all-zero. The image is saved as
    "<pngFileNamePrefix>_<sampleCount>.png".

    ASSUMES THAT sequenceOneA, sequenceTwoA, sequenceOneB, and sequenceTwoB
    ARE THE SAME LENGTH (no indels).

    Returns the png file name.
    """
    sequenceArray = numpy.zeros((256, SEQUENCELENGTH))
    for i in range(len(sequenceOneA)):
        # Iterate through the bases and mark a 1 in the appropriate
        # base-combination row.
        sequenceToLookUp = sequenceOneA[i] + sequenceTwoA[i] + sequenceOneB[i] + sequenceTwoB[i]
        # Testing the dict directly is O(1); the original's
        # "not in sequenceDict.keys()" built a full key list for every
        # base under Python 2.
        if sequenceToLookUp not in sequenceDict:
            # The sequence has a wild card, so skip it
            continue
        sequenceArray[sequenceDict[sequenceToLookUp]][i] = 1
    pngFileName = pngFileNamePrefix + "_" + str(sampleCount) + ".png"
    misc.imsave(pngFileName, sequenceArray)
    return pngFileName
def makeFourImageFiles(seqRecordMaternalA, seqRecordPaternalA, seqRecordMaternalB, seqRecordPaternalB, pngFileNamePrefix, sampleCount, sequenceDict, outputFile, valueFileNameMean, valueFileNameFC):
    """Write the four haplotype-ordering images for two sequence pairs.

    One image per ordering:
        1. (MaternalA, PaternalA), (MaternalB, PaternalB)
        2. (MaternalA, PaternalA), (PaternalB, MaternalB)
        3. (PaternalA, MaternalA), (MaternalB, PaternalB)
        4. (PaternalA, MaternalA), (PaternalB, MaternalB)
    Each image's name is recorded in outputFile alongside the shared value
    file names. Returns the sample count used for the final image.
    """
    orderings = [
        (seqRecordMaternalA, seqRecordPaternalA, seqRecordMaternalB, seqRecordPaternalB),
        (seqRecordMaternalA, seqRecordPaternalA, seqRecordPaternalB, seqRecordMaternalB),
        (seqRecordPaternalA, seqRecordMaternalA, seqRecordMaternalB, seqRecordPaternalB),
        (seqRecordPaternalA, seqRecordMaternalA, seqRecordPaternalB, seqRecordMaternalB),
    ]
    for orderingIndex, (oneA, twoA, oneB, twoB) in enumerate(orderings):
        pngFileName = makeSampleImageMeanFC(oneA, twoA, oneB, twoB, pngFileNamePrefix, sampleCount, sequenceDict)
        outputFile.write(pngFileName + "\t" + valueFileNameMean + "\t" + valueFileNameFC + "\t" + "0" + "\n")
        if orderingIndex < 3:
            # The caller advances the counter past the final image itself.
            sampleCount = sampleCount + 1
    return sampleCount
def makeSingleSequenceInputsMeanFCPlusPlusParallel(((seqRecordMaternalA, seqRecordPaternalA), (seqRecordMaternalB, seqRecordPaternalB)), peakHeightLineElements, peakHeightColA, peakHeightColB, pngFileNamePrefix, valueFileNamePrefixMean, valueFileNamePrefixFCA, valueFileNamePrefixFCB, outputFile, sequenceDict, sampleCount):
    """Write all 16 image/value training examples for one pair of individuals.

    NOTE: the signature uses Python 2 tuple-parameter unpacking; this
    function cannot be imported under Python 3 without a rewrite.

    Via makeFourImageFiles this emits: four haplotype orderings with A's
    sequences first (fold-change file A), the same four on the reverse
    complements, then the eight mirror-image samples with B's sequences
    first (fold-change file B). The shared mean and fold-change value
    files are written once up front by makeValueFiles.
    """
    # Make all of the sequence inputs for a fasta file
    sampleCount = sampleCount + 1
    [valueFileNameMean, valueFileNameFCA, valueFileNameFCB] = makeValueFiles(valueFileNamePrefixMean, valueFileNamePrefixFCA, valueFileNamePrefixFCB, peakHeightLineElements, peakHeightColA, peakHeightColB, sampleCount)
    [seqReverseMaternalA, seqReversePaternalA, seqReverseMaternalB, seqReversePaternalB] = makeReverseComplements(seqRecordMaternalA, seqRecordPaternalA, seqRecordMaternalB, seqRecordPaternalB)
    # Forward strand, A first.
    sampleCount = makeFourImageFiles(seqRecordMaternalA.seq.upper(), seqRecordPaternalA.seq.upper(), seqRecordMaternalB.seq.upper(), seqRecordPaternalB.seq.upper(), pngFileNamePrefix, sampleCount, sequenceDict, outputFile, valueFileNameMean, valueFileNameFCA)
    sampleCount = sampleCount + 1
    # Reverse complements, A first.
    sampleCount = makeFourImageFiles(seqReverseMaternalA, seqReversePaternalA, seqReverseMaternalB, seqReversePaternalB, pngFileNamePrefix, sampleCount, sequenceDict, outputFile, valueFileNameMean, valueFileNameFCA)
    sampleCount = sampleCount + 1
    # Forward strand, B first (uses B's fold-change value file).
    sampleCount = makeFourImageFiles(seqRecordMaternalB.seq.upper(), seqRecordPaternalB.seq.upper(), seqRecordMaternalA.seq.upper(), seqRecordPaternalA.seq.upper(), pngFileNamePrefix, sampleCount, sequenceDict, outputFile, valueFileNameMean, valueFileNameFCB)
    sampleCount = sampleCount + 1
    # Reverse complements, B first.
    sampleCount = makeFourImageFiles(seqReverseMaternalB, seqReversePaternalB, seqReverseMaternalA, seqReversePaternalA, pngFileNamePrefix, sampleCount, sequenceDict, outputFile, valueFileNameMean, valueFileNameFCB)
def makeSingleSequenceInputsMeanFCPlusPlusParallelLoop(
        maternalFastas,
        paternalFastas,
        peakHeightLineElements,
        pngFileNamePrefixList,
        valueFileNamePrefixMeanList,
        valueFileNamePrefixFCAList,
        valueFileNamePrefixFCBList,
        outputFileList,
        sequenceDict,
        sampleCount):
    """Generate images for every pair of individuals at one sequence.

    Walks it.combinations of the (maternal, paternal) fasta pairs in
    lockstep with the per-pair file-prefix lists, while peakHeightColA/B
    track which two signal-matrix columns belong to the current pair.
    (Python 2 only: relies on itertools.izip.)
    """
    # Make all of the sequence images for all individual pairs for one sequence
    fastaPairs = it.izip(maternalFastas, paternalFastas)
    fastaCombinations = it.combinations(fastaPairs, 2)
    peakHeightColA = 0
    peakHeightColB = 1
    for ( comb,
        pngFileNamePrefix,
        valueFileNamePrefixMean,
        valueFileNamePrefixFCA,
        valueFileNamePrefixFCB,
        outputFile) in it.izip (
            fastaCombinations,
            pngFileNamePrefixList,
            valueFileNamePrefixMeanList,
            valueFileNamePrefixFCAList,
            valueFileNamePrefixFCBList,
            outputFileList):
        # make the images for each combination of sequences
        makeSingleSequenceInputsMeanFCPlusPlusParallel(
            comb, peakHeightLineElements,
            peakHeightColA, peakHeightColB,
            pngFileNamePrefix,
            valueFileNamePrefixMean,
            valueFileNamePrefixFCA,
            valueFileNamePrefixFCB,
            outputFile,
            sequenceDict, sampleCount)
        # Advance the column pair in the same order combinations() emits
        # pairs: (0,1), (0,2), ..., (1,2), (1,3), ...
        peakHeightColB = peakHeightColB + 1
        if peakHeightColB >= len(maternalFastas):
            # The sequence inputs for all of the pairs for
            # the first individual in the current pair have been made
            peakHeightColA = peakHeightColA + 1
            peakHeightColB = peakHeightColA + 1
def makeSingleSequenceInputsMeanFCPlusPlusParallelLoopWorker(
        # a locked integer - used to control access to the fasta files
        # and to provide a thread safe counter
        sampleCounter,
        seqs_to_process,
        pngFileNamePrefixList,
        valueFileNamePrefixMeanList,
        valueFileNamePrefixFCAList,
        valueFileNamePrefixFCBList,
        outputFileList,
        sequenceDict):
    """Worker loop run in each forked child: drain the work queue.

    Each iteration reserves the next block of 16 sample ids under
    sampleCounter's lock, then renders all images for one queued sequence.
    Returns when the queue is empty. (Python 2 only: print statements and
    the Queue module.)
    """
    while True:
        # acquire a lock, get the next set of sequences to process,
        # and increment the sequence counter
        with sampleCounter.get_lock():
            try:
                (maternalFastas, paternalFastas, peakHeightLineElements
                )= seqs_to_process.get_nowait()
            except Queue.Empty:
                # No work left; this worker is done.
                return
            print len(peakHeightLineElements)
            print peakHeightLineElements
            sampleCount = int(sampleCounter.value)
            # Reserve a block of 16 sample ids for this sequence.
            sampleCounter.value += 16
        # make the sequences and write their images and corresponding values to disk
        makeSingleSequenceInputsMeanFCPlusPlusParallelLoop(
            maternalFastas,
            paternalFastas,
            peakHeightLineElements,
            pngFileNamePrefixList,
            valueFileNamePrefixMeanList,
            valueFileNamePrefixFCAList,
            valueFileNamePrefixFCBList,
            outputFileList,
            sequenceDict,
            sampleCount)
    # unreachable
    assert False
def build_seqs_to_process_queue(peakHeightFile):
    """Load every sequence's fasta records and peak heights into a work queue.

    Reads the maternal and paternal fasta file lists named on the command
    line, then walks all fasta files in lockstep: each queue entry is a
    tuple (maternalFastas, paternalFastas, peakHeightLineElements) for one
    sequence.

    ASSUMES THAT THE INDIVIDUALS LISTED IN maternalFastaFileNameListFile and
    paternalFastaFileNameListFile ARE IN THE SAME ORDER
    ASSUMES THAT THE COLUMNS OF THE SIGNAL MATRIX ARE IN THE SAME ORDER AS
    THE INDIVIDUALS IN maternalFastaFileNameListFile

    Arguments:
        peakHeightFile  Open handle on the signal matrix; one tab-separated
                        line is consumed per sequence.

    Returns [all_sequences queue, maternalFastaFileNameList].
    """
    maternalFastaFileNameList = []
    maternalFastaFileList = []
    paternalFastaFileList = []
    # "with" closes the two list files, which were previously leaked.
    with open(MATERNALFASTAFILENAMELISTFILENAME) as maternalFastaFileNameListFile, \
            open(PATERNALFASTAFILENAMELISTFILENAME) as paternalFastaFileNameListFile:
        for line in maternalFastaFileNameListFile:
            # Open the maternal fasta and its matching paternal fasta.
            maternalFastaFileNameList.append(line.strip())
            maternalFastaFileList.append(open(line.strip()))
            paternalFastaFileList.append(
                open(paternalFastaFileNameListFile.readline().strip()))
    all_sequences = multiprocessing.Queue()
    while True:
        maternalFastas = getFastaList(maternalFastaFileList)
        # if there is no more sequence to process, we are done
        if maternalFastas is None:
            break
        paternalFastas = getFastaList(paternalFastaFileList)
        peakHeightLineElements = peakHeightFile.readline().strip().split("\t")
        all_sequences.put(
            (maternalFastas, paternalFastas, peakHeightLineElements))
    # Every fasta has been fully consumed; close the per-individual
    # handles (previously leaked).
    for fastaFile in maternalFastaFileList + paternalFastaFileList:
        fastaFile.close()
    return [all_sequences, maternalFastaFileNameList]
def makeSequenceInputsMeanFCAllSeqIterParallel(sequenceDict):
# Make sequence inputs for all of the pairs of individuals and put them in the queue
peakHeightFile = open(PEAKHEIGHTFILENAME)
[seqs_to_process, maternalFastaFileNameList] = build_seqs_to_process_queue(peakHeightFile)
[ pngFileNamePrefixList,
valueFileNamePrefixMeanList,
valueFileNamePrefixFCAList,
valueFileNamePrefixFCBList,
outputFileList ] = initializeFilePrefixLists(maternalFastaFileNameList)
sampleCounter = Value('i',0)
pids = []
for i in xrange(NPROC):
# Iterate through all of the sequences and make all of the images for each combination
pid = os.fork()
if pid == 0:
makeSingleSequenceInputsMeanFCPlusPlusParallelLoopWorker(
sampleCounter,
seqs_to_process,
pngFileNamePrefixList,
valueFileNamePrefixMeanList,
valueFileNamePrefixFCAList,
valueFileNamePrefixFCBList,
outputFileList,
sequenceDict)
os._exit(0)
else:
pids.append(pid)
# wait for all of the worker processes to finish
for pid in pids:
os.waitpid(pid, 0)
for outputFile in outputFileList:
# Iterate through the output files and close them
outputFile.close()
# close the output file
ofp.close()
print "FINISHED"
def main():
    """Entry point: build the sequence dictionary, then generate all inputs."""
    sequence_dict = makeSequenceDict()
    makeSequenceInputsMeanFCAllSeqIterParallel(sequence_dict)


if __name__ == "__main__":
    main()
| {
"content_hash": "40b6d47305eaa9fe510a4e7bdda2b183",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 324,
"avg_line_length": 43.77805486284289,
"alnum_prop": 0.7111364283679863,
"repo_name": "imk1/IMKTFBindingCode",
"id": "2b9a342737b84a2c8bf63a86305ba8eaebd22342",
"size": "17555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makeSequenceInputsMeanFCPlusPlusParallel.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1156919"
},
{
"name": "R",
"bytes": "22835"
},
{
"name": "Shell",
"bytes": "416606"
}
],
"symlink_target": ""
} |
import nltk
import numpy as np
import pickle
from process_twt import *
class NBClassifier(object):
    """
    A Naive Bayes Classifier for sentiment analysis

    Attributes:
        feature_list: A list containing informative words
        stop_words: A list containing stop words
        slang_dict: A mapping of slang terms used during preprocessing
        is_trained: An indicator of whether classifier is trained
        NBClassifier: Classifier
        name: Base name used when pickling the model
    """

    def __init__(self, name='NBClassifier'):
        self.feature_list = []
        self.stop_words = get_stopwords()
        self.slang_dict = get_slang_dict()
        self.is_trained = False
        self.NBClassifier = []
        self.name = name

    def get_feature_vector(self, twt):
        """Return the lowercase, alphanumeric, non-stop-word tokens of *twt*."""
        feature_vector = []
        # split tweet into words
        words = twt.split()
        for w in words:
            # strip surrounding punctuation
            w = w.strip('\'"?,.')
            # check if the word starts with a letter (digits may follow)
            val = re.search(r"^[a-zA-Z][a-zA-Z0-9]*$", w)
            # ignore stop words and non-alphanumeric tokens
            if w in self.stop_words or val is None:
                continue
            feature_vector.append(w.lower())
        return feature_vector

    def extract_features(self, twt):
        """Return {'contains(word)': bool} for every word in feature_list."""
        twt_words = set(twt)
        features = {}
        for word in self.feature_list:
            features['contains(%s)' % word] = (word in twt_words)
        return features

    def train(self, pos_twt, neg_twt):
        """Train the NLTK Naive Bayes classifier from labelled tweet iterables.

        pos_twt -- iterable of raw positive tweets
        neg_twt -- iterable of raw negative tweets
        """
        tweets = []

        def _collect(rows, sentiment):
            # Shared preprocessing for both polarities (was duplicated).
            for row in rows:
                processed_twt = preprocess(row, slangdict=self.slang_dict)
                feature_vector = self.get_feature_vector(processed_twt)
                self.feature_list.extend(feature_vector)
                tweets.append((feature_vector, sentiment))

        _collect(pos_twt, 'positive')
        _collect(neg_twt, 'negative')
        # remove duplicates in feature list
        self.feature_list = list(set(self.feature_list))
        # train classifier
        training_set = nltk.classify.util.apply_features(self.extract_features, tweets)
        self.NBClassifier = nltk.NaiveBayesClassifier.train(training_set)
        self.is_trained = True

    def test(self, twt):
        """Return an (n, 1) array of P(positive) per tweet.

        NOTE: implicitly returns None when the classifier is untrained
        (legacy behavior, preserved for existing callers).
        """
        if self.is_trained:
            score = np.zeros((len(twt), 1))
            for cnt, row in enumerate(twt):
                row = preprocess(row)
                score[cnt] = (self.NBClassifier.prob_classify(self.extract_features(
                    self.get_feature_vector(row)))).prob('positive')
            return score

    def informative_features(self, num=10):
        """Print/return the *num* most informative features, or an error list."""
        if self.is_trained:
            return self.NBClassifier.show_most_informative_features(num)
        else:
            return ['Error: Classifier has not been trained']

    def save(self):
        """Pickle this classifier to data/model/<name>.pickle."""
        path = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'model')
        if not os.path.exists(path):
            os.makedirs(path)
        # FIX: context manager guarantees the handle is closed even when
        # pickle.dump raises (the original leaked it on error).
        with open(os.path.join(path, self.name + '.pickle'), 'wb') as f:
            pickle.dump(self, f)
| {
"content_hash": "b235405f299c1fd33ed458aec45a10a2",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 87,
"avg_line_length": 35.57608695652174,
"alnum_prop": 0.5835624809043691,
"repo_name": "qingshuimonk/bhtsa",
"id": "d1f6cbf9d2281a7eb0add1522759369dcf67391c",
"size": "3273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bhtsa/NBClassifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "343976"
},
{
"name": "Python",
"bytes": "29750"
}
],
"symlink_target": ""
} |
import base64
import socket
import sys
import time
import xml.etree.ElementTree as ET
from . import log
class Response:
    """Wraps a single DBGP protocol response.

    Holds the raw XML payload together with the command (and arguments)
    that produced it, and parses the XML lazily on first access.
    """

    ns = '{urn:debugger_protocol_v1}'

    def __init__(self, response, cmd, cmd_args, api):
        self.response = response
        self.cmd = cmd
        self.cmd_args = cmd_args
        self.xml = None
        self.api = api
        # Fail fast when the debugger reported an error for this command.
        if "<error" in self.response:
            self.__parse_error()

    def __parse_error(self):
        """Turn the <error> element of the response into a raised DBGPError."""
        root = self.as_xml()
        error_node = root.find('%serror' % self.ns)
        if error_node is None:
            raise DBGPError("Could not parse error from return XML", 1)
        code = error_node.get("code")
        if code is None:
            raise ResponseError("Missing error code in response",
                                self.response)
        if int(code) == 4:
            raise CmdNotImplementedError('Command not implemented')
        message_node = error_node.find('%smessage' % self.ns)
        if message_node is None:
            raise ResponseError("Missing error message in response",
                                self.response)
        raise DBGPError(message_node.text, code)

    def get_cmd(self):
        """Return the command that created this response."""
        return self.cmd

    def get_cmd_args(self):
        """Return the argument string sent with the command."""
        return self.cmd_args

    def as_string(self):
        """Return the raw response text (same as str(self))."""
        return self.response

    def as_xml(self):
        """Return the response parsed as an ElementTree element (cached)."""
        if self.xml is None:
            self.xml = ET.fromstring(self.response)
            self.__determine_ns()
        return self.xml

    def __determine_ns(self):
        """Extract the XML namespace from the root tag of the parsed response."""
        tag = str(self.xml.tag)
        if tag[0] != '{':
            raise DBGPError('Invalid or missing XML namespace', 1)
        prefix, _, _ = tag.partition('}')
        self.ns = prefix + '}'

    def __str__(self):
        return self.as_string()
class ContextNamesResponse(Response):
    """Response object returned by the context_names command."""
    def names(self):
        """Return a mapping of context id (int) -> context name (str)."""
        names = {}
        for c in list(self.as_xml()):
            names[int(c.get('id'))] = c.get('name')
        return names
class TraceResponse(Response):
    """Response object returned by the trace command."""
    def __str__(self):
        # The interesting payload is the 'trace' attribute of the root node.
        return self.as_xml().get('trace')
class StatusResponse(Response):
    """Response object returned by the status command."""
    def __str__(self):
        # The debugger status ('running', 'break', ...) is a root attribute.
        return self.as_xml().get('status')
class StackGetResponse(Response):
    """Response object used by the stack_get command."""
    def get_stack(self):
        """Return the list of stack-frame XML elements."""
        return list(self.as_xml())
class ContextGetResponse(Response):
    """Response object used by the context_get command.

    The raw property nodes are converted into ContextProperty objects,
    which are much easier to work with than XML."""

    def __init__(self, response, cmd, cmd_args, api):
        Response.__init__(self, response, cmd, cmd_args, api)
        self.properties = []

    def get_context(self):
        """Return a flat list of ContextProperty objects for this context."""
        for node in list(self.as_xml()):
            self.create_properties(ContextProperty(node))
        return self.properties

    def create_properties(self, property):
        """Append *property* and, recursively, every one of its descendants."""
        self.properties.append(property)
        for child in property.children:
            self.create_properties(child)
class EvalResponse(ContextGetResponse):
    """Response object returned by the eval command."""

    def __init__(self, response, cmd, cmd_args, api):
        try:
            ContextGetResponse.__init__(self, response, cmd, cmd_args, api)
        except DBGPError as e:
            # Error code 206 means the evaluated expression was invalid.
            if int(e.args[1]) == 206:
                raise EvalError()
            else:
                raise e

    def get_context(self):
        """Return EvalProperty objects rooted at the evaluated code string."""
        source = self.get_code()
        for node in list(self.as_xml()):
            self.create_properties(EvalProperty(node, source, self.api.language))
        return self.properties

    def get_code(self):
        """Recover the evaluated source from the base64 command arguments."""
        args = self.get_cmd_args()
        encoded = args.split('-- ')[1]
        # Re-pad: the transport may have stripped the base64 '=' padding.
        remainder = len(encoded) % 4
        if remainder != 0:
            encoded += '=' * (4 - remainder)
        return base64.b64decode(encoded.encode('utf-8')).decode('utf-8')
class BreakpointSetResponse(Response):
    """Response object returned by the breakpoint_set command."""
    def get_id(self):
        """Return the debugger-assigned breakpoint id as an int."""
        return int(self.as_xml().get('id'))
    def __str__(self):
        # String form is the raw (string) id attribute.
        return self.as_xml().get('id')
class FeatureGetResponse(Response):
    """Response object specifically for the feature_get command."""
    def is_supported(self):
        """Whether the feature is supported or not."""
        xml = self.as_xml()
        return int(xml.get('supported'))
    def __str__(self):
        if self.is_supported():
            xml = self.as_xml()
            return xml.text
        return "* Feature not supported *"
class Api:
    """Api for eBGP commands.

    Uses a Connection object to read and write with the debugger,
    and builds commands and returns the results.
    """

    conn = None
    transID = 0

    def __init__(self, connection):
        """Create a new Api using a Connection object.

        The Connection object specifies the debugger connection,
        and the Protocol provides a OO api to interacting
        with it.

        connection -- The Connection object to use
        """
        self.language = None
        self.protocol = None
        self.idekey = None
        self.startfile = None
        self.conn = connection
        if self.conn.isconnected() == 0:
            self.conn.open()
        self.__parse_init_msg(self.conn.recv_msg())

    def __del__(self):
        self.conn.close()

    def __parse_init_msg(self, msg):
        """Parse the init message from the debugger."""
        xml = ET.fromstring(msg)
        self.language = xml.get("language")
        if self.language is None:
            raise ResponseError(
                "Invalid XML response from debugger",
                msg)
        self.language = self.language.lower()
        self.idekey = xml.get("idekey")
        self.version = xml.get("api_version")
        self.startfile = xml.get("fileuri")

    def send_cmd(self, cmd, args='', res_cls=Response):
        """Send a command to the debugger.

        This method automatically adds a unique transaction
        ID to the command which is required by the debugger.

        Returns a Response object, which contains the
        response message and command.

        cmd -- the command name, e.g. 'status'
        args -- arguments for the command, which is optional
                for certain commands (default '')
        res_cls -- Response subclass used to wrap the reply
        """
        args = args.strip()
        send = cmd.strip()
        self.transID += 1
        send += ' -i ' + str(self.transID)
        if args:
            send += ' ' + args
        log.Log("Command: " + send, log.Logger.DEBUG)
        self.conn.send_msg(send)
        msg = self.conn.recv_msg()
        log.Log("Response: " + msg, log.Logger.DEBUG)
        return res_cls(msg, cmd, args, self)

    def status(self):
        """Get the debugger status.

        Returns a Response object.
        """
        return self.send_cmd('status', '', StatusResponse)

    def feature_get(self, name):
        """Get the value of a feature from the debugger.

        See the DBGP documentation for a list of features.
        Returns a FeatureGetResponse object.

        name -- name of the feature, e.g. encoding
        """
        return self.send_cmd('feature_get', '-n ' + str(name),
                             FeatureGetResponse)

    def feature_set(self, name, value):
        """Set the value of a debugger feature.

        See the DBGP documentation for a list of features.
        Returns a Response object.

        name -- name of the feature, e.g. encoding
        value -- new value for the feature
        """
        return self.send_cmd('feature_set', '-n {} -v {}'.format(name, value))

    def run(self):
        """Tell the debugger to start or resume
        execution."""
        return self.send_cmd('run', '', StatusResponse)

    def eval(self, code):
        """Evaluate a code string in the debugger's current context.

        Returns an EvalResponse object.

        code -- source expression to evaluate (sent base64-encoded)
        """
        # BUG FIX: base64.encodestring() was deprecated since Python 3.1 and
        # removed in Python 3.9; base64.encodebytes() is its drop-in
        # replacement with identical output.
        code_enc = base64.encodebytes(code.encode('utf-8'))
        args = '-- %s' % code_enc.decode('utf-8')
        # The python engine incorrectly requires length:
        # if self.language == 'python':
        #     args = ("-l %i " % len(code_enc)) + args
        return self.send_cmd('eval', args, EvalResponse)

    def step_into(self):
        """Tell the debugger to step to the next
        statement.

        If there's a function call, the debugger engine
        will break on the first statement in the function.
        """
        return self.send_cmd('step_into', '', StatusResponse)

    def step_over(self):
        """Tell the debugger to step to the next
        statement.

        If there's a function call, the debugger engine
        will stop at the next statement after the function call.
        """
        return self.send_cmd('step_over', '', StatusResponse)

    def step_out(self):
        """Tell the debugger to step out of the statement.

        The debugger will step out of the current scope.
        """
        return self.send_cmd('step_out', '', StatusResponse)

    def stop(self):
        """Tell the debugger to stop execution.

        The script is terminated immediately."""
        return self.send_cmd('stop', '', StatusResponse)

    def stack_get(self):
        """Get the stack information.
        """
        return self.send_cmd('stack_get', '', StackGetResponse)

    def context_get(self, context=0):
        """Get the context variables.
        """
        return self.send_cmd('context_get', '-c %i' % int(context),
                             ContextGetResponse)

    def context_names(self):
        """Get the context types.
        """
        return self.send_cmd('context_names', '', ContextNamesResponse)

    def property_get(self, name):
        """Get a property.
        """
        # Escape backslashes and quotes so the name survives the quoted
        # protocol argument.
        return self.send_cmd(
            'property_get',
            '-n "%s" -d 0' % name.replace("\\", "\\\\").replace("\"", "\\\""),
            ContextGetResponse
        )

    def detach(self):
        """Tell the debugger to detach itself from this
        client.

        The script is not terminated, but runs as normal
        from this point."""
        ret = self.send_cmd('detach', '', StatusResponse)
        self.conn.close()
        return ret

    def breakpoint_set(self, cmd_args):
        """Set a breakpoint.

        The breakpoint type is defined by the arguments, see the
        Breakpoint class for more detail."""
        return self.send_cmd('breakpoint_set', cmd_args, BreakpointSetResponse)

    def breakpoint_list(self):
        """List all currently set breakpoints."""
        return self.send_cmd('breakpoint_list')

    def breakpoint_remove(self, id):
        """Remove a breakpoint by ID.

        The ID is that returned in the response from breakpoint_set."""
        return self.send_cmd('breakpoint_remove', '-d %i' % id, Response)
class ContextProperty:
    """A variable/property parsed from a DBGP <property> XML node.

    Recursively builds child ContextProperty objects for compound values.
    Initialization order matters: type and display name are resolved first,
    then children are counted, then the value is decoded, then children
    are instantiated.
    """
    ns = '{urn:debugger_protocol_v1}'
    def __init__(self, node, parent=None, depth=0):
        self.parent = parent
        self.__determine_type(node)
        self._determine_displayname(node)
        self.encoding = node.get('encoding')
        self.depth = depth
        self.size = node.get('size')
        self.value = ""
        self.is_last_child = False
        self._determine_children(node)
        self.__determine_value(node)
        self.__init_children(node)
        if self.type == 'scalar':
            # __determine_value wrapped scalar values in backticks; subtract
            # those two characters to report the real payload length.
            self.size = len(self.value) - 2
    def __determine_value(self, node):
        """Decode this node's value (base64-aware); compounds get ""."""
        if self.has_children:
            self.value = ""
            return
        self.value = self._get_enc_node_text(node, 'value')
        if self.value is None:
            if self.encoding == 'base64':
                if node.text is None:
                    self.value = ""
                else:
                    self.value = base64.decodebytes(
                        node.text.encode("UTF-8")).decode("utf-8")
            elif not self.is_uninitialized() and not self.has_children:
                self.value = node.text
        if self.value is None:
            self.value = ""
        # Cached newline count, used by the display layer.
        self.num_crs = self.value.count('\n')
        if self.type.lower() in ("string", "str", "scalar"):
            # Wrap string-like values in backticks, escaping embedded ones.
            self.value = '`%s`' % self.value.replace('`', '\\`')
    def __determine_type(self, node):
        # Prefer the concrete class name over the generic DBGP type.
        # NOTE: local `type` shadows the builtin (kept for byte-compatibility).
        type = node.get('classname')
        if type is None:
            type = node.get('type')
        if type is None:
            type = 'unknown'
        self.type = type
    def _determine_displayname(self, node):
        """Resolve the name shown to the user (attribute or encoded child)."""
        display_name = node.get('fullname')
        if display_name is None:
            display_name = self._get_enc_node_text(node, 'fullname', "")
        if display_name == '::':
            display_name = self.type
        self.display_name = display_name
    def _get_enc_node_text(self, node, name, default=None):
        """Return text of namespaced child *name*, base64-decoding if needed."""
        n = node.find('%s%s' % (self.ns, name))
        if n is not None and n.text is not None:
            if n.get('encoding') == 'base64':
                val = base64.decodebytes(n.text.encode("UTF-8")).decode(
                    "UTF-8")
            else:
                val = n.text
        else:
            val = None
        if val is None:
            return default
        return val
    def _determine_children(self, node):
        """Read the declared child count ('numchildren' or 'children')."""
        children = node.get('numchildren')
        if children is None:
            children = node.get('children')
        if children is None:
            children = 0
        else:
            children = int(children)
        self.num_declared_children = children
        self.has_children = children > 0
        self.children = []
    def __init_children(self, node):
        """Instantiate child properties for each nested <property> element."""
        if self.has_children:
            idx = 0
            tagname = '%sproperty' % self.ns
            children = list(node)
            if children is not None:
                for c in children:
                    if c.tag == tagname:
                        idx += 1
                        p = self._create_child(c, self, self.depth+1)
                        self.children.append(p)
                        # Flag the final declared child for rendering.
                        if idx == self.num_declared_children:
                            p.mark_as_last_child()
    def _create_child(self, node, parent, depth):
        # Factory hook; subclasses override to return their own type.
        return ContextProperty(node, parent, depth)
    def mark_as_last_child(self):
        self.is_last_child = True
    def is_uninitialized(self):
        return self.type == 'uninitialized'
    def child_count(self):
        return len(self.children)
    def type_and_size(self):
        """Return 'type [size]' using child count or byte size, else 'type'."""
        size = None
        if self.has_children:
            size = self.num_declared_children
        elif self.size is not None:
            size = self.size
        if size is None:
            return self.type
        return "%s [%s]" % (self.type, size)
class EvalProperty(ContextProperty):
    """Context property produced by an eval response.

    Builds display names that read like source-language expressions rooted
    at the evaluated code snippet (e.g. PHP array/object accessors).
    """

    def __init__(self, node, code, language, parent=None, depth=0):
        self.code = code
        self.language = language.lower()
        # The root node of an eval tree has no parent.
        self.is_parent = parent is None
        ContextProperty.__init__(self, node, parent, depth)

    def _create_child(self, node, parent, depth):
        return EvalProperty(node, self.code, self.language, parent, depth)

    def _determine_displayname(self, node):
        """Compose a language-appropriate accessor expression for this node."""
        if self.is_parent:
            # The root property is labelled with the evaluated code itself.
            self.display_name = self.code
        else:
            if self.language == 'php':
                if self.parent.type == 'array':
                    if node.get('name').isdigit():
                        self.display_name = self.parent.display_name + \
                            "[%s]" % node.get('name')
                    else:
                        self.display_name = self.parent.display_name + \
                            "['%s']" % node.get('name')
                else:
                    self.display_name = self.parent.display_name + \
                        "->"+node.get('name')
            elif self.language == 'perl':
                self.display_name = node.get('fullname')
            else:
                # BUG FIX: the original read node.get('name'), defaulted it to
                # '?', and then unconditionally overwrote it on the next line;
                # the dead lookup has been removed.
                name = self._get_enc_node_text(node, 'name', '?')
                if self.parent.type == 'list':
                    self.display_name = self.parent.display_name + name
                else:
                    self.display_name = self.parent.display_name + \
                        "." + name
""" Errors/Exceptions """
class TimeoutError(Exception):
    """Raised when a debugger operation times out.

    NOTE(review): this shadows the builtin TimeoutError; callers in this
    package resolve the local name, so renaming would break the interface.
    """
    pass
class DBGPError(Exception):
    """Raised when the debugger returns an error message."""
    pass
class CmdNotImplementedError(Exception):
    """Raised when the debugger returns an error message."""
    pass
class EvalError(Exception):
    """Raised when some evaluated code is invalid."""
    pass
class ResponseError(Exception):
    """An error caused by an unexpected response from the
    debugger (e.g. invalid format XML)."""
    pass
class TraceError(Exception):
    """Raised when trace is out of domain."""
    pass
| {
"content_hash": "34da816fbba4b8a2d60133a2b92e8522",
"timestamp": "",
"source": "github",
"line_count": 577,
"max_line_length": 79,
"avg_line_length": 30.641247833622185,
"alnum_prop": 0.5558823529411765,
"repo_name": "Taluu/vdebug",
"id": "c03ca7aad6a247f8a838073b430b8af53429b8f8",
"size": "17680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/vdebug/dbgp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "5738"
},
{
"name": "Python",
"bytes": "156443"
},
{
"name": "Ruby",
"bytes": "17392"
},
{
"name": "Shell",
"bytes": "4732"
},
{
"name": "Vim script",
"bytes": "14432"
}
],
"symlink_target": ""
} |
import ntplib
from time import ctime
def get_current_time():
    # Query a public NTP pool server and print its reported transmit time.
    ntp_client = ntplib.NTPClient()
    response = ntp_client.request('pool.ntp.org')
    # Python 2 print statement (this file predates Python 3).
    print 'Current time is', ctime(response.tx_time)
if __name__ == '__main__':
    get_current_time()
| {
"content_hash": "46673dd7fcc91f6d57ae2fa0fd53cbde",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 52,
"avg_line_length": 25.2,
"alnum_prop": 0.6626984126984127,
"repo_name": "tutsplus/introduction-to-python",
"id": "4845d6548f17b45b823d0e38326a2e27e57861c7",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "9-connecting-to-network-services/6-whattimeisit.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "8999"
}
],
"symlink_target": ""
} |
"""The tests for the MQTT automation."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.setup import async_setup_component
from tests.async_mock import ANY
from tests.common import async_fire_mqtt_message, async_mock_service, mock_component
# Fixture: records every invocation of the mocked test.automation service.
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    return async_mock_service(hass, "test", "automation")
# Fixture (autouse): every test in this module gets the group component mocked.
@pytest.fixture(autouse=True)
def setup_comp(hass, mqtt_mock):
    """Initialize components."""
    mock_component(hass, "group")
async def test_if_fires_on_topic_match(hass, calls):
    """Test if message is fired on topic match."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "mqtt", "topic": "test-topic"},
                "action": {
                    "service": "test.automation",
                    # Render all trigger fields so the assertion below can
                    # verify platform, topic, raw payload and parsed JSON.
                    "data_template": {
                        "some": "{{ trigger.platform }} - {{ trigger.topic }}"
                        " - {{ trigger.payload }} - "
                        "{{ trigger.payload_json.hello }}"
                    },
                },
            }
        },
    )
    async_fire_mqtt_message(hass, "test-topic", '{ "hello": "world" }')
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert 'mqtt - test-topic - { "hello": "world" } - world' == calls[0].data["some"]
    # After turning the automation off, further messages must not fire it.
    await hass.services.async_call(
        automation.DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
        blocking=True,
    )
    async_fire_mqtt_message(hass, "test-topic", "test_payload")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_on_topic_and_payload_match(hass, calls):
    """Test if message is fired on topic and payload match."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "mqtt",
                    "topic": "test-topic",
                    "payload": "hello",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # Matching topic AND payload -> exactly one automation run.
    async_fire_mqtt_message(hass, "test-topic", "hello")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_not_fires_on_topic_but_no_payload_match(hass, calls):
    """Test if message is not fired on topic but no payload."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "mqtt",
                    "topic": "test-topic",
                    "payload": "hello",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # Topic matches but payload does not -> automation must not run.
    async_fire_mqtt_message(hass, "test-topic", "no-hello")
    await hass.async_block_till_done()
    assert len(calls) == 0
async def test_encoding_default(hass, calls, mqtt_mock):
    """Test default encoding."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "mqtt", "topic": "test-topic"},
                "action": {"service": "test.automation"},
            }
        },
    )
    # With no 'encoding' option, the subscription defaults to utf-8.
    mqtt_mock.async_subscribe.assert_called_once_with("test-topic", ANY, 0, "utf-8")
async def test_encoding_custom(hass, calls, mqtt_mock):
    """Test custom encoding: an empty string disables payload decoding."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "mqtt", "topic": "test-topic", "encoding": ""},
                "action": {"service": "test.automation"},
            }
        },
    )
    # encoding="" is passed through as None (raw/undecoded payload).
    mqtt_mock.async_subscribe.assert_called_once_with("test-topic", ANY, 0, None)
| {
"content_hash": "9af7eb63ecc55d6f0a325495a48d08b6",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 87,
"avg_line_length": 30.548872180451127,
"alnum_prop": 0.5422101895151366,
"repo_name": "soldag/home-assistant",
"id": "b35082ef2c6258e371ff8581b7daa1dc74883d8a",
"size": "4063",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt/test_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19025087"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
# Create your models here.
| {
"content_hash": "930f929a59b6e8a60a832383bfce5bb5",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 19.6,
"alnum_prop": 0.7653061224489796,
"repo_name": "zouyapeng/instance_monitor_server",
"id": "182671cae60ac172c27c4db18e54233b4129e164",
"size": "143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36582"
},
{
"name": "Shell",
"bytes": "1124"
}
],
"symlink_target": ""
} |
"""
These are the user-preferences. For the network and system preferences, take a
look at `Settings.py`. Use it like this example:
from neo.UserPreferences import preferences
print(preferences.token_style)
preferences.set_theme("light")
"""
import json
from json.decoder import JSONDecodeError
from neo.Settings import FILENAME_PREFERENCES
from neo.logging import log_manager
logger = log_manager.getLogger()
# Built-in defaults: one dark and one light prompt theme; "theme" names the
# active one. User-specific overrides from the preferences file are merged
# on top of this structure by UserPreferencesHolder.
PREFERENCES_DEFAULT = {
    "theme": "dark",
    "themes": {
        "dark": {
            "Command": "#ff0066",
            "Default": "#00ee00",
            "Neo": "#0000ee",
            "Number": "#ffffff"
        },
        "light": {
            "Command": "#ff0066",
            "Default": "#008800",
            "Neo": "#0000ee",
            "Number": "#000000"
        }
    }
}
class UserPreferencesHolder:
    """Loads, merges and persists user preferences.

    Defaults come from PREFERENCES_DEFAULT; user-specific overrides are
    read from (and written back to) a JSON preferences file.
    """

    # Merged default preferences with user-specific overrides
    _prefs = {}

    # Only the user-specific preferences
    _userprefs = {}

    # Remember the potentially custom filename
    _preferences_filename = None

    def __init__(self, preferences_filename=FILENAME_PREFERENCES):
        self._preferences_filename = preferences_filename
        # BUG FIX: deep-copy the defaults (JSON round trip, the structure is
        # pure JSON data) instead of aliasing the module-level dict --
        # otherwise .update() below mutates PREFERENCES_DEFAULT itself.
        self._prefs = json.loads(json.dumps(PREFERENCES_DEFAULT))
        try:
            with open(self._preferences_filename) as data_file:
                self._userprefs = json.load(data_file)
            # Shallow merge: user keys replace defaults wholesale.
            self._prefs.update(self._userprefs)
        except FileNotFoundError:
            # No user-specific overrides, which is ok
            pass
        except JSONDecodeError as e:
            logger.error("JSONDecodeError: {} in {}".format(e.msg, self._preferences_filename))
            raise

    def _save_userprefs(self):
        """Write only the user-specific overrides back to disk."""
        with open(self._preferences_filename, "w") as data_file:
            data_file.write(json.dumps(self._userprefs, indent=4, sort_keys=True))

    def set_theme(self, theme_name):
        """Switch the active theme and persist the choice.

        Raises ValueError if *theme_name* is not a known theme.
        """
        if theme_name not in self._prefs["themes"].keys():
            raise ValueError("Error: cannot set theme_name '%s', no theme with this name" % theme_name)
        self._userprefs["theme"] = theme_name
        self._prefs.update(self._userprefs)
        self._save_userprefs()

    @property
    def token_style(self):
        """Token color mapping for the currently active theme."""
        return self._prefs["themes"][self._prefs["theme"]]


preferences = UserPreferencesHolder()
| {
"content_hash": "9cd3211edf28a97cfbdf7bb7ecaa7031",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 103,
"avg_line_length": 28.71604938271605,
"alnum_prop": 0.6109200343938092,
"repo_name": "hal0x2328/neo-python",
"id": "3cc55eba8e22c33e38fa4c7d31af54d15bd4b361",
"size": "2326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/UserPreferences.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2059"
},
{
"name": "Makefile",
"bytes": "1898"
},
{
"name": "Python",
"bytes": "1758220"
},
{
"name": "Shell",
"bytes": "531"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds 'first' and 'picture' fields to Officer.
    dependencies = [
        ('index', '0002_auto_20170914_0606'),
    ]
    operations = [
        migrations.AddField(
            model_name='officer',
            name='first',
            field=models.CharField(blank=True, max_length=50, verbose_name='Name'),
        ),
        migrations.AddField(
            model_name='officer',
            name='picture',
            # NOTE(review): upload_to='/events' has a leading slash; Django
            # joins upload_to with MEDIA_ROOT, so an absolute path is almost
            # certainly unintended — confirm against the Officer model (do
            # not edit applied migrations retroactively).
            field=models.ImageField(blank=True, upload_to='/events', verbose_name='Picture'),
        ),
    ]
| {
"content_hash": "453b191e051c6c321569b278fa7f29f1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 93,
"avg_line_length": 26.217391304347824,
"alnum_prop": 0.5804311774461028,
"repo_name": "CPLUG/cplug.org-backend",
"id": "4d5935bc2281d99225d0a283ba614e217f54cf32",
"size": "676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "index/migrations/0003_auto_20170919_2309.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "407"
},
{
"name": "HTML",
"bytes": "25200"
},
{
"name": "Python",
"bytes": "13642"
}
],
"symlink_target": ""
} |
import json
import os
import re
from collections import deque
from functools import partial, wraps
from flask import Flask, jsonify, make_response, request
from flask_compress import Compress
from flask_cors import CORS
from flask_restplus import Api as RestPlusAPI
from flask_restplus import Resource
from jsonschema import RefResolutionError
from loguru import logger
from werkzeug.http import generate_etag
from flexget import manager
from flexget.config_schema import format_checker, process_config
from flexget.utils.database import with_session
from flexget.webserver import User
from . import __path__
__version__ = '1.7.1'
logger = logger.bind(name='api')
class APIClient:
    """
    This is a client which can be used as a more pythonic interface to the rest api.

    It skips http, and is only usable from within the running flexget process.
    """

    def __init__(self):
        self.app = api_app.test_client()

    def __getattr__(self, item):
        # Attribute access builds an endpoint path, e.g. client.tasks -> /api/tasks
        return APIEndpoint('/api/' + item, self.get_endpoint)

    def get_endpoint(self, url, data=None, method=None):
        """Invoke *url* through the in-process test client and return its JSON."""
        if method is None:
            method = 'POST' if data is not None else 'GET'
        auth_header = dict(Authorization='Token %s' % api_key())
        response = self.app.open(
            url, data=data, follow_redirects=True, method=method, headers=auth_header
        )
        result = json.loads(response.get_data(as_text=True))
        # TODO: Proper exceptions
        # BUG FIX: the original condition `200 > status_code >= 300` can never
        # be true, so API errors were silently returned as normal results.
        # Raise for any non-2xx status instead.
        if not 200 <= response.status_code < 300:
            raise Exception(result['error'])
        return result
class APIEndpoint:
    """Callable proxy for a single REST endpoint path.

    Attribute (or item) access extends the path with another segment;
    calling the object invokes the registered caller with the accumulated
    endpoint URL.
    """

    def __init__(self, endpoint, caller):
        self.endpoint = endpoint
        self.caller = caller

    def __getattr__(self, item):
        extended = '/'.join((self.endpoint, item))
        return self.__class__(extended, self.caller)

    # Allow bracket syntax for segments that are not valid identifiers.
    __getitem__ = __getattr__

    def __call__(self, data=None, method=None):
        return self.caller(self.endpoint, data=data, method=method)
def api_version(f):
    """Decorator: stamp every response with the current 'API-Version' header."""

    @wraps(f)
    def wrapped(*args, **kwargs):
        response = f(*args, **kwargs)
        response.headers['API-Version'] = __version__
        return response

    return wrapped
class APIResource(Resource):
    """All api resources should subclass this class."""
    # Every handler gets a DB session injected and a version-stamped response.
    method_decorators = [with_session, api_version]
    def __init__(self, api, *args, **kwargs):
        # Expose the running FlexGet manager to every resource handler.
        self.manager = manager.manager
        super().__init__(api, *args, **kwargs)
class API(RestPlusAPI):
    """
    Extends a flask restplus :class:`flask_restplus.Api` with:
    - methods to make using json schemas easier
    - methods to auto document and handle :class:`ApiError` responses
    """
    def validate(self, model, schema_override=None, description=None):
        """
        When a method is decorated with this, json data submitted to the endpoint will be validated with the given
        `model`. This also auto-documents the expected model, as well as the possible :class:`ValidationError` response.
        """
        def decorator(func):
            # NOTE(review): these decorators reference the module-level `api`
            # instance rather than `self` — confirm that is intended.
            @api.expect((model, description))
            @api.response(ValidationError)
            @wraps(func)
            def wrapper(*args, **kwargs):
                # Validate the request JSON before invoking the handler.
                payload = request.json
                try:
                    schema = schema_override if schema_override else model.__schema__
                    errors = process_config(config=payload, schema=schema, set_defaults=False)
                    if errors:
                        raise ValidationError(errors)
                except RefResolutionError as e:
                    raise APIError(str(e))
                return func(*args, **kwargs)
            return wrapper
        return decorator
    def response(self, code_or_apierror, description='Success', model=None, **kwargs):
        """
        Extends :meth:`flask_restplus.Api.response` to allow passing an :class:`ApiError` class instead of
        response code. If an `ApiError` is used, the response code, and expected response model, is automatically
        documented.
        """
        try:
            if issubclass(code_or_apierror, APIError):
                # Prefer the error class's own description when it has one.
                description = code_or_apierror.description or description
                return self.doc(
                    responses={
                        code_or_apierror.status_code: (
                            description,
                            code_or_apierror.response_model,
                        )
                    },
                    **kwargs,
                )
        except TypeError:
            # If first argument isn't a class this happens
            pass
        return self.doc(responses={code_or_apierror: (description, model)}, **kwargs)
    def pagination_parser(self, parser=None, sort_choices=None, default=None, add_sort=None):
        """
        Return a standardized pagination parser, to be used for any endpoint that has pagination.

        :param RequestParser parser: Can extend a given parser or create a new one
        :param tuple sort_choices: A tuple of strings, to be used as server side attribute searches
        :param str default: The default sort string, used `sort_choices[0]` if not given
        :param bool add_sort: Add sort order choices without adding specific sort choices

        :return: An api.parser() instance with pagination and sorting arguments.
        """
        pagination = parser.copy() if parser else self.parser()
        pagination.add_argument('page', type=int, default=1, help='Page number')
        pagination.add_argument('per_page', type=int, default=50, help='Results per page')
        if sort_choices or add_sort:
            pagination.add_argument(
                'order', choices=('desc', 'asc'), default='desc', help='Sorting order'
            )
            if sort_choices:
                pagination.add_argument(
                    'sort_by',
                    choices=sort_choices,
                    default=default or sort_choices[0],
                    help='Sort by attribute',
                )
        return pagination
# Flask application hosting the REST API; CORS and gzip compression enabled.
api_app = Flask(__name__, template_folder=os.path.join(__path__[0], 'templates'))
api_app.config['REMEMBER_COOKIE_NAME'] = 'flexget.token'
api_app.config['DEBUG'] = True
api_app.config['ERROR_404_HELP'] = False
# Treat '/path' and '/path/' as the same route.
api_app.url_map.strict_slashes = False
CORS(api_app, expose_headers='Link, Total-Count, Count, ETag')
Compress(api_app)
# The flask-restplus API object every endpoint registers against.
api = API(
    api_app,
    title='Flexget API v{}'.format(__version__),
    version=__version__,
    description='View and manage flexget core operations and plugins. Open each endpoint view for usage information.'
    ' Navigate to http://flexget.com/API for more details.',
    format_checker=format_checker,
)
# JSON schema shared by every error/status response body.
base_message = {
    'type': 'object',
    'properties': {
        'status_code': {'type': 'integer'},
        'message': {'type': 'string'},
        'status': {'type': 'string'},
    },
    'required': ['status_code', 'message', 'status'],
}
base_message_schema = api.schema_model('base_message', base_message)
class APIError(Exception):
    """Base class for all API errors, serialized to the standard status payload."""

    description = 'Server error'
    status_code = 500
    status = 'Error'
    response_model = base_message_schema

    def __init__(self, message=None, payload=None):
        """
        :param message: Human readable error message
        :param payload: Optional dict of extra fields merged into the response
        """
        self.message = message
        self.payload = payload

    def to_dict(self):
        """Return the JSON-serializable payload for this error.

        Copies ``self.payload`` first, so repeated calls (or callers holding a
        reference to the payload dict) never see it mutated by the status
        fields added here — the original ``rv = self.payload or {}`` updated
        the caller's dict in place.
        """
        rv = dict(self.payload) if self.payload else {}
        rv.update(status_code=self.status_code, message=self.message, status=self.status)
        return rv

    @classmethod
    def schema(cls):
        """Return the swagger schema of the registered response model."""
        return cls.response_model.__schema__
class NotFoundError(APIError):
    """404 — requested resource does not exist."""
    status_code = 404
    description = 'Not found'
class Unauthorized(APIError):
    """401 — missing or invalid credentials."""
    status_code = 401
    description = 'Unauthorized'
class BadRequest(APIError):
    """400 — malformed request."""
    status_code = 400
    description = 'Bad request'
class Conflict(APIError):
    """409 — request conflicts with current server state."""
    status_code = 409
    description = 'Conflict'
class PreconditionFailed(APIError):
    """412 — raised by the ETag machinery when an If-Match header fails."""
    status_code = 412
    description = 'Precondition failed'
class NotModified(APIError):
    """304 — returned by the ETag machinery when If-None-Match matches."""
    status_code = 304
    # Capitalized for consistency with the sibling error descriptions
    # ('Not found', 'Bad request', ...); previously 'not modified'.
    description = 'Not modified'
class ValidationError(APIError):
    """422 — carries a structured list of JSON-schema validation failures."""
    status_code = 422
    description = 'Validation error'
    response_model = api.schema_model(
        'validation_error',
        {
            'type': 'object',
            'properties': {
                'validation_errors': {
                    'type': 'array',
                    'items': {
                        'type': 'object',
                        'properties': {
                            'message': {
                                'type': 'string',
                                'description': 'A human readable message explaining the error.',
                            },
                            'validator': {
                                'type': 'string',
                                'description': 'The name of the failed validator.',
                            },
                            'validator_value': {
                                'type': 'string',
                                'description': 'The value for the failed validator in the schema.',
                            },
                            'path': {'type': 'string'},
                            'schema_path': {'type': 'string'},
                        },
                    },
                }
            },
            'required': ['validation_errors'],
        },
    )
    # Attributes copied from each jsonschema ValidationError into the payload
    verror_attrs = (
        'message',
        'cause',
        'validator',
        'validator_value',
        'path',
        'schema_path',
        'parent',
    )
    def __init__(self, validation_errors, message='validation error'):
        """:param validation_errors: iterable of jsonschema ValidationError objects"""
        payload = {
            'validation_errors': [self._verror_to_dict(error) for error in validation_errors]
        }
        super().__init__(message, payload=payload)
    def _verror_to_dict(self, error):
        # deque-valued attributes (e.g. the error path) become lists so the
        # payload stays JSON-serializable; everything else is stringified.
        error_dict = {}
        for attr in self.verror_attrs:
            if isinstance(getattr(error, attr), deque):
                error_dict[attr] = list(getattr(error, attr))
            else:
                error_dict[attr] = str(getattr(error, attr))
        return error_dict
# Schema for endpoints that return an empty JSON object.
empty_response = api.schema_model('empty', {'type': 'object'})
def success_response(message, status_code=200, status='success'):
    """Build a JSON response carrying the standard status payload.

    :param message: Human readable message for the client
    :param status_code: HTTP status code, also embedded in the payload
    :param status: Payload status string
    :return: flask response with the given status code
    """
    payload = {'message': message, 'status_code': status_code, 'status': status}
    response = jsonify(payload)
    response.status_code = status_code
    return response
@api.errorhandler(APIError)
@api.errorhandler(NotFoundError)
@api.errorhandler(ValidationError)
@api.errorhandler(BadRequest)
@api.errorhandler(Unauthorized)
@api.errorhandler(Conflict)
@api.errorhandler(NotModified)
@api.errorhandler(PreconditionFailed)
def api_errors(error):
    """Render any registered APIError subclass as (payload dict, HTTP status)."""
    return error.to_dict(), error.status_code
@with_session
def api_key(session=None):
    """Return the API token of the first user in the DB (internal lookups only)."""
    logger.debug('fetching token for internal lookup')
    # NOTE(review): assumes at least one User row exists; .first() returning
    # None would raise AttributeError here — confirm setup guarantees a user.
    return session.query(User).first().token
def etag(method=None, cache_age=0):
    """Decorate a GET or HEAD endpoint with ETag handling.

    Adds ``ETag`` and ``Cache-Control`` headers to the response, honours the
    ``If-Match`` / ``If-None-Match`` request headers, and raises a 412 error
    or a 304 response when the tags dictate it.

    :param method: A GET or HEAD flask method to wrap
    :param cache_age: max-age cache age for the content
    :return: The wrapped method's response with ETag / Cache-Control headers
    """
    # Called with only keyword arguments: return a decorator that remembers
    # cache_age and receives the method on the next call.
    if method is None:
        return partial(etag, cache_age=cache_age)

    @wraps(method)
    def wrapper(*args, **kwargs):
        assert request.method in ['HEAD', 'GET'], '@etag is only supported for GET requests'
        response = make_response(method(*args, **kwargs))
        # Pagination headers can change even when the body does not, so they
        # are folded into the hashed payload (order: link, count, total-count).
        header_fingerprint = ''.join(
            response.headers.get(name, '') for name in ('link', 'count', 'total-count')
        )
        current_etag = generate_etag((response.get_data().decode() + header_fingerprint).encode())
        response.headers['Cache-Control'] = 'max-age=%s' % cache_age
        response.headers['ETag'] = current_etag

        if_match = request.headers.get('If-Match')
        if_none_match = request.headers.get('If-None-Match')
        if if_match:
            tags = [tag.strip() for tag in if_match.split(',')]
            if current_etag not in tags and '*' not in tags:
                raise PreconditionFailed('etag does not match')
        elif if_none_match:
            tags = [tag.strip() for tag in if_none_match.split(',')]
            if current_etag in tags or '*' in tags:
                raise NotModified
        return response

    return wrapper
def pagination_headers(total_pages, total_items, page_count, request):
    """Build the 'Link', 'Total-Count' and 'Count' headers used for pagination traversing.

    :param total_pages: Total number of pages
    :param total_items: Total number of items across all pages
    :param page_count: Item count for this page (may differ from requested page size)
    :param request: The flask request used, required to rebuild the endpoint URL and query string
    :return: dict mapping header name -> value
    """
    # Rebuild the endpoint URL and paging parameters from the request.
    endpoint_url = request.url_root + request.path.lstrip('/')
    per_page = request.args.get('per_page', 50)
    current_page = int(request.args.get('page', 1))

    # Strip the paging parameters from the original query string, then
    # collapse any doubled-up '&' separators left behind.
    remaining_qs = re.sub(br'per_page=\d+', b'', request.query_string)
    remaining_qs = re.sub(br'page=\d+', b'', remaining_qs)
    remaining_qs = re.sub(b'&{2,}', b'&', remaining_qs)

    # Template with two slots left open: the page number and the rel type.
    template = (
        '<{}?per_page={}&'.format(endpoint_url, per_page)
        + remaining_qs.decode().lstrip('&')
        + '&page={}>; rel="{}"'
    )

    links = []
    if current_page > 1:
        links.append(template.format(current_page - 1, 'prev'))
    if current_page < total_pages:
        links.append(template.format(current_page + 1, 'next'))
    links.append(template.format(total_pages, 'last'))

    return {'Link': ', '.join(links), 'Total-Count': total_items, 'Count': page_count}
| {
"content_hash": "2558019cdefea7d1d1418a3336cbdea3",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 120,
"avg_line_length": 33.270833333333336,
"alnum_prop": 0.5958394211368538,
"repo_name": "malkavi/Flexget",
"id": "8109d4eff52e99a9c10c765d2e6cd169476737c9",
"size": "14373",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flexget/api/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "623"
},
{
"name": "HTML",
"bytes": "84425"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3514392"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1530"
}
],
"symlink_target": ""
} |
from essence import World, System, Component, UnregisteredComponentError
from fixtures import world
import pytest
def xtest_there_is_a_global_world_instance():
    # Disabled test (x-prefixed so pytest does not collect it): would verify
    # that a module-level world instance is importable from essence.base.
    from essence.base import world
def test_entity_shorthand(world):
    """Exercise the Entity convenience wrappers (has/get/add/remove/destroy)."""
    entity = world.create_entity()
    # A fresh entity carries no components.
    assert not entity.has(Component)
    assert entity.get(Component, None) is None
    component = Component()
    entity.add(component)
    assert entity.has(Component)
    assert entity.get(Component) is component
    entity.remove(Component)
    assert not entity.has(Component)
    assert entity.get(Component, None) is None
    entity.destroy()
    # Destroying removes the entity from the world's registry.
    assert entity not in world.entities
def test_shortcut_for_registered_components(world):
    """A registered component type is reachable as an attribute on entities."""
    world.register('component', Component)
    c = Component()
    e = world.create_entity()
    e.add(c)
    assert e.component == c
def test_fails_on_unregistered_components(world):
    """Attribute access for an unregistered component type raises."""
    e = world.create_entity()
    with pytest.raises(UnregisteredComponentError):
        e.component
| {
"content_hash": "d7ea2498df6dc4b6f0d681c33ec4b6f8",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 30.242424242424242,
"alnum_prop": 0.7264529058116233,
"repo_name": "chromy/essence",
"id": "2db77cf2db55ca0030e20685411125a70c12ffe8",
"size": "998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_shortcuts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15957"
}
],
"symlink_target": ""
} |
from pyramid.security import Allow, Everyone, ALL_PERMISSIONS
from sqlalchemy import (
Column,
DateTime,
Float,
ForeignKey,
Index,
Integer,
Text,
Unicode,
UnicodeText
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
relationship,
scoped_session,
sessionmaker,
)
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from zope.sqlalchemy import ZopeTransactionExtension
# Thread-local SQLAlchemy session tied to Zope/pyramid transaction management.
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
# Declarative base shared by all models in this module.
Base = declarative_base()
class Product(Base):
    """Product model: a rentable product with per-size price columns."""

    __tablename__ = 'products'
    id = Column(Integer, primary_key=True)
    product = Column(Unicode(255))
    one_point_five_oz = Column(Unicode(255))
    one_oz = Column(Unicode(255))
    point_seven_five_oz = Column(Unicode(255))
    point_five_oz = Column(Unicode(255))
    point_two_five_oz = Column(Unicode(255))

    def __init__(self, product, one_point_five_oz, one_oz, point_seven_five_oz,
                 point_five_oz, point_two_five_oz):
        """Init Product."""
        self.product = product
        self.one_point_five_oz = one_point_five_oz
        self.one_oz = one_oz
        self.point_seven_five_oz = point_seven_five_oz
        self.point_five_oz = point_five_oz
        self.point_two_five_oz = point_two_five_oz

    def add_product(self):
        """Commit product to DB."""
        DBSession.add(self)
        DBSession.flush()

    def delete_product(self):
        """Remove product from DB."""
        DBSession.delete(self)

    # NOTE: a per-family __acl__ property sketch was kept here in comments by
    # the original author ("not sure if I will need"); removed as dead code.

    @classmethod
    def get_product_by_id(cls, id):
        """Given primary key for Product table, return product obj.

        Returns the string 'Product not found.' when no row has that id.
        """
        # Bug fix: Query.get() returns None for a missing primary key and
        # never raises NoResultFound, so the previous try/except could not
        # fire and the method silently returned None instead of the sentinel.
        queried_product = DBSession.query(cls).get(id)
        if queried_product is None:
            return 'Product not found.'
        return queried_product
class Cocktail(Base):
    """Cocktail model."""
    __tablename__ = 'cocktail'
    id = Column(Integer, primary_key=True)
    name = Column(Text)
    cost = Column(Integer)
    def add_cocktail(self):
        """Commit cocktail to DB."""
        DBSession.add(self)
        DBSession.flush()
    def delete_cocktail(self):
        """Remove cocktail from DB."""
        DBSession.delete(self)
# functions with math for
| {
"content_hash": "de55b0f7f3de6eba88c60c1dad5ea63c",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 27.5,
"alnum_prop": 0.6131907308377896,
"repo_name": "jaredscarr/bartrack",
"id": "09c08ec281308b27c6ca2e22a6a16d09fe15b462",
"size": "2805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bartrack/bartrack/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "14343"
}
],
"symlink_target": ""
} |
from pandac.PandaModules import *
from direct.task.Task import Task
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from direct.fsm import ClassicFSM, State
from direct.fsm import State
class Walk(StateData.StateData):
    """StateData driving the local avatar's free movement.

    Wraps a small ClassicFSM with states: off, walking, swimming and
    slowWalking (used while the avatar's hp is depleted).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('Walk')
    def __init__(self, doneEvent):
        StateData.StateData.__init__(self, doneEvent)
        self.fsm = ClassicFSM.ClassicFSM('Walk', [State.State('off', self.enterOff, self.exitOff, ['walking', 'swimming', 'slowWalking']),
         State.State('walking', self.enterWalking, self.exitWalking, ['swimming', 'slowWalking']),
         State.State('swimming', self.enterSwimming, self.exitSwimming, ['walking', 'slowWalking']),
         State.State('slowWalking', self.enterSlowWalking, self.exitSlowWalking, ['walking', 'swimming'])], 'off', 'off')
        self.fsm.enterInitialState()
        # Whether the swim loop sfx is allowed to play / currently playing.
        self.IsSwimSoundAudible = 0
        self.swimSoundPlaying = 0
    def load(self):
        pass
    def unload(self):
        del self.fsm
    def enter(self, slowWalk = 0):
        # Enable avatar controls, camera, name and collisions for movement.
        base.localAvatar.startPosHprBroadcast()
        base.localAvatar.startBlink()
        base.localAvatar.attachCamera()
        shouldPush = 1
        if len(base.localAvatar.cameraPositions) > 0:
            # The camera preset's 5th field decides whether the smart camera may push in.
            shouldPush = not base.localAvatar.cameraPositions[base.localAvatar.cameraIndex][4]
        base.localAvatar.startUpdateSmartCamera(shouldPush)
        base.localAvatar.showName()
        base.localAvatar.collisionsOn()
        base.localAvatar.startGlitchKiller()
        base.localAvatar.enableAvatarControls()
    def exit(self):
        # Tear down everything enter() switched on.
        self.fsm.request('off')
        self.ignore('control')
        base.localAvatar.disableAvatarControls()
        base.localAvatar.stopUpdateSmartCamera()
        base.localAvatar.stopPosHprBroadcast()
        base.localAvatar.stopBlink()
        base.localAvatar.detachCamera()
        base.localAvatar.stopGlitchKiller()
        base.localAvatar.collisionsOff()
        base.localAvatar.controlManager.placeOnFloor()
    def enterOff(self):
        pass
    def exitOff(self):
        pass
    def enterWalking(self):
        if base.localAvatar.hp > 0:
            base.localAvatar.startTrackAnimToSpeed()
            base.localAvatar.setWalkSpeedNormal()
            base.localAvatar.applyBuffs()
        else:
            # Depleted hp forces the slow walk instead.
            self.fsm.request('slowWalking')
    def exitWalking(self):
        base.localAvatar.stopTrackAnimToSpeed()
    def setSwimSoundAudible(self, IsSwimSoundAudible):
        # Gate for the looping swim sfx; stops it immediately when muted.
        self.IsSwimSoundAudible = IsSwimSoundAudible
        if IsSwimSoundAudible == 0 and self.swimSoundPlaying:
            self.swimSound.stop()
            self.swimSoundPlaying = 0
    def enterSwimming(self, swimSound):
        base.localAvatar.setWalkSpeedNormal()
        base.localAvatar.applyBuffs()
        self.swimSound = swimSound
        self.swimSoundPlaying = 0
        base.localAvatar.b_setAnimState('swim', base.localAvatar.animMultiplier)
        base.localAvatar.startSleepSwimTest()
        # Per-frame task toggles the swim sfx with movement (see __swim).
        taskMgr.add(self.__swim, 'localToonSwimming')
    def exitSwimming(self):
        taskMgr.remove('localToonSwimming')
        self.swimSound.stop()
        del self.swimSound
        self.swimSoundPlaying = 0
        base.localAvatar.stopSleepSwimTest()
    def __swim(self, task):
        # Play the swim loop only while actually moving (and audible).
        speed = base.mouseInterfaceNode.getSpeed()
        if speed == 0 and self.swimSoundPlaying:
            self.swimSoundPlaying = 0
            self.swimSound.stop()
        elif speed > 0 and self.swimSoundPlaying == 0 and self.IsSwimSoundAudible:
            self.swimSoundPlaying = 1
            base.playSfx(self.swimSound, looping=1)
        return Task.cont
    def enterSlowWalking(self):
        # Return to normal walking as soon as hp becomes positive again.
        self.accept(base.localAvatar.uniqueName('positiveHP'), self.__handlePositiveHP)
        base.localAvatar.startTrackAnimToSpeed()
        base.localAvatar.setWalkSpeedSlow()
    def __handlePositiveHP(self):
        self.fsm.request('walking')
    def exitSlowWalking(self):
        base.localAvatar.stopTrackAnimToSpeed()
        self.ignore(base.localAvatar.uniqueName('positiveHP'))
| {
"content_hash": "72c760f6bf0c2093bb15bc30de7c68be",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 138,
"avg_line_length": 37.288288288288285,
"alnum_prop": 0.6731094467262624,
"repo_name": "ToontownUprising/src",
"id": "ca63a2b4eac5aac3497d42cb6c3ec860c8fc2712",
"size": "4139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/safezone/Walk.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "36"
},
{
"name": "Python",
"bytes": "16244807"
},
{
"name": "Shell",
"bytes": "11615"
}
],
"symlink_target": ""
} |
"""
MoinMoin - Configuration for a single wiki
If you run a single wiki only, you can omit the farmconfig.py config
file and just use wikiconfig.py - it will be used for every request
we get in that case.
Note that there are more config options than you'll find in
the version of this file that is installed by default; see
the module MoinMoin.config.multiconfig for a full list of names and their
default values.
Also, the URL http://moinmo.in/HelpOnConfiguration has
a list of config options.
** Please do not use this file for a wiki farm. Use the sample file
from the wikifarm directory instead! **
"""
import os
from MoinMoin.config import multiconfig, url_prefix_static
class Config(multiconfig.DefaultConfig):
    """Single-wiki MoinMoin configuration (overrides of DefaultConfig)."""
    # Critical setup ---------------------------------------------------
    # Directory containing THIS wikiconfig:
    wikiconfig_dir = os.path.abspath(os.path.dirname(__file__))
    # We assume that this config file is located in the instance directory, like:
    # instance_dir/
    # wikiconfig.py
    # data/
    # underlay/
    # If that's not true, feel free to just set instance_dir to the real path
    # where data/ and underlay/ is located:
    #instance_dir = '/where/ever/your/instance/is'
    instance_dir = wikiconfig_dir
    # Where your own wiki pages are (make regular backups of this directory):
    data_dir = os.path.join(instance_dir, 'data', '') # path with trailing /
    # Where system and help pages are (you may exclude this from backup):
    data_underlay_dir = os.path.join(instance_dir, 'underlay', '') # path with trailing /
    # The URL prefix we use to access the static stuff (img, css, js).
    # Note: moin runs a static file server at url_prefix_static path (relative
    # to the script url).
    # If you run your wiki script at the root of your site (/), just do NOT
    # use this setting and it will automatically work.
    # If you run your wiki script at /mywiki, you need to use this:
    #url_prefix_static = '/mywiki' + url_prefix_static
    # Wiki identity ----------------------------------------------------
    # Site name, used by default for wiki name-logo [Unicode]
    sitename = u'Untitled Wiki'
    # Wiki logo. You can use an image, text or both. [Unicode]
    # For no logo or text, use '' - the default is to show the sitename.
    # See also url_prefix setting below!
    logo_string = u'<img src="%s/common/moinmoin.png" alt="MoinMoin Logo">' % url_prefix_static
    # name of entry page / front page [Unicode], choose one of those:
    # a) if most wiki content is in a single language
    #page_front_page = u"MyStartingPage"
    # b) if wiki content is maintained in many languages
    #page_front_page = u"FrontPage"
    # The interwiki name used in interwiki links
    #interwikiname = u'UntitledWiki'
    # Show the interwiki name (and link it to page_front_page) in the Theme,
    # nice for farm setups or when your logo does not show the wiki's name.
    #show_interwiki = 1
    # Security ----------------------------------------------------------
    # This is checked by some rather critical and potentially harmful actions,
    # like despam or PackageInstaller action:
    superuser = [u"admin", ]
    # IMPORTANT: grant yourself admin rights! replace YourName with
    # your user name. See HelpOnAccessControlLists for more help.
    # All acl_rights_xxx options must use unicode [Unicode]
    #acl_rights_before = u"YourName:read,write,delete,revert,admin"
    # The default (ENABLED) password_checker will keep users from choosing too
    # short or too easy passwords. If you don't like this and your site has
    # rather low security requirements, feel free to DISABLE the checker by:
    #password_checker = None # None means "don't do any password strength checks"
    # Link spam protection for public wikis (Uncomment to enable)
    # Needs a reliable internet connection.
    #from MoinMoin.security.antispam import SecurityPolicy
    # Mail --------------------------------------------------------------
    # Configure to enable subscribing to pages (disabled by default)
    # or sending forgotten passwords.
    # SMTP server, e.g. "mail.provider.com" (None to disable mail)
    #mail_smarthost = ""
    # The return address, e.g u"Jürgen Wiki <noreply@mywiki.org>" [Unicode]
    #mail_from = u""
    # "user pwd" if you need to use SMTP AUTH
    #mail_login = ""
    # User interface ----------------------------------------------------
    # Add your wikis important pages at the end. It is not recommended to
    # remove the default links. Leave room for user links - don't use
    # more than 6 short items.
    # You MUST use Unicode strings here, but you need not use localized
    # page names for system and help pages, those will be used automatically
    # according to the user selected language. [Unicode]
    navi_bar = [
        # If you want to show your page_front_page here:
        #u'%(page_front_page)s',
        u'RecentChanges',
        u'FindPage',
        u'HelpContents',
    ]
    # The default theme anonymous or new users get
    theme_default = 'modern'
    # Language options --------------------------------------------------
    # See http://moinmo.in/ConfigMarket for configuration in
    # YOUR language that other people contributed.
    # The main wiki language, set the direction of the wiki pages
    language_default = 'en'
    # the following regexes should match the complete name when used in free text
    # the group 'all' shall match all, while the group 'key' shall match the key only
    # e.g. CategoryFoo -> group 'all' == CategoryFoo, group 'key' == Foo
    # moin's code will add ^ / $ at beginning / end when needed
    # You must use Unicode strings here [Unicode]
    page_category_regex = ur'(?P<all>Category(?P<key>(?!Template)\S+))'
    page_dict_regex = ur'(?P<all>(?P<key>\S+)Dict)'
    page_group_regex = ur'(?P<all>(?P<key>\S+)Group)'
    page_template_regex = ur'(?P<all>(?P<key>\S+)Template)'
    # Content options ---------------------------------------------------
    # Show users hostnames in RecentChanges
    show_hosts = 1
    # Enable graphical charts, requires gdchart.
    #chart_options = {'width': 600, 'height': 300}
| {
"content_hash": "5dc87ab788149416efde3deea1598bb3",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 95,
"avg_line_length": 38.9079754601227,
"alnum_prop": 0.6352885525070956,
"repo_name": "dpla/zen",
"id": "ea19e8279237379c8f46916afa01fc2a4d4b9d17",
"size": "6792",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/testwiki/wiki/config/wikiconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "467"
},
{
"name": "Perl",
"bytes": "3117"
},
{
"name": "Python",
"bytes": "376842"
},
{
"name": "Shell",
"bytes": "402"
}
],
"symlink_target": ""
} |
import os
import csv
import json
import config
# process the config file, make sure we're setup properly
# ----------------------------------------
# CSV_EXISTS = False
# does the CSV file exist?
# Build absolute paths to the input CSV and output JSON from config settings.
csv_filename = os.path.join(os.path.dirname(__file__), config.DATA_DIR + config.CSV_FILE)
print(csv_filename)
CSV_FILE = csv_filename
OUTPUT_FILE = os.path.join(os.path.dirname(__file__), config.DATA_DIR + config.OUTPUT_FILE)
# internal vars
# ----------------------------------------
# first row is event description fields
# second row is the actual event description
EQUIPMENT_MODE = False # we set this to true after Event Info and 4 lines
# we break into the equipment after 4 blank lines
LINEBREAK_COUNTER = 0
LINEBREAK_LIMIT = 4
# write dictionary to json
data = {}
# these 2 will hold the final, processed data
processed_event_information = {}
processed_equipment_list = []
# these hold data straight from the CSV file
raw_event_information = {}
raw_event_information_list = []
raw_equipment_list = []
def write_json(filename, data_dict):
    """Serialize *data_dict* as JSON to *filename* and print a completion note."""
    with open(filename, "w") as handle:
        json.dump(data_dict, handle)
    print(filename + " ... done")
def lists_to_dicts(keys_list, values_list):
    """Pair header names with row values and return the resulting dict.

    Keys are lower-cased with spaces replaced by underscores. Blank header
    cells are skipped — their column's value is ignored, but alignment of
    the remaining columns is preserved.

    Bug fix: the original advanced the value index only for non-blank keys,
    so every column after a blank header got the wrong value; it could also
    raise IndexError when values_list was shorter than keys_list. zip()
    keeps key/value pairs aligned and truncates to the shorter list.
    """
    tmp_dict = {}
    for raw_key, value in zip(keys_list, values_list):
        # make sure we have a header for this column
        if raw_key != '':
            # lowercase and spaces to underscore
            key = raw_key.lower().replace(" ", "_")
            tmp_dict[key] = value
    return tmp_dict
# process the CSV file
with open(CSV_FILE, "rb") as raw_csvfile:
    # NOTE(review): binary mode with csv.reader is Python-2 style; on
    # Python 3 this should be text mode with newline='' — confirm runtime.
    csvfile = csv.reader(raw_csvfile, quotechar='|')
    for row in csvfile:
        # handle the event information first
        if EQUIPMENT_MODE:
            raw_equipment_list.append(row)
        else:
            # A blank leading cell marks a separator row; after
            # LINEBREAK_LIMIT of them the equipment table begins.
            if row[0] == '':
                LINEBREAK_COUNTER = LINEBREAK_COUNTER + 1
                if LINEBREAK_COUNTER == LINEBREAK_LIMIT:
                    LINEBREAK_COUNTER = 0
                    EQUIPMENT_MODE = True
            else:
                raw_event_information_list.append(row)
# do the things
# ----------------------------------------
# process event information (first row = field names, second row = values)
processed_event_information = lists_to_dicts(raw_event_information_list[0], raw_event_information_list[1])
# process the rental list
rental_keys = raw_equipment_list.pop(0) # table headers
for piece in raw_equipment_list:
    processed_equipment_list.append(lists_to_dicts(rental_keys, piece))
# populate the data dict
# ----------------------------------------
data['event_information'] = processed_event_information
data['rental_equipment'] = processed_equipment_list
print("")
print(data)
print("")
# write out the file
# ----------------------------------------
write_json(OUTPUT_FILE, data)
| {
"content_hash": "91979dc4b6265a44cdaa7e27b8dc451d",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 106,
"avg_line_length": 29.727272727272727,
"alnum_prop": 0.6031260618416582,
"repo_name": "aztec8/cmi-rentals-frontend",
"id": "c6d86cadbd091f7017cae37497aaf30ed35d202a",
"size": "2943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/utils/cmi-csv-import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7903"
},
{
"name": "JavaScript",
"bytes": "112689"
},
{
"name": "Python",
"bytes": "3243"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the coordinate defaults on Event.

    NOTE(review): the defaults look swapped — 139.7191 is a Tokyo-area
    *longitude* and 35.7291 a *latitude*, yet they are assigned the other
    way around. Migrations are historical records, so this is flagged
    rather than changed; confirm against the models and fix forward.
    """

    dependencies = [
        ('event', '0043_auto_20170819_0340'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='latitude',
            field=models.FloatField(default=139.7191),
        ),
        migrations.AlterField(
            model_name='event',
            name='longitude',
            field=models.FloatField(default=35.7291),
        ),
    ]
| {
"content_hash": "e8ba60c8a20a07d1209499c52b3fa836",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 54,
"avg_line_length": 23.434782608695652,
"alnum_prop": 0.5695732838589982,
"repo_name": "internship2016/sovolo",
"id": "9f4cea5b2a79de2081164ff919b056026011fc0d",
"size": "612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/event/migrations/0044_auto_20170819_0348.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56092"
},
{
"name": "HTML",
"bytes": "132262"
},
{
"name": "JavaScript",
"bytes": "107993"
},
{
"name": "Python",
"bytes": "255017"
}
],
"symlink_target": ""
} |
from .labels import LabelsPlugin
from electrum.plugin import hook
class Plugin(LabelsPlugin):
    """Kivy GUI glue for the label-sync plugin."""
    @hook
    def load_wallet(self, wallet, window):
        # Remember the window so pull events can refresh its history view.
        self.window = window
        self.start_wallet(wallet)
    def on_pulled(self, wallet):
        # Called by LabelsPlugin after labels were pulled from the server.
        self.print_error('on pulled')
        self.window._trigger_update_history()
| {
"content_hash": "91c230a45e153cab91ce2f499b1c2f4c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 45,
"avg_line_length": 23.642857142857142,
"alnum_prop": 0.6706948640483383,
"repo_name": "cryptapus/electrum",
"id": "56f1d079e08b2868995da4d53c9ccb1feab5000b",
"size": "331",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "electrum/plugins/labels/kivy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1169"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "874"
},
{
"name": "NSIS",
"bytes": "7316"
},
{
"name": "Python",
"bytes": "2195369"
},
{
"name": "Shell",
"bytes": "20819"
}
],
"symlink_target": ""
} |
"""
A set of functions to get jobs from JOBS* and group them by object store
"""
from django.db import connection
from core.libs.cache import setCacheData
from core.libs.sqlcustom import fix_lob
from core.schedresource.utils import get_object_stores
import core.constants as const
def objectstore_summary_data(hours):
    """Fetch raw job counts grouped by status, site and object store.

    Runs an Oracle-specific aggregation over recent event-service jobs and
    returns rows of (jobstatus, count, computingsite, objse,
    comma-separated pandaid list).

    :param hours: look-back window in hours
    """
    sqlRequest = """
    SELECT JOBSTATUS, COUNT(JOBSTATUS) as COUNTJOBSINSTATE, COMPUTINGSITE, OBJSE, RTRIM(XMLAGG(XMLELEMENT(E,PANDAID,',').EXTRACT('//text()') ORDER BY PANDAID).GetClobVal(),',') AS PANDALIST
    FROM
    (SELECT DISTINCT t1.PANDAID, NUCLEUS, COMPUTINGSITE, JOBSTATUS, TASKTYPE, ES, CASE WHEN t2.OBJSTORE_ID > 0 THEN TO_CHAR(t2.OBJSTORE_ID) ELSE t3.destinationse END AS OBJSE
    FROM ATLAS_PANDABIGMON.COMBINED_WAIT_ACT_DEF_ARCH4 t1
    LEFT JOIN ATLAS_PANDA.JEDI_EVENTS t2 ON t1.PANDAID=t2.PANDAID and t1.JEDITASKID = t2.JEDITASKID and (t2.ziprow_id>0 or t2.OBJSTORE_ID > 0)
    LEFT JOIN ATLAS_PANDA.filestable4 t3 ON (t3.pandaid = t2.pandaid and t3.JEDITASKID = t2.JEDITASKID and t3.row_id=t2.ziprow_id) WHERE t1.ES in (1) and t1.CLOUD='WORLD' and t1.MODIFICATIONTIME > (sysdate - interval '{hours}' hour)
    AND t3.MODIFICATIONTIME > (sysdate - interval '{hours}' hour)
    )
    WHERE NOT OBJSE IS NULL
    GROUP BY JOBSTATUS, JOBSTATUS, COMPUTINGSITE, OBJSE
    order by OBJSE
    """.format(hours=hours)
    cur = connection.cursor()
    cur.execute(sqlRequest)
    # fix_lob materializes CLOB columns (the aggregated pandaid list) as strings
    rawsummary = fix_lob(cur)
    return rawsummary
def objectstore_summary(request, hours=12):
    """Summarize jobs per object store and computing site over a look-back window.

    :param request: HTTP request, used only to build cache keys for drill-down links
    :param hours: look-back window in hours
    :return: (mObjectStores, mObjectStoresSummary) — per-site and per-object-store
        mappings of job state -> {'count': N, 'tk': cache key}
    """
    object_stores = get_object_stores()
    rawsummary = objectstore_summary_data(hours)
    mObjectStores = {}
    mObjectStoresTk = {}
    if len(rawsummary) > 0:
        for row in rawsummary:
            # Resolve the OBJSE column to an object-store name when it is a
            # numeric id known to the catalogue; otherwise "Not defined".
            # NOTE(review): int(None) would raise TypeError (not ValueError)
            # here, but the SQL filters out NULL OBJSE — confirm.
            id = -1
            try:
                id = int(row[3])
            except ValueError:
                pass
            if not row[3] is None and id in object_stores:
                osName = object_stores[id]['name']
            else:
                osName = "Not defined"
            compsite = row[2]
            status = row[0]
            count = row[1]
            # Cache the pandaid list so the UI can link to the job list.
            tk = setCacheData(request, pandaid=row[4], compsite=row[2])
            if osName in mObjectStores:
                if not compsite in mObjectStores[osName]:
                    # New site under a known object store: pre-fill all states with zeros.
                    mObjectStores[osName][compsite] = {}
                    for state in const.JOB_STATES_SITE + ["closed"]:
                        mObjectStores[osName][compsite][state] = {'count': 0, 'tk': 0}
                mObjectStores[osName][compsite][status] = {'count': count, 'tk': tk}
                if not status in mObjectStoresTk[osName]:
                    mObjectStoresTk[osName][status] = []
                mObjectStoresTk[osName][status].append(tk)
            else:
                # First row for this object store: initialize both mappings.
                mObjectStores[osName] = {}
                mObjectStores[osName][compsite] = {}
                mObjectStoresTk[osName] = {}
                mObjectStoresTk[osName][status] = []
                for state in const.JOB_STATES_SITE + ["closed"]:
                    mObjectStores[osName][compsite][state] = {'count': 0, 'tk': 0}
                mObjectStores[osName][compsite][status] = {'count': count, 'tk': tk}
                mObjectStoresTk[osName][status].append(tk)
    # Getting tk's for parents
    for osName in mObjectStoresTk:
        for state in mObjectStoresTk[osName]:
            mObjectStoresTk[osName][state] = setCacheData(request, childtk=','.join(mObjectStoresTk[osName][state]))
    # Roll the per-site counts up into one summary row per object store.
    mObjectStoresSummary = {}
    for osName in mObjectStores:
        mObjectStoresSummary[osName] = {}
        for site in mObjectStores[osName]:
            for state in mObjectStores[osName][site]:
                if state in mObjectStoresSummary[osName]:
                    mObjectStoresSummary[osName][state]['count'] += mObjectStores[osName][site][state]['count']
                    mObjectStoresSummary[osName][state]['tk'] = 0
                else:
                    mObjectStoresSummary[osName][state] = {}
                    mObjectStoresSummary[osName][state]['count'] = mObjectStores[osName][site][state]['count']
                    mObjectStoresSummary[osName][state]['tk'] = 0
    # Attach the aggregated cache keys to non-empty summary cells.
    for osName in mObjectStoresSummary:
        for state in mObjectStoresSummary[osName]:
            if mObjectStoresSummary[osName][state]['count'] > 0:
                mObjectStoresSummary[osName][state]['tk'] = mObjectStoresTk[osName][state]
    return mObjectStores, mObjectStoresSummary
| {
"content_hash": "80a59713b477e258508d9612b4f39bc8",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 236,
"avg_line_length": 43.94117647058823,
"alnum_prop": 0.6135653726015172,
"repo_name": "PanDAWMS/panda-bigmon-core",
"id": "d019d78f1fe1e1bb3db347aa4cfe02d5677891d9",
"size": "4482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/pandajob/summary_os.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "394242"
},
{
"name": "Dockerfile",
"bytes": "5386"
},
{
"name": "HTML",
"bytes": "1445043"
},
{
"name": "JavaScript",
"bytes": "6858568"
},
{
"name": "Python",
"bytes": "1943651"
},
{
"name": "SCSS",
"bytes": "3129"
},
{
"name": "Shell",
"bytes": "1411"
}
],
"symlink_target": ""
} |
from VideoReader import *
from ArgParser import *
import sys, cv2
"""
usage:
python PlayStereoVideo.py <path_to_data> <basename><camera_num>.avi
for example:
python PlayStereoVideo.py /scail/group/deeplearning/driving_data/q50_data/4-2-14-monterey/ 17N_b1.avi
under this command, reader1 will load 17N_b1.avi
reader2 will load 17N_b2.avi
If you switch the command to python PlayStereoVideo.py /scail/group..../4-2-14-monterey/ 17N_b2.avi,
then you will have reader1 loading 17N_b2.avi and reader2 loading 17N_b1.avi. The point is that reader1 does not necessarily load camera 1.
"""
if __name__ == '__main__':
    # argv[1]: data directory; argv[2]: <basename><camera_num>.avi
    args = parse_args(sys.argv[1], sys.argv[2])

    # reader1 plays the camera named on the command line, reader2 the
    # opposite camera of the stereo pair (resolved by parse_args).
    reader1 = VideoReader(args['video'])
    reader2 = VideoReader(args['opposite_video'])

    # Show both streams side by side, half-size, until either runs out.
    while True:
        success1, frame1 = reader1.getNextFrame()
        success2, frame2 = reader2.getNextFrame()
        if not (success1 and success2):
            break
        cv2.imshow('video1', cv2.pyrDown(frame1))
        cv2.imshow('video2', cv2.pyrDown(frame2))
        cv2.waitKey(5)
| {
"content_hash": "8b7fcc27fe80648506fb26b1ce3a4386",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 144,
"avg_line_length": 36.10344827586207,
"alnum_prop": 0.6876790830945558,
"repo_name": "sameeptandon/sail-car-log",
"id": "df45773d2f52f52c27221acc7646efa81dcec1ac",
"size": "1047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process/tests/PlayStereoVideo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "283486"
},
{
"name": "C++",
"bytes": "428270"
},
{
"name": "CMake",
"bytes": "75122"
},
{
"name": "CSS",
"bytes": "1110"
},
{
"name": "Cuda",
"bytes": "31989"
},
{
"name": "HTML",
"bytes": "2414"
},
{
"name": "JavaScript",
"bytes": "12886"
},
{
"name": "Matlab",
"bytes": "14794"
},
{
"name": "Protocol Buffer",
"bytes": "4913"
},
{
"name": "Python",
"bytes": "870911"
},
{
"name": "Shell",
"bytes": "2144"
}
],
"symlink_target": ""
} |
# Public API of this package.
__all__ = (
    'registerWidget',
    'getCodes',
    'getCodeNames',
    'createBarcodeDrawing',
    'createBarcodeImageInMemory',
)
__version__ = '0.9'
__doc__ = '''Popular barcodes available as reusable widgets'''

# Registry of widget classes; populated by _reset() at import time.
_widgets = []
def registerWidget(widget):
    """Add *widget* to the module-level registry consulted by getCodes()."""
    _widgets.append(widget)
def _reset():
    """Empty and re-populate the barcode widget registry.

    Invoked once at import time and again whenever reportlab's global
    configuration is reset (see register_reset below), so the registry
    always reflects the full set of known widgets.
    """
    _widgets[:] = []
    # Imports live inside the function so a reset re-registers everything.
    from reportlab.graphics.barcode.widgets import (
        BarcodeI2of5, BarcodeCode128, BarcodeStandard93,
        BarcodeExtended93, BarcodeStandard39, BarcodeExtended39,
        BarcodeMSI, BarcodeCodabar, BarcodeCode11, BarcodeFIM,
        BarcodePOSTNET, BarcodeUSPS_4State, BarcodeCode128Auto,
        BarcodeECC200DataMatrix,
    )
    # newer codes will typically get their own module
    from reportlab.graphics.barcode.eanbc import (
        Ean13BarcodeWidget, Ean8BarcodeWidget, UPCA,
        Ean5BarcodeWidget, ISBNBarcodeWidget,
    )
    from reportlab.graphics.barcode.qr import QrCodeWidget

    all_widgets = (
        BarcodeI2of5,
        BarcodeCode128,
        BarcodeCode128Auto,
        BarcodeStandard93,
        BarcodeExtended93,
        BarcodeStandard39,
        BarcodeExtended39,
        BarcodeMSI,
        BarcodeCodabar,
        BarcodeCode11,
        BarcodeFIM,
        BarcodePOSTNET,
        BarcodeUSPS_4State,
        Ean13BarcodeWidget,
        Ean8BarcodeWidget,
        UPCA,
        Ean5BarcodeWidget,
        ISBNBarcodeWidget,
        QrCodeWidget,
        BarcodeECC200DataMatrix,
    )
    for widget in all_widgets:
        registerWidget(widget)
# Build the registry now, and ask reportlab to rebuild it whenever its
# global configuration is reset, keeping _widgets in sync.
_reset()
from reportlab.rl_config import register_reset
register_reset(_reset)
def getCodes():
    """Returns a dict mapping code names to widgets"""
    # The module exports a dictionary of names to widgets, to make it easy
    # for apps and doc tools to display information about them.
    return {widget.codeName: widget for widget in _widgets}
def getCodeNames():
    """Returns sorted list of supported bar code names"""
    # Iterating a dict yields its keys, so no explicit .keys() is needed.
    return sorted(getCodes())
def createBarcodeDrawing(codeName, **options):
    """This creates and returns a drawing with a barcode.

    codeName -- one of the names returned by getCodeNames().
    options  -- forwarded to the widget; additionally 'width'/'height'
                (number or 'auto') scale the drawing and a truthy
                'isoScale' preserves the aspect ratio while scaling.

    Raises KeyError for an unknown codeName and ValueError when the
    widget reports its value as invalid.
    """
    from reportlab.graphics.shapes import Drawing
    codes = getCodes()
    bcc = codes[codeName]
    width = options.pop('width', None)
    height = options.pop('height', None)
    isoScale = options.pop('isoScale', 0)
    kw = {}
    for k, v in options.items():
        # Forward only private options and attributes the widget declares.
        if k.startswith('_') or k in bcc._attrMap:
            kw[k] = v
    bc = bcc(**kw)

    # Robin's new ones validate when setting the value property.
    # Ty Sarna's old ones do not. We need to test.
    if hasattr(bc, 'validate'):
        bc.validate()  # raise exception if bad value
        if not bc.valid:
            raise ValueError("Illegal barcode with value '%s' in code '%s'" % (options.get('value', None), codeName))

    # size it after setting the data
    x1, y1, x2, y2 = bc.getBounds()
    w = float(x2 - x1)
    h = float(y2 - y1)
    sx = width not in ('auto', None)
    sy = height not in ('auto', None)
    if sx or sy:
        # Fix: the old `sx and width/w or 1.0` idiom silently fell back to
        # 1.0 whenever the computed scale happened to be 0.0.
        sx = width / w if sx else 1.0
        sy = height / h if sy else 1.0
        if isoScale:
            # Keep aspect ratio: shrink-to-fit unless both scales enlarge.
            if sx < 1.0 and sy < 1.0:
                sx = sy = max(sx, sy)
            else:
                sx = sy = min(sx, sy)
        w *= sx
        h *= sy
    else:
        sx = sy = 1

    # Translate so the barcode's bounds start at the drawing origin.
    d = Drawing(width=w, height=h, transform=[sx, 0, 0, sy, -sx * x1, -sy * y1])
    d.add(bc, "_bc")
    return d
def createBarcodeImageInMemory(codeName, **options):
    """This creates and returns barcode as an image in memory.
    Takes same arguments as createBarcodeDrawing and also an
    optional format keyword which can be anything acceptable
    to Drawing.asString eg gif, pdf, tiff, py ......
    """
    # Local renamed so the builtin `format` is not shadowed.
    image_format = options.pop('format', 'png')
    drawing = createBarcodeDrawing(codeName, **options)
    return drawing.asString(image_format)
| {
"content_hash": "263d486c0fd5d136273d8a3582d6478f",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 130,
"avg_line_length": 34.44444444444444,
"alnum_prop": 0.6086848635235732,
"repo_name": "Distrotech/reportlab",
"id": "25d98e719baffc4a00048070ff1e2e8fff269828",
"size": "5763",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/reportlab/graphics/barcode/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "721758"
},
{
"name": "C++",
"bytes": "668"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "2988317"
},
{
"name": "Shell",
"bytes": "2506"
}
],
"symlink_target": ""
} |
"""
Tests for dtypes.py
"""
# Import Python 3 features for future-proofing
# Deliberately do NOT import unicode_literals due to a bug in numpy dtypes:
# https://github.com/numpy/numpy/issues/2407
from __future__ import absolute_import, division, print_function
import unittest
import mrcfile.dtypes as dtypes
import mrcfile.utils as utils
from .helpers import AssertRaisesRegexMixin
class DtypesTest(AssertRaisesRegexMixin, unittest.TestCase):
    """Unit tests for mrcfile.dtypes"""

    def _assert_bitmasks_little_endian(self, dtype):
        # Bitmasks should always be little-endian, regardless of the byte
        # order requested for the rest of the header.
        for index in (1, 2, 3, 4):
            field = dtype['Bitmask {}'.format(index)]
            assert utils.byte_orders_equal(field.byteorder, '<')

    def test_invalid_byte_order_raises_exception(self):
        with self.assertRaisesRegex(ValueError, "Unrecognised byte order indicator"):
            _ = dtypes.get_ext_header_dtype('', 'a')

    def test_fei1_ext_header_with_native_byte_order(self):
        dtype = dtypes.get_ext_header_dtype(b'FEI1')
        assert dtype.itemsize == 768
        assert utils.byte_orders_equal(dtype['Metadata size'].byteorder, '=')
        # FEI1 headers lack the 'Scan rotation' field that FEI2 adds.
        with self.assertRaises(KeyError):
            _ = dtype['Scan rotation']
        self._assert_bitmasks_little_endian(dtype)

    def test_fei2_ext_header_with_native_byte_order(self):
        dtype = dtypes.get_ext_header_dtype(b'FEI2')
        assert dtype.itemsize == 888
        assert utils.byte_orders_equal(dtype['Metadata size'].byteorder, '=')
        assert dtype['Scan rotation'] is not None
        assert utils.byte_orders_equal(dtype['Scan rotation'].byteorder, '=')
        self._assert_bitmasks_little_endian(dtype)

    def test_fei1_ext_header_with_little_endian_byte_order(self):
        dtype = dtypes.get_ext_header_dtype(b'FEI1', '<')
        # Normal fields should match the requested byte order
        assert utils.byte_orders_equal(dtype['Metadata size'].byteorder, '<')
        assert utils.byte_orders_equal(dtype['Alpha tilt'].byteorder, '<')
        self._assert_bitmasks_little_endian(dtype)

    def test_fei2_ext_header_with_little_endian_byte_order(self):
        dtype = dtypes.get_ext_header_dtype(b'FEI2', '<')
        # Normal fields should match the requested byte order
        assert utils.byte_orders_equal(dtype['Metadata size'].byteorder, '<')
        assert utils.byte_orders_equal(dtype['Alpha tilt'].byteorder, '<')
        assert utils.byte_orders_equal(dtype['Scan rotation'].byteorder, '<')
        self._assert_bitmasks_little_endian(dtype)

    def test_fei1_ext_header_with_big_endian_byte_order(self):
        dtype = dtypes.get_ext_header_dtype(b'FEI1', '>')
        # Normal fields should match the requested byte order
        assert utils.byte_orders_equal(dtype['Metadata size'].byteorder, '>')
        assert utils.byte_orders_equal(dtype['Alpha tilt'].byteorder, '>')
        self._assert_bitmasks_little_endian(dtype)

    def test_fei2_ext_header_with_big_endian_byte_order(self):
        dtype = dtypes.get_ext_header_dtype(b'FEI2', '>')
        # Normal fields should match the requested byte order
        assert utils.byte_orders_equal(dtype['Metadata size'].byteorder, '>')
        assert utils.byte_orders_equal(dtype['Alpha tilt'].byteorder, '>')
        assert utils.byte_orders_equal(dtype['Scan rotation'].byteorder, '>')
        self._assert_bitmasks_little_endian(dtype)
# Allow running this test module directly without a test runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "ed285d64a657ecbdd1aa028266866091",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 85,
"avg_line_length": 51.48453608247423,
"alnum_prop": 0.6659991990388466,
"repo_name": "ccpem/mrcfile",
"id": "7b7988f96451e011cd2481a2849ff6d5c900b039",
"size": "5128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dtypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "263866"
}
],
"symlink_target": ""
} |
import os.path
from datetime import datetime
from django.contrib.auth.models import User
from django.contrib.auth.views import logout as logout_view
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test.utils import override_settings
from mock import patch
from nose.tools import eq_, ok_
from waffle import Flag
from mozillians.common.tests import TestCase, requires_login, requires_vouch
from mozillians.phonebook.models import Invite
from mozillians.phonebook.tests import InviteFactory, _get_privacy_fields
from mozillians.users.managers import MOZILLIANS, PRIVILEGED, PUBLIC
from mozillians.users.models import UserProfilePrivacyModel
from mozillians.users.tests import UserFactory
class SearchTests(TestCase):
    """The OpenSearch plugin must be served regardless of auth state."""

    def _check_plugin_response(self, response):
        # The endpoint always returns the OpenSearch description document.
        eq_(response.status_code, 200)
        eq_(response.get('content-type'),
            'application/opensearchdescription+xml')

    def test_search_plugin_anonymous(self):
        response = Client().get(reverse('phonebook:search_plugin'),
                                follow=True)
        self._check_plugin_response(response)

    def test_search_plugin_unvouched(self):
        user = UserFactory.create(vouched=False)
        with self.login(user) as client:
            response = client.get(reverse('phonebook:search_plugin'),
                                  follow=True)
        self._check_plugin_response(response)

    def test_search_plugin_vouched(self):
        user = UserFactory.create()
        with self.login(user) as client:
            response = client.get(reverse('phonebook:search_plugin'),
                                  follow=True)
        self._check_plugin_response(response)
class InviteTests(TestCase):
    """Tests for creating and deleting phonebook invites."""

    @requires_login()
    def test_invite_anonymous(self):
        # Anonymous access is rejected (redirect asserted by the decorator).
        client = Client()
        client.get(reverse('phonebook:invite'), follow=True)

    @requires_vouch()
    def test_invite_unvouched(self):
        # Unvouched users may not send invites (asserted by the decorator).
        user = UserFactory.create(vouched=False)
        with self.login(user) as client:
            client.get(reverse('phonebook:invite'), follow=True)

    def test_invite_get_vouched(self):
        user = UserFactory.create()
        with self.login(user) as client:
            response = client.get(reverse('phonebook:invite'), follow=True)
        self.assertTemplateUsed(response, 'phonebook/invite.html')

    @override_settings(CAN_VOUCH_THRESHOLD=1)
    @patch('mozillians.phonebook.views.messages.success')
    def test_invite_post_vouched(self, success_mock):
        # A valid POST creates the Invite row and flashes a success message.
        user = UserFactory.create()
        url = reverse('phonebook:invite', prefix='/en-US/')
        data = {
            'message': 'Join us foo!',
            'recipient': 'foo@example.com',
            'description': 'A test reason'
        }
        with self.login(user) as client:
            response = client.post(url, data, follow=True)
        self.assertTemplateUsed(response, 'phonebook/invite.html')
        ok_(Invite.objects
            .filter(recipient='foo@example.com', inviter=user.userprofile)
            .exists())
        ok_(success_mock.called)

    @override_settings(CAN_VOUCH_THRESHOLD=1)
    def test_invite_already_vouched(self):
        # Inviting an already-vouched user is a form error; no Invite saved.
        vouched_user = UserFactory.create()
        user = UserFactory.create()
        url = reverse('phonebook:invite', prefix='/en-US/')
        data = {'recipient': vouched_user.email}
        with self.login(user) as client:
            response = client.post(url, data, follow=True)
        self.assertTemplateUsed(response, 'phonebook/invite.html')
        ok_('recipient' in response.context['invite_form'].errors)
        eq_(Invite.objects.all().count(), 0)

    def test_invite_delete(self):
        user = UserFactory.create(userprofile={'is_vouched': True})
        invite = InviteFactory.create(inviter=user.userprofile)
        url = reverse('phonebook:delete_invite', prefix='/en-US/', kwargs={'invite_pk': invite.pk})
        with self.login(user) as client:
            response = client.post(url, follow=True)
        eq_(Invite.objects.all().count(), 0)
        eq_(response.status_code, 200)

    def test_invite_delete_invalid_requester(self):
        # Only the inviter may delete their invite; anyone else gets a 404.
        user = UserFactory.create(userprofile={'is_vouched': True})
        invite = InviteFactory.create(inviter=user.userprofile)
        url = reverse('phonebook:delete_invite', prefix='/en-US/', kwargs={'invite_pk': invite.pk})
        invalid_requester = UserFactory.create(userprofile={'is_vouched': True})
        with self.login(invalid_requester) as client:
            response = client.post(url)
        eq_(Invite.objects.all().count(), 1)
        eq_(response.status_code, 404)

    def test_invite_delete_redeemed(self):
        # Invites that have already been redeemed cannot be deleted.
        user = UserFactory.create(userprofile={'is_vouched': True})
        invite = InviteFactory.create(inviter=user.userprofile, redeemed=datetime.now())
        url = reverse('phonebook:delete_invite', prefix='/en-US/', kwargs={'invite_pk': invite.pk})
        with self.login(user) as client:
            response = client.post(url)
        eq_(Invite.objects.all().count(), 1)
        eq_(response.status_code, 404)

    def test_invite_delete_invalid_invite(self):
        # Deleting a nonexistent invite pk returns a 404.
        user = UserFactory.create(userprofile={'is_vouched': True})
        url = reverse('phonebook:delete_invite', prefix='/en-US/', kwargs={'invite_pk': '1'})
        with self.login(user) as client:
            response = client.post(url)
        eq_(response.status_code, 404)
class VouchFormTests(TestCase):
    """Tests for vouching users via the form on the profile page."""

    def test_vouch_not_vouched(self):
        # An unvouched voucher cannot vouch for anyone.
        user = UserFactory.create(vouched=False, userprofile={'privacy_full_name': PUBLIC})
        voucher = UserFactory.create(vouched=False)
        url = reverse('phonebook:profile_view', args=[user.username], prefix='/en-US/')
        data = {'vouchee': user.userprofile.id,
                'description': 'a reason'}
        with self.login(voucher) as client:
            client.post(url, data)
        # Re-fetch from the DB to see the server-side state.
        unvouched_user = User.objects.get(id=user.id)
        ok_(not unvouched_user.userprofile.is_vouched)

    def test_vouch_no_description(self):
        # A vouch without a description is rejected by the form.
        user = UserFactory.create(vouched=False)
        voucher = UserFactory.create()
        url = reverse('phonebook:profile_view', args=[user.username], prefix='/en-US/')
        data = {'vouchee': user.userprofile.id,
                'description': ''}
        with self.login(voucher) as client:
            client.post(url, data)
        unvouched_user = User.objects.get(id=user.id)
        ok_(not unvouched_user.userprofile.is_vouched)

    @override_settings(CAN_VOUCH_THRESHOLD=1)
    @patch('mozillians.phonebook.views.messages.info')
    def test_vouch_unvouched(self, info_mock):
        # A voucher above CAN_VOUCH_THRESHOLD can vouch; the view redirects
        # back to the (now vouched) profile and flashes an info message.
        user = UserFactory.create(vouched=False)
        user.userprofile.vouch(None)
        unvouched_user = UserFactory.create(vouched=False)
        url = reverse('phonebook:profile_view', args=[unvouched_user.username], prefix='/en-US/')
        data = {'vouchee': unvouched_user.userprofile.id,
                'description': 'a reason'}
        with self.login(user) as client:
            response = client.post(url, data, follow=True)
        unvouched_user = User.objects.get(id=unvouched_user.id)
        self.assertTemplateUsed(response, 'phonebook/profile.html')
        eq_(response.context['profile'], unvouched_user.userprofile)
        ok_(unvouched_user.userprofile.is_vouched)
        ok_(info_mock.called)
        self.assertRedirects(response, url)
class LogoutTests(TestCase):
    """Logout must route through Django's auth logout for any user state."""

    def _check_logout(self, user, logout_mock):
        # Logging out lands on the public home page via the real auth_logout
        # (the mock wraps the genuine view, so behavior is unchanged).
        with self.login(user) as client:
            response = client.get(reverse('phonebook:logout'), follow=True)
        eq_(response.status_code, 200)
        self.assertTemplateUsed(response, 'phonebook/home.html')
        ok_(logout_mock.called)

    @requires_login()
    def test_logout_anonymous(self):
        # Anonymous users are bounced to login (checked by the decorator).
        Client().get(reverse('phonebook:logout'), follow=True)

    @patch('mozillians.phonebook.views.auth_logout', wraps=logout_view)
    def test_logout_unvouched(self, logout_mock):
        self._check_logout(UserFactory.create(vouched=False), logout_mock)

    @patch('mozillians.phonebook.views.auth_logout', wraps=logout_view)
    def test_logout_vouched(self, logout_mock):
        self._check_logout(UserFactory.create(), logout_mock)
class ImageTests(TestCase):
    """Regression tests for profile photo upload edge cases."""

    def _upload_photo(self, user, file_path):
        """Post the profile edit form for *user* with the photo at *file_path*.

        Asserts the edit succeeds (HTTP 200 after redirects).
        """
        # Fix: open the photo via a context manager so the file handle is
        # always closed; the original leaked it.
        with open(file_path, 'rb') as photo:
            data = {
                'full_name': user.userprofile.full_name,
                'email': user.email,
                'username': user.username,
                'lat': 40.005814,
                'lng': -3.42071,
                'photo': photo,
                'externalaccount_set-MAX_NUM_FORMS': '1000',
                'externalaccount_set-INITIAL_FORMS': '0',
                'externalaccount_set-TOTAL_FORMS': '0',
                'language_set-MAX_NUM_FORMS': '1000',
                'language_set-INITIAL_FORMS': '0',
                'language_set-TOTAL_FORMS': '0',
                'basic_section': ''
            }
            data.update(_get_privacy_fields(MOZILLIANS))
            url = reverse('phonebook:profile_edit', prefix='/en-US/')
            with self.login(user) as client:
                response = client.post(url, data=data, follow=True)
        eq_(response.status_code, 200)

    def test_exif_broken(self):
        """Test image with broken EXIF data."""
        user = UserFactory.create()
        file_path = os.path.join(os.path.dirname(__file__), 'broken_exif.jpg')
        self._upload_photo(user, file_path)

    def test_no_rgb_colorspace(self):
        """Test with image not in RGB colorspace.

        Related bug 928959.
        """
        user = UserFactory.create()
        file_path = os.path.join(os.path.dirname(__file__),
                                 'broken_colorspace.gif')
        self._upload_photo(user, file_path)

    def test_converted_larger_image(self):
        """Test image which gets cleaned in forms.py.

        Bug 921243 was caused of a valid image, without EXIF
        data. That caused image._get_exif() in
        phonebook.forms.ProfileForm.clean_photo to raise an
        AttributeError and clean the image.

        Cleaning the image (by re-saving) did not set the new file
        size in the `photo` variable. If the cleaned image was larger
        than the original image, this behavior resulted in corrupted
        images being fed into PIL, which raises IOErrors.

        This test reproduces that behavior and should fail if we don't
        update the size of `photo` with the new cleaned image size.
        """
        user = UserFactory.create()
        file_path = os.path.join(os.path.dirname(__file__), 'broken_marshal.jpg')
        self._upload_photo(user, file_path)

    def test_save_profile_with_existing_photo(self):
        """Test profiles saves when keep the existing photo.

        Related bug 925256.
        """
        # Set a user with a photo
        user = UserFactory.create()
        file_path = os.path.join(os.path.dirname(__file__), 'normal_photo.jpg')
        self._upload_photo(user, file_path)

        # Re-save profile without uploading a new photo.
        data = {
            'full_name': user.userprofile.full_name,
            'email': user.email,
            'username': user.username,
            'lat': 40.005814,
            'lng': -3.42071,
            'externalaccount_set-MAX_NUM_FORMS': '1000',
            'externalaccount_set-INITIAL_FORMS': '0',
            'externalaccount_set-TOTAL_FORMS': '0',
            'language_set-MAX_NUM_FORMS': '1000',
            'language_set-INITIAL_FORMS': '0',
            'language_set-TOTAL_FORMS': '0',
            'basic_section': ''
        }
        # All privacy fields default to MOZILLIANS except the t-shirt field,
        # which stays restricted to privileged viewers.
        for field in UserProfilePrivacyModel._meta.fields:
            data[field.name] = MOZILLIANS
        data['privacy_tshirt'] = PRIVILEGED
        url = reverse('phonebook:profile_edit', prefix='/en-US/')
        with self.login(user) as client:
            response = client.post(url, data=data, follow=True)
        eq_(response.status_code, 200)
class DateValidationTests(TestCase):
    def test_date_mozillian_validates_in_different_locales(self):
        """Tests if date_mozillian validates when profile language is e.g. 'es'.

        Related bug 914448.
        """
        user = UserFactory.create(email='es@example.com')
        data = {
            'full_name': user.userprofile.full_name,
            'email': user.email,
            'username': user.username,
            'lat': 40.005814,
            'lng': -3.42071,
            'date_mozillian_year': '2013',
            'date_mozillian_month': '1',
            'externalaccount_set-MAX_NUM_FORMS': '1000',
            'externalaccount_set-INITIAL_FORMS': '0',
            'externalaccount_set-TOTAL_FORMS': '0',
            'language_set-MAX_NUM_FORMS': '1000',
            'language_set-INITIAL_FORMS': '0',
            'language_set-TOTAL_FORMS': '0',
            'contribution_section': ''
        }
        data.update(_get_privacy_fields(MOZILLIANS))
        # Post the edit form under the Spanish locale prefix.
        url = reverse('phonebook:profile_edit', prefix='/es/')
        with self.login(user) as client:
            response = client.post(url, data=data, follow=True)
        eq_(response.status_code, 200)
class AboutTests(TestCase):
    def test_base(self):
        """The about page renders (HTTP 200) for anonymous visitors."""
        url = reverse('phonebook:about')
        client = Client()
        response = client.get(url, follow=True)
        eq_(response.status_code, 200)
        # Fix: pass the response explicitly — calling assertTemplateUsed
        # with only a template name returns a context manager and the
        # original call asserted nothing.
        self.assertTemplateUsed(response, 'phonebook/about.html')
class AboutDinoMcVouchTests(TestCase):
    def test_base(self):
        """The about-dinomcvouch page renders for anonymous visitors."""
        url = reverse('phonebook:about-dinomcvouch')
        client = Client()
        response = client.get(url, follow=True)
        eq_(response.status_code, 200)
        # Fix: pass the response explicitly — calling assertTemplateUsed
        # with only a template name returns a context manager and the
        # original call asserted nothing.
        self.assertTemplateUsed(response, 'phonebook/about-dinomcvouch.html')
class VouchTests(TestCase):
    """Tests for the waffle-flagged autovouch/unvouch debug views."""

    def test_vouch_disabled(self):
        # Test that 'vouched' view is not active by default.
        user = UserFactory.create(vouched=False)
        url = reverse('phonebook:profile_vouch', args=[user.username])
        with self.login(user) as client:
            response = client.get(url, follow=True)
        eq_(response.status_code, 404)
        user = User.objects.get(id=user.id)
        ok_(not user.userprofile.is_vouched)

    def test_unvouch_disabled(self):
        # Test that 'unvouched' view is not active by default.
        user = UserFactory.create(vouched=False)
        url = reverse('phonebook:profile_unvouch', args=[user.username])
        with self.login(user) as client:
            response = client.get(url, follow=True)
        eq_(response.status_code, 404)
        user = User.objects.get(id=user.id)
        ok_(not user.userprofile.is_vouched)

    def test_vouch(self):
        # With the waffle flag enabled, visiting the view auto-vouches.
        Flag.objects.create(name='testing-autovouch-views', everyone=True)
        user = UserFactory.create(vouched=False)
        ok_(not user.userprofile.is_vouched)
        url = reverse('phonebook:profile_vouch', args=[user.username])
        with self.login(user) as client:
            client.get(url, follow=True)
        # Re-fetch: the vouch is written server-side.
        user = User.objects.get(id=user.id)
        eq_(user.userprofile.vouches_received.all().count(), 1)
        eq_(user.userprofile.vouches_received.all()[0].autovouch, True)

    def test_unvouch(self):
        # With the flag enabled, the unvouch view removes received vouches.
        Flag.objects.create(name='testing-autovouch-views', everyone=True)
        user = UserFactory.create()
        ok_(user.userprofile.is_vouched)
        url = reverse('phonebook:profile_unvouch', args=[user.username])
        with self.login(user) as client:
            client.get(url, follow=True)
        user = User.objects.get(id=user.id)
        ok_(not user.userprofile.vouches_received.all().exists())
| {
"content_hash": "dbae11078b19aa8ba4c2948005f6cab8",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 99,
"avg_line_length": 40.83589743589744,
"alnum_prop": 0.6270249905814391,
"repo_name": "brian-yang/mozillians",
"id": "b18d84906ae6686e9437b6a3ad29f17587e8444a",
"size": "15926",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mozillians/phonebook/tests/test_views/test_views_misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1986"
},
{
"name": "CSS",
"bytes": "210671"
},
{
"name": "HTML",
"bytes": "184994"
},
{
"name": "JavaScript",
"bytes": "154038"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "9184371"
},
{
"name": "Shell",
"bytes": "7758"
}
],
"symlink_target": ""
} |
import os
import sys

# Make the repository root importable so the `rtgraph` package (used below
# to derive the documented version number) resolves when Sphinx runs from
# the docs/ directory.
sys.path.insert(0, os.path.abspath('..'))

from rtgraph.core.constants import Constants
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'RTGraph'
copyright = '2016, Sebastian Sepulveda'
author = 'Sebastian Sepulveda'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
major, minor, patch = Constants.app_version.split(".")
version = "{}.{}".format(major, minor)
# The full version, including alpha/beta/rc tags.
# Fix: this was previously set to only the last component (e.g. "3" for
# app_version "1.2.3"); Sphinx expects `release` to be the complete
# version string.
release = Constants.app_version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'RTGraphdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'RTGraph.tex', 'RTGraph Documentation',
     'Sebastian Sepulveda', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'rtgraph', 'RTGraph Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description below is still the sphinx-quickstart
# placeholder — replace with a real one-line project description.
texinfo_documents = [
    (master_doc, 'RTGraph', 'RTGraph Documentation',
     author, 'RTGraph', 'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "bfd6c191cd87fe9f2003726b1246903e",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 78,
"avg_line_length": 30.08450704225352,
"alnum_prop": 0.6638576779026217,
"repo_name": "ssepulveda/RTGraph",
"id": "2e56a21ee0064aad8337abb9489cfc89eeb27322",
"size": "4955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "777"
},
{
"name": "Python",
"bytes": "56288"
}
],
"symlink_target": ""
} |
__revision__ = "test/CacheDir/environment.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"

"""
Verify that whether or not a target gets retrieved from a CacheDir
is configurable by construction environment.
"""

import os

import TestSCons

test = TestSCons.TestSCons()

# Paths to the cache and to the derived files built by the SConscript below.
cache = test.workpath('cache')
src_aaa_out = test.workpath('src', 'aaa.out')
src_bbb_out = test.workpath('src', 'bbb.out')
src_ccc_out = test.workpath('src', 'ccc.out')
src_cat_out = test.workpath('src', 'cat.out')
src_all = test.workpath('src', 'all')

test.subdir('cache', 'src')
test.write(['src', 'SConstruct'], """\
CacheDir(r'%(cache)s')
SConscript('SConscript')
""" % locals())
test.write(['src', 'SConscript'], """\
def cat(env, source, target):
target = str(target[0])
open('cat.out', 'ab').write(target + "\\n")
f = open(target, "wb")
for src in source:
f.write(open(str(src), "rb").read())
f.close()
env_cache = Environment(BUILDERS={'Cat':Builder(action=cat)})
env_nocache = env_cache.Clone()
env_nocache.CacheDir(None)
env_cache.Cat('aaa.out', 'aaa.in')
env_nocache.Cat('bbb.out', 'bbb.in')
env_cache.Cat('ccc.out', 'ccc.in')
env_nocache.Cat('all', ['aaa.out', 'bbb.out', 'ccc.out'])
""")
test.write(['src', 'aaa.in'], "aaa.in\n")
test.write(['src', 'bbb.in'], "bbb.in\n")
test.write(['src', 'ccc.in'], "ccc.in\n")
# Verify that building with -n and an empty cache reports that proper
# build operations would be taken, but that nothing is actually built
# and that the cache is still empty.
test.run(chdir = 'src', arguments = '-n .', stdout = test.wrap_stdout("""\
cat(["aaa.out"], ["aaa.in"])
cat(["bbb.out"], ["bbb.in"])
cat(["ccc.out"], ["ccc.in"])
cat(["all"], ["aaa.out", "bbb.out", "ccc.out"])
"""))
test.must_not_exist(src_aaa_out)
test.must_not_exist(src_bbb_out)
test.must_not_exist(src_ccc_out)
test.must_not_exist(src_all)
# Even if you do -n, the cache will be configured.
test.fail_test(os.listdir(cache) != ['config'])
# Verify that a normal build works correctly, and clean up.
# This should populate the cache with our derived files.
test.run(chdir = 'src', arguments = '.')
test.must_match(['src', 'all'], "aaa.in\nbbb.in\nccc.in\n")
test.must_match(src_cat_out, "aaa.out\nbbb.out\nccc.out\nall\n")
test.up_to_date(chdir = 'src', arguments = '.')
test.run(chdir = 'src', arguments = '-c .')
test.unlink(src_cat_out)
# Verify that we now retrieve the derived files from cache,
# not rebuild them. Then clean up.
test.run(chdir = 'src', arguments = '.', stdout = test.wrap_stdout("""\
Retrieved `aaa.out' from cache
cat(["bbb.out"], ["bbb.in"])
Retrieved `ccc.out' from cache
cat(["all"], ["aaa.out", "bbb.out", "ccc.out"])
"""))
test.must_match(src_cat_out, "bbb.out\nall\n")
test.up_to_date(chdir = 'src', arguments = '.')
test.run(chdir = 'src', arguments = '-c .')
test.unlink(src_cat_out)
# Verify that rebuilding with -n reports that files were retrieved
# from the cache, but that nothing really was.
test.run(chdir = 'src', arguments = '-n .', stdout = test.wrap_stdout("""\
Retrieved `aaa.out' from cache
cat(["bbb.out"], ["bbb.in"])
Retrieved `ccc.out' from cache
cat(["all"], ["aaa.out", "bbb.out", "ccc.out"])
"""))
test.must_not_exist(src_aaa_out)
test.must_not_exist(src_bbb_out)
test.must_not_exist(src_ccc_out)
test.must_not_exist(src_all)
# Verify that rebuilding with -s retrieves everything from the cache
# even though it doesn't report anything.
test.run(chdir = 'src', arguments = '-s .', stdout = "")
test.must_match(['src', 'all'], "aaa.in\nbbb.in\nccc.in\n")
test.must_match(src_cat_out, "bbb.out\nall\n")
test.up_to_date(chdir = 'src', arguments = '.')
test.run(chdir = 'src', arguments = '-c .')
test.unlink(src_cat_out)
# Verify that updating one input file builds its derived file and
# dependency but that the other files are retrieved from cache.
test.write(['src', 'bbb.in'], "bbb.in 2\n")
test.run(chdir = 'src', arguments = '.', stdout = test.wrap_stdout("""\
Retrieved `aaa.out' from cache
cat(["bbb.out"], ["bbb.in"])
Retrieved `ccc.out' from cache
cat(["all"], ["aaa.out", "bbb.out", "ccc.out"])
"""))
test.must_match(['src', 'all'], "aaa.in\nbbb.in 2\nccc.in\n")
test.must_match(src_cat_out, "bbb.out\nall\n")
test.up_to_date(chdir = 'src', arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "1540628d1d89ecd04c8335a578cb562d",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 102,
"avg_line_length": 30.28965517241379,
"alnum_prop": 0.6564207650273224,
"repo_name": "EmanueleCannizzaro/scons",
"id": "bcd0255ce3074b35a08d08fd140c1320190aef9b",
"size": "5527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/CacheDir/environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
"""
Test lldb data formatter subsystem.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestDataFormatterLibcxxQueue(TestBase):
    """Data-formatter checks for libc++ std::queue."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        # Let the base class do its setup, then record the namespace the
        # formatter is expected to report for libc++ containers.
        TestBase.setUp(self)
        self.namespace = 'std'

    def check_variable(self, name):
        """Assert *name* is a valid std::queue whose children are 1..5."""
        var = self.frame().FindVariable(name)
        self.assertTrue(var.IsValid())

        expected_type = self.namespace + '::queue'
        self.assertTrue(expected_type in var.GetDisplayTypeName())
        self.assertEqual(var.GetNumChildren(), 5)

        for index in range(5):
            child = var.GetChildAtIndex(index)
            self.assertTrue(child.IsValid())
            self.assertEqual(child.GetValueAsSigned(), index + 1)

    @expectedFailureAll(bugnumber="llvm.org/pr36109", debug_info="gmodules", triple=".*-android")
    @add_test_categories(["libc++"])
    def test(self):
        """Test that std::queue is displayed correctly"""
        self.build()
        lldbutil.run_to_source_breakpoint(self, '// break here',
                                          lldb.SBFileSpec("main.cpp", False))

        self.check_variable('q1')
        self.check_variable('q2')
| {
"content_hash": "624e6ddcf98caa3abd7bff9b2ef7fa46",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 97,
"avg_line_length": 28.833333333333332,
"alnum_prop": 0.6350123864574732,
"repo_name": "endlessm/chromium-browser",
"id": "f2f9f670c714476c246e161cdd02ec447a6f5ea3",
"size": "1211",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx/queue/TestDataFormatterLibcxxQueue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Experimental API for optimizing `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import contrib_op_loader # pylint: disable=unused-import
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
def optimize(optimizations=None):
  """A transformation that applies optimizations.

  Args:
    optimizations: (Optional.) A `tf.string` vector `tf.Tensor` identifying
      optimizations to use. If not specified, the default set of optimizations
      is applied.

  Returns:
    A `Dataset` transformation function, which can be passed to
    @{tf.data.Dataset.apply}.
  """

  def _transformation(dataset):
    """Wrap `dataset` in an `OptimizeDataset`; contents are unchanged."""
    return OptimizeDataset(dataset, optimizations)

  return _transformation
class OptimizeDataset(dataset_ops.Dataset):
  """A `Dataset` that acts as an identity, and applies optimizations."""

  def __init__(self, input_dataset, optimizations):
    """See `optimize()` for details."""
    super(OptimizeDataset, self).__init__()
    self._input_dataset = input_dataset
    # An empty list means "apply the default set of optimizations".
    if optimizations is None:
      optimizations = []
    # Optimization names are carried as a tf.string tensor into the op.
    self._optimizations = ops.convert_to_tensor(
        optimizations, dtype=dtypes.string, name="optimizations")

  def _as_variant_tensor(self):
    # Wrap the input dataset's variant tensor in the OptimizeDataset op,
    # forwarding the (dense) shapes/types unchanged since this dataset is
    # an identity over its input's elements.
    return gen_dataset_ops.optimize_dataset(
        self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
        self._optimizations,
        output_shapes=nest.flatten(
            sparse.as_dense_shapes(self.output_shapes, self.output_classes)),
        output_types=nest.flatten(
            sparse.as_dense_types(self.output_types, self.output_classes)))

  # Element structure is delegated verbatim to the wrapped dataset.
  @property
  def output_classes(self):
    return self._input_dataset.output_classes

  @property
  def output_shapes(self):
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
| {
"content_hash": "2e857b5a04e1c235298d414cadcf5662",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 97,
"avg_line_length": 34.06060606060606,
"alnum_prop": 0.7215302491103203,
"repo_name": "dendisuhubdy/tensorflow",
"id": "cad41bce2961f29a7591fe3d382d1ab35a6b38b4",
"size": "2937",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/data/python/ops/optimization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "304178"
},
{
"name": "C++",
"bytes": "43473103"
},
{
"name": "CMake",
"bytes": "202538"
},
{
"name": "Go",
"bytes": "1148824"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "755551"
},
{
"name": "Jupyter Notebook",
"bytes": "2211560"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48603"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "36820408"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "428510"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
} |
from wtforms import TextField, SelectField, TextAreaField
from wtforms.validators import DataRequired, URL, ValidationError
from wtforms.widgets import TextArea

from ..datacenter import DatacenterCheckForm
class HeaderList(object):
    """Custom wtform validator for headers.

    Accepts a multi-line string in which every non-empty line is an HTTP
    header in ``Key: Value`` form. Raises ``ValidationError`` when a line
    cannot be parsed.
    """

    def __call__(self, form, field):
        try:
            for header in str.splitlines(str(field.data)):
                header = header.strip()
                # Ignore empty lines
                if not header:
                    continue
                # Split on the FIRST ':' only, so header values that
                # themselves contain colons (e.g. "Host: example.com:8080")
                # are accepted. The original split(':') rejected them.
                key, value = header.split(':', 1)
                key = key.strip()
                value = value.strip()
                # Explicit check instead of `assert` — assertions are
                # stripped under `python -O`, silently disabling validation.
                if not key or not value:
                    raise ValueError('empty header name or value')
        except Exception:
            # ValidationError was previously never imported, so this line
            # raised NameError instead of a form validation error; the
            # import is now provided at module level.
            raise ValidationError('Invalid headers. Use key:value format.')
class CheckForm(DatacenterCheckForm):
    '''HTTP Keyword Search monitor form for the dashboard.

    Performs an HTTP GET request and searches the returned body for a
    keyword (plain string or regular expression). The previous docstring
    said "HTTP Get Status Code", which did not match this form's title.
    '''
    title = "HTTP: Keyword Search"
    description = """
    This monitor will perform an HTTP GET request and search the returned data for a keyword. Keywords can be Regular Expressions or simple strings.
    """
    # Copy the inherited mapping before extending it. The original code
    # called .update() on DatacenterCheckForm.placeholders directly, which
    # mutated the shared base-class dict and leaked this form's
    # placeholders into every other form derived from it.
    placeholders = dict(DatacenterCheckForm.placeholders)
    placeholders.update({
        'keyword' : 'Could not connect to Database',
    })

    present_choices = [
        ("True", "True"),
        ("False", "False")
    ]

    regex_choices = [
        ("True", "True"),
        ("False", "False")
    ]

    url = TextField(
        "URL",
        description = """
        Website or Application URL
        """,
        validators=[URL(message='Must be a url such as "https://127.0.0.1"')])
    host = TextField(
        "Host Header",
        description = """
        Host header, allows users to make the request to a domain where the URL is different. For example a request to http://10.0.0.1 with a Host header of example.com will return the index for example.com
        """,
        validators=[DataRequired(message='Host header is a required field')])
    keyword = TextField(
        "Keyword",
        description = """
        Keyword to search for. This can be anything from a simple word to HTML code or Database Error
        """,
        validators=[DataRequired(message='Keyword is a required field')])
    present = SelectField(
        "Presence",
        description = """
        If True the Monitor will be True when the Keyword is present and False when not. If set to False the monitor will be False when the Keyword is present.
        """,
        choices=present_choices,
        validators=[DataRequired(message='Present is a required field')])
    extra_headers = TextAreaField(
        'Additional HTTP Headers',
        description = """
        Use this field to add additional HTTP headers. Values are in a : seperated Key:Value format.
        """,
        validators=[HeaderList()],
    )
    regex = SelectField(
        "Regex",
        description = """
        Specify whether the Keyword is a Regular Expression or not
        """,
        choices=regex_choices,
        validators=[DataRequired(message='Regex is a required field')])
# No behaviour when executed directly; this module is consumed by the
# dashboard via import.
if __name__ == '__main__':
    pass
| {
"content_hash": "5ef4bc89c5126cfa8d477f5221df5e36",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 210,
"avg_line_length": 36.39080459770115,
"alnum_prop": 0.5988629185091598,
"repo_name": "madflojo/cloudroutes-service",
"id": "a697af4daff7a3eecbf52c111e21de05710c3bad",
"size": "3452",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/web/monitorforms/http-keyword/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17816"
},
{
"name": "HTML",
"bytes": "227943"
},
{
"name": "JavaScript",
"bytes": "3271"
},
{
"name": "Python",
"bytes": "678083"
},
{
"name": "Shell",
"bytes": "5859"
}
],
"symlink_target": ""
} |
from wsgiref import util
from pytest import raises
from watson.di.container import IocContainer
from watson.events.types import Event
from watson.http.messages import Request, Response
from watson.routing.routers import DictRouter
from watson.routing.routes import RouteMatch, LiteralRoute
from watson.framework.exceptions import NotFoundError, InternalServerError
from watson.framework import listeners, config, views, applications
from tests.watson.framework.support import sample_environ
class TestBaseListener(object):
    # listeners.Base is abstract: instantiating it without a concrete
    # __call__ implementation must raise TypeError.
    def test_missing_call(self):
        with raises(TypeError):
            listeners.Base()
class TestRouteListener(object):
    def create_event(self, **kwargs):
        """Build a routing Event for a single-route ('/') application.

        Extra kwargs override WSGI environ entries (e.g. PATH_INFO).
        """
        router = DictRouter({'home': {'path': '/'}})
        environ = {}
        util.setup_testing_defaults(environ)
        environ.update(**kwargs)
        context = {'request': Request.from_environ(environ)}
        event = Event(
            'TestEvent',
            params={'router': router,
                    'context': context})
        return event

    def test_response(self):
        # The default testing environ targets '/', which the router maps.
        listener = listeners.Route()
        result = listener(self.create_event())
        assert isinstance(result, RouteMatch)

    def test_not_found(self):
        # '/test' is not in the router's dict, so routing must fail.
        with raises(NotFoundError):
            listener = listeners.Route()
            listener(self.create_event(PATH_INFO='/test'))
class TestDispatchExecuteListener(object):
    def test_execute(self):
        # A route with no controller option cannot be dispatched, which the
        # listener surfaces as an InternalServerError.
        with raises(InternalServerError):
            route = LiteralRoute('test', path='/')
            match = RouteMatch(route, {})
            event = Event('something', params={'context': {'route_match': match}})
            listener = listeners.DispatchExecute({'404': 'page/404'})
            listener(event)

    def test_short_circuit(self):
        # A controller that returns a Response directly short-circuits the
        # normal view pipeline.
        environ = sample_environ()
        route = LiteralRoute(
            'test',
            path='/',
            options={'controller': 'tests.watson.framework.support.ShortCircuitedController'})
        match = RouteMatch(route, {})
        context = {'request': Request.from_environ(environ), 'route_match': match}
        event = Event(
            'something',
            params={'container': IocContainer(), 'context': context})
        listener = listeners.DispatchExecute({'404': 'page/404'})
        response, view_model = listener(event)
        assert isinstance(response, Response)

    def test_returned_view_model(self):
        # Dispatching the 'view_model' action yields a views.Model whose
        # template is derived from the controller/action names.
        environ = sample_environ()
        route = LiteralRoute(
            'test',
            path='/',
            options={'controller': 'tests.watson.framework.support.SampleActionController'})
        match = RouteMatch(route, {'action': 'view_model'})
        context = {'request': Request.from_environ(environ), 'route_match': match}
        event = Event(
            'something',
            params={'container': IocContainer(), 'context': context})
        listener = listeners.DispatchExecute({'404': 'page/404'})
        response, view_model = listener(event)
        assert isinstance(view_model, views.Model)
        assert view_model.template == 'sampleactioncontroller/view_model'

    def test_overriden_template(self):
        # An action may override the default template; the listener must
        # honour it while keeping the controller prefix.
        environ = sample_environ()
        route = LiteralRoute(
            'test',
            path='/',
            options={'controller': 'tests.watson.framework.support.SampleActionController'})
        match = RouteMatch(route, {'action': 'view_model_template'})
        context = {'request': Request.from_environ(environ), 'route_match': match}
        event = Event(
            'something',
            params={'container': IocContainer(), 'context': context})
        listener = listeners.DispatchExecute({'404': 'page/500'})
        response, view_model = listener(event)
        assert isinstance(view_model, views.Model)
        assert view_model.template == 'sampleactioncontroller/404'
class TestExceptionListener(object):
    # TODO: no coverage for listeners.Exception yet; placeholder kept so the
    # suite's structure mirrors the listener module.
    pass
class TestRenderListener(object):
    def test_execute(self):
        app = applications.Http()
        listener = listeners.Render(config.views)
        context = {
            'response': Response(200),
        }
        vm = views.Model(format='text/plain', data={'content': 'test'})
        params = {'context': context, 'view_model': vm, 'container': app.container}
        event = Event('render', params=params)
        # error raised as no template exists
        with raises(InternalServerError):
            listener(event)
        # An unknown format ('exe') has no renderer configured either.
        with raises(InternalServerError):
            vm.format = 'exe'
            listener(event)
        # JSON needs no template file, so rendering succeeds and sets the
        # matching Content-Type header.
        vm.format = 'json'
        response = listener(event)
        assert response.status_code == 200
        assert response.headers['Content-Type'] == 'application/json'
| {
"content_hash": "aeae5384dc618ab7a5e18a0a55185b2f",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 94,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.623472397808681,
"repo_name": "watsonpy/watson-framework",
"id": "eaf24bf042bfaf938349b7961e317e9ceb0cf45d",
"size": "4770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/watson/framework/test_listeners.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "24155"
},
{
"name": "Python",
"bytes": "128983"
}
],
"symlink_target": ""
} |
from ...core import Expr, Integer, Tuple
from ...functions import floor
from ...logic import true
from .matexpr import MatrixExpr
def normalize(i, parentsize):
    """Canonicalize an index into a (start, stop, step) triple.

    ``i`` may be a ``slice``, a scalar index, or a 2/3-element sequence;
    ``parentsize`` is the (possibly symbolic) size of the sliced dimension.
    Comparisons use ``== true`` because the operands may be symbolic: a
    branch is taken only when the relation is provably true.
    """
    if isinstance(i, slice):
        i = (i.start, i.stop, i.step)
    if not isinstance(i, (tuple, list, Tuple)):
        # Scalar index: wrap a provably-negative index, then widen it to a
        # length-1 slice so all cases share the triple representation.
        if (i < Integer(0)) == true:
            i += parentsize
        i = (i, i + 1, 1)
    i = list(i)
    if len(i) == 2:
        # (start, stop) given without a step: default the step to 1.
        i.append(1)
    start, stop, step = i
    # Fill in defaults: start=0, stop=parentsize, step=1.
    start = start or 0
    if stop is None:
        stop = parentsize
    # Wrap provably-negative bounds relative to the parent dimension.
    if (start < Integer(0)) == true:
        start += parentsize
    if (stop < Integer(0)) == true:
        stop += parentsize
    step = step or 1
    # Reject slices that provably select nothing (or run backwards).
    if ((stop - start) * step < Integer(1)) == true:
        raise IndexError()
    return start, stop, step
class MatrixSlice(MatrixExpr):
    """A MatrixSlice of a Matrix Expression

    Examples
    ========

    >>> M = ImmutableMatrix(4, 4, range(16))
    >>> print(M)
    Matrix([
    [ 0,  1,  2,  3],
    [ 4,  5,  6,  7],
    [ 8,  9, 10, 11],
    [12, 13, 14, 15]])
    >>> B = MatrixSlice(M, (0, 2), (2, 4))
    >>> print(ImmutableMatrix(B))
    Matrix([
    [2, 3],
    [6, 7]])
    """

    # args layout: (parent matrix, row (start, stop, step), col triple).
    parent = property(lambda self: self.args[0])
    rowslice = property(lambda self: self.args[1])
    colslice = property(lambda self: self.args[2])

    def __new__(cls, parent, rowslice, colslice):
        # Canonicalize both slices against the parent's dimensions.
        rowslice = normalize(rowslice, parent.shape[0])
        colslice = normalize(colslice, parent.shape[1])
        # Reject slices that provably fall outside the parent matrix.
        if ((0 > rowslice[0]) == true or
            (parent.shape[0] < rowslice[1]) == true or
            (0 > colslice[0]) == true or
            (parent.shape[1] < colslice[1]) == true):
            raise IndexError()
        if isinstance(parent, MatrixSlice):
            # Slicing a slice: collapse into a single slice of the
            # grandparent instead of nesting.
            return mat_slice_of_slice(parent, rowslice, colslice)
        return Expr.__new__(cls, parent, Tuple(*rowslice), Tuple(*colslice))

    @property
    def shape(self):
        # Number of selected rows/cols; floor-divide by the step when the
        # stride is not 1.
        rows = self.rowslice[1] - self.rowslice[0]
        rows = rows if self.rowslice[2] == 1 else floor(rows/self.rowslice[2])
        cols = self.colslice[1] - self.colslice[0]
        cols = cols if self.colslice[2] == 1 else floor(cols/self.colslice[2])
        return rows, cols

    def _entry(self, i, j):
        # Map slice-local indices back onto the parent's coordinates.
        return self.parent._entry(i*self.rowslice[2] + self.rowslice[0],
                                  j*self.colslice[2] + self.colslice[0])

    @property
    def on_diag(self):
        # True when the slice selects the same rows and columns (a block
        # centred on the parent's diagonal).
        return self.rowslice == self.colslice
def slice_of_slice(s, t):
    """Compose two (start, stop, step) triples.

    ``t`` selects elements from the range described by ``s``; the result is
    the single triple, in the parent's coordinates, that selects them
    directly.
    """
    outer_start, outer_stop, outer_step = s
    inner_start, inner_stop, inner_step = t

    new_start = outer_start + inner_start * outer_step
    new_stop = outer_start + outer_step * inner_stop
    new_step = outer_step * inner_step

    # The composed slice must not extend past the outer slice's end.
    assert new_stop <= outer_stop

    return new_start, new_stop, new_step
def mat_slice_of_slice(parent, rowslice, colslice):
    """Collapse nested matrix slices

    >>> X = MatrixSymbol('X', 10, 10)
    >>> X[:, 1:5][5:8, :]
    X[5:8, 1:5]
    >>> X[1:9:2, 2:6][1:3, 2]
    X[3:7:2, 4]
    """
    # Compose each axis' slice with the parent slice's triple, then slice
    # the grandparent directly so MatrixSlice never nests.
    row = slice_of_slice(parent.rowslice, rowslice)
    col = slice_of_slice(parent.colslice, colslice)
    return MatrixSlice(parent.parent, row, col)
| {
"content_hash": "6e78f17ba7b59269cc6e51f6afb3cef5",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 78,
"avg_line_length": 27.469026548672566,
"alnum_prop": 0.5554123711340206,
"repo_name": "skirpichev/omg",
"id": "7d21b4e2f8c9816aeb29a75b0794c35a2f0cc479",
"size": "3104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diofant/matrices/expressions/slice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10305079"
}
],
"symlink_target": ""
} |
from json import loads
from qiita_db.user import User
from qiita_db.software import Command, Parameters, DefaultParameters
from qiita_db.processing_job import ProcessingWorkflow, ProcessingJob
from qiita_db.exceptions import QiitaDBUnknownIDError
def list_commands_handler_get_req(artifact_types, exclude_analysis):
    """Retrieves the commands that can process the given artifact types

    Parameters
    ----------
    artifact_types : str
        Comma-separated list of artifact types
    exclude_analysis : bool
        If True, return commands that are not part of the analysis pipeline

    Returns
    -------
    dict of objects
        A dictionary containing the commands information
        {'status': str,
         'message': str,
         'commands': list of dicts of {'id': int,
                                       'command': str,
                                       'output': list of [str, str]}}
    """
    requested_types = artifact_types.split(',')
    matching = Command.get_commands_by_input_type(
        requested_types, exclude_analysis=exclude_analysis)
    commands = [{'id': command.id,
                 'command': command.name,
                 'output': command.outputs}
                for command in matching]
    return {'status': 'success', 'message': '', 'commands': commands}
def list_options_handler_get_req(command_id):
    """Returns the available default parameters set for the given command

    Parameters
    ----------
    command_id : int
        The command id

    Returns
    -------
    dict of objects
        A dictionary containing the commands information
        {'status': str,
         'message': str,
         'options': list of dicts of {'id: str', 'name': str,
                                      'values': dict of {str: str}}}
    """
    cmd = Command(command_id)
    opts = [{'id': pset.id, 'name': pset.name, 'values': pset.values}
            for pset in cmd.default_parameter_sets]
    return {'status': 'success',
            'message': '',
            'options': opts,
            'req_options': cmd.required_parameters,
            'opt_options': cmd.optional_parameters}
def workflow_handler_post_req(user_id, command_id, params):
    """Creates a new workflow in the system

    Parameters
    ----------
    user_id : str
        The user creating the workflow
    command_id : int
        The first command to execute in the workflow
    params : str
        JSON representations of the parameters for the first command of
        the workflow

    Returns
    -------
    dict of objects
        A dictionary containing the commands information
        {'status': str,
         'message': str,
         'workflow_id': int}
    """
    parameters = Parameters.load(Command(command_id), json_str=params)

    status = 'success'
    message = ''
    try:
        wf = ProcessingWorkflow.from_scratch(User(user_id), parameters)
    except Exception as exc:
        wf = None
        wf_id = None
        job_info = None
        status = 'error'
        # BUG FIX: this previously read `str(exc.message)`, a Python 2-ism;
        # exceptions have no `.message` attribute on Python 3, so the error
        # path itself raised AttributeError. str(exc) works on both.
        message = str(exc)

    if wf is not None:
        # this is safe as we are creating the workflow for the first time
        # and there is only one node. Remember networkx doesn't assure order
        # of nodes
        job = wf.graph.nodes()[0]
        inputs = [a.id for a in job.input_artifacts]
        job_cmd = job.command
        wf_id = wf.id
        job_info = {'id': job.id, 'inputs': inputs, 'label': job_cmd.name,
                    'outputs': job_cmd.outputs}

    return {'status': status, 'message': message, 'workflow_id': wf_id,
            'job': job_info}
def workflow_handler_patch_req(req_op, req_path, req_value=None,
                               req_from=None):
    """Patches a workflow

    Parameters
    ----------
    req_op : str
        The operation to perform on the workflow ('add' or 'remove')
    req_path : str
        Path parameter with the workflow to patch
    req_value : str, optional
        The value that needs to be modified
    req_from : str, optional
        The original path of the element

    Returns
    -------
    dict of {str: str}
        A dictionary of the form: {'status': str, 'message': str} in which
        status is the status of the request ('error' or 'success') and message
        is a human readable string with the error message in case that status
        is 'error'.
    """
    if req_op == 'add':
        req_path = [v for v in req_path.split('/') if v]
        if len(req_path) != 1:
            return {'status': 'error',
                    'message': 'Incorrect path parameter'}
        req_path = req_path[0]
        try:
            wf = ProcessingWorkflow(req_path)
        except QiitaDBUnknownIDError:
            return {'status': 'error',
                    'message': 'Workflow %s does not exist' % req_path}
        req_value = loads(req_value)
        dflt_params = DefaultParameters(req_value['dflt_params'])
        req_params = req_value.get('req_params', None)
        opt_params = req_value.get('opt_params', None)
        connections = {ProcessingJob(k): v
                       for k, v in req_value['connections'].items()}
        job = wf.add(dflt_params, connections=connections,
                     req_params=req_params, opt_params=opt_params)
        job_cmd = job.command
        return {'status': 'success',
                'message': '',
                'job': {'id': job.id,
                        # list() so the response stays JSON-serializable on
                        # Python 3, where dict.keys() is a view object
                        'inputs': list(req_value['connections'].keys()),
                        'label': job_cmd.name,
                        'outputs': job_cmd.outputs}}
    elif req_op == 'remove':
        req_path = [v for v in req_path.split('/') if v]
        if len(req_path) != 2:
            return {'status': 'error',
                    'message': 'Incorrect path parameter'}
        wf_id = req_path[0]
        job_id = req_path[1]
        wf = ProcessingWorkflow(wf_id)
        job = ProcessingJob(job_id)
        wf.remove(job, cascade=True)
        return {'status': 'success',
                'message': ''}
    else:
        # BUG FIX: 'remove' is supported (see branch above) but the error
        # message previously advertised only 'add'.
        return {'status': 'error',
                'message': 'Operation "%s" not supported. Current supported '
                           'operations: add, remove' % req_op}
def workflow_run_post_req(workflow_id):
    """Submits the workflow for execution

    Parameters
    ----------
    workflow_id : str
        The workflow id

    Returns
    -------
    dict of {str: str}
        A dictionary of the form: {'status': str, 'message': str} in which
        status is the status of the request ('error' or 'success') and message
        is a human readable string with the error message in case that status
        is 'error'.
    """
    try:
        workflow = ProcessingWorkflow(workflow_id)
    except QiitaDBUnknownIDError:
        return {'status': 'error',
                'message': 'Workflow %s does not exist' % workflow_id}

    workflow.submit()
    return {'status': 'success', 'message': ''}
def job_ajax_get_req(job_id):
    """Returns the job information

    Parameters
    ----------
    job_id : str
        The job id

    Returns
    -------
    dict of objects
        A dictionary containing the job information
        {'status': str,
         'message': str,
         'job_id': str,
         'job_status': str,
         'job_step': str,
         'job_parameters': dict of {str: str}}
    """
    job = ProcessingJob(job_id)
    command = job.command
    software = command.software
    log = job.log
    return {
        'status': 'success',
        'message': '',
        'job_id': job.id,
        'job_status': job.status,
        'job_step': job.step,
        'job_parameters': job.parameters.values,
        # A job only has a log entry when it failed.
        'job_error': log.msg if log is not None else None,
        'command': command.name,
        'command_description': command.description,
        'software': software.name,
        'software_version': software.version,
    }
def job_ajax_patch_req(req_op, req_path, req_value=None, req_from=None):
    """Patches a job

    Parameters
    ----------
    req_op : str
        The operation to perform on the job. Only 'remove' is supported.
    req_path : str
        Path parameter with the job to patch
    req_value : str, optional
        The value that needs to be modified
    req_from : str, optional
        The original path of the element

    Returns
    -------
    dict of {str: str}
        A dictionary of the form: {'status': str, 'message': str} in which
        status is the status of the request ('error' or 'success') and message
        is a human readable string with the error message in case that status
        is 'error'.
    """
    if req_op == 'remove':
        req_path = [v for v in req_path.split('/') if v]
        if len(req_path) != 1:
            return {'status': 'error',
                    'message': 'Incorrect path parameter: missing job id'}
        # We have ensured that we only have one element on req_path
        job_id = req_path[0]
        try:
            # LINT FIX: the exception instance was previously bound to an
            # unused name `e` in this clause.
            job = ProcessingJob(job_id)
        except QiitaDBUnknownIDError:
            return {'status': 'error',
                    'message': 'Incorrect path parameter: '
                               '%s is not a recognized job id' % job_id}
        except Exception as e:
            e = str(e)
            if "invalid input syntax for uuid" in e:
                return {'status': 'error',
                        'message': 'Incorrect path parameter: '
                                   '%s is not a recognized job id' % job_id}
            else:
                return {'status': 'error',
                        'message': 'An error occured while accessing the '
                                   'job: %s' % e}

        job_status = job.status
        if job_status == 'in_construction':
            # A job that is in construction is in a workflow. Use the methods
            # defined for workflows to keep everything consistent. This message
            # should never be presented to the user, but rather to the
            # developer if it makes a mistake during changes in the interface
            return {'status': 'error',
                    'message': "Can't delete job %s. It is 'in_construction' "
                               "status. Please use /study/process/workflow/"
                               % job_id}
        elif job_status == 'error':
            # When the job is in error status, we just need to hide it
            job.hide()
            return {'status': 'success', 'message': ''}
        else:
            # In any other state, we currently fail. Adding the else here
            # because it can be useful to have it for fixing issue #2307
            return {'status': 'error',
                    'message': 'Only jobs in "error" status can be deleted.'}
    else:
        return {'status': 'error',
                'message': 'Operation "%s" not supported. Current supported '
                           'operations: remove' % req_op}
| {
"content_hash": "7afbef8a2d1f81c0768799c72690ff67",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 79,
"avg_line_length": 34.629746835443036,
"alnum_prop": 0.5474732705839349,
"repo_name": "josenavas/QiiTa",
"id": "b082f807cf94790f71ba0729d429b829f7ffddf7",
"size": "11294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiita_pet/handlers/api_proxy/processing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1721"
},
{
"name": "HTML",
"bytes": "559042"
},
{
"name": "JavaScript",
"bytes": "81276"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLpgSQL",
"bytes": "82663"
},
{
"name": "Python",
"bytes": "2294577"
},
{
"name": "SQLPL",
"bytes": "7501"
},
{
"name": "Shell",
"bytes": "3062"
}
],
"symlink_target": ""
} |
import six
import warnings
from datetime import datetime
from .. import errors
from .. import utils
class ContainerApiMixin(object):
@utils.check_resource
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False):
params = {
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
'stderr': stderr and 1 or 0,
'stream': stream and 1 or 0,
}
u = self._url("/containers/{0}/attach", container)
response = self._post(u, params=params, stream=stream)
return self._get_result(container, stream, response)
    @utils.check_resource
    def attach_socket(self, container, params=None, ws=False):
        # Default to streaming both stdout and stderr when the caller does
        # not specify attach parameters.
        if params is None:
            params = {
                'stdout': 1,
                'stderr': 1,
                'stream': 1
            }
        if ws:
            # Use the websocket transport instead of the raw HTTP socket.
            return self._attach_websocket(container, params)
        u = self._url("/containers/{0}/attach", container)
        # Return the raw socket so the caller can read the multiplexed
        # attach stream directly.
        return self._get_raw_response_socket(self.post(
            u, None, params=self._attach_params(params), stream=True))
@utils.check_resource
def commit(self, container, repository=None, tag=None, message=None,
author=None, conf=None):
params = {
'container': container,
'repo': repository,
'tag': tag,
'comment': message,
'author': author
}
u = self._url("/commit")
return self._result(self._post_json(u, data=conf, params=params),
json=True)
def containers(self, quiet=False, all=False, trunc=False, latest=False,
since=None, before=None, limit=-1, size=False,
filters=None):
params = {
'limit': 1 if latest else limit,
'all': 1 if all else 0,
'size': 1 if size else 0,
'trunc_cmd': 1 if trunc else 0,
'since': since,
'before': before
}
if filters:
params['filters'] = utils.convert_filters(filters)
u = self._url("/containers/json")
res = self._result(self._get(u, params=params), True)
if quiet:
return [{'Id': x['Id']} for x in res]
if trunc:
for x in res:
x['Id'] = x['Id'][:12]
return res
    @utils.check_resource
    def copy(self, container, resource):
        # copy() was superseded by get_archive() in remote API 1.20; warn
        # but keep working for backwards compatibility.
        if utils.version_gte(self._version, '1.20'):
            warnings.warn(
                'Client.copy() is deprecated for API version >= 1.20, '
                'please use get_archive() instead',
                DeprecationWarning
            )
        res = self._post_json(
            self._url("/containers/{0}/copy".format(container)),
            data={"Resource": resource},
            stream=True
        )
        self._raise_for_status(res)
        # Return the raw stream so the caller can read the archive lazily.
        return res.raw
def create_container(self, image, command=None, hostname=None, user=None,
                     detach=False, stdin_open=False, tty=False,
                     mem_limit=None, ports=None, environment=None,
                     dns=None, volumes=None, volumes_from=None,
                     network_disabled=False, name=None, entrypoint=None,
                     cpu_shares=None, working_dir=None, domainname=None,
                     memswap_limit=None, cpuset=None, host_config=None,
                     mac_address=None, labels=None, volume_driver=None):
    """Create (but do not start) a container from ``image``.

    Builds a config dict via ``create_container_config`` and POSTs it
    to /containers/create. ``host_config`` requires API >= 1.15.
    """
    # Accept a single volume given as a bare string.
    if isinstance(volumes, six.string_types):
        volumes = [volumes, ]
    if host_config and utils.compare_version('1.15', self._version) < 0:
        raise errors.InvalidVersion(
            'host_config is not supported in API < 1.15'
        )
    # NOTE: arguments are passed positionally; order must match
    # utils.create_container_config's signature.
    config = self.create_container_config(
        image, command, hostname, user, detach, stdin_open,
        tty, mem_limit, ports, environment, dns, volumes, volumes_from,
        network_disabled, entrypoint, cpu_shares, working_dir, domainname,
        memswap_limit, cpuset, host_config, mac_address, labels,
        volume_driver
    )
    return self.create_container_from_config(config, name)
def create_container_config(self, *args, **kwargs):
    """Delegate to ``utils.create_container_config``, pinning this client's API version."""
    return utils.create_container_config(self._version, *args, **kwargs)
def create_container_from_config(self, config, name=None):
    """POST a prepared container config dict to /containers/create."""
    endpoint = self._url("/containers/create")
    response = self._post_json(
        endpoint, data=config, params={'name': name}
    )
    return self._result(response, True)
def create_host_config(self, *args, **kwargs):
    """Build a HostConfig dict pinned to this client's API version.

    Raises:
        TypeError: if ``version`` is passed explicitly; the client's
            own negotiated API version is always used.
    """
    # ``kwargs`` is always a dict with **kwargs, so the previous
    # ``if not kwargs: kwargs = {}`` re-initialisation was dead code
    # and has been removed.
    if 'version' in kwargs:
        raise TypeError(
            "create_host_config() got an unexpected "
            "keyword argument 'version'"
        )
    kwargs['version'] = self._version
    return utils.create_host_config(*args, **kwargs)
@utils.check_resource
def diff(self, container):
    """Return filesystem changes for a container (GET .../changes)."""
    url = self._url("/containers/{0}/changes", container)
    return self._result(self._get(url), True)
@utils.check_resource
def export(self, container):
    """Export a container's filesystem as a raw tar stream."""
    response = self._get(
        self._url("/containers/{0}/export", container), stream=True
    )
    self._raise_for_status(response)
    return response.raw
@utils.check_resource
@utils.minimum_version('1.20')
def get_archive(self, container, path):
    """Fetch ``path`` from a container as a ``(stream, stat)`` pair.

    ``stat`` is decoded from the ``x-docker-container-path-stat``
    response header when present, else None.
    """
    url = self._url('/containers/{0}/archive', container)
    response = self._get(url, params={'path': path}, stream=True)
    self._raise_for_status(response)
    stat_header = response.headers.get('x-docker-container-path-stat')
    stat = utils.decode_json_header(stat_header) if stat_header else None
    return (response.raw, stat)
@utils.check_resource
def inspect_container(self, container):
    """Return low-level information on a container (GET .../json)."""
    url = self._url("/containers/{0}/json", container)
    return self._result(self._get(url), True)
@utils.check_resource
def kill(self, container, signal=None):
    """Kill a container, optionally with a specific signal."""
    url = self._url("/containers/{0}/kill", container)
    query = {'signal': signal} if signal is not None else {}
    response = self._post(url, params=query)
    self._raise_for_status(response)
@utils.check_resource
def logs(self, container, stdout=True, stderr=True, stream=False,
         timestamps=False, tail='all', since=None):
    """Fetch a container's logs.

    On API >= 1.11 this uses /containers/{id}/logs; older daemons
    fall back to ``attach(..., logs=True)``. ``tail`` (API >= 1.13)
    limits output to the last N lines; ``since`` (API >= 1.19)
    accepts a datetime or a positive int timestamp.
    """
    if utils.compare_version('1.11', self._version) >= 0:
        params = {'stderr': stderr and 1 or 0,
                  'stdout': stdout and 1 or 0,
                  'timestamps': timestamps and 1 or 0,
                  'follow': stream and 1 or 0,
                  }
        if utils.compare_version('1.13', self._version) >= 0:
            # Coerce invalid tail values (non-int or negative) to 'all'.
            if tail != 'all' and (not isinstance(tail, int) or tail < 0):
                tail = 'all'
            params['tail'] = tail
        if since is not None:
            if utils.compare_version('1.19', self._version) < 0:
                raise errors.InvalidVersion(
                    'since is not supported in API < 1.19'
                )
            else:
                if isinstance(since, datetime):
                    params['since'] = utils.datetime_to_timestamp(since)
                elif (isinstance(since, int) and since > 0):
                    params['since'] = since
        url = self._url("/containers/{0}/logs", container)
        res = self._get(url, params=params, stream=stream)
        return self._get_result(container, stream, res)
    # API < 1.11 has no logs endpoint: emulate it via attach.
    return self.attach(
        container,
        stdout=stdout,
        stderr=stderr,
        stream=stream,
        logs=True
    )
@utils.check_resource
def pause(self, container):
    """Pause all processes within a container."""
    response = self._post(self._url('/containers/{0}/pause', container))
    self._raise_for_status(response)
@utils.check_resource
def port(self, container, private_port):
    """Look up the public-facing bindings for ``private_port``.

    Returns None when the container's port settings are absent
    (network_mode=host) or the port has no mapping.
    """
    response = self._get(self._url("/containers/{0}/json", container))
    self._raise_for_status(response)
    info = response.json()
    private_port = str(private_port)
    # Port settings is None when the container is running with
    # network_mode=host.
    port_settings = info.get('NetworkSettings', {}).get('Ports')
    if port_settings is None:
        return None
    if '/' in private_port:
        # Caller supplied an explicit protocol suffix.
        return port_settings.get(private_port)
    # No protocol given: try tcp first, then fall back to udp.
    bindings = port_settings.get(private_port + '/tcp')
    if bindings is None:
        bindings = port_settings.get(private_port + '/udp')
    return bindings
@utils.check_resource
@utils.minimum_version('1.20')
def put_archive(self, container, path, data):
    """Upload a tar archive to ``path`` inside a container.

    Returns True when the daemon answers 200 OK.
    """
    url = self._url('/containers/{0}/archive', container)
    response = self._put(url, params={'path': path}, data=data)
    self._raise_for_status(response)
    return response.status_code == 200
@utils.check_resource
def remove_container(self, container, v=False, link=False, force=False):
    """Remove a container; ``v`` also removes its volumes, ``force`` removes a running one."""
    response = self._delete(
        self._url("/containers/{0}", container),
        params={'v': v, 'link': link, 'force': force}
    )
    self._raise_for_status(response)
@utils.minimum_version('1.17')
@utils.check_resource
def rename(self, container, name):
    """Rename a container (API >= 1.17)."""
    response = self._post(
        self._url("/containers/{0}/rename", container),
        params={'name': name}
    )
    self._raise_for_status(response)
@utils.check_resource
def resize(self, container, height, width):
    """Resize the TTY attached to a container."""
    response = self._post(
        self._url("/containers/{0}/resize", container),
        params={'h': height, 'w': width}
    )
    self._raise_for_status(response)
@utils.check_resource
def restart(self, container, timeout=10):
    """Restart a container, giving it ``timeout`` seconds to stop first."""
    response = self._post(
        self._url("/containers/{0}/restart", container),
        params={'t': timeout}
    )
    self._raise_for_status(response)
@utils.check_resource
def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
          publish_all_ports=None, links=None, privileged=None,
          dns=None, dns_search=None, volumes_from=None, network_mode=None,
          restart_policy=None, cap_add=None, cap_drop=None, devices=None,
          extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
          security_opt=None, ulimits=None):
    """Start a created container.

    Passing host-config parameters here is deprecated on API >= 1.15;
    prefer ``host_config`` in ``create_container``. Each parameter's
    minimum API version is validated below before use.
    """
    if utils.compare_version('1.10', self._version) < 0:
        if dns is not None:
            raise errors.InvalidVersion(
                'dns is only supported for API version >= 1.10'
            )
        if volumes_from is not None:
            raise errors.InvalidVersion(
                'volumes_from is only supported for API version >= 1.10'
            )
    if utils.compare_version('1.15', self._version) < 0:
        if security_opt is not None:
            raise errors.InvalidVersion(
                'security_opt is only supported for API version >= 1.15'
            )
        if ipc_mode:
            raise errors.InvalidVersion(
                'ipc_mode is only supported for API version >= 1.15'
            )
    if utils.compare_version('1.17', self._version) < 0:
        if read_only is not None:
            raise errors.InvalidVersion(
                'read_only is only supported for API version >= 1.17'
            )
        if pid_mode is not None:
            raise errors.InvalidVersion(
                'pid_mode is only supported for API version >= 1.17'
            )
    if utils.compare_version('1.18', self._version) < 0:
        if ulimits is not None:
            raise errors.InvalidVersion(
                'ulimits is only supported for API version >= 1.18'
            )
    # Collect all legacy host-config parameters so we can detect
    # whether the caller actually passed any of them.
    start_config_kwargs = dict(
        binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
        publish_all_ports=publish_all_ports, links=links, dns=dns,
        privileged=privileged, dns_search=dns_search, cap_add=cap_add,
        cap_drop=cap_drop, volumes_from=volumes_from, devices=devices,
        network_mode=network_mode, restart_policy=restart_policy,
        extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
        ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits
    )
    start_config = None
    if any(v is not None for v in start_config_kwargs.values()):
        if utils.compare_version('1.15', self._version) > 0:
            warnings.warn(
                'Passing host config parameters in start() is deprecated. '
                'Please use host_config in create_container instead!',
                DeprecationWarning
            )
        start_config = self.create_host_config(**start_config_kwargs)
    url = self._url("/containers/{0}/start", container)
    res = self._post_json(url, data=start_config)
    self._raise_for_status(res)
@utils.minimum_version('1.17')
@utils.check_resource
def stats(self, container, decode=None, stream=True):
    """Stream (or fetch a single snapshot of) a container's resource stats."""
    url = self._url("/containers/{0}/stats", container)
    if not stream:
        # One-shot snapshot: tell the daemon not to stream.
        return self._result(self._get(url, params={'stream': False}),
                            json=True)
    return self._stream_helper(self._get(url, stream=True), decode=decode)
@utils.check_resource
def stop(self, container, timeout=10):
    """Stop a container; the HTTP timeout is padded beyond the kill timeout."""
    response = self._post(
        self._url("/containers/{0}/stop", container),
        params={'t': timeout},
        timeout=(timeout + (self.timeout or 0))
    )
    self._raise_for_status(response)
@utils.check_resource
def top(self, container, ps_args=None):
    """List processes running inside a container (GET .../top)."""
    endpoint = self._url("/containers/{0}/top", container)
    query = {} if ps_args is None else {'ps_args': ps_args}
    return self._result(self._get(endpoint, params=query), True)
@utils.check_resource
def unpause(self, container):
    """Unpause all processes within a container."""
    response = self._post(self._url('/containers/{0}/unpause', container))
    self._raise_for_status(response)
@utils.check_resource
def wait(self, container, timeout=None):
    """Block until a container stops; return its exit code, or -1 if absent."""
    url = self._url("/containers/{0}/wait", container)
    response = self._post(url, timeout=timeout)
    self._raise_for_status(response)
    body = response.json()
    # Responses lacking StatusCode are signalled with -1.
    return body.get('StatusCode', -1)
| {
"content_hash": "fe5991c4114f356e924c3ef7f9202676",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 79,
"avg_line_length": 37.48621553884712,
"alnum_prop": 0.5472354081700875,
"repo_name": "Sorsly/subtle",
"id": "00fa169601b12cbbf0362818fbcf4605747f7141",
"size": "14957",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/third_party/docker/docker/api/container.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
} |
""" PyTorch MBART model."""
import copy
import math
import random
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
Seq2SeqQuestionAnsweringModelOutput,
Seq2SeqSequenceClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_mbart import MBartConfig
logger = logging.get_logger(__name__)

# Checkpoint / config names interpolated into the docstring decorators below.
_CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
_CONFIG_FOR_DOC = "MBartConfig"
_TOKENIZER_FOR_DOC = "MBartTokenizer"

# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]

# SequenceClassification docstring
_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "hf-internal-testing/tiny-random-mbart"
_SEQ_CLASS_EXPECTED_LOSS = 0.69
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'"

# QuestionAnswering docstring
_CHECKPOINT_FOR_QA = "hf-internal-testing/tiny-random-mbart"
_QA_EXPECTED_LOSS = 3.55
_QA_EXPECTED_OUTPUT = "'? Jim Henson was a'"

MBART_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/mbart-large-cc25",
    # See all MBART models at https://huggingface.co/models?filter=mbart
]
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int):
    """
    Shift input ids one token to the right, and wrap the last non pad token (the <LID> token) Note that MBart does not
    have a single `decoder_start_token_id` in contrast to other Bart-like models.
    """
    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    shifted = input_ids.clone()
    # replace possible -100 values in labels by `pad_token_id`
    shifted.masked_fill_(shifted == -100, pad_token_id)
    # index of the last non-pad token (the language-id token) in each row
    last_token_idx = (shifted.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    lang_id_tokens = shifted.gather(1, last_token_idx).squeeze()
    shifted[:, 1:] = shifted[:, :-1].clone()
    shifted[:, 0] = lang_id_tokens
    return shifted
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
# Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->MBart
class MBartLearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        """`input_ids' shape is expected to be [bsz x seqlen]."""
        bsz, seq_len = input_ids.shape[:2]
        start = past_key_values_length
        positions = torch.arange(
            start, start + seq_len, dtype=torch.long, device=self.weight.device
        ).expand(bsz, -1)
        return super().forward(positions + self.offset)
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->MBart
class MBartAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # 1/sqrt(head_dim) scaling applied to the query projection.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # Flatten heads into the batch dimension for a single bmm.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # Additive mask: masked positions carry large negative values.
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned aross GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
class MBartEncoderLayer(nn.Module):
    """Single MBart encoder layer: pre-LayerNorm self-attention followed by a
    pre-LayerNorm feed-forward block, each wrapped in a residual connection."""

    def __init__(self, config: MBartConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = MBartAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_head_mask: torch.Tensor,
        output_attentions: bool = False,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
            attention_mask (`torch.FloatTensor`): attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                *(encoder_attention_heads,)*.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # Self-attention block: LayerNorm is applied BEFORE attention (pre-LN).
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Feed-forward block (pre-LN + residual).
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Clamp fp16 activations when they contain inf/nan to keep values finite.
        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
class MBartDecoderLayer(nn.Module):
    """Single MBart decoder layer: pre-LayerNorm self-attention, optional
    cross-attention over encoder states, and a feed-forward block, each with
    a residual connection. Caches key/value states when ``use_cache`` is set."""

    def __init__(self, config: MBartConfig):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = MBartAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = MBartAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
            attention_mask (`torch.FloatTensor`): attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape *(seq_len, batch, embed_dim)*
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                *(encoder_attention_heads,)*.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size *(decoder_attention_heads,)*.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
# Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->MBart
class MBartClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(
        self,
        input_dim: int,
        inner_dim: int,
        num_classes: int,
        pooler_dropout: float,
    ):
        super().__init__()
        self.dense = nn.Linear(input_dim, inner_dim)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # dropout -> dense -> tanh -> dropout -> projection to num_classes
        pooled = self.dropout(hidden_states)
        pooled = torch.tanh(self.dense(pooled))
        pooled = self.dropout(pooled)
        return self.out_proj(pooled)
class MBartPreTrainedModel(PreTrainedModel):
    """Base class handling weight initialization and gradient checkpointing
    for all MBart models."""

    config_class = MBartConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Initialize Linear/Embedding weights from N(0, config.init_std);
        # zero biases and the padding embedding row.
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Bug fix: the original tuple listed MBartDecoder twice, so gradient
        # checkpointing was never enabled on the encoder stack.
        if isinstance(module, (MBartDecoder, MBartEncoder)):
            module.gradient_checkpointing = value

    @property
    def dummy_inputs(self):
        # Small fixed batch (with one padded position) used for tracing and
        # serialization smoke tests.
        pad_token = self.config.pad_token_id
        input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
        dummy_inputs = {
            "attention_mask": input_ids.ne(pad_token),
            "input_ids": input_ids,
        }
        return dummy_inputs
MBART_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`MBartConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MBART_GENERATION_EXAMPLE = r"""
Translation example:
```python
>>> from transformers import MBartTokenizer, MBartForConditionalGeneration
>>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro")
>>> example_english_phrase = "42 is the answer"
>>> inputs = tokenizer(example_english_phrase, return_tensors="pt")
>>> # Translate
>>> generated_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
'42 este răspuns'
```
Mask filling example:
```python
>>> from transformers import MBartTokenizer, MBartForConditionalGeneration
>>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25")
>>> # de_DE is the language symbol id <LID> for German
>>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
>>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt")["input_ids"]
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
['nett', 'sehr', 'ganz', 'nicht', 'so']
```
"""
MBART_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
MBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
varies according to source and target language, *e.g.* 25004 for *en_XX*, and 25003 for *de_DE*. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape
`(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you
can choose to directly pass an embedded representation. This is useful if you want more control over how to
convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class MBartEncoder(MBartPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`MBartEncoderLayer`].
    Args:
        config: MBartConfig
        embed_tokens (nn.Embedding): output embedding
    """
    def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        # Token embeddings are optionally scaled by sqrt(d_model) (Transformer convention).
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        if embed_tokens is not None:
            # Share the caller-provided embedding matrix (e.g. tied with the decoder).
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
        self.embed_positions = MBartLearnedPositionalEmbedding(
            config.max_position_embeddings,
            embed_dim,
        )
        self.layers = nn.ModuleList([MBartEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(embed_dim)
        # Final LayerNorm applied to the output of the last encoder layer (see end of `forward`).
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def _backward_compatibility_gradient_checkpointing(self):
        # Override to not delete the attribute from the config
        if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
            self.gradient_checkpointing_enable()
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Fall back to config defaults for any output flag the caller left as None.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input = input_ids
            input_shape = input.shape
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            # Slice off the hidden dim so `input` carries the (batch, seq) shape for the
            # positional embedding — presumably inputs_embeds is (batch, seq, hidden); TODO confirm.
            input = inputs_embeds[:, :, -1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        embed_pos = self.embed_positions(input)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                if self.gradient_checkpointing and self.training:
                    # Closure binds `output_attentions` so checkpoint() only passes tensors.
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Final LayerNorm over the last layer's output.
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            # Legacy tuple return: drop any None members.
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class MBartDecoder(MBartPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MBartDecoderLayer`]
    Args:
        config: MBartConfig
        embed_tokens (nn.Embedding): output embedding
    """
    def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        # Token embeddings are optionally scaled by sqrt(d_model) (Transformer convention).
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        if embed_tokens is not None:
            # Share the caller-provided embedding matrix (e.g. tied with the encoder).
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        self.embed_positions = MBartLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
        )
        self.layers = nn.ModuleList([MBartDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(config.d_model)
        # Final LayerNorm applied to the output of the last decoder layer (see end of `forward`).
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        """Return the decoder's token embedding module."""
        return self.embed_tokens
    def set_input_embeddings(self, value):
        """Replace the decoder's token embedding module."""
        self.embed_tokens = value
    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
            ).to(inputs_embeds.device)
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            # Adding the (additive, -inf-based) masks combines causal and padding masking.
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )
        return combined_attention_mask
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of
                shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
                `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
                control over how to convert `input_ids` indices into associated vectors than the model's internal
                embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Fall back to config defaults for any flag the caller left as None.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input = input_ids
            input_shape = input.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            # Slice off the hidden dim so `input` carries the (batch, seq) shape for the
            # positional embedding — presumably inputs_embeds is (batch, seq, hidden); TODO confirm.
            input = inputs_embeds[:, :, -1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )
        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
        # embed positions
        positions = self.embed_positions(input, past_key_values_length)
        hidden_states = inputs_embeds + positions
        hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None
        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        # Bug fix: was `head_mask.size()[0]`, which raises AttributeError when
                        # `head_mask` is None and only `cross_attn_head_mask` is mis-sized.
                        f" {attn_mask.size()[0]}."
                    )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                continue
            past_key_value = past_key_values[idx] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
                    )
                    use_cache = False
                # Closure binds the non-tensor flags so checkpoint() only passes tensors.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, use_cache)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                # Cache tuple sits at index 3 when attentions are also returned, else index 1.
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        # Final LayerNorm over the last layer's output.
        hidden_states = self.layer_norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            # Legacy tuple return: drop any None members.
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@add_start_docstrings(
    "The bare MBART Model outputting raw hidden-states without any specific head on top.",
    MBART_START_DOCSTRING,
)
class MBartModel(MBartPreTrainedModel):
    # Encoder and decoder share `self.shared`, so these checkpoint keys may be absent.
    _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
    def __init__(self, config: MBartConfig):
        super().__init__(config)
        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        # One embedding matrix shared between encoder and decoder.
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
        self.encoder = MBartEncoder(config, self.shared)
        self.decoder = MBartDecoder(config, self.shared)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        """Return the shared token embedding module."""
        return self.shared
    def set_input_embeddings(self, value):
        """Replace the shared embedding and re-point both encoder and decoder at it."""
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared
    def get_encoder(self):
        """Return the encoder submodule."""
        return self.encoder
    def get_decoder(self):
        """Return the decoder submodule."""
        return self.decoder
    @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Seq2SeqModelOutput, Tuple[torch.FloatTensor]]:
        """Run encoder (unless `encoder_outputs` is given) then decoder; see MBART_INPUTS_DOCSTRING."""
        # Fall back to config defaults for any flag the caller left as None.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # different to other models, MBart automatically creates decoder_input_ids from
        # input_ids if no decoder_input_ids are provided
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            # Legacy tuple return: decoder outputs followed by encoder outputs.
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The MBART Model with a language modeling head. Can be used for summarization.", MBART_START_DOCSTRING
)
class MBartForConditionalGeneration(MBartPreTrainedModel):
    # Prefix under which the seq2seq backbone's weights are stored in checkpoints.
    base_model_prefix = "model"
    # Checkpoint keys that may legitimately be absent on load (tied weights and
    # buffers recreated in __init__) and should not trigger missing-key warnings.
    _keys_to_ignore_on_load_missing = [
        r"final_logits_bias",
        r"encoder.version",
        r"decoder.version",
        r"lm_head.weight",
        "encoder.embed_tokens.weight",
        "decoder.embed_tokens.weight",
    ]
    def __init__(self, config: MBartConfig):
        """Initializes the seq2seq backbone, the final logits bias buffer and the LM head."""
        super().__init__(config)
        self.model = MBartModel(config)
        # Additive per-token bias applied to the LM logits. Registered as a
        # buffer (not a parameter): saved in the state dict and moved across
        # devices with the module, but never trained.
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        # Projects decoder hidden states (d_model) to vocabulary logits.
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    def get_encoder(self):
        """Returns the encoder of the underlying MBart model."""
        return self.model.get_encoder()
    def get_decoder(self):
        """Returns the decoder of the underlying MBart model."""
        return self.model.get_decoder()
    def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        """Resizes the token embeddings and keeps `final_logits_bias` the same length."""
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        self._resize_final_logits_bias(new_num_tokens)
        return new_embeddings
    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        """Truncates or zero-pads the `final_logits_bias` buffer to `new_num_tokens` entries."""
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            # Newly added vocabulary entries start with a zero bias.
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer("final_logits_bias", new_bias)
    def get_output_embeddings(self):
        """Returns the language modeling head."""
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        """Replaces the language modeling head."""
        self.lm_head = new_embeddings
    @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(MBART_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Caching decoder states is incompatible with computing a training loss.
            if use_cache:
                logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
            use_cache = False
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                # Teacher forcing: derive decoder inputs from the labels when the
                # caller did not supply them explicitly.
                decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Vocabulary logits: LM head projection plus the non-trainable bias buffer.
        lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            # Tuple output: (loss?, logits, *backbone outputs).
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs
    ):
        """Assembles the model inputs for one decoding step during generation."""
        # cut decoder_input_ids if past is used
        if past is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]
        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }
    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        """Shifts `labels` one position to the right to build decoder input ids."""
        return shift_tokens_right(labels, self.config.pad_token_id)
    @staticmethod
    def _reorder_cache(past, beam_idx):
        """Reorders cached key/value states to match the beam search reordering."""
        reordered_past = ()
        for layer_past in past:
            # cached cross_attention states don't have to be reordered -> they are always the same
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past
@add_start_docstrings(
    """
    MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE
    tasks.
    """,
    MBART_START_DOCSTRING,
)
class MBartForSequenceClassification(MBartPreTrainedModel):
    # Embedding weights are shared with the backbone and may be absent from checkpoints.
    _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
    def __init__(self, config: MBartConfig, **kwargs):
        """Initializes the seq2seq backbone and the sequence classification head."""
        super().__init__(config, **kwargs)
        self.model = MBartModel(config)
        self.classification_head = MBartClassificationHead(
            config.d_model,
            config.d_model,
            config.num_labels,
            config.classifier_dropout,
        )
        # The head is created after the backbone's init, so initialize its
        # sub-layers explicitly with the model's weight initialization scheme.
        self.model._init_weights(self.classification_head.dense)
        self.model._init_weights(self.classification_head.out_proj)
    @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
        output_type=Seq2SeqSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    # Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Caching decoder states serves no purpose when computing a loss.
            use_cache = False
        if input_ids is None and inputs_embeds is not None:
            # The pooling below needs token ids to locate the <eos> tokens, so
            # embeddings-only input cannot be supported.
            raise NotImplementedError(
                f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
            )
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]  # last hidden state
        # Pool each sequence by taking the decoder state at its final <eos> token.
        eos_mask = input_ids.eq(self.config.eos_token_id)
        if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
            # The masked-select/view below only works when every example
            # contributes the same number of <eos> positions.
            raise ValueError("All examples must have the same number of <eos> tokens.")
        sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
            :, -1, :
        ]
        logits = self.classification_head(sentence_representation)
        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and label dtype, then
            # cache it on the config so later calls take the same branch.
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.config.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            # Tuple output: (loss?, logits, *backbone outputs).
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return Seq2SeqSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
@add_start_docstrings(
    """
    MBART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    MBART_START_DOCSTRING,
)
class MBartForQuestionAnswering(MBartPreTrainedModel):
    # Embedding weights are shared with the backbone and may be absent from checkpoints.
    _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
    def __init__(self, config):
        """Initializes the seq2seq backbone and the span classification head."""
        super().__init__(config)
        # Extractive QA always predicts exactly two values per token: the span
        # start score and the span end score.
        config.num_labels = 2
        self.num_labels = config.num_labels
        self.model = MBartModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # The head is created after the backbone's init, so initialize it
        # explicitly with the model's weight initialization scheme.
        self.model._init_weights(self.qa_outputs)
    @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_QA,
        output_type=Seq2SeqQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_loss=_QA_EXPECTED_LOSS,
        expected_output=_QA_EXPECTED_OUTPUT,
    )
    # Copied from transformers.models.bart.modeling_bart.BartForQuestionAnswering.forward
    def forward(
        self,
        input_ids: torch.Tensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[List[torch.FloatTensor]] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if start_positions is not None and end_positions is not None:
            # Caching decoder states serves no purpose when computing a loss.
            use_cache = False
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Project each token's hidden state to two scores, then split the last
        # dimension into per-token start and end logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Total loss is the mean of the start- and end-position losses.
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            # Tuple output: (loss?, start_logits, end_logits, *backbone outputs).
            output = (
                start_logits,
                end_logits,
            ) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output
        return Seq2SeqQuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->MBart
class MBartDecoderWrapper(MBartPreTrainedModel):
    """
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    """

    def __init__(self, config):
        super().__init__(config)
        # Single submodule: a bare MBart decoder stack. The wrapper exists only
        # so checkpoint weight keys line up when loading pretrained models.
        self.decoder = MBartDecoder(config)

    def forward(self, *args, **kwargs):
        # Pure pass-through to the wrapped decoder; all arguments forwarded unchanged.
        decoder_outputs = self.decoder(*args, **kwargs)
        return decoder_outputs
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->MBart, facebook/bart-base->facebook/mbart-large-cc25
class MBartForCausalLM(MBartPreTrainedModel):
    # lm_head may be tied/recreated and can be absent from checkpoints.
    _keys_to_ignore_on_load_missing = ["lm_head.weight"]
    def __init__(self, config):
        """Initializes a decoder-only MBart model with a language modeling head."""
        # Work on a copy so the caller's config object is not mutated.
        config = copy.deepcopy(config)
        config.is_decoder = True
        config.is_encoder_decoder = False
        super().__init__(config)
        self.model = MBartDecoderWrapper(config)
        # Projects decoder hidden states to vocabulary logits.
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        """Returns the decoder's token embeddings."""
        return self.model.decoder.embed_tokens
    def set_input_embeddings(self, value):
        """Replaces the decoder's token embeddings."""
        self.model.decoder.embed_tokens = value
    def get_output_embeddings(self):
        """Returns the language modeling head."""
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        """Replaces the language modeling head."""
        self.lm_head = new_embeddings
    def set_decoder(self, decoder):
        """Replaces the wrapped decoder."""
        self.model.decoder = decoder
    def get_decoder(self):
        """Returns the wrapped decoder."""
        return self.model.decoder
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                if the model is configured as a decoder.
            encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        Returns:
        Example:
        ```python
        >>> from transformers import MBartTokenizer, MBartForCausalLM
        >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25")
        >>> model = MBartForCausalLM.from_pretrained("facebook/mbart-large-cc25", add_cross_attention=False)
        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
        >>> list(logits.shape) == expected_shape
        True
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = self.lm_head(outputs[0])
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            # Tuple output: (loss?, logits, *decoder outputs).
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
        """Assembles the model inputs for one decoding step during generation."""
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_ids.shape)
        # When a cache is present, only the last token needs to be fed.
        if past:
            input_ids = input_ids[:, -1:]
        # first step, decoder_cached_states are empty
        return {
            "input_ids": input_ids,  # encoder_outputs is defined. input_ids not needed
            "attention_mask": attention_mask,
            "past_key_values": past,
            "use_cache": use_cache,
        }
    @staticmethod
    def _reorder_cache(past, beam_idx):
        """Reorders cached key/value states to match the beam search reordering."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
| {
"content_hash": "d811c23351f44bee6f61facaa038df3d",
"timestamp": "",
"source": "github",
"line_count": 1894,
"max_line_length": 150,
"avg_line_length": 46.83368532206969,
"alnum_prop": 0.6292120897827582,
"repo_name": "huggingface/transformers",
"id": "5e4b2cfc3a62564402a7b3fca36d17a34fd79047",
"size": "89365",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/transformers/models/mbart/modeling_mbart.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
} |
"""The event extraction worker."""
import copy
import os
import re
import time
from dfvfs.analyzer import analyzer as dfvfs_analyzer
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.analyzers import hashing_analyzer
from plaso.analyzers import manager as analyzers_manager
from plaso.containers import event_sources
from plaso.containers import events
from plaso.engine import extractors
from plaso.engine import logger
from plaso.lib import definitions
from plaso.lib import errors
class EventExtractionWorker(object):
"""Event extraction worker.
The event extraction worker determines which parsers are suitable for parsing
a particular file entry or data stream. The parsers extract relevant data from
file system and or file content data. All extracted data is passed to the
parser mediator for further processing.
Attributes:
last_activity_timestamp (int): timestamp received that indicates the last
time activity was observed.
processing_status (str): human readable status indication such as:
'Extracting', 'Hashing'.
"""
# NTFS metadata files that need special handling.
_METADATA_FILE_LOCATIONS_NTFS = frozenset([
'\\$AttrDef',
'\\$BadClus',
'\\$Bitmap',
'\\$Boot',
'\\$Extend\\$ObjId',
'\\$Extend\\$Quota',
'\\$Extend\\$Reparse',
'\\$Extend\\$RmMetadata\\$Repair',
'\\$Extend\\$RmMetadata\\$TxfLog\\$Tops',
'\\$Extend\\$UsnJrnl',
'\\$LogFile',
'\\$MFT',
'\\$MFTMirr',
'\\$Secure',
'\\$UpCase',
'\\$Volume',
])
# TSK metadata files that need special handling.
_METADATA_FILE_LOCATIONS_TSK = frozenset([
# NTFS
'/$AttrDef',
'/$BadClus',
'/$Bitmap',
'/$Boot',
'/$Extend/$ObjId',
'/$Extend/$Quota',
'/$Extend/$Reparse',
'/$Extend/$RmMetadata/$Repair',
'/$Extend/$RmMetadata/$TxfLog/$Tops',
'/$Extend/$UsnJrnl',
'/$LogFile',
'/$MFT',
'/$MFTMirr',
'/$Secure',
'/$UpCase',
'/$Volume',
# HFS+/HFSX
'/$ExtentsFile',
'/$CatalogFile',
'/$BadBlockFile',
'/$AllocationFile',
'/$AttributesFile',
])
# TODO: make this filtering solution more generic. Also see:
# https://github.com/log2timeline/plaso/issues/467
_CHROME_CACHE_DATA_FILE_RE = re.compile(r'^[fF]_[0-9a-fA-F]{6}$')
_FIREFOX_CACHE_DATA_FILE_RE = re.compile(r'^[0-9a-fA-F]{5}[dm][0-9]{2}$')
_FIREFOX_CACHE2_DATA_FILE_RE = re.compile(r'^[0-9a-fA-F]{40}$')
_TYPES_WITH_ROOT_METADATA = frozenset([
dfvfs_definitions.TYPE_INDICATOR_GZIP])
  def __init__(self, parser_filter_expression=None):
    """Initializes an event extraction worker.
    Args:
      parser_filter_expression (Optional[str]): parser filter expression,
          where None represents all parsers and plugins.
          A parser filter expression is a comma separated value string that
          denotes which parsers and plugins should be used. See
          filters/parser_filter.py for details of the expression syntax.
          This function does not support presets, and requires a parser
          filter expression where presets have been expanded.
    """
    super(EventExtractionWorker, self).__init__()
    # When set to True, processing stops as soon as possible.
    self._abort = False
    # Analyzers (such as hashers) that are run over data stream content.
    self._analyzers = []
    self._analyzers_profiler = None
    self._event_extractor = extractors.EventExtractor(
        parser_filter_expression=parser_filter_expression)
    # Maximum file size, in bytes, that hashers should process, or None.
    self._hasher_file_size_limit = None
    self._path_spec_extractor = extractors.PathSpecExtractor()
    self._process_archives = None
    self._process_compressed_streams = None
    self._processing_profiler = None
    # Timestamp of the most recently observed activity (seconds since epoch).
    self.last_activity_timestamp = 0.0
    self.processing_status = definitions.STATUS_INDICATOR_IDLE
  def _AnalyzeDataStream(
      self, file_entry, data_stream_name, display_name, event_data_stream):
    """Analyzes the contents of a specific data stream of a file entry.
    The results of the analyzers are set in the event data stream as
    attributes that are added to produced event objects. Note that some
    file systems allow directories to have data streams, such as NTFS.
    Args:
      file_entry (dfvfs.FileEntry): file entry whose data stream is to be
          analyzed.
      data_stream_name (str): name of the data stream.
      display_name (str): human readable representation of the file entry
          currently being analyzed.
      event_data_stream (EventDataStream): event data stream attribute
          container.
    Raises:
      RuntimeError: if the file-like object cannot be retrieved from
          the file entry.
    """
    logger.debug('[AnalyzeDataStream] analyzing file: {0:s}'.format(
        display_name))
    if self._processing_profiler:
      self._processing_profiler.StartTiming('analyzing')
    # Nested try/finally blocks guarantee that profiling is stopped and the
    # file-like object is closed even when analysis raises.
    try:
      file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
      if not file_object:
        raise RuntimeError((
            'Unable to retrieve file-like object for file entry: '
            '{0:s}.').format(display_name))
      try:
        self._AnalyzeFileObject(file_object, display_name, event_data_stream)
      finally:
        file_object.close()
    finally:
      if self._processing_profiler:
        self._processing_profiler.StopTiming('analyzing')
    logger.debug('[AnalyzeDataStream] completed analyzing file: {0:s}'.format(
        display_name))
  def _AnalyzeFileObject(self, file_object, display_name, event_data_stream):
    """Processes a file-like object with analyzers.
    Args:
      file_object (dfvfs.FileIO): file-like object to process.
      display_name (str): human readable representation of the file entry
          currently being analyzed.
      event_data_stream (EventDataStream): event data stream attribute
          container.
    """
    # Read in chunks as large as the largest analyzer size limit so a single
    # pass over the data can feed every analyzer.
    # NOTE(review): assumes self._analyzers is non-empty, otherwise max()
    # raises ValueError - presumably callers only invoke this method when
    # analyzers are configured; confirm against callers.
    maximum_read_size = max([
        analyzer_object.SIZE_LIMIT for analyzer_object in self._analyzers])
    hashers_only = True
    for analyzer_object in self._analyzers:
      if not isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer):
        hashers_only = False
        break
    file_size = file_object.get_size()
    # Short-circuit: when only hashers are configured and the file exceeds
    # the hasher size limit there is nothing left to do.
    if (hashers_only and self._hasher_file_size_limit and
        file_size > self._hasher_file_size_limit):
      return
    file_object.seek(0, os.SEEK_SET)
    data = file_object.read(maximum_read_size)
    while data:
      if self._abort:
        break
      for analyzer_object in self._analyzers:
        if self._abort:
          break
        # Skip non-incremental analyzers that cannot handle the full file size.
        if (not analyzer_object.INCREMENTAL_ANALYZER and
            file_size > analyzer_object.SIZE_LIMIT):
          continue
        # Skip hashers for files larger than the hasher size limit.
        if (isinstance(analyzer_object, hashing_analyzer.HashingAnalyzer) and
            self._hasher_file_size_limit and
            file_size > self._hasher_file_size_limit):
          continue
        self.processing_status = analyzer_object.PROCESSING_STATUS_HINT
        if self._analyzers_profiler:
          self._analyzers_profiler.StartTiming(analyzer_object.NAME)
        try:
          analyzer_object.Analyze(data)
        finally:
          if self._analyzers_profiler:
            self._analyzers_profiler.StopTiming(analyzer_object.NAME)
        # Record activity after each analyzer pass.
        self.last_activity_timestamp = time.time()
      data = file_object.read(maximum_read_size)
    # Copy the analyzer results into the event data stream as attributes and
    # reset every analyzer for the next file.
    for analyzer_object in self._analyzers:
      for result in analyzer_object.GetResults():
        logger.debug((
            '[AnalyzeFileObject] attribute {0:s}:{1:s} calculated for '
            'file: {2:s}.').format(
                result.attribute_name, result.attribute_value, display_name))
        setattr(event_data_stream, result.attribute_name,
                result.attribute_value)
      analyzer_object.Reset()
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
def _CanSkipDataStream(self, file_entry, data_stream):
"""Determines if analysis and extraction of a data stream can be skipped.
This is used to prevent Plaso trying to run analyzers or extract content
from a pipe or socket it encounters while processing a mounted filesystem.
Args:
file_entry (dfvfs.FileEntry): file entry to consider for skipping.
data_stream (dfvfs.DataStream): data stream to consider for skipping.
Returns:
bool: True if the data stream can be skipped.
"""
if file_entry.IsFile():
return False
if data_stream.IsDefault():
return True
return False
  def _CanSkipContentExtraction(self, file_entry):
    """Determines if content extraction of a file entry can be skipped.
    Args:
      file_entry (dfvfs.FileEntry): file entry of which to determine content
          extraction can be skipped.
    Returns:
      bool: True if content extraction can be skipped.
    """
    # TODO: make this filtering solution more generic. Also see:
    # https://github.com/log2timeline/plaso/issues/467
    location = getattr(file_entry.path_spec, 'location', None)
    if not location:
      return False
    # Never skip named (non-default) data streams.
    data_stream_name = getattr(file_entry.path_spec, 'data_stream', None)
    if data_stream_name:
      return False
    file_system = file_entry.GetFileSystem()
    path_segments = file_system.SplitPath(location)
    if not path_segments:
      return False
    if self._CHROME_CACHE_DATA_FILE_RE.match(path_segments[-1]):
      # Chrome Cache data file (f_XXXXXX): skip it when an "index" file exists
      # in the same directory - presumably the cache is then parsed via the
      # index instead.
      location_segments = path_segments[:-1]
      location_segments.append('index')
      location = file_system.JoinPath(location_segments)
      index_path_spec = path_spec_factory.Factory.NewPathSpec(
          file_entry.type_indicator, location=location,
          parent=file_entry.path_spec.parent)
      if file_system.FileEntryExistsByPathSpec(index_path_spec):
        # TODO: improve this check if "index" is a Chrome Cache index file.
        return True
    elif self._FIREFOX_CACHE_DATA_FILE_RE.match(path_segments[-1]):
      # Firefox Cache version 1 data file: look for the "_CACHE_MAP_" file
      # three directory levels above the data file ([:-4] strips the file
      # name plus three intermediate directories).
      location_segments = path_segments[:-4]
      location_segments.append('_CACHE_MAP_')
      location = file_system.JoinPath(location_segments)
      cache_map_path_spec = path_spec_factory.Factory.NewPathSpec(
          file_entry.type_indicator, location=location,
          parent=file_entry.path_spec.parent)
      if file_system.FileEntryExistsByPathSpec(cache_map_path_spec):
        # TODO: improve this check if "_CACHE_MAP_" is a Firefox Cache
        # version 1 cache map file.
        return True
    elif self._FIREFOX_CACHE2_DATA_FILE_RE.match(path_segments[-1]):
      # Firefox Cache version 2 data file: look for the "index" file one
      # directory level above the data file.
      location_segments = path_segments[:-2]
      location_segments.append('index')
      location = file_system.JoinPath(location_segments)
      index_path_spec = path_spec_factory.Factory.NewPathSpec(
          file_entry.type_indicator, location=location,
          parent=file_entry.path_spec.parent)
      if file_system.FileEntryExistsByPathSpec(index_path_spec):
        # TODO: improve this check if "index" is a Firefox Cache version 2
        # index file.
        return True
    elif len(path_segments) == 1 and path_segments[0].lower() in (
        'hiberfil.sys', 'pagefile.sys', 'swapfile.sys'):
      # Skip content extraction of Windows hibernation and paging files in
      # the file system root.
      return True
    elif (len(path_segments) == 4 and
          path_segments[0].lower() == 'private' and
          path_segments[1].lower() == 'var' and
          path_segments[2].lower() == 'vm' and
          path_segments[3].lower() in (
              'sleepimage', 'swapfile0', 'swapfile1')):
      # Skip content extraction of MacOS sleep image and swap files.
      return True
    return False
def _ExtractContentFromDataStream(
self, mediator, file_entry, data_stream_name):
"""Extracts content from a data stream.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract its content.
data_stream_name (str): name of the data stream whose content is to be
extracted.
"""
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
if self._processing_profiler:
self._processing_profiler.StartTiming('extracting')
self._event_extractor.ParseDataStream(
mediator, file_entry, data_stream_name)
if self._processing_profiler:
self._processing_profiler.StopTiming('extracting')
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
self.last_activity_timestamp = time.time()
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):
"""Extracts metadata from a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract metadata from.
data_stream (dfvfs.DataStream): data stream or None if the file entry
has no data stream.
"""
# Do not extract metadata from the root file entry when it is virtual.
if file_entry.IsRoot() and file_entry.type_indicator not in (
self._TYPES_WITH_ROOT_METADATA):
return
# We always want to extract the file entry metadata but we only want
# to parse it once per file entry, so we only use it if we are
# processing the default data stream of regular files.
if data_stream and not data_stream.IsDefault():
return
display_name = mediator.GetDisplayName()
logger.debug(
'[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format(
display_name))
self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
if self._processing_profiler:
self._processing_profiler.StartTiming('extracting')
self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
if self._processing_profiler:
self._processing_profiler.StopTiming('extracting')
self.processing_status = definitions.STATUS_INDICATOR_RUNNING
def _GetArchiveTypes(self, mediator, path_spec):
"""Determines if a data stream contains an archive such as: TAR or ZIP.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
path_spec (dfvfs.PathSpec): path specification of the data stream.
Returns:
list[str]: dfVFS archive type indicators found in the data stream.
"""
try:
type_indicators = dfvfs_analyzer.Analyzer.GetArchiveTypeIndicators(
path_spec, resolver_context=mediator.resolver_context)
except IOError as exception:
type_indicators = []
warning_message = (
'analyzer failed to determine archive type indicators '
'with error: {0!s}').format(exception)
mediator.ProduceExtractionWarning(warning_message, path_spec=path_spec)
return type_indicators
def _GetCompressedStreamTypes(self, mediator, path_spec):
"""Determines if a data stream contains a compressed stream such as: gzip.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
path_spec (dfvfs.PathSpec): path specification of the data stream.
Returns:
list[str]: dfVFS compressed stream type indicators found in
the data stream.
"""
try:
type_indicators = (
dfvfs_analyzer.Analyzer.GetCompressedStreamTypeIndicators(
path_spec, resolver_context=mediator.resolver_context))
except IOError as exception:
type_indicators = []
warning_message = (
'analyzer failed to determine compressed stream type indicators '
'with error: {0!s}').format(exception)
mediator.ProduceExtractionWarning(warning_message, path_spec=path_spec)
return type_indicators
def _IsMetadataFile(self, file_entry):
"""Determines if the file entry is a metadata file.
Args:
file_entry (dfvfs.FileEntry): a file entry object.
Returns:
bool: True if the file entry is a metadata file.
"""
if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_NTFS and
file_entry.path_spec.location in self._METADATA_FILE_LOCATIONS_NTFS):
return True
if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and
file_entry.path_spec.location in self._METADATA_FILE_LOCATIONS_TSK):
return True
return False
def _ProcessArchiveTypes(self, mediator, path_spec, type_indicators):
"""Processes a data stream containing archive types such as: TAR or ZIP.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
path_spec (dfvfs.PathSpec): path specification.
type_indicators(list[str]): dfVFS archive type indicators found in
the data stream.
"""
number_of_type_indicators = len(type_indicators)
if number_of_type_indicators == 0:
return
self.processing_status = definitions.STATUS_INDICATOR_COLLECTING
if number_of_type_indicators > 1:
display_name = mediator.GetDisplayName()
logger.debug((
'Found multiple format type indicators: {0:s} for '
'archive file: {1:s}').format(type_indicators, display_name))
for type_indicator in type_indicators:
if type_indicator == dfvfs_definitions.TYPE_INDICATOR_TAR:
archive_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TAR, location='/',
parent=path_spec)
elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_ZIP:
archive_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_ZIP, location='/',
parent=path_spec)
else:
archive_path_spec = None
warning_message = (
'unsupported archive format type indicator: {0:s}').format(
type_indicator)
mediator.ProduceExtractionWarning(
warning_message, path_spec=path_spec)
if archive_path_spec:
try:
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
[archive_path_spec], resolver_context=mediator.resolver_context)
for generated_path_spec in path_spec_generator:
if self._abort:
break
event_source = event_sources.FileEntryEventSource(
path_spec=generated_path_spec)
event_source.file_entry_type = (
dfvfs_definitions.FILE_ENTRY_TYPE_FILE)
mediator.ProduceEventSource(event_source)
self.last_activity_timestamp = time.time()
except (IOError, errors.MaximumRecursionDepth) as exception:
warning_message = (
'unable to process archive file with error: {0!s}').format(
exception)
mediator.ProduceExtractionWarning(
warning_message, path_spec=generated_path_spec)
def _ProcessCompressedStreamTypes(self, mediator, path_spec, type_indicators):
"""Processes a data stream containing compressed stream types such as: bz2.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
path_spec (dfvfs.PathSpec): path specification.
type_indicators(list[str]): dfVFS archive type indicators found in
the data stream.
"""
number_of_type_indicators = len(type_indicators)
if number_of_type_indicators == 0:
return
self.processing_status = definitions.STATUS_INDICATOR_COLLECTING
if number_of_type_indicators > 1:
display_name = mediator.GetDisplayName()
logger.debug((
'Found multiple format type indicators: {0:s} for '
'compressed stream file: {1:s}').format(
type_indicators, display_name))
for type_indicator in type_indicators:
if type_indicator == dfvfs_definitions.TYPE_INDICATOR_BZIP2:
compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM,
compression_method=dfvfs_definitions.COMPRESSION_METHOD_BZIP2,
parent=path_spec)
elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_GZIP:
compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=path_spec)
elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_XZ:
compressed_stream_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM,
compression_method=dfvfs_definitions.COMPRESSION_METHOD_XZ,
parent=path_spec)
else:
compressed_stream_path_spec = None
warning_message = (
'unsupported compressed stream format type indicators: '
'{0:s}').format(type_indicator)
mediator.ProduceExtractionWarning(
warning_message, path_spec=path_spec)
if compressed_stream_path_spec:
event_source = event_sources.FileEntryEventSource(
path_spec=compressed_stream_path_spec)
event_source.file_entry_type = dfvfs_definitions.FILE_ENTRY_TYPE_FILE
mediator.ProduceEventSource(event_source)
self.last_activity_timestamp = time.time()
  def _ProcessDirectory(self, mediator, file_entry):
    """Processes a directory file entry.

    Produces a file entry event source for every allocated sub file entry
    so they can be picked up for processing later.

    Args:
      mediator (ParserMediator): mediates the interactions between
          parsers and other components, such as storage and abort signals.
      file_entry (dfvfs.FileEntry): file entry of the directory.
    """
    self.processing_status = definitions.STATUS_INDICATOR_COLLECTING
    if self._processing_profiler:
      self._processing_profiler.StartTiming('collecting')
    for sub_file_entry in file_entry.sub_file_entries:
      # Honor an abort signal between directory entries.
      if self._abort:
        break
      try:
        # Skip file entries that are not allocated (deleted entries).
        if not sub_file_entry.IsAllocated():
          continue
      except dfvfs_errors.BackEndError as exception:
        # A back-end error on one entry is reported as a warning; the
        # remaining directory entries are still processed.
        warning_message = (
            'unable to process directory entry: {0:s} with error: '
            '{1!s}').format(sub_file_entry.name, exception)
        mediator.ProduceExtractionWarning(
            warning_message, path_spec=file_entry.path_spec)
        continue
      # For TSK-based file entries only, ignore the virtual /$OrphanFiles
      # directory.
      if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:
        if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':
          continue
      event_source = event_sources.FileEntryEventSource(
          path_spec=sub_file_entry.path_spec)
      # TODO: move this into a dfVFS file entry property.
      stat_object = sub_file_entry.GetStat()
      if stat_object:
        event_source.file_entry_type = stat_object.type
      mediator.ProduceEventSource(event_source)
      self.last_activity_timestamp = time.time()
    if self._processing_profiler:
      self._processing_profiler.StopTiming('collecting')
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
  def _ProcessFileEntry(self, mediator, file_entry):
    """Processes a file entry.

    Metadata files get dedicated handling; all other file entries are
    processed per data stream. File objects left open by parsers are
    forcibly removed afterwards.

    Args:
      mediator (ParserMediator): mediates the interactions between
          parsers and other components, such as storage and abort signals.
      file_entry (dfvfs.FileEntry): file entry.
    """
    display_name = mediator.GetDisplayName()
    logger.debug(
        '[ProcessFileEntry] processing file entry: {0:s}'.format(display_name))
    # Snapshot the file object reference count so leaked file objects can
    # be detected after processing.
    reference_count = mediator.resolver_context.GetFileObjectReferenceCount(
        file_entry.path_spec)
    try:
      if self._IsMetadataFile(file_entry):
        self._ProcessMetadataFile(mediator, file_entry)
      else:
        file_entry_processed = False
        for data_stream in file_entry.data_streams:
          if self._abort:
            break
          if self._CanSkipDataStream(file_entry, data_stream):
            logger.debug((
                '[ProcessFileEntry] Skipping datastream {0:s} for {1:s}: '
                '{2:s}').format(
                    data_stream.name, file_entry.type_indicator, display_name))
            continue
          self._ProcessFileEntryDataStream(mediator, file_entry, data_stream)
          file_entry_processed = True
        if not file_entry_processed:
          # For when the file entry does not contain a data stream.
          self._ProcessFileEntryDataStream(mediator, file_entry, None)
    finally:
      new_reference_count = (
          mediator.resolver_context.GetFileObjectReferenceCount(
              file_entry.path_spec))
      if reference_count != new_reference_count:
        # Clean up after parsers that do not call close explicitly.
        if mediator.resolver_context.ForceRemoveFileObject(
            file_entry.path_spec):
          logger.warning(
              'File-object not explicitly closed for file: {0:s}'.format(
                  display_name))
    logger.debug(
        '[ProcessFileEntry] done processing file entry: {0:s}'.format(
            display_name))
  def _ProcessFileEntryDataStream(self, mediator, file_entry, data_stream):
    """Processes a specific data stream of a file entry.

    Order matters here: analyzers run before the event data stream is
    produced, metadata is extracted before content, and content is only
    extracted when it is neither skipped nor handled as an archive or
    compressed stream.

    Args:
      mediator (ParserMediator): mediates the interactions between
          parsers and other components, such as storage and abort signals.
      file_entry (dfvfs.FileEntry): file entry containing the data stream.
      data_stream (dfvfs.DataStream): data stream or None if the file entry
          has no data stream.
    """
    display_name = mediator.GetDisplayName()
    data_stream_name = getattr(data_stream, 'name', '') or ''
    logger.debug((
        '[ProcessFileEntryDataStream] processing data stream: "{0:s}" of '
        'file entry: {1:s}').format(data_stream_name, display_name))
    mediator.ClearEventAttributes()
    event_data_stream = None
    if data_stream:
      display_name = mediator.GetDisplayName()
      path_spec = copy.deepcopy(file_entry.path_spec)
      # Only non-default data streams are named in the path specification.
      if not data_stream.IsDefault():
        path_spec.data_stream = data_stream.name
      event_data_stream = events.EventDataStream()
      event_data_stream.path_spec = path_spec
      if self._analyzers:
        # Since AnalyzeDataStream generates event data stream attributes it
        # needs to be called before producing events.
        self._AnalyzeDataStream(
            file_entry, data_stream.name, display_name, event_data_stream)
    mediator.ProduceEventDataStream(event_data_stream)
    self._ExtractMetadataFromFileEntry(mediator, file_entry, data_stream)
    # Not every file entry has a data stream. In such cases we want to
    # extract the metadata only.
    if not data_stream:
      return
    # Determine if the content of the file entry should not be extracted.
    skip_content_extraction = self._CanSkipContentExtraction(file_entry)
    if skip_content_extraction:
      display_name = mediator.GetDisplayName()
      logger.debug(
          'Skipping content extraction of: {0:s}'.format(display_name))
      self.processing_status = definitions.STATUS_INDICATOR_IDLE
      return
    # TODO: merge with previous deepcopy
    path_spec = copy.deepcopy(file_entry.path_spec)
    if data_stream and not data_stream.IsDefault():
      path_spec.data_stream = data_stream.name
    archive_types = []
    compressed_stream_types = []
    if self._process_compressed_streams:
      compressed_stream_types = self._GetCompressedStreamTypes(
          mediator, path_spec)
    # Only check for archives when no compressed stream was detected.
    if not compressed_stream_types:
      archive_types = self._GetArchiveTypes(mediator, path_spec)
    if archive_types:
      if self._process_archives:
        self._ProcessArchiveTypes(mediator, path_spec, archive_types)
      if dfvfs_definitions.TYPE_INDICATOR_ZIP in archive_types:
        # ZIP files are the base of certain file formats like docx.
        self._ExtractContentFromDataStream(
            mediator, file_entry, data_stream.name)
    elif compressed_stream_types:
      self._ProcessCompressedStreamTypes(
          mediator, path_spec, compressed_stream_types)
    else:
      self._ExtractContentFromDataStream(
          mediator, file_entry, data_stream.name)
  def _ProcessMetadataFile(self, mediator, file_entry):
    """Processes a metadata file.

    Parses the file entry metadata first, then parses every data stream
    of the metadata file, producing an event data stream before each parse.

    Args:
      mediator (ParserMediator): mediates the interactions between
          parsers and other components, such as storage and abort signals.
      file_entry (dfvfs.FileEntry): file entry of the metadata file.
    """
    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
    self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)
    for data_stream in file_entry.data_streams:
      # Honor an abort signal between data streams.
      if self._abort:
        break
      path_spec = copy.deepcopy(file_entry.path_spec)
      # Only non-default data streams are named in the path specification.
      if not data_stream.IsDefault():
        path_spec.data_stream = data_stream.name
      event_data_stream = events.EventDataStream()
      event_data_stream.path_spec = path_spec
      mediator.ProduceEventDataStream(event_data_stream)
      self.last_activity_timestamp = time.time()
      self._event_extractor.ParseMetadataFile(
          mediator, file_entry, data_stream.name)
def _SetHashers(self, hasher_names_string):
"""Sets the hasher names.
Args:
hasher_names_string (str): comma separated names of the hashers
to enable, where 'none' disables the hashing analyzer.
"""
if not hasher_names_string or hasher_names_string == 'none':
return
analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(
'hashing')
analyzer_object.SetHasherNames(hasher_names_string)
self._analyzers.append(analyzer_object)
def _SetYaraRules(self, yara_rules_string):
"""Sets the Yara rules.
Args:
yara_rules_string (str): unparsed Yara rule definitions.
"""
if not yara_rules_string:
return
analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance(
'yara')
analyzer_object.SetRules(yara_rules_string)
self._analyzers.append(analyzer_object)
def GetAnalyzerNames(self):
"""Gets the names of the active analyzers.
Returns:
list[str]: names of active analyzers.
"""
return [analyzer_instance.NAME for analyzer_instance in self._analyzers]
  def ProcessPathSpec(self, mediator, path_spec, excluded_find_specs=None):
    """Processes a path specification.

    Opens the file entry, applies exclusion filters, then processes it —
    directories additionally have their sub entries collected.

    Args:
      mediator (ParserMediator): mediates the interactions between
          parsers and other components, such as storage and abort signals.
      path_spec (dfvfs.PathSpec): path specification.
      excluded_find_specs (Optional[list[dfvfs.FindSpec]]): find specifications
          that are excluded from processing.
    """
    self.last_activity_timestamp = time.time()
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
    file_entry = path_spec_resolver.Resolver.OpenFileEntry(
        path_spec, resolver_context=mediator.resolver_context)
    # An unresolvable path specification is logged and skipped, not fatal.
    if file_entry is None:
      display_name = mediator.GetDisplayNameForPathSpec(path_spec)
      logger.warning(
          'Unable to open file entry with path spec: {0:s}'.format(
              display_name))
      self.processing_status = definitions.STATUS_INDICATOR_IDLE
      return
    for find_spec in excluded_find_specs or []:
      if find_spec.CompareLocation(file_entry):
        logger.info('Skipped: {0:s} because of exclusion filter.'.format(
            file_entry.path_spec.location))
        self.processing_status = definitions.STATUS_INDICATOR_IDLE
        return
    mediator.SetFileEntry(file_entry)
    try:
      # A directory is both collected (sub entries) and processed as a
      # file entry of its own.
      if file_entry.IsDirectory():
        self._ProcessDirectory(mediator, file_entry)
      self._ProcessFileEntry(mediator, file_entry)
    finally:
      mediator.ResetFileEntry()
      self.last_activity_timestamp = time.time()
      self.processing_status = definitions.STATUS_INDICATOR_IDLE
# TODO: move the functionality of this method into the constructor.
def SetExtractionConfiguration(self, configuration):
"""Sets the extraction configuration settings.
Args:
configuration (ExtractionConfiguration): extraction configuration.
"""
self._hasher_file_size_limit = configuration.hasher_file_size_limit
self._SetHashers(configuration.hasher_names_string)
self._process_archives = configuration.process_archives
self._process_compressed_streams = configuration.process_compressed_streams
self._SetYaraRules(configuration.yara_rules_string)
  def SetAnalyzersProfiler(self, analyzers_profiler):
    """Sets the analyzers profiler.

    Args:
      analyzers_profiler (AnalyzersProfiler): analyzers profiler.
    """
    self._analyzers_profiler = analyzers_profiler
  def SetProcessingProfiler(self, processing_profiler):
    """Sets the processing profiler.

    Args:
      processing_profiler (ProcessingProfiler): processing profiler.
    """
    self._processing_profiler = processing_profiler
  def SignalAbort(self):
    """Signals the extraction worker to abort."""
    # The processing loops check this flag between iterations and stop
    # at the next opportunity.
    self._abort = True
| {
"content_hash": "f7518b0e3db33b6bab8b9655937f1b11",
"timestamp": "",
"source": "github",
"line_count": 933,
"max_line_length": 80,
"avg_line_length": 35.69560557341908,
"alnum_prop": 0.6733125150132117,
"repo_name": "Onager/plaso",
"id": "08660dee784b56834a3a34530048806c747fb800",
"size": "33328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/engine/worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1172"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1270"
},
{
"name": "Python",
"bytes": "4816953"
},
{
"name": "Shell",
"bytes": "22891"
}
],
"symlink_target": ""
} |
from flask import request, url_for
def url_for_other_page(**kwargs):
    """Returns a URL aimed at the current request endpoint and query args.

    Keyword arguments override the current request's query arguments.
    The pjax marker argument '_pjax' is always stripped so that generated
    URLs do not propagate it.
    """
    url_for_args = request.args.copy()
    # Bug fix: the original checked for the key 'pjax' but popped '_pjax',
    # which raised KeyError when only 'pjax' was present and failed to
    # strip a lone '_pjax'. pop() with a default removes it safely.
    url_for_args.pop('_pjax', None)
    for key, value in kwargs.items():
        url_for_args[key] = value
    return url_for(request.endpoint, **url_for_args)
| {
"content_hash": "66ff8003af02fffc85b6ebbd94b078ad",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 77,
"avg_line_length": 34.36363636363637,
"alnum_prop": 0.6507936507936508,
"repo_name": "cburmeister/flask-bones",
"id": "aeb16e9949d13589124534b2fda552c77c6c52c9",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "214"
},
{
"name": "Dockerfile",
"bytes": "248"
},
{
"name": "HTML",
"bytes": "15308"
},
{
"name": "JavaScript",
"bytes": "568"
},
{
"name": "Makefile",
"bytes": "264"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "27219"
}
],
"symlink_target": ""
} |
from django.db import models
class Repairs(models.Model):
    """A repair request submitted by a customer."""

    id = models.AutoField(primary_key=True)
    category = models.CharField(max_length=255)
    department = models.CharField(max_length=255)
    item = models.CharField(max_length=255)
    address = models.CharField(max_length=255)
    details = models.CharField(max_length=255)
    customer = models.CharField(max_length=255)
    telephone = models.CharField(max_length=11)
    # Set automatically when the row is first created.
    pubtime = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=9)
    assessment = models.CharField(max_length=255)

    class Meta:
        db_table = 'swan_repairs'

    def __str__(self):
        # Bug fix: __str__ must return a str; self.id is an int (AutoField),
        # which raised TypeError whenever the model was rendered.
        return str(self.id)
| {
"content_hash": "3e160d4d56e9032d1b73db64bb1748bb",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 55,
"avg_line_length": 36.94736842105263,
"alnum_prop": 0.6780626780626781,
"repo_name": "huaiping/pandora",
"id": "a3051d73556ada58a820c9374fe51e83d0aba636",
"size": "702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "repairs/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21967"
},
{
"name": "JavaScript",
"bytes": "136848"
},
{
"name": "Python",
"bytes": "25954"
}
],
"symlink_target": ""
} |
import re
import os
import click
import pytest
def test_prefixes(runner):
    """Options may use alternative prefix characters such as '++'."""
    @click.command()
    @click.option('++foo', is_flag=True, help='das foo')
    @click.option('--bar', is_flag=True, help='das bar')
    def cli(foo, bar):
        click.echo('foo=%s bar=%s' % (foo, bar))
    result = runner.invoke(cli, ['++foo', '--bar'])
    assert not result.exception
    assert result.output == 'foo=True bar=True\n'
    result = runner.invoke(cli, ['--help'])
    assert re.search(r'\+\+foo\s+das foo', result.output) is not None
    assert re.search(r'--bar\s+das bar', result.output) is not None
def test_invalid_option(runner):
    """Declaring an option without any dash prefix raises a TypeError."""
    try:
        @click.command()
        @click.option('foo')
        def cli(foo):
            pass
    except TypeError as e:
        assert 'No options defined but a name was passed (foo).' \
            in str(e)
    else:
        assert False, 'Expected a type error because of an invalid option.'
def test_invalid_nargs(runner):
    """A negative nargs value on an option raises a TypeError."""
    try:
        @click.command()
        @click.option('--foo', nargs=-1)
        def cli(foo):
            pass
    except TypeError as e:
        assert 'Options cannot have nargs < 0' in str(e)
    else:
        assert False, 'Expected a type error because of an invalid option.'
def test_nargs_tup_composite_mult(runner):
    """A tuple-typed option combined with multiple=True collects pairs."""
    @click.command()
    @click.option('--item', type=(str, int), multiple=True)
    def copy(item):
        for item in item:
            click.echo('name=%s id=%d' % item)
    result = runner.invoke(copy, ['--item', 'peter', '1', '--item', 'max', '2'])
    assert not result.exception
    assert result.output.splitlines() == [
        'name=peter id=1',
        'name=max id=2',
    ]
def test_counting(runner):
    """Count options accumulate repeats and respect an IntRange limit."""
    @click.command()
    @click.option('-v', count=True, help='Verbosity',
                  type=click.IntRange(0, 3))
    def cli(v):
        click.echo('verbosity=%d' % v)
    result = runner.invoke(cli, ['-vvv'])
    assert not result.exception
    assert result.output == 'verbosity=3\n'
    result = runner.invoke(cli, ['-vvvv'])
    assert result.exception
    assert 'Invalid value for "-v": 4 is not in the valid range of 0 to 3.' \
        in result.output
    result = runner.invoke(cli, [])
    assert not result.exception
    assert result.output == 'verbosity=0\n'
    result = runner.invoke(cli, ['--help'])
    # Bug fix: use a raw string for the regex; '\s' in a plain string
    # literal is an invalid escape sequence and warns on modern Python.
    assert re.search(r'-v\s+Verbosity', result.output) is not None
@pytest.mark.parametrize('unknown_flag', ['--foo', '-f'])
def test_unknown_options(runner, unknown_flag):
    """Unknown long and short options produce a 'no such option' error."""
    @click.command()
    def cli():
        pass
    result = runner.invoke(cli, [unknown_flag])
    assert result.exception
    assert 'no such option: {0}'.format(unknown_flag) in result.output
def test_multiple_required(runner):
    """A required multiple= option errors when omitted entirely."""
    @click.command()
    @click.option('-m', '--message', multiple=True, required=True)
    def cli(message):
        click.echo('\n'.join(message))
    result = runner.invoke(cli, ['-m', 'foo', '-mbar'])
    assert not result.exception
    assert result.output == 'foo\nbar\n'
    result = runner.invoke(cli, [])
    assert result.exception
    assert 'Error: Missing option "-m" / "--message".' in result.output
def test_multiple_envvar(runner):
    """multiple= options split environment variable values on whitespace,
    except Path-typed options which split on os.path.pathsep."""
    @click.command()
    @click.option('--arg', multiple=True)
    def cmd(arg):
        click.echo('|'.join(arg))
    result = runner.invoke(cmd, [], auto_envvar_prefix='TEST',
                           env={'TEST_ARG': 'foo bar baz'})
    assert not result.exception
    assert result.output == 'foo|bar|baz\n'
    @click.command()
    @click.option('--arg', multiple=True, envvar='X')
    def cmd(arg):
        click.echo('|'.join(arg))
    result = runner.invoke(cmd, [], env={'X': 'foo bar baz'})
    assert not result.exception
    assert result.output == 'foo|bar|baz\n'
    @click.command()
    @click.option('--arg', multiple=True, type=click.Path())
    def cmd(arg):
        click.echo('|'.join(arg))
    result = runner.invoke(cmd, [], auto_envvar_prefix='TEST',
                           env={'TEST_ARG': 'foo%sbar' % os.path.pathsep})
    assert not result.exception
    assert result.output == 'foo|bar\n'
def test_multiple_default_help(runner):
    """Defaults of multiple= options are rendered in the --help output."""
    @click.command()
    @click.option("--arg1", multiple=True, default=('foo', 'bar'),
                  show_default=True)
    @click.option("--arg2", multiple=True, default=(1, 2), type=int,
                  show_default=True)
    def cmd(arg1, arg2):
        # Bug fix: the first parameter was named "arg" but the option is
        # "--arg1"; invoking the command without --help would raise a
        # TypeError for the unexpected keyword argument.
        pass
    result = runner.invoke(cmd, ['--help'])
    assert not result.exception
    assert "foo, bar" in result.output
    assert "1, 2" in result.output
def test_multiple_default_type(runner):
    """The types of multiple= defaults determine the conversion of values."""
    @click.command()
    @click.option("--arg1", multiple=True, default=('foo', 'bar'))
    @click.option("--arg2", multiple=True, default=(1, "a"))
    def cmd(arg1, arg2):
        assert all(isinstance(e[0],str) for e in arg1)
        assert all(isinstance(e[1],str) for e in arg1)
        assert all(isinstance(e[0],int) for e in arg2)
        assert all(isinstance(e[1],str) for e in arg2)
    result = runner.invoke(cmd, "--arg1 a b --arg1 test 1 --arg2 2 two --arg2 4 four".split())
    assert not result.exception
def test_nargs_envvar(runner):
    """nargs options read grouped values from environment variables."""
    @click.command()
    @click.option('--arg', nargs=2)
    def cmd(arg):
        click.echo('|'.join(arg))
    result = runner.invoke(cmd, [], auto_envvar_prefix='TEST',
                           env={'TEST_ARG': 'foo bar'})
    assert not result.exception
    assert result.output == 'foo|bar\n'
    @click.command()
    @click.option('--arg', nargs=2, multiple=True)
    def cmd(arg):
        for item in arg:
            click.echo('|'.join(item))
    result = runner.invoke(cmd, [], auto_envvar_prefix='TEST',
                           env={'TEST_ARG': 'x 1 y 2'})
    assert not result.exception
    assert result.output == 'x|1\ny|2\n'
def test_custom_validation(runner):
    """A callback may reject values by raising BadParameter."""
    def validate_pos_int(ctx, value):
        if value < 0:
            raise click.BadParameter('Value needs to be positive')
        return value
    @click.command()
    @click.option('--foo', callback=validate_pos_int, default=1)
    def cmd(foo):
        click.echo(foo)
    result = runner.invoke(cmd, ['--foo', '-1'])
    assert 'Invalid value for "--foo": Value needs to be positive' \
        in result.output
    result = runner.invoke(cmd, ['--foo', '42'])
    assert result.output == '42\n'
def test_winstyle_options(runner):
    """Windows-style '/flag;/no-flag' options and '/?' help work."""
    @click.command()
    @click.option('/debug;/no-debug', help='Enables or disables debug mode.')
    def cmd(debug):
        click.echo(debug)
    result = runner.invoke(cmd, ['/debug'], help_option_names=['/?'])
    assert result.output == 'True\n'
    result = runner.invoke(cmd, ['/no-debug'], help_option_names=['/?'])
    assert result.output == 'False\n'
    result = runner.invoke(cmd, [], help_option_names=['/?'])
    assert result.output == 'False\n'
    result = runner.invoke(cmd, ['/?'], help_option_names=['/?'])
    assert '/debug; /no-debug  Enables or disables debug mode.' in result.output
    assert '/?                 Show this message and exit.' in result.output
def test_legacy_options(runner):
    """Single-dash long options accept both '-opt value' and '-opt=value'."""
    @click.command()
    @click.option('-whatever')
    def cmd(whatever):
        click.echo(whatever)
    result = runner.invoke(cmd, ['-whatever', '42'])
    assert result.output == '42\n'
    result = runner.invoke(cmd, ['-whatever=23'])
    assert result.output == '23\n'
def test_missing_choice(runner):
    """A missing required Choice option lists the valid choices."""
    @click.command()
    @click.option('--foo', type=click.Choice(['foo', 'bar']),
                  required=True)
    def cmd(foo):
        click.echo(foo)
    result = runner.invoke(cmd)
    assert result.exit_code == 2
    assert 'Error: Missing option "--foo".  Choose from foo, bar.' \
        in result.output
def test_multiline_help(runner):
    """Multiline help text keeps its relative indentation in --help."""
    @click.command()
    @click.option('--foo', help="""
        hello
        i am
        multiline
    """)
    def cmd(foo):
        click.echo(foo)
    result = runner.invoke(cmd, ['--help'])
    assert result.exit_code == 0
    out = result.output.splitlines()
    assert '  --foo TEXT  hello' in out
    assert '              i am' in out
    assert '              multiline' in out
def test_argument_custom_class(runner):
    """A custom Argument subclass passed via cls= overrides behavior."""
    class CustomArgument(click.Argument):
        def get_default(self, ctx):
            '''a dumb override of a default value for testing'''
            return 'I am a default'
    @click.command()
    @click.argument('testarg', cls=CustomArgument, default='you wont see me')
    def cmd(testarg):
        click.echo(testarg)
    result = runner.invoke(cmd)
    assert 'I am a default' in result.output
    assert 'you wont see me' not in result.output
def test_option_custom_class(runner):
    """A custom Option subclass passed via cls= overrides the help record."""
    class CustomOption(click.Option):
        def get_help_record(self, ctx):
            '''a dumb override of a help text for testing'''
            return ('--help', 'I am a help text')
    @click.command()
    @click.option('--testoption', cls=CustomOption, help='you wont see me')
    def cmd(testoption):
        click.echo(testoption)
    result = runner.invoke(cmd, ['--help'])
    assert 'I am a help text' in result.output
    assert 'you wont see me' not in result.output
def test_aliases_for_flags(runner):
    """Secondary names for flag options may alias either the on or off
    form, depending on which side of the declaration they appear on."""
    @click.command()
    @click.option('--warnings/--no-warnings', ' /-W', default=True)
    def cli(warnings):
        click.echo(warnings)
    result = runner.invoke(cli, ['--warnings'])
    assert result.output == 'True\n'
    result = runner.invoke(cli, ['--no-warnings'])
    assert result.output == 'False\n'
    result = runner.invoke(cli, ['-W'])
    assert result.output == 'False\n'
    @click.command()
    @click.option('--warnings/--no-warnings', '-w', default=True)
    def cli_alt(warnings):
        click.echo(warnings)
    result = runner.invoke(cli_alt, ['--warnings'])
    assert result.output == 'True\n'
    result = runner.invoke(cli_alt, ['--no-warnings'])
    assert result.output == 'False\n'
    result = runner.invoke(cli_alt, ['-w'])
    assert result.output == 'True\n'
| {
"content_hash": "9a4bece7c0e38f68ed0ce8996efe9853",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 94,
"avg_line_length": 30.42296072507553,
"alnum_prop": 0.5981132075471698,
"repo_name": "andela-ooladayo/click",
"id": "919f89854b72c16fe44cd64af42833f5d0d3b3d7",
"size": "10094",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_options.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "252"
},
{
"name": "Python",
"bytes": "283958"
}
],
"symlink_target": ""
} |
"""
This is a set of utilities for faster development with Django templates.
render_to_response() and render_to_string() use RequestContext internally.
The app_prefixed_loader is a template loader that loads directly from the app's
'templates' folder when you specify an app prefix ('app/template.html').
The JSONResponse() function automatically converts a given Python object into
JSON and returns it as an HttpResponse.
"""
from django.conf import settings
from django.http import HttpResponse
from django.template import RequestContext, loader, \
TemplateDoesNotExist, Library, Node, Variable, generic_tag_compiler
from django.utils.functional import curry
from inspect import getargspec
from ragendja.apputils import get_app_dirs
import os
class Library(Library):
    """Template tag library extended with a ``context_tag`` helper.

    Subclasses Django's ``Library`` (imported above) so existing tag
    registration keeps working.
    """
    def context_tag(self, func):
        # Decorator that registers *func* as a template tag whose first
        # argument is the template context; remaining arguments are
        # resolved template variables. Python 2 era code (getargspec).
        params, xx, xxx, defaults = getargspec(func)
        class ContextNode(Node):
            def __init__(self, vars_to_resolve):
                self.vars_to_resolve = map(Variable, vars_to_resolve)
            def render(self, context):
                resolved_vars = [var.resolve(context) for var in self.vars_to_resolve]
                return func(context, *resolved_vars)
        # Drop the implicit context parameter before building the compiler.
        params = params[1:]
        compile_func = curry(generic_tag_compiler, params, defaults, getattr(func, "_decorated_function", func).__name__, ContextNode)
        compile_func.__doc__ = func.__doc__
        self.tag(getattr(func, "_decorated_function", func).__name__, compile_func)
        return func
# The following defines a template loader that loads templates from a specific
# app based on the prefix of the template path:
# get_template("app/template.html") => app/templates/template.html
# This keeps the code DRY and prevents name clashes.
def app_prefixed_loader(template_name, template_dirs=None):
    """
    Template loader that resolves 'app/template.html' to the 'templates'
    directory of the named app (see module docstring).

    Raises TemplateDoesNotExist when the prefix is unknown or the file
    cannot be read.
    """
    packed = template_name.split('/', 1)
    if len(packed) == 2 and packed[0] in app_template_dirs:
        path = os.path.join(app_template_dirs[packed[0]], packed[1])
        try:
            # Bugfix: close the file handle deterministically — the original
            # open(path).read() leaked the handle on every load.
            with open(path) as template_file:
                return (template_file.read().decode(settings.FILE_CHARSET), path)
        except IOError:
            pass
    raise TemplateDoesNotExist(template_name)
app_prefixed_loader.is_usable = True
def render_to_string(request, template_name, data=None):
    """Render *template_name* with *data*, always using a RequestContext."""
    context = RequestContext(request)
    return loader.render_to_string(template_name, data,
                                   context_instance=context)
def render_to_response(request, template_name, data=None, mimetype=None):
    """
    Render *template_name* with a RequestContext and return an HttpResponse.

    XHTML is downgraded to 'text/html' for clients that do not advertise
    'application/xhtml+xml' in their Accept header, and a Vary: User-Agent
    header is added so proxies cache both variants separately.
    """
    if mimetype is None:
        mimetype = settings.DEFAULT_CONTENT_TYPE
    original_mimetype = mimetype
    if mimetype == 'application/xhtml+xml':
        # Internet Explorer only understands XHTML if it's served as text/html.
        # Bugfix: default the Accept header to '' — requests without an
        # HTTP_ACCEPT header used to crash with AttributeError on None.
        if mimetype not in request.META.get('HTTP_ACCEPT', ''):
            mimetype = 'text/html'
    response = HttpResponse(render_to_string(request, template_name, data),
        content_type='%s; charset=%s' % (mimetype, settings.DEFAULT_CHARSET))
    if original_mimetype == 'application/xhtml+xml':
        # Since XHTML is served with two different MIME types, depending on the
        # browser, we need to tell proxies to serve different versions.
        from django.utils.cache import patch_vary_headers
        patch_vary_headers(response, ['User-Agent'])
    return response
def JSONResponse(pyobj):
    # Lazy-import shim: on the first call, rebind this module-level name to
    # the real implementation from ragendja.json, then delegate to it.
    # Subsequent calls go straight to the real class with no import cost.
    from ragendja.json import JSONResponse as real_class
    global JSONResponse
    JSONResponse = real_class
    return JSONResponse(pyobj)
def TextResponse(string=''):
    """Return *string* as a plain-text HttpResponse in the default charset."""
    content_type = 'text/plain; charset=%s' % settings.DEFAULT_CHARSET
    return HttpResponse(string, content_type=content_type)
# This is needed by app_prefixed_loader.
# Maps app name -> absolute path of that app's 'templates' directory.
app_template_dirs = get_app_dirs('templates')
| {
"content_hash": "7d98ade7ee6b1662ee547db43c3bdad1",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 134,
"avg_line_length": 41.07865168539326,
"alnum_prop": 0.7004923413566739,
"repo_name": "stonezdj/forzdj",
"id": "5cf2d994e436083f68735216dca4b550224f7e85",
"size": "3680",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "common/appenginepatch/ragendja/template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21230"
},
{
"name": "JavaScript",
"bytes": "12164"
},
{
"name": "Python",
"bytes": "246618"
}
],
"symlink_target": ""
} |
import numpy as np
import argparse
from asetk.format.qe import QECube
# Define command line parser
parser = argparse.ArgumentParser(
    description='Converts intermediate cube-like format of pp.x to Gaussian cube format.')
parser.add_argument('--version', action='version', version='%(prog)s 13.10.2016')
parser.add_argument(
    'cubes',
    nargs='+',
    metavar='FILENAMES',
    help='QE cube file(s) to be converted. Conversion simply adds .cube extension.')
args = parser.parse_args()

# Convert each input file in turn; the output file is <input>.cube.
for fname in args.cubes:
    print("Reading QE cube file {}".format(fname))
    c = QECube.from_file(fname, read_data=True)
    outname = "{}.cube".format(fname)
    # Bugfix: corrected user-facing typo "Writting" -> "Writing".
    print("Writing Gaussian cube file {}".format(outname))
    c.write_cube_file(outname)
    print("")
print("Job done")
| {
"content_hash": "0f5786c4c0f2fd848ab7c2fa7466ac71",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 90,
"avg_line_length": 30.076923076923077,
"alnum_prop": 0.69693094629156,
"repo_name": "ltalirz/asetk",
"id": "65132a679e09a74623b73947ae68920286f06f63",
"size": "804",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/qe-convert-cube.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170380"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
} |
from beginners import *
# That was well worth the effort of the licence, wasn't it? :-)
| {
"content_hash": "fc6986079638fea71445c00d7c24c423",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 63,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6847826086956522,
"repo_name": "DarkArcher117/Block-Blaster-3000",
"id": "5421a1453fc45b14a3ae4db991d83af8611ff99b",
"size": "1669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "LiveWires 2.0/build/lib/livewires/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138460"
}
],
"symlink_target": ""
} |
import unittest
import time
from requests.exceptions import RequestException
from six.moves import xrange
from .testcases import WebserverTestCase
from locust.stats import RequestStats, StatsEntry, global_stats
from locust.core import HttpLocust, Locust, TaskSet, task
from locust.inspectlocust import get_task_ratio_dict
from locust.rpc.protocol import Message
class TestRequestStats(unittest.TestCase):
    """Unit tests for StatsEntry/RequestStats bookkeeping (no network involved)."""
    def setUp(self):
        # Shared fixture: one entry with 7 logged requests and 3 logged failures.
        self.stats = RequestStats()
        self.stats.start_time = time.time()
        self.s = StatsEntry(self.stats, "test_entry", "GET")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))
    def test_percentile(self):
        # With response times 0..99 logged once each, the p-th percentile
        # is simply 100*p.
        s = StatsEntry(self.stats, "percentile_test", "GET")
        for x in xrange(100):
            s.log(x, 0)
        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)
    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)
    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 7)
    def test_current_rps(self):
        # current_rps is computed in a window relative to last_request_timestamp,
        # so shifting that timestamp into the future controls the result.
        self.stats.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 3.5)
        self.stats.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)
    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_requests, 7)
        self.assertEqual(self.s.num_failures, 3)
    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428571428571428571429)
    def test_reset(self):
        # After reset() only newly logged requests count.
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)
        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_requests, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)
    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        self.assertEqual(756, self.s.min_response_time)
    def test_aggregation(self):
        # Two entries with the same name/method must merge cleanly via extend().
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")
        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)
        # NOTE(review): the name/method arguments look swapped here
        # ("GET", "") — harmless for these assertions, but worth confirming.
        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1, full_request_history=True)
        s.extend(s2, full_request_history=True)
        self.assertEqual(s.num_requests, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)
    def test_error_grouping(self):
        # reset stats
        self.stats = RequestStats()
        s = StatsEntry(self.stats, "/some-path", "GET")
        s.log_error(Exception("Exception!"))
        s.log_error(Exception("Exception!"))
        # Identical error messages are grouped into one entry with a count.
        self.assertEqual(1, len(self.stats.errors))
        self.assertEqual(2, list(self.stats.errors.values())[0].occurences)
        s.log_error(Exception("Another exception!"))
        s.log_error(Exception("Another exception!"))
        s.log_error(Exception("Third exception!"))
        self.assertEqual(3, len(self.stats.errors))
    def test_error_grouping_errors_with_memory_addresses(self):
        # reset stats
        self.stats = RequestStats()
        class Dummy(object):
            pass
        s = StatsEntry(self.stats, "/", "GET")
        # repr(Dummy()) embeds a unique memory address; grouping must strip
        # it so both errors land in the same bucket.
        s.log_error(Exception("Error caused by %r" % Dummy()))
        s.log_error(Exception("Error caused by %r" % Dummy()))
        self.assertEqual(1, len(self.stats.errors))
    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message,
        and unserialize the whole thing again. This is done "IRL" when stats are sent
        from slaves to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        # The first unserialize checks the direct round trip; u1 is then
        # overwritten by the Message-envelope round trip below.
        u1 = StatsEntry.unserialize(s1.serialize())
        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)
        self.assertEqual(20, u1.median_response_time)
class TestRequestStatsWithWebserver(WebserverTestCase):
    """Integration tests: drive real HTTP requests against the test webserver
    and verify what global_stats records for them."""
    def test_request_stats_content_length(self):
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
        locust = MyLocust()
        locust.client.get("/ultra_fast")
        self.assertEqual(global_stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response"))
        locust.client.get("/ultra_fast")
        # The average must be unchanged after a second identical-length response.
        self.assertEqual(global_stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response"))
    def test_request_stats_no_content_length(self):
        # Body length is still measured when the Content-Length header is absent.
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
        l = MyLocust()
        path = "/no_content_length"
        r = l.client.get(path)
        self.assertEqual(global_stats.get(path, "GET").avg_content_length, len("This response does not have content-length in the header"))
    def test_request_stats_no_content_length_streaming(self):
        # A streamed response is not consumed, so no content length is recorded.
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
        l = MyLocust()
        path = "/no_content_length"
        r = l.client.get(path, stream=True)
        self.assertEqual(0, global_stats.get(path, "GET").avg_content_length)
    def test_request_stats_named_endpoint(self):
        # A custom name= replaces the URL path as the stats key.
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
        locust = MyLocust()
        locust.client.get("/ultra_fast", name="my_custom_name")
        self.assertEqual(1, global_stats.get("my_custom_name", "GET").num_requests)
    def test_request_stats_query_variables(self):
        # The query string is part of the stats key.
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
        locust = MyLocust()
        locust.client.get("/ultra_fast?query=1")
        self.assertEqual(1, global_stats.get("/ultra_fast?query=1", "GET").num_requests)
    def test_request_stats_put(self):
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
        locust = MyLocust()
        locust.client.put("/put")
        self.assertEqual(1, global_stats.get("/put", "PUT").num_requests)
    def test_request_connection_error(self):
        # Port 1 refuses connections: recorded as a failure, not a request.
        class MyLocust(HttpLocust):
            host = "http://localhost:1"
        locust = MyLocust()
        response = locust.client.get("/", timeout=0.1)
        self.assertEqual(response.status_code, 0)
        self.assertEqual(1, global_stats.get("/", "GET").num_failures)
        self.assertEqual(0, global_stats.get("/", "GET").num_requests)
    def test_max_requests(self):
        class MyTaskSet(TaskSet):
            @task
            def my_task(self):
                self.client.get("/ultra_fast")
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
            task_set = MyTaskSet
            min_wait = 1
            max_wait = 1
        try:
            from locust.exception import StopLocust
            # Running with a global cap of 2 must stop the locust after
            # exactly 2 requests, both via task_set().run() and via run().
            global_stats.clear_all()
            global_stats.max_requests = 2
            l = MyLocust()
            self.assertRaises(StopLocust, lambda: l.task_set(l).run())
            self.assertEqual(2, global_stats.num_requests)
            global_stats.clear_all()
            global_stats.max_requests = 2
            self.assertEqual(0, global_stats.num_requests)
            l.run()
            self.assertEqual(2, global_stats.num_requests)
        finally:
            # Always restore global state so other tests are unaffected.
            global_stats.clear_all()
            global_stats.max_requests = None
    def test_max_requests_failed_requests(self):
        class MyTaskSet(TaskSet):
            @task
            def my_task(self):
                self.client.get("/ultra_fast")
                self.client.get("/fail")
                self.client.get("/fail")
        class MyLocust(HttpLocust):
            host = "http://127.0.0.1:%i" % self.port
            task_set = MyTaskSet
            min_wait = 1
            max_wait = 1
        try:
            from locust.exception import StopLocust
            # Failures count toward the cap alongside successful requests.
            global_stats.clear_all()
            global_stats.max_requests = 3
            l = MyLocust()
            self.assertRaises(StopLocust, lambda: l.task_set(l).run())
            self.assertEqual(1, global_stats.num_requests)
            self.assertEqual(2, global_stats.num_failures)
            global_stats.clear_all()
            global_stats.max_requests = 2
            self.assertEqual(0, global_stats.num_requests)
            self.assertEqual(0, global_stats.num_failures)
            l.run()
            self.assertEqual(1, global_stats.num_requests)
            self.assertEqual(1, global_stats.num_failures)
        finally:
            global_stats.clear_all()
            global_stats.max_requests = None
class MyTaskSet(TaskSet):
    # Fixture for TestInspectLocust: 75% weight on root_task, 25% on the
    # nested task set.
    @task(75)
    def root_task(self):
        pass
    @task(25)
    class MySubTaskSet(TaskSet):
        # Two equally weighted subtasks (each 50% within this sub-set).
        @task
        def task1(self):
            pass
        @task
        def task2(self):
            pass
class TestInspectLocust(unittest.TestCase):
    """Checks get_task_ratio_dict() in relative and total (absolute) modes."""
    def test_get_task_ratio_dict_relative(self):
        ratios = get_task_ratio_dict([MyTaskSet])
        top = ratios["MyTaskSet"]
        sub = top["tasks"]["MySubTaskSet"]
        self.assertEqual(1.0, top["ratio"])
        self.assertEqual(0.75, top["tasks"]["root_task"]["ratio"])
        self.assertEqual(0.25, sub["ratio"])
        # Within the sub-set, both tasks are weighted equally.
        self.assertEqual(0.5, sub["tasks"]["task1"]["ratio"])
        self.assertEqual(0.5, sub["tasks"]["task2"]["ratio"])
    def test_get_task_ratio_dict_total(self):
        ratios = get_task_ratio_dict([MyTaskSet], total=True)
        top = ratios["MyTaskSet"]
        sub = top["tasks"]["MySubTaskSet"]
        self.assertEqual(1.0, top["ratio"])
        self.assertEqual(0.75, top["tasks"]["root_task"]["ratio"])
        self.assertEqual(0.25, sub["ratio"])
        # In total mode the subtask ratios are absolute: 0.25 * 0.5 = 0.125.
        self.assertEqual(0.125, sub["tasks"]["task1"]["ratio"])
        self.assertEqual(0.125, sub["tasks"]["task2"]["ratio"])
| {
"content_hash": "eddb65fbfe5c6de36157cc225d57d36d",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 139,
"avg_line_length": 37.0625,
"alnum_prop": 0.5871128073133931,
"repo_name": "cgoldberg/locust",
"id": "ff8bcd2ec9ed66d5cc74fb7458b94c5e24596622",
"size": "11267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "locust/test/test_stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5727"
},
{
"name": "HTML",
"bytes": "12194"
},
{
"name": "JavaScript",
"bytes": "9311"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "155300"
}
],
"symlink_target": ""
} |
import networkx as nx
import community as co
from edgesense.network.utils import extract_dpsg
from datetime import datetime
import logging
# build the deparallelized subnetworks to use for metrics
# compute the metrics by timestep on the deparallelized network
# Cluster, K-Cores, PageRank,
# betweennessCentralityCount, betweennessCentralityEffort
# graphDensity, modularityCount, modularityEffort
# averageClusteringCoefficient
# Indegree, Outdegree
def _metric_average(metric_dict):
    # Arithmetic mean of a per-node metric dict's values.
    values = metric_dict.values()
    return float(sum(values)) / float(len(values))
def extract_network_metrics(mdg, ts, team=True):
    """
    Compute network metrics for timestep *ts* on the deparallelized subgraph
    of *mdg* (full network when team=True, user-only otherwise).

    Returns a dict whose keys are prefixed 'full:' or 'user:'; empty dict for
    an empty network.
    """
    met = {}
    dsg = extract_dpsg(mdg, ts, team)
    if team:
        pre = 'full:'
    else:
        pre = 'user:'
    # avoid trying to compute metrics for the case of empty networks
    if dsg.number_of_nodes() == 0:
        return met
    met[pre+'nodes_count'] = dsg.number_of_nodes()
    met[pre+'edges_count'] = dsg.number_of_edges()
    met[pre+'density'] = nx.density(dsg)
    # Per-node centrality/degree dicts plus their means; the repeated
    # average expressions are factored into _metric_average (DRY).
    met[pre+'betweenness'] = nx.betweenness_centrality(dsg)
    met[pre+'avg_betweenness'] = _metric_average(met[pre+'betweenness'])
    met[pre+'betweenness_count'] = nx.betweenness_centrality(dsg, weight='count')
    met[pre+'avg_betweenness_count'] = _metric_average(met[pre+'betweenness_count'])
    met[pre+'betweenness_effort'] = nx.betweenness_centrality(dsg, weight='effort')
    met[pre+'avg_betweenness_effort'] = _metric_average(met[pre+'betweenness_effort'])
    met[pre+'in_degree'] = dsg.in_degree()
    met[pre+'avg_in_degree'] = _metric_average(met[pre+'in_degree'])
    met[pre+'out_degree'] = dsg.out_degree()
    met[pre+'avg_out_degree'] = _metric_average(met[pre+'out_degree'])
    met[pre+'degree'] = dsg.degree()
    met[pre+'avg_degree'] = _metric_average(met[pre+'degree'])
    met[pre+'degree_count'] = dsg.degree(weight='count')
    met[pre+'avg_degree_count'] = _metric_average(met[pre+'degree_count'])
    met[pre+'degree_effort'] = dsg.degree(weight='effort')
    met[pre+'avg_degree_effort'] = _metric_average(met[pre+'degree_effort'])
    met[pre+'pagerank'] = nx.pagerank(dsg, weight='count') # added by Alberto
    usg = dsg.to_undirected()
    met[pre+'clustering'] = nx.clustering(usg) # added by Alberto
    louvain = extract_louvain_modularity(usg)
    met[pre+'partitions'] = louvain['partitions']
    met[pre+'louvain_modularity'] = louvain['modularity']
    connected_components = nx.connected_component_subgraphs(usg)
    shortest_paths = [nx.average_shortest_path_length(g) for g in connected_components if g.size()>1]
    if len(shortest_paths) > 0:
        # NOTE(review): despite the 'avg_distance' key this takes the MAX over
        # the components' average path lengths — confirm this is intended.
        met[pre+'avg_distance'] = max(shortest_paths)
    else:
        met[pre+'avg_distance'] = None
    return met
def extract_louvain_modularity(g):
    """
    Run Louvain community detection on a copy of *g* with isolated nodes
    removed. Returns {'partitions': node -> community id, 'modularity':
    float}, or None values when no dendrogram could be produced.
    """
    met = {}
    usg = g.copy()
    isolated = nx.isolates(usg)
    usg.remove_nodes_from(isolated)
    dendo = co.generate_dendrogram(usg)
    if len(dendo)>0 and isinstance(dendo, list):
        # Use the coarsest (top) level of the dendrogram.
        partition = co.partition_at_level(dendo, len(dendo) - 1 )
        met['partitions'] = {}
        for com in set(partition.values()):
            members = [nodes for nodes in partition.keys() if partition[nodes] == com]
            for member in members:
                met['partitions'][member] = com
        met['modularity'] = co.modularity(partition, usg)
        # for node in isolated:
        #     met['partitions'][node] = None
    else:
        met['partitions'] = None
        met['modularity'] = None
    return met
"content_hash": "04753d3003ce50fe459c5cb25de56a6a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 141,
"avg_line_length": 46.80246913580247,
"alnum_prop": 0.6560274333948826,
"repo_name": "albertocottica/microfoundations-community-management",
"id": "5bd1fabbcd88966a3266103c0d5e219c50793496",
"size": "3791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/edgesense copy/python/edgesense/network/metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "232307"
},
{
"name": "HTML",
"bytes": "145987"
},
{
"name": "JavaScript",
"bytes": "100835"
},
{
"name": "Makefile",
"bytes": "1003"
},
{
"name": "PHP",
"bytes": "32971"
},
{
"name": "Python",
"bytes": "123116"
},
{
"name": "Stata",
"bytes": "15298"
},
{
"name": "TeX",
"bytes": "416879"
}
],
"symlink_target": ""
} |
import rospy
import tf
from geometry_msgs.msg import PointStamped
from std_msgs.msg import Int8
class HattedHeadDetector():
    """ROS node logic: continuously determines which tracked head (if any)
    is wearing the hat, and publishes its number on /hatted_head."""
    def __init__(self):
        # Initialize hat stuff
        self.have_hat_center = False
        rospy.Subscriber('/center_of_hat', PointStamped, self.hat_callback)
        # TF Listener
        self.listener = tf.TransformListener()
        rospy.sleep(1.0) # give listener time to initialize
        # Main 10 Hz publish loop runs here in the constructor until shutdown.
        self.pub = rospy.Publisher('/hatted_head', Int8)
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            # Published value: 0 = no head hatted, N = head number wearing it.
            hatted_head = Int8()
            hatted_head.data = 0
            if self.have_hat_center:
                self.get_head_names()
                hat_wrt_heads = self.hat_wrt_heads()
                is_on_heads = []
                for hat_wrt_head in hat_wrt_heads:
                    is_on_head = self.detect_hatted_head(hat_wrt_head)
                    is_on_heads.append(is_on_head)
                if sum(is_on_heads) > 1:
                    rospy.logerr('More than one head is wearing the hat!')
                elif sum(is_on_heads) == 1:
                    which_point = is_on_heads.index(True)
                    # Head number is the trailing digit of the frame name
                    # ('head_1'..'head_9', see get_head_names).
                    which_head = int(self.joints[which_point][-1])
                    hatted_head.data = which_head
                    rospy.loginfo('HAT ON HEAD #{0}!'.format(which_head))
                else:
                    rospy.loginfo('Nope, no heads are hatted...')
            self.pub.publish(hatted_head)
            r.sleep()
    def hat_callback(self, hat_center):
        """ Get hat center locations. """
        self.hat_center = hat_center
        self.have_hat_center = True
    def get_head_names(self):
        """ Get tf frame_id for each head. """
        frames = self.listener.getFrameStrings()
        self.joints = []
        for n in range(1,10):
            suffix = '_' + str(n)
            joint = 'head' + suffix # generate joint name
            if joint in frames:
                self.joints.append(joint)
            else:
                # Head frames are numbered contiguously, so the first gap
                # means there are no further skeletons.
                # print 'This many skeletons: ' + str(n-1)
                break
    def hat_wrt_heads(self):
        """ Transform hat location to all head frames. """
        hat_wrt_heads = []
        for joint in self.joints:
            hat_wrt_head = self.hat_wrt_frame(joint)
            hat_wrt_heads.append(hat_wrt_head)
        return hat_wrt_heads
    def hat_wrt_frame(self, frame):
        """ Transform hat location to given frame. """
        self.listener.waitForTransform(self.hat_center.header.frame_id, frame, rospy.Time(0), rospy.Duration(1.0))
        hat_wrt_frame = self.listener.transformPoint(frame, self.hat_center)
        return hat_wrt_frame
    def detect_hatted_head(self, hat_wrt_head):
        """ Use spatial reasoning to determine whether each head is hatted. """
        # Hat center within 0-0.20 along y and within 0.10 of the head origin
        # on x and z (assumes the head frame's y axis points up — TODO confirm).
        is_on_head = hat_wrt_head.point.y > 0.0 and hat_wrt_head.point.y < 0.20 and abs(hat_wrt_head.point.x) < 0.10 and abs(hat_wrt_head.point.z) < 0.10
        return is_on_head
if __name__ == '__main__':
    # Standalone node entry point; the detector's constructor runs the loop.
    rospy.init_node('is_hat_on_head')
    detector = HattedHeadDetector()
    rospy.spin()
# Outline:
# +Subscribe to hat location
# +Get hat location in head frame(s)
# +Persistently check which head numbers exist
# +For each extant head, do the transform
# +Check for hat wearing
# +Throw error if there are multiple wearers
# +Publish an Int8 of who is wearing the hat
| {
"content_hash": "bb37ce47f6b95ef82b9e6ad25eabe0a0",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 147,
"avg_line_length": 30.71578947368421,
"alnum_prop": 0.6710075394105551,
"repo_name": "OSUrobotics/privacy-interfaces",
"id": "f66d14d2cb6e534d1fb8a0422dc05f74ccf91846",
"size": "2942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "specifying/hat_detector/scripts/is_hat_on_head.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83936"
},
{
"name": "C++",
"bytes": "1360235"
},
{
"name": "CMake",
"bytes": "46381"
},
{
"name": "Matlab",
"bytes": "2021"
},
{
"name": "Objective-C",
"bytes": "316"
},
{
"name": "Python",
"bytes": "364838"
}
],
"symlink_target": ""
} |
import os
import shutil
import struct
import zlib
from synctools import command
__all__ = ['Patch']
# Taken from mutagen/_util.py
# 256-entry translation table mapping each byte to its bit-reversed value.
# Used in Patch.run(): zlib.crc32 processes bits in the opposite order from
# the OGG page checksum, so data is bit-swapped on the way in and out.
bitswap = ''.join([chr(sum([((val >> i) & 1) << (7-i) for i in range(8)]))
                   for val in range(256)])
class Patch(command.SynctoolsCommand):
    """Rewrite an OGG file's last-page granule position so that In The
    Groove r21 believes the song is `length` seconds long."""
    title = 'Length-patch OGG'
    description = 'trick In The Groove r21 into accepting long songs'
    fields = [
        {
            'name': 'length',
            'title': 'Patched length in seconds',
            'input': command.FieldInputs.text,
            'default': 105,
            'type': command.FieldTypes.between(0, 600),
        },
        {
            'name': 'backup_audio',
            'title': 'Back up audio files?',
            'input': command.FieldInputs.boolean,
            'default': True,
            'type': command.FieldTypes.yesno,
        },
    ]
    def run(self, simfile):
        """Patch the simfile's OGG audio in place (optionally backing it up)."""
        super(Patch, self).run(simfile)
        # Get audio path
        ogg = os.path.join(os.path.dirname(simfile.filename), simfile['MUSIC'])
        if not os.path.splitext(ogg)[1].lower() == '.ogg':
            self.log.error('Only OGG is supported')
            return
        # Backup audio
        if self.options['backup_audio']:
            shutil.copy2(ogg, ogg + '~')
        # Get audio data
        with open(ogg, 'rb') as audiofile:
            audiodata = audiofile.read()
        # Find last page by the 4-byte header + version 0 + last page indicator
        lpindex = audiodata.rfind('OggS\x00')
        if lpindex == -1:
            self.log.error('Unable to find last OGG page')
            return
        # Header-type byte: bit 0x04 marks the end-of-stream page.
        if not ord(audiodata[lpindex + 5]) & 4:
            self.log.error('There is something very wrong with this OGG')
            return
        lp = audiodata[lpindex:]
        # TODO: don't assume 44.1kHz below
        # Get original length & insert new length
        # Bytes 6-13 of the page are the little-endian int64 granule position
        # (total PCM sample count); seconds = samples / 44100.
        patchlength = self.options['length']
        oldlength = struct.unpack('<q', lp[6:14])[0]
        oldlength /= 44100.
        lp = lp[:6] + struct.pack('<q', patchlength * 44100) + lp[14:]
        # Insert new CRC sum
        # Note: Python computes CRC backwards relative to OGG
        # The checksum covers the page with its CRC field (bytes 22-25)
        # zeroed; bitswap reverses the bit order before and after crc32.
        crc = (~zlib.crc32((lp[:22] + '\x00' * 4 + lp[26:]).translate(bitswap),
                           -1)) & 0xffffffff
        lp = (lp[:22] + struct.pack('>I', crc).translate(bitswap) + lp[26:])
        # Write new audio file
        with open(ogg, 'wb') as audiofile:
            audiofile.write(audiodata[:lpindex])
            audiofile.write(lp)
        self.log.info('Patched audio length from %s seconds to %s seconds' %
                      (oldlength, patchlength))
"content_hash": "dfb08b2dcf95095fd7dba4cd04fadba8",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 80,
"avg_line_length": 33.6125,
"alnum_prop": 0.5399776868724433,
"repo_name": "garcia/synctools",
"id": "85877d03ccd872a6090c88dfcfd4dd5974816a4b",
"size": "2711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synctools/commands/patch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8456"
},
{
"name": "Python",
"bytes": "53560"
},
{
"name": "Shell",
"bytes": "2865"
},
{
"name": "VBScript",
"bytes": "80"
}
],
"symlink_target": ""
} |
"""
This module is included coefficient functions.
The coeficient functions are learning rate and neighborhood function, etc.
Copylight Masato MASUDA. 2017.23.AUG.
"""
import numpy as np
import sys
def alpha_func (alpha_max=0.1, alpha_min=0.01, epochs=100):
    """Learning-rate schedule: `epochs` values decaying linearly from
    alpha_max down to alpha_min (both endpoints included)."""
    schedule = np.linspace(alpha_max, alpha_min, epochs)
    return schedule
def sigma_func (sigma_max=10., sigma_min=1., epochs=100):
    """Neighborhood-radius schedule: `epochs` values decaying linearly from
    sigma_max down to sigma_min (both endpoints included)."""
    schedule = np.linspace(sigma_max, sigma_min, epochs)
    return schedule
def RP_alpha_func (alpha_max=[0.5, 0.3, 0.1], alpha_min=[0.1, 0.05, 0.01], epochs=10):
max_len = len(alpha_max)
min_len = len(alpha_min)
amax = alpha_max
amin = alpha_min
if max_len == min_len:
tbl = np.array([amin]*epochs,dtype=float).T
for i in range(len(alpha_max)):
tbl[i] = np.linspace(amax[i], amin[i], epochs)
else :
print sys._getframe().f_code.co_name
print "missmatch the alpha size: len(alpha_max)=%d, len(alpha_min)=%d" % (max_len, min_len)
print " alpha_max: ", amax
print " alpha_min: ", amin
print sys._getframe().f_code.co_name, "return None"
tbl=None
return tbl
def RP_sigma_func (sigma_max=[10, 7, 4], sigma_min=[3, 2, 1], epochs=10):
max_len = len(sigma_max)
min_len = len(sigma_min)
smax = sigma_max
smin = sigma_min
if max_len == min_len:
tbl = np.array([smin]*epochs, dtype=float).T
for i in range(len(smax)):
tbl[i] = np.linspace(smax[i], smin[i], epochs)
else :
print sys._getframe().f_code.co_name
print "missmatch the sigma size: len(sigma_max)=%d, len(igma_min)=%d" % (max_len, min_len)
print " sigma_max: ", smax
print " sigma_min: ", smin
print "RP_sigma_func return None"
tbl = None
return tbl
def GAUSSIAN_func (som, dist, epoch, epochs, sigma_tbl=None):
    """
    Gaussian neighborhood coefficient for a node at distance `dist` from
    the winning node. Reads som.sigma_tbl[epoch] unless an explicit
    sigma_tbl is supplied. Returns 1 for zero distance.
    """
    # Bugfix: the original used `dist is 0.0`, a float identity check that is
    # False for any computed value in CPython; compare by value instead.
    if dist == 0.0:
        return 1
    if sigma_tbl is None:
        sigma_tbl = som.sigma_tbl
    # NOTE(review): denominator is 2*sigma, not 2*sigma**2 as in a textbook
    # Gaussian — preserved as-is; confirm against the training code.
    return np.exp(-(dist**2.)/(2.*sigma_tbl[epoch]))
def SQUARE_func (som, y, x, dist):
    """
    Square (threshold) neighborhood: 1.0 when both offsets y and x lie
    within `dist`, else 0.0. `som` is unused but kept for signature parity
    with GAUSSIAN_func.
    """
    # Bugfix: the original fell off the end (implicitly returning None)
    # when dist < y; that case now returns 0.0 like the other out-of-range
    # case, so the return type is consistently float.
    if dist >= y and dist >= x:
        return 1.0
    return 0.0
def neighborhood_func (som, epoch, n, y, x, epochs=100, topology="Gaussian", sigma_tbl=None):
    """
    Neighborhood coefficient for node (y, x) relative to the n-th winning
    node, dispatching on `topology` (Gaussian/Hexagon use a Gaussian kernel
    on Euclidean distance; Threshold uses the square kernel). Returns 0.0
    for an unknown topology after printing a hint.
    """
    if topology in {"Gaussian", "GAUSSIAN", "gaussian", "Hexagon", "HEXAGON", "hexagon"}:
        if topology in {"Hexagon", "HEXAGON", "hexagon"}:
            # Hexagonal grids map indices through som.ary to get coordinates.
            _y = som.ary[y]
            _x = som.ary[x]
            _min_y = som.ary[som.min_nodes_y[n]]
            _min_x = som.ary[som.min_nodes_x[n]]
        else :
            _y = y
            _x = x
            _min_y = som.min_nodes_y[n]
            _min_x = som.min_nodes_x[n]
        # Euclidean distance between the node and the winning node.
        dist = (float)(_min_y-_y)**2. + (float)(_min_x-_x)**2.
        dist = np.sqrt(dist)
        return GAUSSIAN_func (som, dist, epoch, epochs, sigma_tbl)
    elif topology in {"Threshold", "THRESHOLD", "threshold", "th"}:
        # NOTE(review): this branch looks broken — sigma_func returns an
        # array (int() of it raises for epochs > 1), and `dist(epoch)`
        # calls the result as a function. Confirm before relying on it.
        x = abs(som.min_nodes_x[n]-x)
        y = abs(som.min_nodes_y[n]-y)
        dist = (int)(sigma_func(som.sigma_max, som.sigma_min, epochs))
        return SQUARE_func(som, y, x, dist(epoch))
    else:
        print topology
        print "The specified topology type is missing."
        print "Please set it with \"GAUSSIAN\" or \"SQUARE\"."
        return 0.0
| {
"content_hash": "9eabd293809efafeba053621fe25657d",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 94,
"avg_line_length": 31.838383838383837,
"alnum_prop": 0.6294416243654822,
"repo_name": "kinect110/RPSOM",
"id": "33519fe68342b4998efbcee1f9cafa778dff141e",
"size": "3200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SOM/Coefficients.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111677"
}
],
"symlink_target": ""
} |
import os
from optparse import OptionParser
from subprocess import call, Popen, PIPE
from django.core.management import call_command
HAVE_JS = ['admin']
def _get_locale_dirs(resources, include_core=True):
    """
    Return a tuple (contrib name, absolute path) for all locale directories,
    optionally including the django core catalog.
    If resources list is not None, filter directories matching resources content.
    """
    contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')
    dirs = []
    # Collect every contrib app that ships a locale directory; JS-enabled
    # apps get a second "<name>-js" entry pointing at the same path.
    for contrib_name in os.listdir(contrib_dir):
        path = os.path.join(contrib_dir, contrib_name, 'locale')
        if not os.path.isdir(path):
            continue
        dirs.append((contrib_name, path))
        if contrib_name in HAVE_JS:
            dirs.append(("%s-js" % contrib_name, path))
    if include_core:
        dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))
    # Filter by resources, if any
    if resources is not None:
        res_names = [name for name, _ in dirs]
        dirs = [entry for entry in dirs if entry[0] in resources]
        if len(resources) > len(dirs):
            print("You have specified some unknown resources. "
                  "Available resource names are: %s" % (', '.join(res_names),))
            exit(1)
    return dirs
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django-core.core"
else:
return "django-core.contrib-%s" % name
def _check_diff(cat_name, base_path):
    """
    Output the approximate number of changed/added strings in the en catalog.
    """
    ext = 'js' if cat_name.endswith('-js') else ''
    po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {'path': base_path, 'ext': ext}
    cmd = "git diff -U0 %s | egrep -v '^@@|^[-+]#|^..POT-Creation' | wc -l" % po_path
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
    output, errors = proc.communicate()
    # Subtract the 4 header lines git emits for any changed file.
    num_changes = int(output.strip()) - 4
    print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
    """
    Update the en/LC_MESSAGES/django.po (main and contrib) files with
    new/updated translatable strings.
    """
    # Core is handled separately below, so exclude it from the contrib list.
    contrib_dirs = _get_locale_dirs(resources, include_core=False)
    # makemessages operates on the current working directory, so the chdir
    # calls below are load-bearing and order matters.
    os.chdir(os.path.join(os.getcwd(), 'django'))
    print("Updating main en catalog")
    call_command('makemessages', locale=['en'])
    _check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))
    # Contrib catalogs
    for name, dir_ in contrib_dirs:
        os.chdir(os.path.join(dir_, '..'))
        print("Updating en catalog in %s" % dir_)
        if name.endswith('-js'):
            call_command('makemessages', locale=['en'], domain='djangojs')
        else:
            call_command('makemessages', locale=['en'])
        _check_diff(name, dir_)
def lang_stats(resources=None, languages=None):
    """
    Output language statistics of committed translation files for each
    Django catalog.
    If resources is provided, it should be a list of translation resource to
    limit the output (e.g. ['core', 'gis']).
    """
    for name, dir_ in _get_locale_dirs(resources):
        print("\nShowing translations stats for '%s':" % name)
        langs = sorted(d for d in os.listdir(dir_) if not d.startswith('_'))
        for lang in langs:
            if languages and lang not in languages:
                continue
            # TODO: merge first with the latest en catalog
            ext = 'js' if name.endswith('-js') else ''
            cmd = "msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po" % {
                'path': dir_, 'lang': lang, 'ext': ext}
            proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
            output, errors = proc.communicate()
            if proc.returncode != 0:
                print("Errors happened when checking %s translation for %s:\n%s" % (
                    lang, name, errors))
            else:
                # msgfmt output stats on stderr
                print("%s: %s" % (lang, errors.strip()))
def fetch(resources=None, languages=None):
    """
    Fetch translations from Transifex, wrap long lines, generate mo files.

    When ``languages`` is None, every language with at least 5% coverage is
    pulled for each resource; otherwise only the requested languages are.
    """
    locale_dirs = _get_locale_dirs(resources)
    errors = []
    for name, dir_ in locale_dirs:
        # Transifex pull
        if languages is None:
            call('tx pull -r %(res)s -a -f --minimum-perc=5' % {'res': _tx_resource_for_name(name)}, shell=True)
            # Use a local variable instead of rebinding `languages`:
            # the original code reused the FIRST resource's directory
            # listing for every later resource, which is wrong since each
            # resource has its own set of pulled languages.
            target_langs = sorted(d for d in os.listdir(dir_) if not d.startswith('_') and d != 'en')
        else:
            for lang in languages:
                call('tx pull -r %(res)s -f -l %(lang)s' % {
                    'res': _tx_resource_for_name(name), 'lang': lang}, shell=True)
            target_langs = languages
        # msgcat to wrap lines and msgfmt for compilation of .mo file
        for lang in target_langs:
            po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
                'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''}
            if not os.path.exists(po_path):
                print("No %(lang)s translation for resource %(name)s" % {
                    'lang': lang, 'name': name})
                continue
            call('msgcat -o %s %s' % (po_path, po_path), shell=True)
            # po_path[:-3] strips the '.po' suffix for the '.mo' output name.
            res = call('msgfmt -c -o %s.mo %s' % (po_path[:-3], po_path), shell=True)
            if res != 0:
                errors.append((name, lang))
    if errors:
        print("\nWARNING: Errors have occurred in following cases:")
        for resource, lang in errors:
            print("\tResource %s for language %s" % (resource, lang))
        exit(1)
if __name__ == "__main__":
    RUNABLE_SCRIPTS = ('update_catalogs', 'lang_stats', 'fetch')
    parser = OptionParser(usage="usage: %prog [options] cmd")
    parser.add_option("-r", "--resources", action='append',
                      help="limit operation to the specified resources")
    parser.add_option("-l", "--languages", action='append',
                      help="limit operation to the specified languages")
    options, args = parser.parse_args()
    if not args:
        parser.print_usage()
        exit(1)
    if args[0] in RUNABLE_SCRIPTS:
        # Dispatch through module globals rather than eval(): same effect for
        # the whitelisted names, without evaluating an arbitrary expression.
        globals()[args[0]](options.resources, options.languages)
    else:
        print("Available commands are: %s" % ", ".join(RUNABLE_SCRIPTS))
| {
"content_hash": "372233e87b1483ebbf44b4933f939d7d",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 113,
"avg_line_length": 38.68674698795181,
"alnum_prop": 0.582061663033323,
"repo_name": "mbox/django",
"id": "4b7f856b2ded626a1d548cb6a754d7714bf862f4",
"size": "7169",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/manage_translations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52957"
},
{
"name": "JavaScript",
"bytes": "102668"
},
{
"name": "Python",
"bytes": "9362347"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
from unittest import mock
from manila.scheduler.filters import base
from manila import test
class TestBaseFilter(test.TestCase):
    """Unit tests for base.BaseFilter.filter_all."""

    def setUp(self):
        super(TestBaseFilter, self).setUp()
        self.filter = base.BaseFilter()

    def test_filter_one_is_called(self):
        # filter_all must call _filter_one per candidate and keep only the
        # ones for which it returns True.
        candidates = [1, 2, 3, 4]
        props = {'x': 'y'}

        def keep_two_three(value, properties):
            return value in [2, 3]

        self.mock_object(self.filter,
                         '_filter_one',
                         mock.Mock(side_effect=keep_two_three))
        filtered = list(self.filter.filter_all(candidates, props))
        self.assertEqual([2, 3], filtered)
class FakeExtension(object):
    """Test double for an entry-point extension; exposes only `plugin`."""
    def __init__(self, plugin):
        # `plugin` is the filter class this extension provides.
        self.plugin = plugin
class BaseFakeFilter(base.BaseFilter):
    """Base filter class the handler under test is constructed with;
    get_all_classes should only return its subclasses.
    """
    pass
class FakeFilter1(BaseFakeFilter):
    """Derives from BaseFakeFilter and has a fake entry point defined.
    The entry point is provided by the fake ExtensionManager below.
    Should be included in the output of get_all_classes.
    """
class FakeFilter2(BaseFakeFilter):
    """Derives from BaseFakeFilter but has no entry point.
    Should not be included in get_all_classes.
    """
class FakeFilter3(base.BaseFilter):
    """Does not derive from BaseFakeFilter (even though it has an entry point).
    Should not be included.
    """
class FakeFilter4(BaseFakeFilter):
    """Derives from BaseFakeFilter and has an entry point.
    Should be included.
    """
class FakeFilter5(BaseFakeFilter):
    """Derives from BaseFakeFilter but has no entry point.
    Should not be included.
    """
    # With this flag set, the handler runs filter_all only on the first
    # pass (index == 0) and skips the filter on later indexes — exercised
    # by test_get_filtered_objects_with_filter_run_once below.
    run_filter_once_per_request = True
class FakeExtensionManager(list):
    """Fake ExtensionManager: behaves as a list of FakeExtension objects
    wrapping a fixed set of filter classes.
    """

    def __init__(self, namespace):
        plugin_classes = (FakeFilter1, FakeFilter3, FakeFilter4)
        super(FakeExtensionManager, self).__init__(
            FakeExtension(plugin) for plugin in plugin_classes)
        self.namespace = namespace
class TestBaseFilterHandler(test.TestCase):
    def setUp(self):
        super(TestBaseFilterHandler, self).setUp()
        # Swap in the fake ExtensionManager so only FakeFilter1/3/4 appear
        # to have entry points.
        self.mock_object(base.base_handler.extension,
                         'ExtensionManager',
                         FakeExtensionManager)
        self.handler = base.BaseFilterHandler(BaseFakeFilter, 'fake_filters')
    def test_get_all_classes(self):
        # In order for a FakeFilter to be returned by get_all_classes, it has
        # to comply with these rules:
        # * It must be derived from BaseFakeFilter
        # AND
        # * It must have a python entrypoint assigned (returned by
        # FakeExtensionManager)
        expected = [FakeFilter1, FakeFilter4]
        result = self.handler.get_all_classes()
        self.assertEqual(expected, result)
    def _get_filtered_objects(self, filter_classes, index=0):
        # Helper: run the handler over a fixed object list and properties.
        filter_objs_initial = [1, 2, 3, 4]
        filter_properties = {'x': 'y'}
        return self.handler.get_filtered_objects(filter_classes,
                                                 filter_objs_initial,
                                                 filter_properties,
                                                 index)
    @mock.patch.object(FakeFilter4, 'filter_all')
    @mock.patch.object(FakeFilter3, 'filter_all', return_value=None)
    def test_get_filtered_objects_return_none(self, fake3_filter_all,
                                              fake4_filter_all):
        # A filter returning None short-circuits the chain: later filters
        # must not run, and the failing filter's name is reported.
        filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4]
        result, last_filter = self._get_filtered_objects(filter_classes)
        self.assertIsNone(result)
        self.assertFalse(fake4_filter_all.called)
        self.assertEqual('FakeFilter3', last_filter)
    def test_get_filtered_objects(self):
        filter_objs_expected = [1, 2, 3, 4]
        filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4]
        result, last_filter = self._get_filtered_objects(filter_classes)
        self.assertEqual(filter_objs_expected, result)
        self.assertEqual('FakeFilter4', last_filter)
    def test_get_filtered_objects_with_filter_run_once(self):
        # FakeFilter5 sets run_filter_once_per_request, so filter_all must
        # run only for index 0 and be skipped for indexes 1 and 2.
        filter_objs_expected = [1, 2, 3, 4]
        filter_classes = [FakeFilter5]
        with mock.patch.object(FakeFilter5, 'filter_all',
                               return_value=filter_objs_expected
                               ) as fake5_filter_all:
            result, last_filter = self._get_filtered_objects(filter_classes)
            self.assertEqual(filter_objs_expected, result)
            self.assertEqual(1, fake5_filter_all.call_count)
            result, last_filter = self._get_filtered_objects(
                filter_classes, index=1)
            self.assertEqual(filter_objs_expected, result)
            self.assertEqual(1, fake5_filter_all.call_count)
            result, last_filter = self._get_filtered_objects(
                filter_classes, index=2)
            self.assertEqual(filter_objs_expected, result)
            self.assertEqual(1, fake5_filter_all.call_count)
| {
"content_hash": "f7808764ae80f9d3ade4fee79df07328",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 77,
"avg_line_length": 33.398648648648646,
"alnum_prop": 0.6210803155978151,
"repo_name": "openstack/manila",
"id": "ae3aa562743c3ffa03df7e71e4f4242aa7bcbffc",
"size": "5534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/tests/scheduler/filters/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "12728998"
},
{
"name": "Shell",
"bytes": "107601"
}
],
"symlink_target": ""
} |
import json
import os
from gcp_census.model.table import Table
from gcp_census.model.view import View
class FilesystemModelProvider(object):
    """Loads model definitions (tables, views, groups) from a directory tree:
    <model_directory>/<group>/<name>.json|.sql.
    """

    def __init__(self, model_directory):
        self.model_directory = model_directory

    def list_tables(self):
        """Yield a Table for every .json file found in a group directory."""
        for group, name, path in self.__list_files('.json'):
            with open(path) as handle:
                schema = json.load(handle)
            yield Table(group, name, schema)

    def list_views(self):
        """Yield a View (with its SQL as a list of lines) for every .sql file."""
        for group, name, path in self.__list_files('.sql'):
            with open(path) as handle:
                sql_lines = handle.readlines()
            yield View(group, name, sql_lines)

    def list_groups(self):
        """Yield the name of every subdirectory of the model directory."""
        for entry in os.listdir(self.model_directory):
            if os.path.isdir(os.path.join(self.model_directory, entry)):
                yield entry

    def __list_files(self, extension):
        # Yield (group, basename-without-extension, full path) for every
        # matching file in every group subdirectory.
        for group in os.listdir(self.model_directory):
            group_path = os.path.join(self.model_directory, group)
            if not os.path.isdir(group_path):
                continue
            for filename in os.listdir(group_path):
                if not filename.endswith(extension):
                    continue
                base_name = os.path.splitext(filename)[0]
                full_path = os.path.join(self.model_directory, group, filename)
                yield group, base_name, full_path
| {
"content_hash": "bb995efc6dad67b28d50ab755c195d20",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 80,
"avg_line_length": 38.1,
"alnum_prop": 0.571522309711286,
"repo_name": "ocadotechnology/gcp-census",
"id": "ae7dcace332fcdc2e16e5dcd0409be8446a542df",
"size": "1524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcp_census/model/filesystem_model_provider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "82965"
}
],
"symlink_target": ""
} |
"""
Routers provide a convenient and consistent way of automatically
determining the URL conf for your API.
They are used by simply instantiating a Router class, and then registering
all the required ViewSets with that router.
For example, you might have a `urls.py` that looks something like this:
router = routers.DefaultRouter()
router.register('users', UserViewSet, 'user')
router.register('accounts', AccountViewSet, 'account')
urlpatterns = router.urls
"""
from __future__ import unicode_literals
import itertools
import warnings
from collections import OrderedDict, namedtuple
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from rest_framework import views
from rest_framework.compat import NoReverseMatch
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.schemas import SchemaGenerator, SchemaView
from rest_framework.settings import api_settings
from rest_framework.urlpatterns import format_suffix_patterns
# A concrete route: url template, http-method -> viewset-action mapping,
# URL name template, and extra kwargs passed to the view on instantiation.
Route = namedtuple('Route', ['url', 'mapping', 'name', 'initkwargs'])
# Templates expanded into concrete Routes for each @detail_route /
# @list_route decorated method found on a viewset.
DynamicDetailRoute = namedtuple('DynamicDetailRoute', ['url', 'name', 'initkwargs'])
DynamicListRoute = namedtuple('DynamicListRoute', ['url', 'name', 'initkwargs'])
def escape_curly_brackets(url_path):
    """
    Double any curly brackets in url_path so the string survives later
    str.format() calls ('{{' / '}}' are format's escapes for literal braces).
    """
    # Bug fix: the original condition `('{' and '}') in url_path` evaluated
    # the constant `('{' and '}')` to '}', so a path containing only '{'
    # was never escaped. Test both characters explicitly.
    if '{' in url_path or '}' in url_path:
        url_path = url_path.replace('{', '{{').replace('}', '}}')
    return url_path
def replace_methodname(format_string, methodname):
    """
    Partially format a format_string, swapping out any
    '{methodname}' or '{methodnamehyphen}' components.
    """
    hyphenated = methodname.replace('_', '-')
    result = format_string.replace('{methodname}', methodname)
    result = result.replace('{methodnamehyphen}', hyphenated)
    return result
def flatten(list_of_lists):
    """
    Takes an iterable of iterables, returns a single iterable containing all items
    """
    return itertools.chain.from_iterable(list_of_lists)
class BaseRouter(object):
    """
    Abstract base for routers: keeps a registry of
    (prefix, viewset, basename) triples and lazily caches generated urls.
    """

    def __init__(self):
        self.registry = []

    def register(self, prefix, viewset, base_name=None):
        # Fall back to a name derived from the viewset when none is given.
        resolved = base_name if base_name is not None else self.get_default_base_name(viewset)
        self.registry.append((prefix, viewset, resolved))

    def get_default_base_name(self, viewset):
        """
        If `base_name` is not specified, attempt to automatically determine
        it from the viewset.
        """
        raise NotImplementedError('get_default_base_name must be overridden')

    def get_urls(self):
        """
        Return a list of URL patterns, given the registered viewsets.
        """
        raise NotImplementedError('get_urls must be overridden')

    @property
    def urls(self):
        # Compute once and memoize on the instance.
        try:
            return self._urls
        except AttributeError:
            self._urls = self.get_urls()
            return self._urls
class SimpleRouter(BaseRouter):
    # Route templates. `{prefix}`, `{lookup}` and `{trailing_slash}` are
    # filled in by get_urls(); `{basename}` names the URL; the
    # `{methodname}` placeholders are expanded for dynamic routes.
    routes = [
        # List route.
        Route(
            url=r'^{prefix}{trailing_slash}$',
            mapping={
                'get': 'list',
                'post': 'create'
            },
            name='{basename}-list',
            initkwargs={'suffix': 'List'}
        ),
        # Dynamically generated list routes.
        # Generated using @list_route decorator
        # on methods of the viewset.
        DynamicListRoute(
            url=r'^{prefix}/{methodname}{trailing_slash}$',
            name='{basename}-{methodnamehyphen}',
            initkwargs={}
        ),
        # Detail route.
        Route(
            url=r'^{prefix}/{lookup}{trailing_slash}$',
            mapping={
                'get': 'retrieve',
                'put': 'update',
                'patch': 'partial_update',
                'delete': 'destroy'
            },
            name='{basename}-detail',
            initkwargs={'suffix': 'Instance'}
        ),
        # Dynamically generated detail routes.
        # Generated using @detail_route decorator on methods of the viewset.
        DynamicDetailRoute(
            url=r'^{prefix}/{lookup}/{methodname}{trailing_slash}$',
            name='{basename}-{methodnamehyphen}',
            initkwargs={}
        ),
    ]
    def __init__(self, trailing_slash=True):
        # Old-style and/or conditional: True -> '/', False -> ''.
        self.trailing_slash = trailing_slash and '/' or ''
        super(SimpleRouter, self).__init__()
    def get_default_base_name(self, viewset):
        """
        If `base_name` is not specified, attempt to automatically determine
        it from the viewset.
        """
        queryset = getattr(viewset, 'queryset', None)
        assert queryset is not None, '`base_name` argument not specified, and could ' \
            'not automatically determine the name from the viewset, as ' \
            'it does not have a `.queryset` attribute.'
        return queryset.model._meta.object_name.lower()
    def get_routes(self, viewset):
        """
        Augment `self.routes` with any dynamically generated routes.
        Returns a list of the Route namedtuple.
        """
        # converting to list as iterables are good for one pass, known host needs to be checked again and again for
        # different functions.
        known_actions = list(flatten([route.mapping.values() for route in self.routes if isinstance(route, Route)]))
        # Determine any `@detail_route` or `@list_route` decorated methods on the viewset
        detail_routes = []
        list_routes = []
        for methodname in dir(viewset):
            attr = getattr(viewset, methodname)
            httpmethods = getattr(attr, 'bind_to_methods', None)
            detail = getattr(attr, 'detail', True)
            if httpmethods:
                # checking method names against the known actions list
                if methodname in known_actions:
                    raise ImproperlyConfigured('Cannot use @detail_route or @list_route '
                                               'decorators on method "%s" '
                                               'as it is an existing route' % methodname)
                httpmethods = [method.lower() for method in httpmethods]
                if detail:
                    detail_routes.append((httpmethods, methodname))
                else:
                    list_routes.append((httpmethods, methodname))
        def _get_dynamic_routes(route, dynamic_routes):
            # Expand one Dynamic*Route template into concrete Routes,
            # one per decorated method.
            ret = []
            for httpmethods, methodname in dynamic_routes:
                method_kwargs = getattr(viewset, methodname).kwargs
                initkwargs = route.initkwargs.copy()
                initkwargs.update(method_kwargs)
                url_path = initkwargs.pop("url_path", None) or methodname
                url_path = escape_curly_brackets(url_path)
                url_name = initkwargs.pop("url_name", None) or url_path
                ret.append(Route(
                    url=replace_methodname(route.url, url_path),
                    mapping={httpmethod: methodname for httpmethod in httpmethods},
                    name=replace_methodname(route.name, url_name),
                    initkwargs=initkwargs,
                ))
            return ret
        ret = []
        for route in self.routes:
            if isinstance(route, DynamicDetailRoute):
                # Dynamic detail routes (@detail_route decorator)
                ret += _get_dynamic_routes(route, detail_routes)
            elif isinstance(route, DynamicListRoute):
                # Dynamic list routes (@list_route decorator)
                ret += _get_dynamic_routes(route, list_routes)
            else:
                # Standard route
                ret.append(route)
        return ret
    def get_method_map(self, viewset, method_map):
        """
        Given a viewset, and a mapping of http methods to actions,
        return a new mapping which only includes any mappings that
        are actually implemented by the viewset.
        """
        bound_methods = {}
        for method, action in method_map.items():
            if hasattr(viewset, action):
                bound_methods[method] = action
        return bound_methods
    def get_lookup_regex(self, viewset, lookup_prefix=''):
        """
        Given a viewset, return the portion of URL regex that is used
        to match against a single instance.
        Note that lookup_prefix is not used directly inside REST rest_framework
        itself, but is required in order to nicely support nested router
        implementations, such as drf-nested-routers.
        https://github.com/alanjds/drf-nested-routers
        """
        base_regex = '(?P<{lookup_prefix}{lookup_url_kwarg}>{lookup_value})'
        # Use `pk` as default field, if unset. Default regex should not
        # consume `.json` style suffixes and should break at '/' boundaries.
        lookup_field = getattr(viewset, 'lookup_field', 'pk')
        lookup_url_kwarg = getattr(viewset, 'lookup_url_kwarg', None) or lookup_field
        lookup_value = getattr(viewset, 'lookup_value_regex', '[^/.]+')
        return base_regex.format(
            lookup_prefix=lookup_prefix,
            lookup_url_kwarg=lookup_url_kwarg,
            lookup_value=lookup_value
        )
    def get_urls(self):
        """
        Use the registered viewsets to generate a list of URL patterns.
        """
        ret = []
        for prefix, viewset, basename in self.registry:
            lookup = self.get_lookup_regex(viewset)
            routes = self.get_routes(viewset)
            for route in routes:
                # Only actions which actually exist on the viewset will be bound
                mapping = self.get_method_map(viewset, route.mapping)
                if not mapping:
                    continue
                # Build the url pattern
                regex = route.url.format(
                    prefix=prefix,
                    lookup=lookup,
                    trailing_slash=self.trailing_slash
                )
                # If there is no prefix, the first part of the url is probably
                # controlled by project's urls.py and the router is in an app,
                # so a slash in the beginning will (A) cause Django to give
                # warnings and (B) generate URLS that will require using '//'.
                if not prefix and regex[:2] == '^/':
                    regex = '^' + regex[2:]
                view = viewset.as_view(mapping, **route.initkwargs)
                name = route.name.format(basename=basename)
                ret.append(url(regex, view, name=name))
        return ret
class APIRootView(views.APIView):
    """
    The default basic root view for DefaultRouter
    """
    _ignore_model_permissions = True
    exclude_from_schema = True
    api_root_dict = None

    def get(self, request, *args, **kwargs):
        # Return a plain {"name": "hyperlink"} response.
        links = OrderedDict()
        namespace = request.resolver_match.namespace
        for key, url_name in self.api_root_dict.items():
            qualified = namespace + ':' + url_name if namespace else url_name
            try:
                links[key] = reverse(
                    qualified,
                    args=args,
                    kwargs=kwargs,
                    request=request,
                    format=kwargs.get('format', None)
                )
            except NoReverseMatch:
                # Don't bail out if eg. no list routes exist, only detail routes.
                continue
        return Response(links)
class DefaultRouter(SimpleRouter):
    """
    The default router extends the SimpleRouter, but also adds in a default
    API root view, and adds format suffix patterns to the URLs.
    """
    include_root_view = True
    include_format_suffixes = True
    root_view_name = 'api-root'
    default_schema_renderers = None
    # Subclass hooks: override these to customise the generated views.
    APIRootView = APIRootView
    APISchemaView = SchemaView
    SchemaGenerator = SchemaGenerator
    def __init__(self, *args, **kwargs):
        # Schema-related kwargs are deprecated; they are accepted for
        # backwards compatibility but users are steered to get_schema_view().
        if 'schema_title' in kwargs:
            warnings.warn(
                "Including a schema directly via a router is now deprecated. "
                "Use `get_schema_view()` instead.",
                DeprecationWarning
            )
        if 'schema_renderers' in kwargs:
            assert 'schema_title' in kwargs, 'Missing "schema_title" argument.'
        if 'schema_url' in kwargs:
            assert 'schema_title' in kwargs, 'Missing "schema_title" argument.'
        self.schema_title = kwargs.pop('schema_title', None)
        self.schema_url = kwargs.pop('schema_url', None)
        self.schema_renderers = kwargs.pop('schema_renderers', self.default_schema_renderers)
        if 'root_renderers' in kwargs:
            self.root_renderers = kwargs.pop('root_renderers')
        else:
            self.root_renderers = list(api_settings.DEFAULT_RENDERER_CLASSES)
        super(DefaultRouter, self).__init__(*args, **kwargs)
    def get_schema_root_view(self, api_urls=None):
        """
        Return a schema root view.
        """
        schema_generator = self.SchemaGenerator(
            title=self.schema_title,
            url=self.schema_url,
            patterns=api_urls
        )
        return self.APISchemaView.as_view(
            renderer_classes=self.schema_renderers,
            schema_generator=schema_generator,
        )
    def get_api_root_view(self, api_urls=None):
        """
        Return a basic root view.
        """
        # Map each registered prefix to the name of its list route.
        api_root_dict = OrderedDict()
        list_name = self.routes[0].name
        for prefix, viewset, basename in self.registry:
            api_root_dict[prefix] = list_name.format(basename=basename)
        return self.APIRootView.as_view(api_root_dict=api_root_dict)
    def get_urls(self):
        """
        Generate the list of URL patterns, including a default root view
        for the API, and appending `.json` style format suffixes.
        """
        urls = super(DefaultRouter, self).get_urls()
        if self.include_root_view:
            # A schema title implies the deprecated schema-at-root behaviour.
            if self.schema_title:
                view = self.get_schema_root_view(api_urls=urls)
            else:
                view = self.get_api_root_view(api_urls=urls)
            root_url = url(r'^$', view, name=self.root_view_name)
            urls.append(root_url)
        if self.include_format_suffixes:
            urls = format_suffix_patterns(urls)
        return urls
| {
"content_hash": "0adc65002953abcbba3dbf518dc269f3",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 116,
"avg_line_length": 36.56565656565657,
"alnum_prop": 0.5869475138121547,
"repo_name": "atombrella/django-rest-framework",
"id": "87e58b015ad5855691cdbf34f6146e4ba4c181bf",
"size": "14480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_framework/routers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "39327"
},
{
"name": "HTML",
"bytes": "81231"
},
{
"name": "JavaScript",
"bytes": "17284"
},
{
"name": "Python",
"bytes": "1129682"
}
],
"symlink_target": ""
} |
from email.utils import getaddresses
from django.utils.encoding import force_unicode
from django.core.mail import EmailMessage
class KikiMessage(EmailMessage):
    """
    This is similar to a django EmailMessage, but overrides recipients() to
    return arbitrary recipients rather than a concatenation of the to, cc,
    and bcc attributes.
    """
    # Explicit recipient list; None until set by the caller.
    _recipients = None

    def recipients(self):
        """Return the explicitly-set recipient list (empty list if unset)."""
        return self._recipients or []

    @classmethod
    def from_python_message(cls, msg):
        """Given a python :class:`email.Message` object, return a corresponding class:`kiki.message.KikiMessage` instance."""
        payload = msg.get_payload()
        if msg.is_multipart():
            # TODO: is this the correct way to determine "body" vs "attachments"?
            body = payload[0]
            attachments = payload[1:]
        else:
            body = payload
            attachments = None
        # For now, let later header values override earlier ones. TODO: This should be more complex.
        # Bug fix: compare header names case-insensitively. email.Message
        # keeps names as received (usually 'To'/'Cc'/'Bcc'), so comparing
        # the raw key against lowercase names never excluded the addressee
        # headers, and they leaked into `headers` as duplicates.
        headers = dict([(k.lower(), v) for k, v in msg.items()
                        if k.lower() not in ('to', 'cc', 'bcc')])
        to = [addr[1] for addr in getaddresses(msg.get_all('to', []))]
        cc = [addr[1] for addr in getaddresses(msg.get_all('cc', []))]
        bcc = [addr[1] for addr in getaddresses(msg.get_all('bcc', []))]
        kwargs = {
            'subject': headers.pop('subject', ''),
            'body': body,
            'from_email': headers.pop('from', ''),
            'to': to,
            'bcc': bcc,
            'attachments': attachments,
            'headers': headers,
            'cc': cc
        }
        return cls(**kwargs)
"content_hash": "00314e3135e5f9d15bd1981339f772a7",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 163,
"avg_line_length": 31.304347826086957,
"alnum_prop": 0.6680555555555555,
"repo_name": "melinath/django-kiki",
"id": "df538d01f198651b0d3daafda68434a20a3f7c1f",
"size": "1440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kiki/message.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "100811"
}
],
"symlink_target": ""
} |
import pytest
import fiona
from fiona.drvsupport import supported_drivers, _driver_supports_mode
from fiona.errors import DriverError
from .conftest import driver_extensions
from fiona.env import GDALVersion
def test_bounds_point():
    """A point's bounds collapse to the point itself."""
    geom = {'type': 'Point', 'coordinates': [10, 10]}
    assert fiona.bounds(geom) == (10, 10, 10, 10)
def test_bounds_line():
    """Bounds of a line are its min/max x and y."""
    geom = {'type': 'LineString', 'coordinates': [[0, 0], [10, 10]]}
    assert fiona.bounds(geom) == (0, 0, 10, 10)
def test_bounds_polygon():
    """Bounds of a polygon cover its exterior ring."""
    geom = {'type': 'Polygon', 'coordinates': [[[0, 0], [10, 10], [10, 0]]]}
    assert fiona.bounds(geom) == (0, 0, 10, 10)
def test_bounds_z():
    """The z coordinate is ignored; bounds stay 2D."""
    geom = {'type': 'Point', 'coordinates': [10, 10, 10]}
    assert fiona.bounds(geom) == (10, 10, 10, 10)
# Drivers excluded from the write round-trip test below.
ignore_write_drivers = set(['CSV', 'GPX', 'GPSTrackMaker', 'DXF', 'DGN', 'MapInfo File'])
# Every remaining driver that supports opening a dataset in 'w' mode.
write_drivers = [driver for driver, raw in supported_drivers.items() if _driver_supports_mode(driver, 'w') and driver not in ignore_write_drivers]
@pytest.mark.parametrize('driver', write_drivers)
def test_bounds(tmpdir, driver):
    """Test if bounds are correctly calculated after writing
    """
    if driver == 'BNA' and GDALVersion.runtime() < GDALVersion(2, 0):
        # BNA driver segfaults with gdal 1.11
        return
    extension = driver_extensions.get(driver, "bar")
    path = str(tmpdir.join('foo.{}'.format(extension)))
    with fiona.open(path, 'w',
                    driver=driver,
                    schema={'geometry': 'Point',
                            'properties': [('title', 'str')]},
                    fiona_force_driver=True) as collection:
        record_one = {'geometry': {'type': 'Point', 'coordinates': (1.0, 10.0)}, 'properties': {'title': 'One'}}
        collection.writerecords([record_one])
        try:
            assert collection.bounds == (1.0, 10.0, 1.0, 10.0)
        except Exception as err:
            # Some drivers cannot report bounds while a write is in progress.
            assert isinstance(err, DriverError)
        record_two = {'geometry': {'type': 'Point', 'coordinates': (2.0, 20.0)}, 'properties': {'title': 'Two'}}
        collection.writerecords([record_two])
        try:
            assert collection.bounds == (1.0, 10.0, 2.0, 20.0)
        except Exception as err:
            assert isinstance(err, DriverError)
| {
"content_hash": "8763ae986b640d4a008f7c015f1b1521",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 146,
"avg_line_length": 33.723076923076924,
"alnum_prop": 0.5834854014598541,
"repo_name": "Toblerity/Fiona",
"id": "b7cb5c0231c96d59bae1a7aa8009d737be8edcad",
"size": "2192",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_bounds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "Cython",
"bytes": "215771"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "456515"
},
{
"name": "Shell",
"bytes": "4572"
}
],
"symlink_target": ""
} |
"""
sphinx.util.docstrings
~~~~~~~~~~~~~~~~~~~~~~
Utilities for docstring processing.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import warnings
from typing import Dict, List, Tuple
from docutils.parsers.rst.states import Body
from sphinx.deprecation import RemovedInSphinx50Warning, RemovedInSphinx60Warning
# Matches a docutils field-list marker (e.g. ":meta private:") at line start.
field_list_item_re = re.compile(Body.patterns['field_marker'])
def separate_metadata(s: str) -> Tuple[str, Dict[str, str]]:
    """Separate docstring into metadata and others."""
    metadata: Dict[str, str] = {}
    if not s:
        return s, metadata
    kept_lines = []
    inside_other_field = False
    for line in prepare_docstring(s):
        if not line.strip():
            # A blank line terminates the current field body.
            inside_other_field = False
            kept_lines.append(line)
            continue
        matched = field_list_item_re.match(line)
        if matched and not inside_other_field:
            field_name = matched.group()[1:].split(':', 1)[0]
            if field_name.startswith('meta '):
                # ":meta <name>: <value>" — pull it out of the docstring.
                name = field_name[5:].strip()
                metadata[name] = line[matched.end():].strip()
            else:
                kept_lines.append(line)
        else:
            # Continuation of a non-meta field (or plain text).
            inside_other_field = True
            kept_lines.append(line)
    return '\n'.join(kept_lines), metadata
def extract_metadata(s: str) -> Dict[str, str]:
    """Deprecated: return just the metadata mapping from separate_metadata()."""
    warnings.warn("extract_metadata() is deprecated.",
                  RemovedInSphinx60Warning, stacklevel=2)
    return separate_metadata(s)[1]
def prepare_docstring(s: str, ignore: int = None, tabsize: int = 8) -> List[str]:
    """Convert a docstring into lines of parseable reST. Remove common leading
    indentation, where the indentation of a given number of lines (usually just
    one) is ignored.
    Return the docstring as a list of lines usable for inserting into a docutils
    ViewList (used as argument of nested_parse().) An empty line is added to
    act as a separator between this docstring and following content.
    """
    if ignore is None:
        ignore = 1
    else:
        warnings.warn("The 'ignore' argument to prepare_docstring() is deprecated.",
                      RemovedInSphinx50Warning, stacklevel=2)
    lines = s.expandtabs(tabsize).splitlines()
    # Smallest indentation among non-blank lines after the ignored prefix.
    margin = sys.maxsize
    for line in lines[ignore:]:
        stripped_len = len(line.lstrip())
        if stripped_len:
            margin = min(margin, len(line) - stripped_len)
    # The ignored prefix lines are simply left-stripped.
    for i in range(min(ignore, len(lines))):
        lines[i] = lines[i].lstrip()
    if margin < sys.maxsize:
        lines[ignore:] = [line[margin:] for line in lines[ignore:]]
    # Remove any leading blank lines.
    while lines and not lines[0]:
        lines.pop(0)
    # Make sure there is an empty line at the end.
    if lines and lines[-1]:
        lines.append('')
    return lines
def prepare_commentdoc(s: str) -> List[str]:
    """Extract documentation comment lines (starting with #:) and return them
    as a list of lines.  Returns an empty list if there is no documentation.
    """
    result = []
    for raw in s.expandtabs().splitlines():
        stripped = raw.strip()
        if not stripped.startswith('#:'):
            continue
        text = stripped[2:]
        # The first space after the comment marker is ignored.
        if text.startswith(' '):
            text = text[1:]
        result.append(text)
    # Terminate with a blank line, mirroring prepare_docstring().
    if result and result[-1]:
        result.append('')
    return result
| {
"content_hash": "76e2a193378a511874b92b6415a71ce2",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 84,
"avg_line_length": 32.780701754385966,
"alnum_prop": 0.6052983676746053,
"repo_name": "sonntagsgesicht/regtest",
"id": "d81d7dd99dc0ce480ccb3b3be9f940f762d79fc8",
"size": "3737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".aux/venv/lib/python3.9/site-packages/sphinx/util/docstrings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13888"
}
],
"symlink_target": ""
} |
import imath
import IECore
import Gaffer
import GafferUI
import GafferUITest
class GLWidgetTest( GafferUITest.TestCase ) :
	def testOverlayParenting( self ) :
		# Overlay widgets take part in the normal parent/ancestor
		# hierarchy: button -> frame -> gl widget -> window.
		w = GafferUI.Window()
		g = GafferUI.GLWidget()
		f = GafferUI.Frame()
		b = GafferUI.Button()
		w.setChild( g )
		g.addOverlay( f )
		f.setChild( b )
		self.assertTrue( b.parent() is f )
		self.assertTrue( f.parent() is g )
		self.assertTrue( g.parent() is w )
		self.assertTrue( b.ancestor( GafferUI.GLWidget ) is g )
		self.assertTrue( b.ancestor( GafferUI.Frame ) is f )
		self.assertTrue( b.ancestor( GafferUI.Window ) is w )
	def testOverlayWidgetAt( self ) :
		# widgetAt() should find widgets hosted in an overlay. The button
		# is aligned top(-left), so probe just inside the window corner.
		w = GafferUI.Window()
		g = GafferUI.GLWidget()
		c = GafferUI.GridContainer()
		b = GafferUI.Button()
		w.setChild( g )
		g.addOverlay( c )
		c.addChild( b, alignment = ( GafferUI.HorizontalAlignment.None_, GafferUI.VerticalAlignment.Top ) )
		w.setVisible( True )
		self.waitForIdle( 10000 )
		self.assertTrue( GafferUI.Widget.widgetAt( w.bound().min() + imath.V2i( 4 ) ) is b )
	def testOverlayBound( self ) :
		# Moving the window by a delta should translate the overlaid
		# button's bound by exactly the same delta.
		w = GafferUI.Window()
		g = GafferUI.GLWidget()
		f = GafferUI.Frame()
		b = GafferUI.Button()
		w.setChild( g )
		g.addOverlay( f )
		f.setChild( b )
		w.setVisible( True )
		self.waitForIdle( 10000 )
		w.setPosition( imath.V2i( 100 ) )
		self.waitForIdle( 10000 )
		b1 = b.bound()
		w.setPosition( imath.V2i( 200 ) )
		self.waitForIdle( 10000 )
		b2 = b.bound()
		self.assertEqual( b2.min(), b1.min() + imath.V2i( 100 ) )
		self.assertEqual( b2.max(), b1.max() + imath.V2i( 100 ) )
	def testOverlayMousePosition( self ) :
		# Mouse position relative to the window and relative to the overlaid
		# button must differ by exactly the offset between the two widgets.
		w = GafferUI.Window( borderWidth = 10 )
		g = GafferUI.GLWidget()
		f = GafferUI.Frame()
		b = GafferUI.Button()
		w.setChild( g )
		g.addOverlay( f )
		f.setChild( b )
		w.setVisible( True )
		w.setPosition( imath.V2i( 100 ) )
		self.waitForIdle( 1000 )
		wBound = w.bound()
		bBound = b.bound()
		wP = GafferUI.Widget.mousePosition( relativeTo = w )
		bP = GafferUI.Widget.mousePosition( relativeTo = b )
		self.assertEqual( bBound.min() - wBound.min(), wP - bP )
	def testOverlayAccessors( self ) :
		# addOverlay()/removeOverlay() should set and clear parenting,
		# independently for each overlay.
		g = GafferUI.GLWidget()
		b1 = GafferUI.Button()
		b2 = GafferUI.Button()
		self.assertEqual( b1.parent(), None )
		self.assertEqual( b2.parent(), None )
		g.addOverlay( b1 )
		self.assertEqual( b1.parent(), g )
		self.assertEqual( b2.parent(), None )
		g.addOverlay( b2 )
		self.assertEqual( b1.parent(), g )
		self.assertEqual( b2.parent(), g )
		g.removeOverlay( b1 )
		self.assertEqual( b1.parent(), None )
		self.assertEqual( b2.parent(), g )
		g.removeOverlay( b2 )
		self.assertEqual( b1.parent(), None )
		self.assertEqual( b2.parent(), None )
if __name__ == "__main__":
    # Bug fix: `unittest` is never imported at module scope in this file,
    # so running it as a script raised NameError. Import it locally here.
    import unittest
    unittest.main()
| {
"content_hash": "265f0c620666d0ebf5ddbd3bff8d07bb",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 101,
"avg_line_length": 22.363636363636363,
"alnum_prop": 0.6570583887657059,
"repo_name": "boberfly/gaffer",
"id": "49455e44ab204540344b43423a5614ca36b0a919",
"size": "4509",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferUITest/GLWidgetTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7646009"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "8002810"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
from ev3sensors import EV3ColorSensor
from ev3sensors import EV3GyroSensor
from ev3sensors import EV3UltrasonicSensor
#HiTechnic sensors
from hitechnicsensors import HiTechnicColorSensor
from hitechnicsensors import HiTechnicAccelerometer
from hitechnicsensors import HiTechnicCompass
#NXT sensors
from nxtsensors import NXTUltrasonicSensor | {
"content_hash": "d323bc8ab379786a5bfef24d79461ece",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 51,
"avg_line_length": 31.09090909090909,
"alnum_prop": 0.8976608187134503,
"repo_name": "alexander-svendsen/ev3-python",
"id": "ed28f2d1f1d8c6594e8c4d2e5416d9b0af1b38d1",
"size": "380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ev3/sensors/digital.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6656"
},
{
"name": "JavaScript",
"bytes": "1163811"
},
{
"name": "Python",
"bytes": "128642"
}
],
"symlink_target": ""
} |
"""Tools for constructing domains for expressions. """
from sympy.polys.polyutils import parallel_dict_from_basic
from sympy.polys.polyoptions import build_options
from sympy.polys.polyerrors import GeneratorsNeeded
from sympy.polys.domains import ZZ, QQ, RR, EX
from sympy.assumptions import ask, Q
from sympy.core import sympify
def _construct_simple(coeffs, opt):
"""Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains. """
result, rationals, reals, algebraics = {}, False, False, False
if opt.extension is True:
is_algebraic = lambda coeff: ask(Q.algebraic(coeff))
else:
is_algebraic = lambda coeff: False
# XXX: add support for a + b*I coefficients
for coeff in coeffs:
if coeff.is_Rational:
if not coeff.is_Integer:
rationals = True
elif coeff.is_Float:
if not algebraics:
reals = True
else:
# there are both reals and algebraics -> EX
return False
elif is_algebraic(coeff):
if not reals:
algebraics = True
else:
# there are both algebraics and reals -> EX
return False
else:
# this is a composite domain, e.g. ZZ[X], EX
return None
if algebraics:
domain, result = _construct_algebraic(coeffs, opt)
else:
if reals:
domain = RR
else:
if opt.field or rationals:
domain = QQ
else:
domain = ZZ
result = []
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return domain, result
def _construct_algebraic(coeffs, opt):
    """We know that coefficients are algebraic so construct the extension. """
    from sympy.polys.numberfields import primitive_element

    result, exts = [], set([])

    # First pass: normalise every coefficient to the form ``a + b*ext`` with
    # rational ``a`` (additive part) and ``b`` (multiplicative part),
    # collecting the distinct algebraic parts in ``exts``.
    for coeff in coeffs:
        if coeff.is_Rational:
            # purely rational: no extension element needed
            coeff = (None, 0, QQ.from_sympy(coeff))
        else:
            a = coeff.as_coeff_add()[0]
            coeff -= a

            b = coeff.as_coeff_mul()[0]
            coeff /= b

            exts.add(coeff)

            a = QQ.from_sympy(a)
            b = QQ.from_sympy(b)

            # NOTE: stored as (ext, multiplier, offset)
            coeff = (coeff, b, a)

        result.append(coeff)

    exts = list(exts)

    # A single primitive element generating all collected extensions;
    # H[i] expresses exts[i] in terms of that primitive element.
    g, span, H = primitive_element(exts, ex=True, polys=True)
    root = sum([ s*ext for s, ext in zip(span, exts) ])
    domain, g = QQ.algebraic_field((g, root)), g.rep.rep

    # Second pass: rebuild each coefficient as an element of the algebraic
    # field.  Here ``a`` is the stored multiplier and ``b`` the offset.
    for i, (coeff, a, b) in enumerate(result):
        if coeff is not None:
            coeff = a*domain.dtype.from_list(H[exts.index(coeff)], g, QQ) + b
        else:
            coeff = domain.dtype.from_list([b], g, QQ)

        result[i] = coeff

    return domain, result
def _construct_composite(coeffs, opt):
    """Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X).

    Returns ``(domain, converted_coeffs)`` on success or ``None`` when no
    generators can be extracted (the caller then falls back to EX).

    Note: the original code used ``dict.iteritems()``, which does not exist
    on Python 3; ``dict.items()`` behaves identically here on both Python 2
    and Python 3, so it is used throughout.
    """
    numers, denoms = [], []

    # Split each coefficient into numerator/denominator expressions.
    for coeff in coeffs:
        numer, denom = coeff.as_numer_denom()

        numers.append(numer)
        denoms.append(denom)

    try:
        polys, gens = parallel_dict_from_basic(numers + denoms)  # XXX: sorting
    except GeneratorsNeeded:
        return None

    if any(gen.is_number for gen in gens):
        return None  # generators are number-like so lets better use EX

    n = len(gens)
    k = len(polys)//2

    numers = polys[:k]
    denoms = polys[k:]

    # Decide between a polynomial ring and a fraction field: a field is
    # forced either by the options or by any non-trivial denominator.
    if opt.field:
        fractions = True
    else:
        fractions, zeros = False, (0,)*n

        for denom in denoms:
            if len(denom) > 1 or zeros not in denom:
                fractions = True
                break

    coeffs = set()

    if not fractions:
        # All denominators are constants: fold them into the numerators.
        for numer, denom in zip(numers, denoms):
            denom = denom[zeros]

            for monom, coeff in numer.items():
                coeff /= denom
                coeffs.add(coeff)
                numer[monom] = coeff
    else:
        for numer, denom in zip(numers, denoms):
            coeffs.update(numer.values())
            coeffs.update(denom.values())

    # Pick the smallest ground domain that holds every ground coefficient.
    rationals, reals = False, False

    for coeff in coeffs:
        if coeff.is_Rational:
            if not coeff.is_Integer:
                rationals = True
        elif coeff.is_Float:
            reals = True
            break

    if reals:
        ground = RR
    elif rationals:
        ground = QQ
    else:
        ground = ZZ

    result = []

    if not fractions:
        domain = ground.poly_ring(*gens)

        for numer in numers:
            for monom, coeff in numer.items():
                numer[monom] = ground.from_sympy(coeff)

            result.append(domain(numer))
    else:
        domain = ground.frac_field(*gens)

        for numer, denom in zip(numers, denoms):
            for monom, coeff in numer.items():
                numer[monom] = ground.from_sympy(coeff)

            for monom, coeff in denom.items():
                denom[monom] = ground.from_sympy(coeff)

            result.append(domain((numer, denom)))

    return domain, result
def _construct_expression(coeffs, opt):
    """The last resort case, i.e. use the expression domain. """
    domain = EX
    return domain, [domain.from_sympy(coeff) for coeff in coeffs]
def construct_domain(obj, **args):
    """Construct a minimal domain for the list of coefficients.

    ``obj`` may be a single expression, an iterable of expressions, or a
    dict mapping monomials to coefficients.  Returns ``(domain, coeffs)``
    where ``coeffs`` mirrors the shape of the input (scalar, list or dict).
    """
    opt = build_options(args)

    if hasattr(obj, '__iter__'):
        if isinstance(obj, dict):
            if not obj:
                monoms, coeffs = [], []
            else:
                monoms, coeffs = zip(*obj.items())
        else:
            coeffs = obj
    else:
        coeffs = [obj]

    # Materialize a list here rather than ``map(sympify, coeffs)``: the
    # helpers below traverse ``coeffs`` more than once (classification pass
    # plus conversion pass, and possibly the EX fallback), so on Python 3 a
    # one-shot ``map`` iterator would be exhausted after the first pass and
    # silently yield empty/wrong results.
    coeffs = [sympify(coeff) for coeff in coeffs]

    result = _construct_simple(coeffs, opt)

    if result is not None:
        if result is not False:
            domain, coeffs = result
        else:
            # reals mixed with algebraics -> fall back to EX
            domain, coeffs = _construct_expression(coeffs, opt)
    else:
        if opt.composite:
            result = _construct_composite(coeffs, opt)
        else:
            result = None

        if result is not None:
            domain, coeffs = result
        else:
            domain, coeffs = _construct_expression(coeffs, opt)

    # Return in the same shape the input came in.
    if hasattr(obj, '__iter__'):
        if isinstance(obj, dict):
            return domain, dict(zip(monoms, coeffs))
        else:
            return domain, coeffs
    else:
        return domain, coeffs[0]
| {
"content_hash": "8fde93d298b93703a47dab8c8580d593",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 79,
"avg_line_length": 26.483606557377048,
"alnum_prop": 0.5496750232126276,
"repo_name": "amitjamadagni/sympy",
"id": "d617f8c182aefb69036b1a7cb302fce6411fd882",
"size": "6462",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/polys/constructor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12199014"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "287"
},
{
"name": "TeX",
"bytes": "8789"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as ET
class brocade_vcs(object):
"""Auto generated class.
"""
    def __init__(self, **kwargs):
        """Store the ``callback`` used to dispatch every generated config tree."""
        self._callback = kwargs.pop('callback')
    # NOTE: every method in this generated class follows the same pattern —
    # build a small ElementTree, set one leaf's text from kwargs, then hand
    # the tree to the callback (per-call 'callback' kwarg or the instance one).
    def vcsmode_vcs_mode(self, **kwargs):
        """Build config/vcsmode/vcs-mode and pass the tree to the callback."""
        config = ET.Element("config")
        vcsmode = ET.SubElement(config, "vcsmode", xmlns="urn:brocade.com:mgmt:brocade-vcs")
        vcs_mode = ET.SubElement(vcsmode, "vcs-mode")
        vcs_mode.text = kwargs.pop('vcs_mode')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def vcsmode_vcs_cluster_mode(self, **kwargs):
        """Build config/vcsmode/vcs-cluster-mode and pass the tree to the callback."""
        config = ET.Element("config")
        vcsmode = ET.SubElement(config, "vcsmode", xmlns="urn:brocade.com:mgmt:brocade-vcs")
        vcs_cluster_mode = ET.SubElement(vcsmode, "vcs-cluster-mode")
        vcs_cluster_mode.text = kwargs.pop('vcs_cluster_mode')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def local_node_swbd_number(self, **kwargs):
        """Build config/local-node/swbd-number and pass the tree to the callback."""
        config = ET.Element("config")
        local_node = ET.SubElement(config, "local-node", xmlns="urn:brocade.com:mgmt:brocade-vcs")
        swbd_number = ET.SubElement(local_node, "swbd-number")
        swbd_number.text = kwargs.pop('swbd_number')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def get_last_config_update_time_output_last_config_update_time(self, **kwargs):
        """Build get_last_config_update_time/output/last-config-update-time and pass it to the callback."""
        config = ET.Element("config")
        get_last_config_update_time = ET.Element("get_last_config_update_time")
        # the freshly created <config> element is discarded; the RPC element
        # becomes the root handed to the callback
        config = get_last_config_update_time
        output = ET.SubElement(get_last_config_update_time, "output")
        last_config_update_time = ET.SubElement(output, "last-config-update-time")
        last_config_update_time.text = kwargs.pop('last_config_update_time')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
    # The ``show_vcs`` RPC output leaves: one accessor per scalar field.
    def show_vcs_output_principal_switch_wwn(self, **kwargs):
        """Build show_vcs/output/principal-switch-wwn and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        # the initial <config> element is discarded; <show_vcs> becomes the root
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        principal_switch_wwn = ET.SubElement(output, "principal-switch-wwn")
        principal_switch_wwn.text = kwargs.pop('principal_switch_wwn')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_co_ordinator_wwn(self, **kwargs):
        """Build show_vcs/output/co-ordinator-wwn and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        co_ordinator_wwn = ET.SubElement(output, "co-ordinator-wwn")
        co_ordinator_wwn.text = kwargs.pop('co_ordinator_wwn')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_cluster_type_info(self, **kwargs):
        """Build show_vcs/output/vcs-cluster-type-info and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_cluster_type_info = ET.SubElement(output, "vcs-cluster-type-info")
        vcs_cluster_type_info.text = kwargs.pop('vcs_cluster_type_info')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_guid(self, **kwargs):
        """Build show_vcs/output/vcs-guid and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_guid = ET.SubElement(output, "vcs-guid")
        vcs_guid.text = kwargs.pop('vcs_guid')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_virtual_ip_address(self, **kwargs):
        """Build show_vcs/output/virtual-ip-address and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        virtual_ip_address = ET.SubElement(output, "virtual-ip-address")
        virtual_ip_address.text = kwargs.pop('virtual_ip_address')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_virtual_ipv6_address(self, **kwargs):
        """Build show_vcs/output/virtual-ipv6-address and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        virtual_ipv6_address = ET.SubElement(output, "virtual-ipv6-address")
        virtual_ipv6_address.text = kwargs.pop('virtual_ipv6_address')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_total_nodes_in_cluster(self, **kwargs):
        """Build show_vcs/output/total-nodes-in-cluster and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        total_nodes_in_cluster = ET.SubElement(output, "total-nodes-in-cluster")
        total_nodes_in_cluster.text = kwargs.pop('total_nodes_in_cluster')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_nodes_disconnected_from_cluster(self, **kwargs):
        """Build show_vcs/output/nodes-disconnected-from-cluster and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        nodes_disconnected_from_cluster = ET.SubElement(output, "nodes-disconnected-from-cluster")
        nodes_disconnected_from_cluster.text = kwargs.pop('nodes_disconnected_from_cluster')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_cluster_generic_status(self, **kwargs):
        """Build show_vcs/output/cluster-generic-status and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        cluster_generic_status = ET.SubElement(output, "cluster-generic-status")
        cluster_generic_status.text = kwargs.pop('cluster_generic_status')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_cluster_specific_status(self, **kwargs):
        """Build show_vcs/output/cluster-specific-status and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        cluster_specific_status = ET.SubElement(output, "cluster-specific-status")
        cluster_specific_status.text = kwargs.pop('cluster_specific_status')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
    # Per-node fields of the ``show_vcs`` RPC output, all nested under
    # output/vcs-nodes/vcs-node-info; one accessor per leaf.
    def show_vcs_output_vcs_nodes_vcs_node_info_node_num(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-num and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        # the initial <config> element is discarded; <show_vcs> becomes the root
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_num = ET.SubElement(vcs_node_info, "node-num")
        node_num.text = kwargs.pop('node_num')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_serial_num(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-serial-num and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_serial_num = ET.SubElement(vcs_node_info, "node-serial-num")
        node_serial_num.text = kwargs.pop('node_serial_num')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_condition(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-condition and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_condition = ET.SubElement(vcs_node_info, "node-condition")
        node_condition.text = kwargs.pop('node_condition')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_status(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-status and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_status = ET.SubElement(vcs_node_info, "node-status")
        node_status.text = kwargs.pop('node_status')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_hw_sync_state(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-hw-sync-state and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_hw_sync_state = ET.SubElement(vcs_node_info, "node-hw-sync-state")
        node_hw_sync_state.text = kwargs.pop('node_hw_sync_state')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_vcs_mode(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-vcs-mode and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_vcs_mode = ET.SubElement(vcs_node_info, "node-vcs-mode")
        node_vcs_mode.text = kwargs.pop('node_vcs_mode')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_vcs_id(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-vcs-id and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_vcs_id = ET.SubElement(vcs_node_info, "node-vcs-id")
        node_vcs_id.text = kwargs.pop('node_vcs_id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_rbridge_id(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-rbridge-id and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_rbridge_id = ET.SubElement(vcs_node_info, "node-rbridge-id")
        node_rbridge_id.text = kwargs.pop('node_rbridge_id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_is_principal(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-is-principal and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_is_principal = ET.SubElement(vcs_node_info, "node-is-principal")
        node_is_principal.text = kwargs.pop('node_is_principal')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_co_ordinator(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/co-ordinator and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        co_ordinator = ET.SubElement(vcs_node_info, "co-ordinator")
        co_ordinator.text = kwargs.pop('co_ordinator')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_mac(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switch-mac and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_switch_mac = ET.SubElement(vcs_node_info, "node-switch-mac")
        node_switch_mac.text = kwargs.pop('node_switch_mac')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_wwn(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switch-wwn and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_switch_wwn = ET.SubElement(vcs_node_info, "node-switch-wwn")
        node_switch_wwn.text = kwargs.pop('node_switch_wwn')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_switch_fcf_mac(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/switch-fcf-mac and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        switch_fcf_mac = ET.SubElement(vcs_node_info, "switch-fcf-mac")
        switch_fcf_mac.text = kwargs.pop('switch_fcf_mac')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_internal_ip_address(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-internal-ip-address and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_internal_ip_address = ET.SubElement(vcs_node_info, "node-internal-ip-address")
        node_internal_ip_address.text = kwargs.pop('node_internal_ip_address')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_public_ip_addresses_node_public_ip_address(self, **kwargs):
        """Build .../vcs-node-info/node-public-ip-addresses/node-public-ip-address and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_public_ip_addresses = ET.SubElement(vcs_node_info, "node-public-ip-addresses")
        node_public_ip_address = ET.SubElement(node_public_ip_addresses, "node-public-ip-address")
        node_public_ip_address.text = kwargs.pop('node_public_ip_address')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_public_ipv6_addresses_node_public_ipv6_address(self, **kwargs):
        """Build .../vcs-node-info/node-public-ipv6-addresses/node-public-ipv6-address and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_public_ipv6_addresses = ET.SubElement(vcs_node_info, "node-public-ipv6-addresses")
        node_public_ipv6_address = ET.SubElement(node_public_ipv6_addresses, "node-public-ipv6-address")
        node_public_ipv6_address.text = kwargs.pop('node_public_ipv6_address')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_firmware_version(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/firmware-version and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        firmware_version = ET.SubElement(vcs_node_info, "firmware-version")
        firmware_version.text = kwargs.pop('firmware_version')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_swbd_number(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-swbd-number and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_swbd_number = ET.SubElement(vcs_node_info, "node-swbd-number")
        node_swbd_number.text = kwargs.pop('node_swbd_number')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_switchname(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switchname and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_switchname = ET.SubElement(vcs_node_info, "node-switchname")
        node_switchname.text = kwargs.pop('node_switchname')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_switchtype(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switchtype and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_switchtype = ET.SubElement(vcs_node_info, "node-switchtype")
        node_switchtype.text = kwargs.pop('node_switchtype')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_subtype(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switch-subtype and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_switch_subtype = ET.SubElement(vcs_node_info, "node-switch-subtype")
        node_switch_subtype.text = kwargs.pop('node_switch_subtype')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_description(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switch-description and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_switch_description = ET.SubElement(vcs_node_info, "node-switch-description")
        node_switch_description.text = kwargs.pop('node_switch_description')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_manufacturer_name(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/manufacturer-name and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        manufacturer_name = ET.SubElement(vcs_node_info, "manufacturer-name")
        manufacturer_name.text = kwargs.pop('manufacturer_name')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_state(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-state and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_state = ET.SubElement(vcs_node_info, "node-state")
        node_state.text = kwargs.pop('node_state')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def show_vcs_output_vcs_nodes_vcs_node_info_node_fabric_state(self, **kwargs):
        """Build show_vcs/output/vcs-nodes/vcs-node-info/node-fabric-state and pass it to the callback."""
        config = ET.Element("config")
        show_vcs = ET.Element("show_vcs")
        config = show_vcs
        output = ET.SubElement(show_vcs, "output")
        vcs_nodes = ET.SubElement(output, "vcs-nodes")
        vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
        node_fabric_state = ET.SubElement(vcs_node_info, "node-fabric-state")
        node_fabric_state.text = kwargs.pop('node_fabric_state')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
    # Leaves of the ``get_vcs_details`` RPC output, under output/vcs-details.
    def get_vcs_details_output_vcs_details_principal_switch_wwn(self, **kwargs):
        """Build get_vcs_details/output/vcs-details/principal-switch-wwn and pass it to the callback."""
        config = ET.Element("config")
        get_vcs_details = ET.Element("get_vcs_details")
        # the initial <config> element is discarded; the RPC element becomes the root
        config = get_vcs_details
        output = ET.SubElement(get_vcs_details, "output")
        vcs_details = ET.SubElement(output, "vcs-details")
        principal_switch_wwn = ET.SubElement(vcs_details, "principal-switch-wwn")
        principal_switch_wwn.text = kwargs.pop('principal_switch_wwn')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def get_vcs_details_output_vcs_details_co_ordinator_wwn(self, **kwargs):
        """Build get_vcs_details/output/vcs-details/co-ordinator-wwn and pass it to the callback."""
        config = ET.Element("config")
        get_vcs_details = ET.Element("get_vcs_details")
        config = get_vcs_details
        output = ET.SubElement(get_vcs_details, "output")
        vcs_details = ET.SubElement(output, "vcs-details")
        co_ordinator_wwn = ET.SubElement(vcs_details, "co-ordinator-wwn")
        co_ordinator_wwn.text = kwargs.pop('co_ordinator_wwn')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def get_vcs_details_output_vcs_details_local_switch_wwn(self, **kwargs):
        """Build get_vcs_details/output/vcs-details/local-switch-wwn and pass it to the callback."""
        config = ET.Element("config")
        get_vcs_details = ET.Element("get_vcs_details")
        config = get_vcs_details
        output = ET.SubElement(get_vcs_details, "output")
        vcs_details = ET.SubElement(output, "vcs-details")
        local_switch_wwn = ET.SubElement(vcs_details, "local-switch-wwn")
        local_switch_wwn.text = kwargs.pop('local_switch_wwn')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def get_vcs_details_output_vcs_details_node_vcs_mode(self, **kwargs):
        """Build get_vcs_details/output/vcs-details/node-vcs-mode and pass it to the callback."""
        config = ET.Element("config")
        get_vcs_details = ET.Element("get_vcs_details")
        config = get_vcs_details
        output = ET.SubElement(get_vcs_details, "output")
        vcs_details = ET.SubElement(output, "vcs-details")
        node_vcs_mode = ET.SubElement(vcs_details, "node-vcs-mode")
        node_vcs_mode.text = kwargs.pop('node_vcs_mode')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def get_vcs_details_output_vcs_details_node_vcs_type(self, **kwargs):
        """Build get_vcs_details/output/vcs-details/node-vcs-type and pass it to the callback."""
        config = ET.Element("config")
        get_vcs_details = ET.Element("get_vcs_details")
        config = get_vcs_details
        output = ET.SubElement(get_vcs_details, "output")
        vcs_details = ET.SubElement(output, "vcs-details")
        node_vcs_type = ET.SubElement(vcs_details, "node-vcs-type")
        node_vcs_type.text = kwargs.pop('node_vcs_type')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def get_vcs_details_output_vcs_details_node_vcs_id(self, **kwargs):
        """Build get_vcs_details/output/vcs-details/node-vcs-id and pass it to the callback."""
        config = ET.Element("config")
        get_vcs_details = ET.Element("get_vcs_details")
        config = get_vcs_details
        output = ET.SubElement(get_vcs_details, "output")
        vcs_details = ET.SubElement(output, "vcs-details")
        node_vcs_id = ET.SubElement(vcs_details, "node-vcs-id")
        node_vcs_id.text = kwargs.pop('node_vcs_id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
    # Input leaves for the vcs_rbridge_config / vcs_rbridge_context RPCs.
    def vcs_rbridge_config_input_vcs_id(self, **kwargs):
        """Build vcs_rbridge_config/input/vcs-id and pass it to the callback."""
        config = ET.Element("config")
        vcs_rbridge_config = ET.Element("vcs_rbridge_config")
        # the initial <config> element is discarded; the RPC element becomes the root
        config = vcs_rbridge_config
        # NOTE: ``input`` shadows the builtin of the same name (generated code)
        input = ET.SubElement(vcs_rbridge_config, "input")
        vcs_id = ET.SubElement(input, "vcs-id")
        vcs_id.text = kwargs.pop('vcs_id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def vcs_rbridge_config_input_rbridge_id(self, **kwargs):
        """Build vcs_rbridge_config/input/rbridge-id and pass it to the callback."""
        config = ET.Element("config")
        vcs_rbridge_config = ET.Element("vcs_rbridge_config")
        config = vcs_rbridge_config
        input = ET.SubElement(vcs_rbridge_config, "input")
        rbridge_id = ET.SubElement(input, "rbridge-id")
        rbridge_id.text = kwargs.pop('rbridge_id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def vcs_rbridge_context_input_rbridge_id(self, **kwargs):
        """Build vcs_rbridge_context/input/rbridge-id and pass it to the callback."""
        config = ET.Element("config")
        vcs_rbridge_context = ET.Element("vcs_rbridge_context")
        config = vcs_rbridge_context
        input = ET.SubElement(vcs_rbridge_context, "input")
        rbridge_id = ET.SubElement(input, "rbridge-id")
        rbridge_id.text = kwargs.pop('rbridge_id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
    # Input/output leaves for the get_last_config_update_time_for_xpaths RPC.
    def get_last_config_update_time_for_xpaths_input_xpath_strings_xpath_string(self, **kwargs):
        """Build get_last_config_update_time_for_xpaths/input/xpath-strings/xpath-string and pass it to the callback."""
        config = ET.Element("config")
        get_last_config_update_time_for_xpaths = ET.Element("get_last_config_update_time_for_xpaths")
        # the initial <config> element is discarded; the RPC element becomes the root
        config = get_last_config_update_time_for_xpaths
        # NOTE: ``input`` shadows the builtin of the same name (generated code)
        input = ET.SubElement(get_last_config_update_time_for_xpaths, "input")
        xpath_strings = ET.SubElement(input, "xpath-strings")
        xpath_string = ET.SubElement(xpath_strings, "xpath-string")
        xpath_string.text = kwargs.pop('xpath_string')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def get_last_config_update_time_for_xpaths_output_last_config_update_time_for_xpaths_xpath_string(self, **kwargs):
        """Build .../output/last-config-update-time-for-xpaths/xpath-string and pass it to the callback."""
        config = ET.Element("config")
        get_last_config_update_time_for_xpaths = ET.Element("get_last_config_update_time_for_xpaths")
        config = get_last_config_update_time_for_xpaths
        output = ET.SubElement(get_last_config_update_time_for_xpaths, "output")
        last_config_update_time_for_xpaths = ET.SubElement(output, "last-config-update-time-for-xpaths")
        xpath_string = ET.SubElement(last_config_update_time_for_xpaths, "xpath-string")
        xpath_string.text = kwargs.pop('xpath_string')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def get_last_config_update_time_for_xpaths_output_last_config_update_time_for_xpaths_last_config_update_time(self, **kwargs):
        """Build .../output/last-config-update-time-for-xpaths/last-config-update-time, keyed by xpath-string, and pass it to the callback."""
        config = ET.Element("config")
        get_last_config_update_time_for_xpaths = ET.Element("get_last_config_update_time_for_xpaths")
        config = get_last_config_update_time_for_xpaths
        output = ET.SubElement(get_last_config_update_time_for_xpaths, "output")
        last_config_update_time_for_xpaths = ET.SubElement(output, "last-config-update-time-for-xpaths")
        # list key identifying which xpath entry this update time belongs to
        xpath_string_key = ET.SubElement(last_config_update_time_for_xpaths, "xpath-string")
        xpath_string_key.text = kwargs.pop('xpath_string')
        last_config_update_time = ET.SubElement(last_config_update_time_for_xpaths, "last-config-update-time")
        last_config_update_time.text = kwargs.pop('last_config_update_time')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
def vcs_virtual_ip_address_address(self, **kwargs):
    """Build config/vcs/virtual/ip/address/address and pass the tree to the callback.

    Keyword Args:
        address (str): text for the inner address leaf (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    vcs_el = ET.SubElement(root, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    virtual_el = ET.SubElement(vcs_el, "virtual")
    ip_el = ET.SubElement(virtual_el, "ip")
    addr_list = ET.SubElement(ip_el, "address")
    addr_leaf = ET.SubElement(addr_list, "address")
    addr_leaf.text = kwargs.pop('address')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcs_virtual_ip_address_inband_interface_ve(self, **kwargs):
    """Build config/vcs/virtual/ip/address/inband/interface/ve, keyed by address.

    Keyword Args:
        address (str): list-key text for the address leaf (required).
        ve (str): text for the ve leaf (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    vcs_el = ET.SubElement(root, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    addr_list = ET.SubElement(ET.SubElement(ET.SubElement(vcs_el, "virtual"), "ip"), "address")
    addr_key = ET.SubElement(addr_list, "address")
    addr_key.text = kwargs.pop('address')
    iface_el = ET.SubElement(ET.SubElement(addr_list, "inband"), "interface")
    ve_leaf = ET.SubElement(iface_el, "ve")
    ve_leaf.text = kwargs.pop('ve')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcs_virtual_ipv6_address_ipv6address(self, **kwargs):
    """Build config/vcs/virtual/ipv6/address/ipv6address and pass the tree to the callback.

    Keyword Args:
        ipv6address (str): text for the ipv6address leaf (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    vcs_el = ET.SubElement(root, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    addr_el = ET.SubElement(ET.SubElement(ET.SubElement(vcs_el, "virtual"), "ipv6"), "address")
    leaf = ET.SubElement(addr_el, "ipv6address")
    leaf.text = kwargs.pop('ipv6address')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcs_virtual_fabric_vfab_enable(self, **kwargs):
    """Build the empty presence leaf config/vcs/virtual-fabric/vfab-enable.

    Keyword Args:
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    vcs_el = ET.SubElement(root, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    fabric_el = ET.SubElement(vcs_el, "virtual-fabric")
    ET.SubElement(fabric_el, "vfab-enable")  # presence node: no text content
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcsNodeState_originator_switch_info_switchIdentifier(self, **kwargs):
    """Build config/vcsNodeState/originator-switch-info/switchIdentifier.

    Keyword Args:
        switchIdentifier (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    state_el = ET.SubElement(root, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    info_el = ET.SubElement(state_el, "originator-switch-info")
    leaf = ET.SubElement(info_el, "switchIdentifier")
    leaf.text = kwargs.pop('switchIdentifier')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcsNodeState_originator_switch_info_switchVcsId(self, **kwargs):
    """Build config/vcsNodeState/originator-switch-info/switchVcsId.

    Keyword Args:
        switchVcsId (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    state_el = ET.SubElement(root, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    info_el = ET.SubElement(state_el, "originator-switch-info")
    leaf = ET.SubElement(info_el, "switchVcsId")
    leaf.text = kwargs.pop('switchVcsId')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcsNodeState_originator_switch_info_switchIpV4Address(self, **kwargs):
    """Build config/vcsNodeState/originator-switch-info/switchIpV4Address.

    Keyword Args:
        switchIpV4Address (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    state_el = ET.SubElement(root, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    info_el = ET.SubElement(state_el, "originator-switch-info")
    leaf = ET.SubElement(info_el, "switchIpV4Address")
    leaf.text = kwargs.pop('switchIpV4Address')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcsNodeState_originator_switch_info_switchIpV6Address(self, **kwargs):
    """Build config/vcsNodeState/originator-switch-info/switchIpV6Address.

    Keyword Args:
        switchIpV6Address (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    state_el = ET.SubElement(root, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    info_el = ET.SubElement(state_el, "originator-switch-info")
    leaf = ET.SubElement(info_el, "switchIpV6Address")
    leaf.text = kwargs.pop('switchIpV6Address')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcsNodeState_nodeRbridgeid(self, **kwargs):
    """Build config/vcsNodeState/nodeRbridgeid and pass the tree to the callback.

    Keyword Args:
        nodeRbridgeid (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    state_el = ET.SubElement(root, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    leaf = ET.SubElement(state_el, "nodeRbridgeid")
    leaf.text = kwargs.pop('nodeRbridgeid')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcsNodeState_nodeState(self, **kwargs):
    """Build config/vcsNodeState/nodeState and pass the tree to the callback.

    Keyword Args:
        nodeState (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    state_el = ET.SubElement(root, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    leaf = ET.SubElement(state_el, "nodeState")
    leaf.text = kwargs.pop('nodeState')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcsmode_vcs_mode(self, **kwargs):
    """Build config/vcsmode/vcs-mode and pass the tree to the callback.

    Keyword Args:
        vcs_mode (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    mode_container = ET.SubElement(root, "vcsmode", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    leaf = ET.SubElement(mode_container, "vcs-mode")
    leaf.text = kwargs.pop('vcs_mode')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcsmode_vcs_cluster_mode(self, **kwargs):
    """Build config/vcsmode/vcs-cluster-mode and pass the tree to the callback.

    Keyword Args:
        vcs_cluster_mode (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    mode_container = ET.SubElement(root, "vcsmode", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    leaf = ET.SubElement(mode_container, "vcs-cluster-mode")
    leaf.text = kwargs.pop('vcs_cluster_mode')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def local_node_swbd_number(self, **kwargs):
    """Build config/local-node/swbd-number and pass the tree to the callback.

    Keyword Args:
        swbd_number (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    node_el = ET.SubElement(root, "local-node", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    leaf = ET.SubElement(node_el, "swbd-number")
    leaf.text = kwargs.pop('swbd_number')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def get_last_config_update_time_output_last_config_update_time(self, **kwargs):
    """Build get_last_config_update_time/output/last-config-update-time.

    Keyword Args:
        last_config_update_time (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    get_last_config_update_time = ET.Element("get_last_config_update_time")
    output = ET.SubElement(get_last_config_update_time, "output")
    last_config_update_time = ET.SubElement(output, "last-config-update-time")
    last_config_update_time.text = kwargs.pop('last_config_update_time')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_last_config_update_time)
def show_vcs_output_principal_switch_wwn(self, **kwargs):
    """Build show_vcs/output/principal-switch-wwn.

    Keyword Args:
        principal_switch_wwn (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    principal_switch_wwn = ET.SubElement(output, "principal-switch-wwn")
    principal_switch_wwn.text = kwargs.pop('principal_switch_wwn')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_co_ordinator_wwn(self, **kwargs):
    """Build show_vcs/output/co-ordinator-wwn.

    Keyword Args:
        co_ordinator_wwn (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    co_ordinator_wwn = ET.SubElement(output, "co-ordinator-wwn")
    co_ordinator_wwn.text = kwargs.pop('co_ordinator_wwn')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_cluster_type_info(self, **kwargs):
    """Build show_vcs/output/vcs-cluster-type-info.

    Keyword Args:
        vcs_cluster_type_info (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_cluster_type_info = ET.SubElement(output, "vcs-cluster-type-info")
    vcs_cluster_type_info.text = kwargs.pop('vcs_cluster_type_info')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_guid(self, **kwargs):
    """Build show_vcs/output/vcs-guid.

    Keyword Args:
        vcs_guid (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_guid = ET.SubElement(output, "vcs-guid")
    vcs_guid.text = kwargs.pop('vcs_guid')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_virtual_ip_address(self, **kwargs):
    """Build show_vcs/output/virtual-ip-address.

    Keyword Args:
        virtual_ip_address (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    virtual_ip_address = ET.SubElement(output, "virtual-ip-address")
    virtual_ip_address.text = kwargs.pop('virtual_ip_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_virtual_ipv6_address(self, **kwargs):
    """Build show_vcs/output/virtual-ipv6-address.

    Keyword Args:
        virtual_ipv6_address (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    virtual_ipv6_address = ET.SubElement(output, "virtual-ipv6-address")
    virtual_ipv6_address.text = kwargs.pop('virtual_ipv6_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_total_nodes_in_cluster(self, **kwargs):
    """Build show_vcs/output/total-nodes-in-cluster.

    Keyword Args:
        total_nodes_in_cluster (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    total_nodes_in_cluster = ET.SubElement(output, "total-nodes-in-cluster")
    total_nodes_in_cluster.text = kwargs.pop('total_nodes_in_cluster')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_nodes_disconnected_from_cluster(self, **kwargs):
    """Build show_vcs/output/nodes-disconnected-from-cluster.

    Keyword Args:
        nodes_disconnected_from_cluster (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    nodes_disconnected_from_cluster = ET.SubElement(output, "nodes-disconnected-from-cluster")
    nodes_disconnected_from_cluster.text = kwargs.pop('nodes_disconnected_from_cluster')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_cluster_generic_status(self, **kwargs):
    """Build show_vcs/output/cluster-generic-status.

    Keyword Args:
        cluster_generic_status (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    cluster_generic_status = ET.SubElement(output, "cluster-generic-status")
    cluster_generic_status.text = kwargs.pop('cluster_generic_status')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_cluster_specific_status(self, **kwargs):
    """Build show_vcs/output/cluster-specific-status.

    Keyword Args:
        cluster_specific_status (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    cluster_specific_status = ET.SubElement(output, "cluster-specific-status")
    cluster_specific_status.text = kwargs.pop('cluster_specific_status')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_num(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-num.

    Keyword Args:
        node_num (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_num = ET.SubElement(vcs_node_info, "node-num")
    node_num.text = kwargs.pop('node_num')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_serial_num(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-serial-num.

    Keyword Args:
        node_serial_num (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_serial_num = ET.SubElement(vcs_node_info, "node-serial-num")
    node_serial_num.text = kwargs.pop('node_serial_num')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_condition(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-condition.

    Keyword Args:
        node_condition (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_condition = ET.SubElement(vcs_node_info, "node-condition")
    node_condition.text = kwargs.pop('node_condition')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_status(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-status.

    Keyword Args:
        node_status (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_status = ET.SubElement(vcs_node_info, "node-status")
    node_status.text = kwargs.pop('node_status')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_hw_sync_state(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-hw-sync-state.

    Keyword Args:
        node_hw_sync_state (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_hw_sync_state = ET.SubElement(vcs_node_info, "node-hw-sync-state")
    node_hw_sync_state.text = kwargs.pop('node_hw_sync_state')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_vcs_mode(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-vcs-mode.

    Keyword Args:
        node_vcs_mode (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_vcs_mode = ET.SubElement(vcs_node_info, "node-vcs-mode")
    node_vcs_mode.text = kwargs.pop('node_vcs_mode')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_vcs_id(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-vcs-id.

    Keyword Args:
        node_vcs_id (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_vcs_id = ET.SubElement(vcs_node_info, "node-vcs-id")
    node_vcs_id.text = kwargs.pop('node_vcs_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_rbridge_id(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-rbridge-id.

    Keyword Args:
        node_rbridge_id (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_rbridge_id = ET.SubElement(vcs_node_info, "node-rbridge-id")
    node_rbridge_id.text = kwargs.pop('node_rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_is_principal(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-is-principal.

    Keyword Args:
        node_is_principal (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_is_principal = ET.SubElement(vcs_node_info, "node-is-principal")
    node_is_principal.text = kwargs.pop('node_is_principal')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_co_ordinator(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/co-ordinator.

    Keyword Args:
        co_ordinator (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    co_ordinator = ET.SubElement(vcs_node_info, "co-ordinator")
    co_ordinator.text = kwargs.pop('co_ordinator')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_mac(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switch-mac.

    Keyword Args:
        node_switch_mac (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_switch_mac = ET.SubElement(vcs_node_info, "node-switch-mac")
    node_switch_mac.text = kwargs.pop('node_switch_mac')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_wwn(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switch-wwn.

    Keyword Args:
        node_switch_wwn (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_switch_wwn = ET.SubElement(vcs_node_info, "node-switch-wwn")
    node_switch_wwn.text = kwargs.pop('node_switch_wwn')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_switch_fcf_mac(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/switch-fcf-mac.

    Keyword Args:
        switch_fcf_mac (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    switch_fcf_mac = ET.SubElement(vcs_node_info, "switch-fcf-mac")
    switch_fcf_mac.text = kwargs.pop('switch_fcf_mac')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_internal_ip_address(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-internal-ip-address.

    Keyword Args:
        node_internal_ip_address (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_internal_ip_address = ET.SubElement(vcs_node_info, "node-internal-ip-address")
    node_internal_ip_address.text = kwargs.pop('node_internal_ip_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_public_ip_addresses_node_public_ip_address(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-public-ip-addresses/node-public-ip-address.

    Keyword Args:
        node_public_ip_address (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_public_ip_addresses = ET.SubElement(vcs_node_info, "node-public-ip-addresses")
    node_public_ip_address = ET.SubElement(node_public_ip_addresses, "node-public-ip-address")
    node_public_ip_address.text = kwargs.pop('node_public_ip_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_public_ipv6_addresses_node_public_ipv6_address(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-public-ipv6-addresses/node-public-ipv6-address.

    Keyword Args:
        node_public_ipv6_address (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_public_ipv6_addresses = ET.SubElement(vcs_node_info, "node-public-ipv6-addresses")
    node_public_ipv6_address = ET.SubElement(node_public_ipv6_addresses, "node-public-ipv6-address")
    node_public_ipv6_address.text = kwargs.pop('node_public_ipv6_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_firmware_version(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/firmware-version.

    Keyword Args:
        firmware_version (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    firmware_version = ET.SubElement(vcs_node_info, "firmware-version")
    firmware_version.text = kwargs.pop('firmware_version')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_swbd_number(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-swbd-number.

    Keyword Args:
        node_swbd_number (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_swbd_number = ET.SubElement(vcs_node_info, "node-swbd-number")
    node_swbd_number.text = kwargs.pop('node_swbd_number')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_switchname(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switchname.

    Keyword Args:
        node_switchname (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_switchname = ET.SubElement(vcs_node_info, "node-switchname")
    node_switchname.text = kwargs.pop('node_switchname')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_switchtype(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switchtype.

    Keyword Args:
        node_switchtype (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_switchtype = ET.SubElement(vcs_node_info, "node-switchtype")
    node_switchtype.text = kwargs.pop('node_switchtype')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_subtype(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switch-subtype.

    Keyword Args:
        node_switch_subtype (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_switch_subtype = ET.SubElement(vcs_node_info, "node-switch-subtype")
    node_switch_subtype.text = kwargs.pop('node_switch_subtype')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_description(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-switch-description.

    Keyword Args:
        node_switch_description (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_switch_description = ET.SubElement(vcs_node_info, "node-switch-description")
    node_switch_description.text = kwargs.pop('node_switch_description')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_manufacturer_name(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/manufacturer-name.

    Keyword Args:
        manufacturer_name (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    manufacturer_name = ET.SubElement(vcs_node_info, "manufacturer-name")
    manufacturer_name.text = kwargs.pop('manufacturer_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_state(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-state.

    Keyword Args:
        node_state (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_state = ET.SubElement(vcs_node_info, "node-state")
    node_state.text = kwargs.pop('node_state')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def show_vcs_output_vcs_nodes_vcs_node_info_node_fabric_state(self, **kwargs):
    """Build show_vcs/output/vcs-nodes/vcs-node-info/node-fabric-state.

    Keyword Args:
        node_fabric_state (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    show_vcs = ET.Element("show_vcs")
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_fabric_state = ET.SubElement(vcs_node_info, "node-fabric-state")
    node_fabric_state.text = kwargs.pop('node_fabric_state')
    callback = kwargs.pop('callback', self._callback)
    return callback(show_vcs)
def get_vcs_details_output_vcs_details_principal_switch_wwn(self, **kwargs):
    """Build get_vcs_details/output/vcs-details/principal-switch-wwn.

    Keyword Args:
        principal_switch_wwn (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    get_vcs_details = ET.Element("get_vcs_details")
    output = ET.SubElement(get_vcs_details, "output")
    vcs_details = ET.SubElement(output, "vcs-details")
    principal_switch_wwn = ET.SubElement(vcs_details, "principal-switch-wwn")
    principal_switch_wwn.text = kwargs.pop('principal_switch_wwn')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_vcs_details)
def get_vcs_details_output_vcs_details_co_ordinator_wwn(self, **kwargs):
    """Build get_vcs_details/output/vcs-details/co-ordinator-wwn.

    Keyword Args:
        co_ordinator_wwn (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    get_vcs_details = ET.Element("get_vcs_details")
    output = ET.SubElement(get_vcs_details, "output")
    vcs_details = ET.SubElement(output, "vcs-details")
    co_ordinator_wwn = ET.SubElement(vcs_details, "co-ordinator-wwn")
    co_ordinator_wwn.text = kwargs.pop('co_ordinator_wwn')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_vcs_details)
def get_vcs_details_output_vcs_details_local_switch_wwn(self, **kwargs):
    """Build get_vcs_details/output/vcs-details/local-switch-wwn.

    Keyword Args:
        local_switch_wwn (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    get_vcs_details = ET.Element("get_vcs_details")
    output = ET.SubElement(get_vcs_details, "output")
    vcs_details = ET.SubElement(output, "vcs-details")
    local_switch_wwn = ET.SubElement(vcs_details, "local-switch-wwn")
    local_switch_wwn.text = kwargs.pop('local_switch_wwn')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_vcs_details)
def get_vcs_details_output_vcs_details_node_vcs_mode(self, **kwargs):
    """Build get_vcs_details/output/vcs-details/node-vcs-mode.

    Keyword Args:
        node_vcs_mode (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    get_vcs_details = ET.Element("get_vcs_details")
    output = ET.SubElement(get_vcs_details, "output")
    vcs_details = ET.SubElement(output, "vcs-details")
    node_vcs_mode = ET.SubElement(vcs_details, "node-vcs-mode")
    node_vcs_mode.text = kwargs.pop('node_vcs_mode')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_vcs_details)
def get_vcs_details_output_vcs_details_node_vcs_type(self, **kwargs):
    """Build get_vcs_details/output/vcs-details/node-vcs-type.

    Keyword Args:
        node_vcs_type (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    get_vcs_details = ET.Element("get_vcs_details")
    output = ET.SubElement(get_vcs_details, "output")
    vcs_details = ET.SubElement(output, "vcs-details")
    node_vcs_type = ET.SubElement(vcs_details, "node-vcs-type")
    node_vcs_type.text = kwargs.pop('node_vcs_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_vcs_details)
def get_vcs_details_output_vcs_details_node_vcs_id(self, **kwargs):
    """Build get_vcs_details/output/vcs-details/node-vcs-id.

    Keyword Args:
        node_vcs_id (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    get_vcs_details = ET.Element("get_vcs_details")
    output = ET.SubElement(get_vcs_details, "output")
    vcs_details = ET.SubElement(output, "vcs-details")
    node_vcs_id = ET.SubElement(vcs_details, "node-vcs-id")
    node_vcs_id.text = kwargs.pop('node_vcs_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_vcs_details)
def vcs_rbridge_config_input_vcs_id(self, **kwargs):
    """Build vcs_rbridge_config/input/vcs-id.

    Keyword Args:
        vcs_id (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # Dropped the dead ET.Element("config"); renamed `input` so the builtin
    # is not shadowed.
    vcs_rbridge_config = ET.Element("vcs_rbridge_config")
    input_el = ET.SubElement(vcs_rbridge_config, "input")
    vcs_id = ET.SubElement(input_el, "vcs-id")
    vcs_id.text = kwargs.pop('vcs_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(vcs_rbridge_config)
def vcs_rbridge_config_input_rbridge_id(self, **kwargs):
    """Build vcs_rbridge_config/input/rbridge-id.

    Keyword Args:
        rbridge_id (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # Dropped the dead ET.Element("config"); renamed `input` so the builtin
    # is not shadowed.
    vcs_rbridge_config = ET.Element("vcs_rbridge_config")
    input_el = ET.SubElement(vcs_rbridge_config, "input")
    rbridge_id = ET.SubElement(input_el, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(vcs_rbridge_config)
def vcs_rbridge_context_input_rbridge_id(self, **kwargs):
    """Build vcs_rbridge_context/input/rbridge-id.

    Keyword Args:
        rbridge_id (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # Dropped the dead ET.Element("config"); renamed `input` so the builtin
    # is not shadowed.
    vcs_rbridge_context = ET.Element("vcs_rbridge_context")
    input_el = ET.SubElement(vcs_rbridge_context, "input")
    rbridge_id = ET.SubElement(input_el, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(vcs_rbridge_context)
def get_last_config_update_time_for_xpaths_input_xpath_strings_xpath_string(self, **kwargs):
    """Build get_last_config_update_time_for_xpaths/input/xpath-strings/xpath-string.

    Keyword Args:
        xpath_string (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # Dropped the dead ET.Element("config"); renamed `input` so the builtin
    # is not shadowed.
    get_last_config_update_time_for_xpaths = ET.Element("get_last_config_update_time_for_xpaths")
    input_el = ET.SubElement(get_last_config_update_time_for_xpaths, "input")
    xpath_strings = ET.SubElement(input_el, "xpath-strings")
    xpath_string = ET.SubElement(xpath_strings, "xpath-string")
    xpath_string.text = kwargs.pop('xpath_string')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_last_config_update_time_for_xpaths)
def get_last_config_update_time_for_xpaths_output_last_config_update_time_for_xpaths_xpath_string(self, **kwargs):
    """Build get_last_config_update_time_for_xpaths/output/last-config-update-time-for-xpaths/xpath-string.

    NOTE: an identical method is defined earlier in this class; Python keeps
    this later definition (auto-generation artifact).

    Keyword Args:
        xpath_string (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    get_last_config_update_time_for_xpaths = ET.Element("get_last_config_update_time_for_xpaths")
    output = ET.SubElement(get_last_config_update_time_for_xpaths, "output")
    last_config_update_time_for_xpaths = ET.SubElement(output, "last-config-update-time-for-xpaths")
    xpath_string = ET.SubElement(last_config_update_time_for_xpaths, "xpath-string")
    xpath_string.text = kwargs.pop('xpath_string')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_last_config_update_time_for_xpaths)
def get_last_config_update_time_for_xpaths_output_last_config_update_time_for_xpaths_last_config_update_time(self, **kwargs):
    """Build the last-config-update-time leaf (keyed by xpath-string) of the RPC reply.

    NOTE: an identical method is defined earlier in this class; Python keeps
    this later definition (auto-generation artifact).

    Keyword Args:
        xpath_string (str): list-key text (required).
        last_config_update_time (str): leaf text (required).
        callback (callable): invoked with the built element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    # RPC element is the document root; dropped the dead ET.Element("config").
    get_last_config_update_time_for_xpaths = ET.Element("get_last_config_update_time_for_xpaths")
    output = ET.SubElement(get_last_config_update_time_for_xpaths, "output")
    last_config_update_time_for_xpaths = ET.SubElement(output, "last-config-update-time-for-xpaths")
    xpath_string_key = ET.SubElement(last_config_update_time_for_xpaths, "xpath-string")
    xpath_string_key.text = kwargs.pop('xpath_string')
    last_config_update_time = ET.SubElement(last_config_update_time_for_xpaths, "last-config-update-time")
    last_config_update_time.text = kwargs.pop('last_config_update_time')
    callback = kwargs.pop('callback', self._callback)
    return callback(get_last_config_update_time_for_xpaths)
def vcs_virtual_ip_address_address(self, **kwargs):
    """Build config/vcs/virtual/ip/address/address and pass the tree to the callback.

    NOTE: an identical method is defined earlier in this class; Python keeps
    this later definition (auto-generation artifact).

    Keyword Args:
        address (str): text for the inner address leaf (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    vcs_el = ET.SubElement(root, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    addr_list = ET.SubElement(ET.SubElement(ET.SubElement(vcs_el, "virtual"), "ip"), "address")
    addr_leaf = ET.SubElement(addr_list, "address")
    addr_leaf.text = kwargs.pop('address')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcs_virtual_ip_address_inband_interface_ve(self, **kwargs):
    """Build config/vcs/virtual/ip/address/inband/interface/ve, keyed by address.

    NOTE: an identical method is defined earlier in this class; Python keeps
    this later definition (auto-generation artifact).

    Keyword Args:
        address (str): list-key text for the address leaf (required).
        ve (str): text for the ve leaf (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    vcs_el = ET.SubElement(root, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    addr_list = ET.SubElement(ET.SubElement(ET.SubElement(vcs_el, "virtual"), "ip"), "address")
    addr_key = ET.SubElement(addr_list, "address")
    addr_key.text = kwargs.pop('address')
    iface_el = ET.SubElement(ET.SubElement(addr_list, "inband"), "interface")
    ve_leaf = ET.SubElement(iface_el, "ve")
    ve_leaf.text = kwargs.pop('ve')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcs_virtual_ipv6_address_ipv6address(self, **kwargs):
    """Build config/vcs/virtual/ipv6/address/ipv6address and pass the tree to the callback.

    NOTE: an identical method is defined earlier in this class; Python keeps
    this later definition (auto-generation artifact).

    Keyword Args:
        ipv6address (str): leaf text (required).
        callback (callable): receives the element; defaults to ``self._callback``.

    Returns:
        The callback's return value.
    """
    root = ET.Element("config")
    vcs_el = ET.SubElement(root, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    addr_el = ET.SubElement(ET.SubElement(ET.SubElement(vcs_el, "virtual"), "ipv6"), "address")
    leaf = ET.SubElement(addr_el, "ipv6address")
    leaf.text = kwargs.pop('ipv6address')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def vcs_virtual_fabric_vfab_enable(self, **kwargs):
    """Build the NETCONF config element for vcs/virtual-fabric/vfab-enable.

    Keyword Args:
        callback: optional override for ``self._callback``.
    """
    config = ET.Element("config")
    vcs_el = ET.SubElement(config, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    # Presence-only leaf: no text is assigned.
    ET.SubElement(ET.SubElement(vcs_el, "virtual-fabric"), "vfab-enable")
    run = kwargs.pop('callback', self._callback)
    return run(config)
def vcsNodeState_originator_switch_info_switchIdentifier(self, **kwargs):
    """Build the NETCONF element for vcsNodeState/originator-switch-info/switchIdentifier.

    Keyword Args:
        switchIdentifier: text for the leaf.
        callback: optional override for ``self._callback``.
    """
    config = ET.Element("config")
    node_state = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    info = ET.SubElement(node_state, "originator-switch-info")
    ET.SubElement(info, "switchIdentifier").text = kwargs.pop('switchIdentifier')
    run = kwargs.pop('callback', self._callback)
    return run(config)
def vcsNodeState_originator_switch_info_switchVcsId(self, **kwargs):
    """Build the NETCONF element for vcsNodeState/originator-switch-info/switchVcsId.

    Keyword Args:
        switchVcsId: text for the leaf.
        callback: optional override for ``self._callback``.
    """
    config = ET.Element("config")
    node_state = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    info = ET.SubElement(node_state, "originator-switch-info")
    ET.SubElement(info, "switchVcsId").text = kwargs.pop('switchVcsId')
    run = kwargs.pop('callback', self._callback)
    return run(config)
def vcsNodeState_originator_switch_info_switchIpV4Address(self, **kwargs):
    """Build the NETCONF element for vcsNodeState/originator-switch-info/switchIpV4Address.

    Keyword Args:
        switchIpV4Address: text for the leaf.
        callback: optional override for ``self._callback``.
    """
    config = ET.Element("config")
    node_state = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    info = ET.SubElement(node_state, "originator-switch-info")
    ET.SubElement(info, "switchIpV4Address").text = kwargs.pop('switchIpV4Address')
    run = kwargs.pop('callback', self._callback)
    return run(config)
def vcsNodeState_originator_switch_info_switchIpV6Address(self, **kwargs):
    """Build the NETCONF element for vcsNodeState/originator-switch-info/switchIpV6Address.

    Keyword Args:
        switchIpV6Address: text for the leaf.
        callback: optional override for ``self._callback``.
    """
    config = ET.Element("config")
    node_state = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    info = ET.SubElement(node_state, "originator-switch-info")
    ET.SubElement(info, "switchIpV6Address").text = kwargs.pop('switchIpV6Address')
    run = kwargs.pop('callback', self._callback)
    return run(config)
def vcsNodeState_nodeRbridgeid(self, **kwargs):
    """Build the NETCONF element for vcsNodeState/nodeRbridgeid.

    Keyword Args:
        nodeRbridgeid: text for the leaf.
        callback: optional override for ``self._callback``.
    """
    config = ET.Element("config")
    node_state = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    ET.SubElement(node_state, "nodeRbridgeid").text = kwargs.pop('nodeRbridgeid')
    run = kwargs.pop('callback', self._callback)
    return run(config)
def vcsNodeState_nodeState(self, **kwargs):
    """Build the NETCONF element for vcsNodeState/nodeState.

    Keyword Args:
        nodeState: text for the leaf.
        callback: optional override for ``self._callback``.
    """
    config = ET.Element("config")
    node_state = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    ET.SubElement(node_state, "nodeState").text = kwargs.pop('nodeState')
    run = kwargs.pop('callback', self._callback)
    return run(config)
| {
"content_hash": "a36c5521a0b1c3bd7f33ffb8dcfbcb9f",
"timestamp": "",
"source": "github",
"line_count": 1704,
"max_line_length": 129,
"avg_line_length": 42.98122065727699,
"alnum_prop": 0.6172583287820863,
"repo_name": "brocade/pynos",
"id": "83d057d7c9a5b55667d02e30cdc336367d65c074",
"size": "73262",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pynos/versions/ver_7/ver_7_0_0/yang/brocade_vcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "44939907"
}
],
"symlink_target": ""
} |
"""
Interface for openaps device "pump"
TODO: Document exceptions
"""
from datetime import datetime
from datetime import timedelta
from dateutil.parser import parse
from itertools import cycle
import json
from cachetools import lru_cache
from cachetools import ttl_cache
#
# Caching
#
# Stagger long-lived cache TTLs with a per-cache offset so that caches
# created together don't all expire during the same 5-minute polling cycle.
# NOTE(review): the offsets (-49..-7, step 7) are added to TTLs expressed in
# seconds, so the scatter is seconds apart — the original comment's
# "6-minute intervals" matches neither the step nor the units; confirm intent.
_cache_scatter_minutes = cycle(range(-49, 0, 7))
def _proxy_cache(from_func, to_func):
"""Proxies the cache management calls from a function generated by cachedfunc to another function
:param from_func:
:type from_func: function
:param to_func:
:type to_func: function
"""
to_func.cache_info = from_func.cache_info
to_func.cache_clear = from_func.cache_clear
class _CacheInfo():
def __init__(self):
pass
def __call__(self):
import inspect
module = inspect.getmodule(self.__class__)
members = inspect.getmembers(module, lambda value: hasattr(value, 'cache_info'))
return {name: value.cache_info() for name, value in members if not name.startswith('_')}
cache_info = _CacheInfo()
#
# Pump data
#
@ttl_cache(ttl=24 * 60 * 60 + next(_cache_scatter_minutes))
def basal_schedule():
    """Return the pump's currently-selected basal profile schedule.

    :return:
    :rtype: list(dict)
    """
    raw_json = _pump_output("read_selected_basal_profile")
    return json.loads(raw_json)
@ttl_cache(ttl=24 * 60 * 60 + next(_cache_scatter_minutes))
def _carb_ratio_schedule():
    """Return the pump's carb-ratio schedule entries.

    :return:
    :rtype: list(dict(str, float))
    """
    response = json.loads(_pump_output("read_carb_ratios"))
    return response["schedule"]
def carb_ratio_at_time(pump_time):
    """Returns the carb ratio at a given time of day on the pump clock

    Note that the parsing here is only applicable to MMX23 models and later.

    :param pump_time: time of day on the pump's clock
    :type pump_time: datetime.time
    :return: the carb ratio in effect at that time
    :rtype: float
    :raises IndexError: when no schedule entry covers the requested time
    """
    # Express the time of day as fractional minutes since midnight, matching
    # the units of each entry's "offset" field.
    pump_time_minutes = pump_time.hour * 60.0 + pump_time.minute + pump_time.second / 60.0
    carb_ratio_schedule = _carb_ratio_schedule()
    ratio = None
    # Keep the ratio of the last entry whose offset is <= the requested time,
    # stopping at the first entry that starts later.
    # NOTE(review): assumes entries are sorted by ascending "offset" —
    # confirm against the pump output.
    for ratio_dict in carb_ratio_schedule:
        if pump_time_minutes < ratio_dict["offset"]:
            break
        else:
            ratio = ratio_dict["ratio"]
    if ratio is None:
        raise IndexError("No carb ratio found at time {}".format(pump_time_minutes))
    return ratio


# Expose the underlying schedule cache's management hooks on the public name.
_proxy_cache(_carb_ratio_schedule, carb_ratio_at_time)
def read_clock():
    """Returns the current date and time from the pump's system clock.

    :return: the pump clock value as parsed from the pump's JSON output
    :rtype: str
    """
    clock_json = _pump_output("read_clock")
    return json.loads(clock_json)
@lru_cache(maxsize=1)
def _latest_sensor_glucose_entry_in_range(from_datetime, to_datetime):
    """Returns the latest sensor glucose history entry in the specified range

    :param from_datetime: inclusive start of the window
    :type from_datetime: datetime.datetime
    :param to_datetime: inclusive end of the window
    :type to_datetime: datetime.datetime
    :return: A dictionary describing the glucose reading, or None if no glucose readings were found
    :rtype: dict|NoneType
    """
    # Ask the pump which glucose-history pages cover the window...
    glucose_pages_dict = json.loads(
        _pump_output(
            "filter_glucose_date",
            from_datetime.isoformat(),
            to_datetime.isoformat()
        )
    )
    last_page = glucose_pages_dict["end"]
    # ...then read only the newest relevant page.
    glucose_history = json.loads(_pump_output("read_glucose_data", str(last_page)))
    # Walk newest-to-oldest, keeping only sensor (GlucoseSensorData) and
    # calibration (CalBGForGH) entries.
    glucose_iterator = (x for x in reversed(glucose_history) if x["name"] in ("GlucoseSensorData",
                                                                              "CalBGForGH"))
    last_datetime = to_datetime
    while from_datetime <= last_datetime:
        try:
            glucose_dict = next(glucose_iterator)
        except StopIteration:
            break
        last_datetime = parse(glucose_dict["date"])
        # Sensor entries carry "sgv"; calibration entries carry "amount".
        amount = glucose_dict.get("sgv", glucose_dict.get("amount", 0))
        # Non-positive readings are skipped as invalid.
        if amount > 0 and from_datetime <= last_datetime <= to_datetime:
            return glucose_dict
    # Falls through (returns None) when nothing in range qualifies.
def _latest_meter_glucose_entry_in_range(from_datetime, to_datetime):
    """Return the latest meter (CalBGForPH) glucose entry in the given range.

    :param from_datetime: inclusive start of the window
    :type from_datetime: datetime.datetime
    :param to_datetime: inclusive end of the window
    :type to_datetime: datetime.datetime
    :return: A dictionary describing the glucose reading, or None if no glucose readings were found
    :rtype: dict|NoneType
    """
    for entry in history_in_range(from_datetime, to_datetime):
        if entry.get("_type") != "CalBGForPH":
            continue
        amount = entry.get("amount", 0)
        if amount > 0 and from_datetime <= parse(entry["timestamp"]) <= to_datetime:
            return entry
def glucose_level_at_datetime(pump_datetime):
    """Returns the most-recent glucose level at a specified time in the sensor and history

    Returns None if no glucose readings were recorded in the 15 minutes before `pump_datetime`

    :param pump_datetime: point in time on the pump's clock
    :type pump_datetime: datetime.datetime
    :return: A tuple containing the most-recent glucose level (mg/dL), and its timestamp, or Nones
    :rtype: tuple(int|NoneType, datetime.datetime|NoneType)
    """
    # truncate the seconds to create a 60s ttl (keeps the cached lookup's
    # key stable for a minute at a time)
    to_datetime = pump_datetime.replace(second=0, microsecond=0)
    from_datetime = to_datetime - timedelta(minutes=15)
    # Prefer sensor data...
    glucose_history_dict = _latest_sensor_glucose_entry_in_range(from_datetime, to_datetime) or {}
    amount = glucose_history_dict.get("sgv", glucose_history_dict.get("amount"))
    if amount is not None:
        # Sensor entries keep their timestamp under "date".
        glucose_datetime = glucose_history_dict["date"]
    else:
        # ...and fall back to meter entries, which use "timestamp".
        glucose_history_dict = _latest_meter_glucose_entry_in_range(from_datetime,
                                                                    to_datetime) or {}
        amount = glucose_history_dict.get("amount")
        glucose_datetime = glucose_history_dict.get("timestamp")
    if amount is not None:
        return amount, parse(glucose_datetime)
    else:
        return amount, None


# The sensor lookup dominates caching behavior, so surface its hooks here.
_proxy_cache(_latest_sensor_glucose_entry_in_range, glucose_level_at_datetime)
@lru_cache(maxsize=2)
def _history_in_range(from_datetime, to_datetime):
    """Read pump history pages until entries reach back past `from_datetime`.

    :param from_datetime: earliest timestamp the history must cover
    :type from_datetime: datetime.datetime
    :param to_datetime: initial cursor value (newest end of the window)
    :type to_datetime: datetime.datetime
    :return: all entries read, oldest page last; may extend beyond the window
    :rtype: list(dict)
    """
    next_page_num = 0
    last_datetime = to_datetime
    history = []
    # Expect entries may be out-of-order up to this amount
    time_discrepancy = timedelta(minutes=5)
    while last_datetime + time_discrepancy > from_datetime:
        history.extend(json.loads(_pump_output("read_history_data", str(next_page_num))))
        next_page_num += 1
        if len(history) > 0:
            last_entry = history[-1]
            try:
                # Entries without a parseable "timestamp" leave the cursor
                # unchanged.
                last_datetime = parse(last_entry.get("timestamp"))
            except (AttributeError, ValueError):
                pass
    # NOTE(review): if pages come back empty or without parseable timestamps,
    # last_datetime never advances and this loop will not terminate — confirm
    # the pump bounds the page count.
    return history
def history_in_range(from_datetime, to_datetime):
    """Return pump history entries between the two datetimes.

    Seconds and microseconds are truncated before the cached lookup, which
    gives the underlying LRU cache an effective 60s TTL.
    """
    start = from_datetime.replace(second=0, microsecond=0)
    end = to_datetime.replace(second=0, microsecond=0)
    return _history_in_range(start, end)


_proxy_cache(_history_in_range, history_in_range)
@ttl_cache(ttl=24 * 60 * 60 + next(_cache_scatter_minutes))
def insulin_action_curve():
    """Return the pump's configured "insulin_action_curve" setting.

    :return:
    :rtype: int
    """
    settings = json.loads(_pump_output("read_settings"))
    return settings["insulin_action_curve"]
@ttl_cache(ttl=24 * 60 * 60 + next(_cache_scatter_minutes))
def _insulin_sensitivity_schedule():
    """Return the pump's insulin-sensitivity schedule entries.

    NOTE: "read_insulin_sensitivies" is the upstream command's (misspelled)
    name and must not be corrected here.
    """
    response = json.loads(_pump_output("read_insulin_sensitivies"))
    return response["sensitivities"]
def insulin_sensitivity_at_time(pump_time):
    """Returns the insulin sensitivity at a given time of day on the pump clock

    :param pump_time:
    :type pump_time: datetime.time
    :return:
    :rtype: int
    """
    # TODO: Support a sensitivity schedule
    schedule = _insulin_sensitivity_schedule()
    return schedule[0]["sensitivity"]


_proxy_cache(_insulin_sensitivity_schedule, insulin_sensitivity_at_time)
def _pump_output(*args):
    """Executes an `openaps use` command against the `pump` device

    TODO: Expect `report` calls instead of `use` calls

    :param args: additional arguments for the `use pump` invocation
    :type args: tuple(str)
    :return: the command's combined stdout/stderr
    :rtype: str
    :raises CalledProcessError:
    """
    from subprocess import STDOUT
    from subprocess import check_output

    command = ["openaps", "use", "pump"] + list(args)
    return check_output(command, stderr=STDOUT)
| {
"content_hash": "facae421b7ab394b201958320b6c09f3",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 101,
"avg_line_length": 29.646643109540637,
"alnum_prop": 0.6537544696066746,
"repo_name": "loudnate/rpi-openaps-playground",
"id": "9fbd473fef129492ce34dc48141ef42e62e24d3c",
"size": "8390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pump.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6448"
},
{
"name": "JavaScript",
"bytes": "16497"
},
{
"name": "Python",
"bytes": "13702"
},
{
"name": "Shell",
"bytes": "544"
}
],
"symlink_target": ""
} |
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
_data_path_doc)
has_brainstorm_data = partial(has_dataset, name='brainstorm.bst_resting')
_description = u"""
URL: http://neuroimage.usc.edu/brainstorm/DatasetResting
- One subject
- Two runs of 10 min of resting state recordings
- Eyes open
"""
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
              verbose=None):  # noqa: D103
    # Docstring is assigned from the shared template after this definition.
    return _data_path(name='brainstorm', archive_name='bst_resting.tar.gz',
                      path=path, force_update=force_update,
                      update_path=update_path, download=download)
# Fill the shared docstring template with this dataset's name and its
# environment-config key, then specialize the wording for the bst_resting
# variant before attaching it to data_path.
_data_path_doc = _data_path_doc.format(name='brainstorm',
                                       conf='MNE_DATASETS_BRAINSTORM_DATA'
                                            '_PATH')
_data_path_doc = _data_path_doc.replace('brainstorm dataset',
                                        'brainstorm (bst_resting) dataset')
data_path.__doc__ = _data_path_doc
def get_version():  # noqa: D103
    # Docstring is assigned from the shared template just below.
    dataset = 'brainstorm.bst_resting'
    return _get_version(dataset)


get_version.__doc__ = _version_doc.format(name='brainstorm')
def description():
    """Get description of brainstorm (bst_resting) dataset."""
    lines = _description.splitlines()
    for line in lines:
        print(line)
| {
"content_hash": "d8449123132e8489dddf382e63755fe6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 32.674418604651166,
"alnum_prop": 0.61067615658363,
"repo_name": "adykstra/mne-python",
"id": "f04da4bb17795ed2adc117989ee110fab720fa86",
"size": "1490",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mne/datasets/brainstorm/bst_resting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6001033"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class LoadBalancersOperations(object):
    """LoadBalancersOperations operations.

    Auto-generated client operations for the 2016-09-01 load-balancer API.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2016-09-01".
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned API version used in every request's api-version parameter.
        self.api_version = "2016-09-01"

        self.config = config

    def delete(
            self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified load balancer.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id for service-side request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Initial DELETE that starts the long-running operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the service-supplied status link until the operation settles.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200/202/204 are the accepted codes for delete; anything else
            # is surfaced as a CloudError with the service request id attached.
            if response.status_code not in [204, 202, 200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw=True bypasses polling: send once and wrap the first response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def get(
            self, resource_group_name, load_balancer_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets the specified load balancer.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`LoadBalancer
         <azure.mgmt.network.v2016_09_01.models.LoadBalancer>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('LoadBalancer', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def create_or_update(
            self, resource_group_name, load_balancer_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a load balancer.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param parameters: Parameters supplied to the create or update load
         balancer operation.
        :type parameters: :class:`LoadBalancer
         <azure.mgmt.network.v2016_09_01.models.LoadBalancer>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`LoadBalancer
         <azure.mgmt.network.v2016_09_01.models.LoadBalancer>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'LoadBalancer')

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running create/update.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 (updated) and 201 (created) both return a LoadBalancer body.
            if response.status_code not in [201, 200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 201:
                deserialized = self._deserialize('LoadBalancer', response)
            if response.status_code == 200:
                deserialized = self._deserialize('LoadBalancer', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        if raw:
            # raw=True bypasses polling: send once and wrap the first response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all the load balancers in a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`LoadBalancerPaged
         <azure.mgmt.network.v2016_09_01.models.LoadBalancerPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Called by the paged collection: fetches either the first page
            # (no next_link) or a continuation page (next_link URL as-is).
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # Continuation links already embed their query string.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the load balancers in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`LoadBalancerPaged
         <azure.mgmt.network.v2016_09_01.models.LoadBalancerPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Same paging contract as list_all, scoped to one resource group.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
| {
"content_hash": "1e3e435ad9f46d3f33ae7e7fe03e2cf0",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 144,
"avg_line_length": 44.83460559796438,
"alnum_prop": 0.6332009080590238,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "afb0b57da42acb528aec949240187c65ab26258f",
"size": "18094",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/operations/load_balancers_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
} |
# io.UnsupportedOperation is not available on all supported Pythons; fall
# back to a placeholder so the name always exists.
try:
    from io import UnsupportedOperation
except ImportError:
    # NOTE(review): object() is an *instance*, not an exception class — an
    # `except UnsupportedOperation:` clause using this fallback would raise
    # TypeError; confirm the name is only used where that cannot happen.
    UnsupportedOperation = object()
import logging
import mimetypes
mimetypes.init()
# Register/override extensions that the platform's mimetype tables may miss.
mimetypes.types_map['.dwg']='image/x-dwg'
mimetypes.types_map['.ico']='image/x-icon'
mimetypes.types_map['.bz2']='application/x-bzip2'
mimetypes.types_map['.gz']='application/x-gzip'
import os
import re
import stat
import time
import cherrypy
from cherrypy._cpcompat import ntob, unquote
from cherrypy.lib import cptools, httputil, file_generator_limited
from cherrypy.lib.static import _serve_fileobj
def serve_file(path, content_type=None, disposition=None, name=None, content_length=None,debug=False):
    """Set status, headers, and body in order to serve the given path.

    The Content-Type header will be set to the content_type arg, if provided.
    If not provided, the Content-Type will be guessed by the file extension
    of the 'path' argument.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, it will be set
    to the basename of path. If disposition is None, no Content-Disposition
    header will be written.

    If content_length is falsy, Content-Length is taken from the file's
    on-disk size.  NOTE(review): content_length looks like the local
    addition over cherrypy's stock serve_file — confirm which callers
    pass it.
    """
    response = cherrypy.serving.response

    # If path is relative, users should fix it by making path absolute.
    # That is, CherryPy should not guess where the application root is.
    # It certainly should *not* use cwd (since CP may be invoked from a
    # variety of paths). If using tools.staticdir, you can make your relative
    # paths become absolute by supplying a value for "tools.staticdir.root".
    if not os.path.isabs(path):
        msg = "'%s' is not an absolute path." % path
        if debug:
            cherrypy.log(msg, 'TOOLS.STATICFILE')
        raise ValueError(msg)

    try:
        st = os.stat(path)
    except OSError:
        if debug:
            cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Check if path is a directory.
    if stat.S_ISDIR(st.st_mode):
        # Let the caller deal with it as they like.
        if debug:
            cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Set the Last-Modified response header, so that
    # modified-since validation code can work.
    response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
    # May short-circuit with a 304 based on the request's If-Modified-Since.
    cptools.validate_since()

    if content_type is None:
        # Set content-type based on filename extension
        ext = ""
        i = path.rfind('.')
        if i != -1:
            ext = path[i:].lower()
        content_type = mimetypes.types_map.get(ext, None)
    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        if name is None:
            name = os.path.basename(path)
        cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    if not content_length :
        content_length = st.st_size
    # The file object is handed off to _serve_fileobj, which streams it and
    # owns its lifetime from here on.
    fileobj = open(path, 'rb')
    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
"content_hash": "85bbf9898b9ce2dcf31d26034d7c55ea",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 102,
"avg_line_length": 36.638297872340424,
"alnum_prop": 0.6573751451800233,
"repo_name": "sjolicoeur/Annelia",
"id": "470b57b719e256b311778dd5de2f32e3786963cf",
"size": "3539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "annelia/monkey_staticserve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13705"
}
],
"symlink_target": ""
} |
import os
import unittest
import boto3
from haascli.haas import cli
from click.testing import CliRunner
from moto import mock_cloudformation
from utils import create_stack
from haascli import ROOT_DIR
class TestStackDelete(unittest.TestCase):
    '''Exercise the ``stack delete`` CLI subcommand against a moto-mocked
    CloudFormation backend, verifying behaviour for unknown, known, and
    already-deleted stack names.
    '''
    def setUp(self):
        # create runner
        self.runner = CliRunner()
        # Start the CloudFormation mock; it stays active for this test case.
        mock = mock_cloudformation()
        mock.start()
        self.mock_client = boto3.client('cloudformation')
        # open template body
        f = open(os.path.join(ROOT_DIR, 'tests', 'mock.json'), 'r')
        self.template_body = f.read()
        f.close()  # use explicit close to suppress moto warning
    def tearDown(self):
        pass
    @mock_cloudformation
    def test_delete(self):
        # first created these stacks in setUp, but didn't carry over
        # "create" two stacks
        create_stack('tank', 'never', self.template_body, self.mock_client)
        create_stack('beef', 'canoe', self.template_body, self.mock_client)
        # use incorrect name
        r = self.runner.invoke(cli, ['stack', 'delete', 'booty'])
        # deleting an unknown stack must leave both stacks in place
        r = self.runner.invoke(cli, ['stack', 'list'])
        self.assertEqual(0, r.exit_code)
        statuses = r.output.strip().split('\n')
        self.assertEqual(2, len(statuses))
        self.assertTrue(statuses[0].startswith('tank'))
        self.assertTrue(statuses[1].startswith('beef'))
        # use correct name
        r = self.runner.invoke(cli, ['stack', 'delete', 'beef'])
        # only 'tank' should remain after deleting 'beef'
        r = self.runner.invoke(cli, ['stack', 'list'])
        self.assertEqual(0, r.exit_code)
        statuses = r.output.strip().split('\n')
        self.assertEqual(1, len(statuses))
        self.assertTrue(statuses[0].startswith('tank'))
        # try it again
        r = self.runner.invoke(cli, ['stack', 'delete', 'beef'])
        # deleting an already-deleted stack must not change the listing
        r = self.runner.invoke(cli, ['stack', 'list'])
        self.assertEqual(0, r.exit_code)
        statuses = r.output.strip().split('\n')
        self.assertEqual(1, len(statuses))
        self.assertTrue(statuses[0].startswith('tank'))
        self.assertTrue('beef' not in r.output)
        # remove last one
        r = self.runner.invoke(cli, ['stack', 'delete', 'tank'])
        # check it
        r = self.runner.invoke(cli, ['stack', 'list'])
        self.assertEqual(0, r.exit_code)
        statuses = r.output.strip().split('\n')
        self.assertTrue('tank' not in r.output)
| {
"content_hash": "30ac149fffaf25cd43426360ca6aaf52",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 77,
"avg_line_length": 34.08,
"alnum_prop": 0.602112676056338,
"repo_name": "vin0110/haas",
"id": "3dc90bc59ab7267f54f780ba9795da635a348876",
"size": "2556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_stack_delete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "89257"
},
{
"name": "Shell",
"bytes": "4895"
}
],
"symlink_target": ""
} |
import logging
from chatterbot import languages
from chatterbot.tagging import PosLemmaTagger
class StorageAdapter(object):
    """
    This is an abstract class that represents the interface
    that all storage adapters should implement.
    Concrete adapters must override the methods below that raise
    ``AdapterMethodNotImplementedError`` and must provide
    ``get_<name>_model`` / ``get_<name>_object`` accessors, which are
    dispatched to dynamically by :meth:`get_model` and :meth:`get_object`.
    """
    def __init__(self, *args, **kwargs):
        """
        Initialize common attributes shared by all storage adapters.
        :param str tagger_language: The language that the tagger uses to remove stopwords.
        """
        self.logger = kwargs.get('logger', logging.getLogger(__name__))
        # The tagger class is injectable via the 'tagger' kwarg (useful for
        # tests); it defaults to PosLemmaTagger.
        Tagger = kwargs.get('tagger', PosLemmaTagger)
        self.tagger = Tagger(language=kwargs.get(
            'tagger_language', languages.ENG
        ))
    def get_model(self, model_name):
        """
        Return the model class for a given model name.
        model_name is case insensitive.
        """
        # Dispatch to the adapter-specific accessor, e.g. get_statement_model.
        get_model_method = getattr(self, 'get_%s_model' % (
            model_name.lower(),
        ))
        return get_model_method()
    def get_object(self, object_name):
        """
        Return the class for a given object name.
        object_name is case insensitive.
        """
        # Dispatch to the adapter-specific accessor, e.g. get_statement_object.
        get_model_method = getattr(self, 'get_%s_object' % (
            object_name.lower(),
        ))
        return get_model_method()
    def get_statement_object(self):
        """
        Return the Statement class, with its field-name list extended by any
        extra field names declared on this adapter's statement model.
        """
        from chatterbot.conversation import Statement
        StatementModel = self.get_model('statement')
        Statement.statement_field_names.extend(
            StatementModel.extra_statement_field_names
        )
        return Statement
    def count(self):
        """
        Return the number of entries in the database.
        """
        raise self.AdapterMethodNotImplementedError(
            'The `count` method is not implemented by this adapter.'
        )
    def remove(self, statement_text):
        """
        Removes the statement that matches the input text.
        Removes any responses from statements where the response text matches
        the input text.
        """
        raise self.AdapterMethodNotImplementedError(
            'The `remove` method is not implemented by this adapter.'
        )
    def filter(self, **kwargs):
        """
        Returns a list of objects from the database.
        The kwargs parameter can contain any number
        of attributes. Only objects which contain
        all listed attributes and in which all values
        match for all listed attributes will be returned.
        :param page_size: The maximum number of records to load into
            memory at once when returning results.
            Defaults to 1000
        :param order_by: The field name that should be used to determine
            the order that results are returned in.
            Defaults to None
        :param tags: A list of tags. When specified, the results will only
            include statements that have a tag in the provided list.
            Defaults to [] (empty list)
        :param exclude_text: If the ``text`` of a statement is an exact match
            for the value of this parameter the statement will not be
            included in the result set.
            Defaults to None
        :param exclude_text_words: If the ``text`` of a statement contains a
            word from this list then the statement will not be included in
            the result set.
            Defaults to [] (empty list)
        :param persona_not_startswith: If the ``persona`` field of a
            statement starts with the value specified by this parameter,
            then the statement will not be returned in the result set.
            Defaults to None
        :param search_text_contains: If the ``search_text`` field of a
            statement contains a word that is in the string provided to
            this parameter, then the statement will be included in the
            result set.
            Defaults to None
        """
        raise self.AdapterMethodNotImplementedError(
            'The `filter` method is not implemented by this adapter.'
        )
    def create(self, **kwargs):
        """
        Creates a new statement matching the keyword arguments specified.
        Returns the created statement.
        """
        raise self.AdapterMethodNotImplementedError(
            'The `create` method is not implemented by this adapter.'
        )
    def create_many(self, statements):
        """
        Creates multiple statement entries.
        """
        raise self.AdapterMethodNotImplementedError(
            'The `create_many` method is not implemented by this adapter.'
        )
    def update(self, statement):
        """
        Modifies an entry in the database.
        Creates an entry if one does not exist.
        """
        raise self.AdapterMethodNotImplementedError(
            'The `update` method is not implemented by this adapter.'
        )
    def get_random(self):
        """
        Returns a random statement from the database.
        """
        raise self.AdapterMethodNotImplementedError(
            'The `get_random` method is not implemented by this adapter.'
        )
    def drop(self):
        """
        Drop the database attached to a given adapter.
        """
        raise self.AdapterMethodNotImplementedError(
            'The `drop` method is not implemented by this adapter.'
        )
    # Exceptions are nested so adapters can raise them via `self.` without
    # importing this module.
    class EmptyDatabaseException(Exception):
        def __init__(self, message=None):
            default = 'The database currently contains no entries. At least one entry is expected. You may need to train your chat bot to populate your database.'
            super().__init__(message or default)
    class AdapterMethodNotImplementedError(NotImplementedError):
        """
        An exception to be raised when a storage adapter method has not been implemented.
        Typically this indicates that the method should be implement in a subclass.
        """
        pass
| {
"content_hash": "df43636a51aebd2cda2ee66041c8fc16",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 162,
"avg_line_length": 33.64971751412429,
"alnum_prop": 0.6180322364002686,
"repo_name": "gunthercox/ChatterBot",
"id": "335a65fa6a012ad218036a3a7bef35e3c0847dee",
"size": "5956",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chatterbot/storage/storage_adapter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "375133"
}
],
"symlink_target": ""
} |
import requests
def all(base_url='http://arkapi:5001'):
    """Fetch the full page listing from the ark API.

    NOTE: the name shadows the builtin ``all`` but is kept for
    backward compatibility with existing callers.

    :param base_url: root URL of the ark API service; defaults to the
        service hostname used in deployment, so existing ``all()``
        callers are unaffected.
    :return: the raw ``requests.Response`` of ``GET <base_url>/page``.
    """
    r = requests.get(base_url + '/page')
    return r
def find(path, base_url='http://arkapi:5001'):
    """Look up a single page by its path via the ark API.

    :param path: page path to look up; appended verbatim to the URL.
    :param base_url: root URL of the ark API service; defaults to the
        service hostname used in deployment, so existing ``find(path)``
        callers are unaffected.
    :return: the raw ``requests.Response`` of ``GET <base_url>/page/find/<path>``.
    """
    r = requests.get(base_url + '/page/find/' + path)
    return r
| {
"content_hash": "1173e4281df4917c9b50d7308a00e00c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 20,
"alnum_prop": 0.6277777777777778,
"repo_name": "allison-knauss/arkweb",
"id": "7371490a82e721ba66742590b15c32a1bea06e9a",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/clients/page_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "109"
},
{
"name": "HTML",
"bytes": "7126"
},
{
"name": "Python",
"bytes": "14580"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Trace treadmill application events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import socket
import sys
import click
from treadmill import context
from treadmill import discovery
from treadmill import cli
_LOGGER = logging.getLogger()
def _iterate(discovery_iter, check_state, sep):
"""Iterate and output discovered endpoints."""
for (app, hostport) in discovery_iter:
if hostport:
state = ''
if check_state:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
try:
host, port = hostport.split(':')
sock.connect((host, int(port)))
sock.close()
state = 'up'
except socket.error:
state = 'down'
record = [app, hostport]
if state:
record.append(state)
output = sep.join(record)
else:
output = app
print(output.strip())
sys.stdout.flush()
def init():
    """Return top level command handler.

    Builds and returns the click command for endpoint discovery; the
    --cell/--zookeeper options are consumed by cli.handle_context_opt
    (expose_value=False) and never reach the handler.
    """
    @click.command()
    @click.option('--cell', required=True,
                  envvar='TREADMILL_CELL',
                  callback=cli.handle_context_opt,
                  expose_value=False)
    @click.option('--zookeeper', required=False,
                  envvar='TREADMILL_ZOOKEEPER',
                  callback=cli.handle_context_opt,
                  expose_value=False)
    @click.option('--watch', is_flag=True, default=False)
    @click.option('--check-state', is_flag=True, default=False)
    @click.option('--separator', default=' ')
    @click.argument('app')
    @click.argument('endpoint', required=False)
    def top(watch, check_state, separator, app, endpoint):
        """Discover container endpoints."""
        # Default to all endpoints when none is given.
        if not endpoint:
            endpoint = '*'
        discovery_iter = discovery.iterator(
            context.GLOBAL.zk.conn, app, endpoint, watch)
        _iterate(discovery_iter, check_state, separator)
    return top
| {
"content_hash": "498e7e1c7f0ab2d8ef033af196d21e68",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 72,
"avg_line_length": 28.294871794871796,
"alnum_prop": 0.57091073855913,
"repo_name": "Morgan-Stanley/treadmill",
"id": "aa1cec58033744f072e0730daedd123f4fa51a36",
"size": "2207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/cli/admin/discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3750"
},
{
"name": "Python",
"bytes": "3372983"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "51646"
}
],
"symlink_target": ""
} |
""" run test suites written for nose. """
import pytest, py
import inspect
import sys
def pytest_runtest_makereport(__multicall__, item, call):
    """Translate nose's SkipTest exception into a py.test skip so the
    report shows a skip rather than an error. Only acts when the `nose`
    module has actually been imported by the test session."""
    SkipTest = getattr(sys.modules.get('nose', None), 'SkipTest', None)
    if SkipTest:
        if call.excinfo and call.excinfo.errisinstance(SkipTest):
            # let's substitute the excinfo with a py.test.skip one
            call2 = call.__class__(lambda: py.test.skip(str(call.excinfo.value)), call.when)
            call.excinfo = call2.excinfo
@pytest.mark.trylast
def pytest_runtest_setup(item):
    """Emulate nose's setup protocol: invoke generator-level, instance-level
    and object/module-level ``setup`` callables when present. Runs trylast so
    regular pytest fixtures are applied first."""
    if isinstance(item, (pytest.Function)):
        if isinstance(item.parent, pytest.Generator):
            gen = item.parent
            # Run generator setup only once per generator; the flag marks it done.
            if not hasattr(gen, '_nosegensetup'):
                call_optional(gen.obj, 'setup')
                if isinstance(gen.parent, pytest.Instance):
                    call_optional(gen.parent.obj, 'setup')
                gen._nosegensetup = True
        if not call_optional(item.obj, 'setup'):
            # call module level setup if there is no object level one
            call_optional(item.parent.obj, 'setup')
def pytest_runtest_teardown(item):
    """Mirror of pytest_runtest_setup: run object-level ``teardown`` if
    present, otherwise fall back to the module-level one."""
    if isinstance(item, pytest.Function):
        if not call_optional(item.obj, 'teardown'):
            call_optional(item.parent.obj, 'teardown')
    #if hasattr(item.parent, '_nosegensetup'):
    #    #call_optional(item._nosegensetup, 'teardown')
    #    del item.parent._nosegensetup
def pytest_make_collect_report(collector):
    """Run a generator's ``setup`` before its tests are collected, matching
    nose's behavior for generator test functions."""
    if isinstance(collector, pytest.Generator):
        call_optional(collector.obj, 'setup')
def call_optional(obj, name):
    """Call ``obj.<name>()`` when it exists, is a plain callable, and is not
    a pytest fixture function.

    Returns True when the method was invoked; a falsy value otherwise.
    """
    method = getattr(obj, name, None)
    if method is None:
        return
    if hasattr(method, "_pytestfixturefunction"):
        return
    if not py.builtin.callable(method):
        return
    # If there's any problems allow the exception to raise rather than
    # silently ignoring them
    method()
    return True
| {
"content_hash": "950d00b319d897bf414518b52e6120ad",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 108,
"avg_line_length": 39.875,
"alnum_prop": 0.6405433646812957,
"repo_name": "jeppeter/pytest",
"id": "ef7b781839b9d1e4044207fb678b6d2900b5bf3f",
"size": "1914",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "_pytest/nose.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "798946"
}
],
"symlink_target": ""
} |
from .knufactor import *
| {
"content_hash": "7253c5cb759f616734f127e030ba12d0",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.76,
"repo_name": "KnuVerse/knuverse-sdk-python",
"id": "7ad7400e94faa46a877b20629f24f59d34e833d9",
"size": "25",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "knuverse/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45203"
}
],
"symlink_target": ""
} |
'''
Created on Nov, 2016
@author: hugo
'''
from __future__ import absolute_import
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Adadelta
from keras.models import load_model as load_keras_model
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from ..utils.keras_utils import Dense_tied, KCompetitive, contractive_loss, CustomModelCheckpoint
class AutoEncoder(object):
    """AutoEncoder for topic modeling.
    Parameters
    ----------
    input_size : vocabulary size (dimension of the bag-of-words input).
    dim : size of the encoded (topic) representation.
    comp_topk : number of winners for the k-competitive layer; disabled if None.
    ctype : competition type, one of None, 'kcomp', 'ksparse'.
    save_model : path used by the checkpoint callback for the best encoder.
    """
    def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'):
        self.input_size = input_size
        self.dim = dim
        self.comp_topk = comp_topk
        self.ctype = ctype
        self.save_model = save_model
        self.build()
    def build(self):
        # this is our input placeholder
        input_layer = Input(shape=(self.input_size,))
        # "encoded" is the encoded representation of the input
        # The encoder activation depends on the competition type.
        if self.ctype == None:
            act = 'sigmoid'
        elif self.ctype == 'kcomp':
            act = 'tanh'
        elif self.ctype == 'ksparse':
            act = 'linear'
        else:
            raise Exception('unknown ctype: %s' % self.ctype)
        encoded_layer = Dense(self.dim, activation=act, kernel_initializer="glorot_normal", name="Encoded_Layer")
        encoded = encoded_layer(input_layer)
        if self.comp_topk:
            print 'add k-competitive layer'
            encoded = KCompetitive(self.comp_topk, self.ctype)(encoded)
        # "decoded" is the lossy reconstruction of the input
        # add non-negativity contraint to ensure probabilistic interpretations
        # Decoder weights are tied to the encoder via Dense_tied.
        decoded = Dense_tied(self.input_size, activation='sigmoid', tied_to=encoded_layer, name='Decoded_Layer')(encoded)
        # this model maps an input to its reconstruction
        self.autoencoder = Model(outputs=decoded, inputs=input_layer)
        # this model maps an input to its encoded representation
        self.encoder = Model(outputs=encoded, inputs=input_layer)
        # create a placeholder for an encoded input
        encoded_input = Input(shape=(self.dim,))
        # retrieve the last layer of the autoencoder model
        decoder_layer = self.autoencoder.layers[-1]
        # create the decoder model
        self.decoder = Model(outputs=decoder_layer(encoded_input), inputs=encoded_input)
    def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None):
        # train_X / val_X are (input, target) pairs; for a plain autoencoder
        # both elements are typically the same matrix.
        optimizer = Adadelta(lr=2.)
        # optimizer = Adam()
        # optimizer = Adagrad()
        if contractive:
            print 'Using contractive loss, lambda: %s' % contractive
            self.autoencoder.compile(optimizer=optimizer, loss=contractive_loss(self, contractive))
        else:
            print 'Using binary crossentropy'
            self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy') # kld, binary_crossentropy, mse
        # CustomModelCheckpoint saves the *encoder* (not the full autoencoder)
        # whenever validation loss improves.
        self.autoencoder.fit(train_X[0], train_X[1],
                        epochs=nb_epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_data=(val_X[0], val_X[1]),
                        callbacks=[
                                ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                                EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                                CustomModelCheckpoint(self.encoder, self.save_model, monitor='val_loss', save_best_only=True, mode='auto')
                        ]
                        )
        return self
def save_ae_model(model, model_file):
    """Serialize a trained Keras model to `model_file`."""
    model.save(model_file)
def load_ae_model(model_file):
    """Load a model saved by save_ae_model, registering the custom
    KCompetitive layer so deserialization succeeds."""
    return load_keras_model(model_file, custom_objects={"KCompetitive": KCompetitive})
| {
"content_hash": "5ecbb1fd0496188f19b6776578c47bb8",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 142,
"avg_line_length": 39.101010101010104,
"alnum_prop": 0.6140532162231982,
"repo_name": "hugochan/KATE",
"id": "04a547c17b05e100b299b26383c85f04e823eab1",
"size": "3871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoencoder/core/ae.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "180465"
}
],
"symlink_target": ""
} |
from pyVmomi import vim, vmodl
from tools import cli, service_instance
def get_object(content, vimtype, name, disp=False):
    """
    Internal method to look up a vCenter managed object by name.
    :param content: ServiceInstance content (si.RetrieveContent()).
    :param vimtype: list of vim types to search, e.g. [vim.VirtualMachine].
    :param name: exact name of the object to find.
    :param disp: when True, print every candidate name while searching.
    :return: the first matching managed object, or None if not found.
    """
    obj = None
    container = content.viewManager.CreateContainerView(content.rootFolder,
                                                        vimtype,
                                                        True)
    try:
        for c in container.view:
            if disp:
                print("c.name:" + str(c.name))
            if c.name == name:
                obj = c
                break
    finally:
        # Destroy the server-side view; the original leaked one view object
        # on vCenter per lookup.
        container.Destroy()
    return obj
def collect_template_disks(vm):
    """
    Internal method to collect template disks
    :param vm: VM object
    :return: list of the VM's VirtualDisk devices
    """
    template_disks = []
    for device in vm.config.hardware.device:
        # isinstance replaces the brittle type-name string comparison and
        # also matches subclasses of VirtualDisk.
        if isinstance(device, vim.vm.device.VirtualDisk):
            datastore = device.backing.datastore
            print("device.deviceInfo.summary:" + device.deviceInfo.summary)
            print("datastore.summary.type:" + datastore.summary.type)
            if hasattr(device.backing, 'fileName'):
                disk_desc = str(device.backing.fileName)
                print("Disc Discription -- {}".format(disk_desc))
                # The datastore name is the bracketed prefix of the file name.
                drive = disk_desc.split("]")[0].replace("[", "")
                print("drive:" + drive)
                print("device.backing.fileName:" + device.backing.fileName)
                template_disks.append(device)
    return template_disks
def construct_locator(template_disks, datastore_dest_id):
    """
    Internal method to construct relocation locators for the disks.
    :param template_disks: list of VirtualDisk devices to relocate
    :param datastore_dest_id: managed object of the destination datastore
    :return: list of vim.vm.RelocateSpec.DiskLocator, one per disk
    """
    def _locator_for(position, disk):
        # Diagnostic output, kept identical to the original implementation.
        print("relocate index:" + str(position))
        print("disk:" + str(disk))
        disk_desc = str(disk.backing.fileName)
        drive = disk_desc.split("]")[0].replace("[", "")
        print("drive:" + drive)
        print("wdisk.backing.fileName:" + disk.backing.fileName)
        entry = vim.vm.RelocateSpec.DiskLocator()
        entry.diskBackingInfo = disk.backing
        entry.diskId = int(disk.key)
        entry.datastore = datastore_dest_id
        return entry
    return [_locator_for(i, d) for i, d in enumerate(template_disks)]
def relocate_vm(vm_name, content, host_dest, datastore_dest=None):
    """
    This method relocates vm to the host_dest across
    datacenters, clusters, datastores managed by a Vcenter
    Args:
        vm_name: name of the VM to relocate
        content: ServiceInstance content
        host_dest: destination ESX host name (compute vMotion) or None
        datastore_dest: destination datastore name (storage vMotion) or None
    Returns:
        (bool, str): success flag and a human-readable message
    """
    import time  # local import: small poll delay, keeps module imports untouched
    relocation_status = False
    message = "relocate_vm passed"
    try:
        vm = get_object(content, [vim.VirtualMachine], vm_name)
        current_host = vm.runtime.host.name
        print("vmotion_vm current_host:" + current_host)
        # Create Relocate Spec
        spec = vim.VirtualMachineRelocateSpec()
        # Check whether compute vmotion required and construct spec accordingly
        if host_dest is not None:
            if current_host == host_dest:
                raise Exception("WARNING:: destination_host can not equal "
                                "current_host")
            # Find destination host
            destination_host = get_object(content, [vim.HostSystem], host_dest)
            print("vmotion_vm destination_host:" + str(destination_host))
            spec.host = destination_host
            # Find destination Resource pool
            resource_pool = destination_host.parent.resourcePool
            print("vmotion_vm resource_pool:" + str(resource_pool))
            spec.pool = resource_pool
        # Check whether storage vmotion required and construct spec accordingly
        if datastore_dest is not None:
            # collect disks belong to the VM
            template_disks = collect_template_disks(vm)
            datastore_dest_id = get_object(content,
                                           [vim.Datastore],
                                           datastore_dest)
            spec.datastore = datastore_dest_id
            spec.disk = construct_locator(template_disks, datastore_dest_id)
        print("relocate_vm spec:" + str(spec))
        task = vm.RelocateVM_Task(spec)
        # Poll with a short sleep instead of busy-spinning; the original
        # `while ...: continue` loop pinned a CPU core for the whole vMotion.
        while task.info.state == vim.TaskInfo.State.running:
            time.sleep(1)
        # The original reported success unconditionally, even when the task
        # ended in the error state; verify the terminal state instead.
        if task.info.state != vim.TaskInfo.State.success:
            raise Exception("relocation task ended in state %s"
                            % task.info.state)
        relocation_status = True
    except Exception as e:
        message = "relocate_vm failed for vm:" + vm_name \
                  + " with error:" + str(e)
        print(message)
    return relocation_status, message
def main():
    """Entry point: parse CLI arguments, connect to vCenter, and relocate
    the requested VM to the target host and datastore.

    Returns 0 on success, -1 on a vmodl fault.
    """
    parser = cli.Parser()
    parser.add_required_arguments(
        cli.Argument.VM_NAME, cli.Argument.DATASTORE_NAME, cli.Argument.ESX_NAME)
    args = parser.get_args()
    si = service_instance.connect(args)
    try:
        content = si.RetrieveContent()
        # Assigning destination datastores
        datastore_dest = args.datastore_name
        # Target compute resource
        host_dest = args.esx_name
        relocate_vm(args.vm_name,
                    content=content,
                    host_dest=host_dest,
                    datastore_dest=datastore_dest)
    except vmodl.MethodFault as error:
        print("Caught vmodl fault : " + error.msg)
        return -1
    return 0
# Start program
if __name__ == "__main__":
    # NOTE(review): main()'s 0/-1 return value is discarded here; a caller
    # wanting a process exit status would need sys.exit(main()) — confirm
    # before changing, as other samples follow this same pattern.
    main()
| {
"content_hash": "0738c3dfb8cfc206b3726db130282fdf",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 81,
"avg_line_length": 33.448484848484846,
"alnum_prop": 0.5838014132995107,
"repo_name": "vmware/pyvmomi-community-samples",
"id": "15dfe108bb152e541a96685d8cb906be3a67ea8f",
"size": "5885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/relocate_vm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1631"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2011 Tyler Kenendy <tk@tkte.ch>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import sys
import getopt
import urllib
import traceback
try:
import json
except ImportError:
import simplejson as json
from collections import deque
from jawa.classloader import ClassLoader
from jawa.transforms import simple_swap, expand_constants
from burger import website
from burger.roundedfloats import transform_floats
def import_toppings():
    """
    Attempts to imports either a list of toppings or, if none were
    given, attempts to load all available toppings.

    Returns a dict mapping module name -> Topping subclass. A module that
    defines zero or more than one Topping subclass is reported and skipped.
    """
    this_dir = os.path.dirname(__file__)
    toppings_dir = os.path.join(this_dir, "burger", "toppings")
    from_list = []
    # Traverse the toppings directory and import everything.
    for root, dirs, files in os.walk(toppings_dir):
        for file_ in files:
            if not file_.endswith(".py"):
                continue
            elif file_.startswith("__"):
                continue
            elif file_ == "topping.py":
                continue
            from_list.append(file_[:-3])
    from burger.toppings.topping import Topping
    toppings = {}
    # Detect each module's topping by diffing Topping.__subclasses__()
    # before and after the import.
    last = Topping.__subclasses__()
    for topping in from_list:
        __import__("burger.toppings.%s" % topping)
        current = Topping.__subclasses__()
        subclasses = list([o for o in current if o not in last])
        last = Topping.__subclasses__()
        if len(subclasses) == 0:
            print("Topping '%s' contains no topping" % topping)
        elif len(subclasses) >= 2:
            print("Topping '%s' contains more than one topping" % topping)
        else:
            toppings[topping] = subclasses[0]
    return toppings
if __name__ == "__main__":
    try:
        opts, args = getopt.gnu_getopt(
            sys.argv[1:],
            # "s:" added: the handler below accepts -s for --url, but the
            # original short-option string did not declare it, so -s always
            # raised GetoptError.
            "t:o:vd:Dlcs:",
            [
                "toppings=",
                "output=",
                "verbose",
                "download=",
                "download-latest",
                "list",
                "compact",
                "url="
            ]
        )
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(1)
    # Default options
    toppings = None
    output = sys.stdout
    verbose = False
    download_jars = []
    download_latest = False
    list_toppings = False
    compact = False
    url = None
    for o, a in opts:
        if o in ("-t", "--toppings"):
            toppings = a.split(",")
        elif o in ("-o", "--output"):
            output = open(a, "w")
        elif o in ("-v", "--verbose"):
            verbose = True
        elif o in ("-c", "--compact"):
            compact = True
        elif o in ("-d", "--download"):
            download_jars.append(a)
        elif o in ("-D", "--download-latest"):
            download_latest = True
        elif o in ("-l", "--list"):
            list_toppings = True
        elif o in ("-s", "--url"):
            url = a
    # Load all toppings
    all_toppings = import_toppings()
    # List all of the available toppings,
    # as well as their docstring if available.
    if list_toppings:
        for topping in all_toppings:
            print("%s" % topping)
            if all_toppings[topping].__doc__:
                print(" -- %s\n" % all_toppings[topping].__doc__)
        sys.exit(0)
    # Get the toppings we want
    if toppings is None:
        loaded_toppings = all_toppings.values()
    else:
        loaded_toppings = []
        for topping in toppings:
            if topping not in all_toppings:
                print("Topping '%s' doesn't exist" % topping)
            else:
                loaded_toppings.append(all_toppings[topping])
    class DependencyNode:
        """Node in the topping dependency graph; childs are the nodes this
        topping depends on."""
        def __init__(self, topping):
            self.topping = topping
            self.provides = topping.PROVIDES
            self.depends = topping.DEPENDS
            self.childs = []
        def __repr__(self):
            return str(self.topping)
    # Order topping execution by building dependency tree
    topping_nodes = []
    topping_provides = {}
    for topping in loaded_toppings:
        topping_node = DependencyNode(topping)
        topping_nodes.append(topping_node)
        for provides in topping_node.provides:
            topping_provides[provides] = topping_node
    # Include missing dependencies
    for topping in topping_nodes:
        for dependency in topping.depends:
            if not dependency in topping_provides:
                for other_topping in all_toppings.values():
                    if dependency in other_topping.PROVIDES:
                        topping_node = DependencyNode(other_topping)
                        topping_nodes.append(topping_node)
                        for provides in topping_node.provides:
                            topping_provides[provides] = topping_node
    # Find dependency childs
    for topping in topping_nodes:
        for dependency in topping.depends:
            if not dependency in topping_provides:
                print("(%s) requires (%s)" % (topping, dependency))
                sys.exit(1)
            if not topping_provides[dependency] in topping.childs:
                topping.childs.append(topping_provides[dependency])
    # Run leaves first
    to_be_run = []
    while len(topping_nodes) > 0:
        stuck = True
        for topping in topping_nodes:
            if len(topping.childs) == 0:
                stuck = False
                for parent in topping_nodes:
                    if topping in parent.childs:
                        parent.childs.remove(topping)
                to_be_run.append(topping.topping)
                topping_nodes.remove(topping)
        if stuck:
            # No leaf found on a full pass: the graph has a cycle.
            print("Can't resolve dependencies")
            sys.exit(1)
    jarlist = args
    # Download any jars that have already been specified
    for version in download_jars:
        client_path = website.client_jar(version, verbose)
        jarlist.append(client_path)
    # Download a copy of the latest snapshot jar
    if download_latest:
        client_path = website.latest_client_jar(verbose)
        jarlist.append(client_path)
    # Download a JAR from the given URL
    if url:
        url_path = urllib.urlretrieve(url)[0]
        jarlist.append(url_path)
    summary = []
    for path in jarlist:
        classloader = ClassLoader(path, max_cache=0, bytecode_transforms=[simple_swap, expand_constants])
        names = classloader.path_map.keys()
        num_classes = sum(1 for name in names if name.endswith(".class"))
        aggregate = {
            "source": {
                "file": path,
                "classes": num_classes,
                "other": len(names),
                "size": os.path.getsize(path)
            }
        }
        available = []
        for topping in to_be_run:
            missing = [dep for dep in topping.DEPENDS if dep not in available]
            if len(missing) != 0:
                if verbose:
                    print("Dependencies failed for %s: Missing %s" % (topping, missing))
                continue
            orig_aggregate = aggregate.copy()
            try:
                topping.act(aggregate, classloader, verbose)
                available.extend(topping.PROVIDES)
            # `except Exception` instead of a bare `except:` so that
            # KeyboardInterrupt/SystemExit still abort the run.
            except Exception:
                aggregate = orig_aggregate  # If the topping failed, don't leave things in an incomplete state
                if verbose:
                    print("Failed to run %s" % topping)
                    traceback.print_exc()
        summary.append(aggregate)
    if not compact:
        json.dump(transform_floats(summary), output, sort_keys=True, indent=4)
    else:
        json.dump(transform_floats(summary), output)
    # Cleanup temporary downloads (the URL download is temporary)
    if url:
        os.remove(url_path)
    # Cleanup file output (if used)
    if output is not sys.stdout:
        output.close()
| {
"content_hash": "d1b02799194cc58b21e05d03c52dcab8",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 109,
"avg_line_length": 33.1037037037037,
"alnum_prop": 0.5833519803087939,
"repo_name": "mcdevs/Burger",
"id": "1af24d3ef6efc28843b53d51a413c75fe8cdb5b4",
"size": "8983",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "munch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "316113"
}
],
"symlink_target": ""
} |
"""Clash of Clans war moniting for telegram channels."""
__version__ = '0.9.1'
from .__main__ import main, serverless
__all__ = ['main', 'serverless']
| {
"content_hash": "5af03eef761fb315887e517f9e98603e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 56,
"avg_line_length": 22,
"alnum_prop": 0.6298701298701299,
"repo_name": "mehdisadeghi/clashogram",
"id": "628c8b084f80bc43329db3b1d0fb19bd2c183fc2",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clashogram/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "47740"
}
],
"symlink_target": ""
} |
"""Structured error classes in TVM.
Each error class takes an error message as its input.
See the example sections for for suggested message conventions.
To make the code more readable, we recommended developers to
copy the examples and raise errors with the same message convention.
.. note::
Please also refer to :ref:`error-handling-guide`.
"""
from tvm._ffi.base import register_error, TVMError
@register_error
class InternalError(TVMError):
    """Internal error in the system.
    Examples
    --------
    .. code :: c++
        // Example code C++
        LOG(FATAL) << "InternalError: internal error detail.";
    .. code :: python
        # Example code in python
        raise InternalError("internal error detail")
    """
    def __init__(self, msg):
        # Patch up additional hint message.
        # The guard keeps the hint from being appended twice when the error
        # is re-raised across the FFI boundary.
        if "TVM hint:" not in msg:
            msg += (
                "\nTVM hint: You hit an internal error. "
                + "Please open a thread on https://discuss.tvm.apache.org/ to report it."
            )
        super(InternalError, self).__init__(msg)
register_error("ValueError", ValueError)
register_error("TypeError", TypeError)
register_error("AttributeError", AttributeError)
register_error("KeyError", KeyError)
register_error("IndexError", IndexError)
@register_error
class RPCError(TVMError):
    """Error thrown by the remote server handling the RPC call."""
@register_error
class OpError(TVMError):
    """Base class of all operator errors in frontends.

    Frontend importers raise subclasses of this for operator-related issues.
    """
@register_error
class OpNotImplemented(OpError, NotImplementedError):
    """Operator is not implemented.
    Example
    -------
    .. code:: python
        raise OpNotImplemented(
            "Operator {} is not supported in {} frontend".format(
                missing_op, frontend_name))
    """
@register_error
class OpAttributeRequired(OpError, AttributeError):
    """Required attribute is not found.
    Example
    -------
    .. code:: python
        raise OpAttributeRequired(
            "Required attribute {} not found in operator {}".format(
                attr_name, op_name))
    """
@register_error
class OpAttributeInvalid(OpError, AttributeError):
    """Attribute value is invalid when taking in a frontend operator.

    Example
    -------
    .. code:: python

        raise OpAttributeInvalid(
            "Value {} in attribute {} of operator {} is not valid".format(
                value, attr_name, op_name))
    """
@register_error
class OpAttributeUnImplemented(OpError, NotImplementedError):
    """Attribute is not supported in a certain frontend.

    Example
    -------
    .. code:: python

        raise OpAttributeUnImplemented(
            "Attribute {} is not supported in operator {}".format(
                attr_name, op_name))
    """
@register_error
class DiagnosticError(TVMError):
    """Error diagnostics were reported during the execution of a pass.

    See the configured diagnostic renderer for detailed error information.
    """
| {
"content_hash": "27914c32f037fdbbf820d54e2ddf5efa",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 89,
"avg_line_length": 25.083333333333332,
"alnum_prop": 0.6428571428571429,
"repo_name": "Laurawly/tvm-1",
"id": "9755270ec3bab6c8ae41edb67161d1208234ff9e",
"size": "3795",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/tvm/error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4093"
},
{
"name": "C",
"bytes": "351611"
},
{
"name": "C++",
"bytes": "11660999"
},
{
"name": "CMake",
"bytes": "228510"
},
{
"name": "Cuda",
"bytes": "16902"
},
{
"name": "Cython",
"bytes": "28979"
},
{
"name": "Go",
"bytes": "111527"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "199950"
},
{
"name": "JavaScript",
"bytes": "15305"
},
{
"name": "Makefile",
"bytes": "67149"
},
{
"name": "Objective-C",
"bytes": "24259"
},
{
"name": "Objective-C++",
"bytes": "87655"
},
{
"name": "Python",
"bytes": "16256580"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "228674"
},
{
"name": "TypeScript",
"bytes": "94385"
}
],
"symlink_target": ""
} |
"""Tests for the C{pywikibot.tools.formatter} module."""
#
# (C) Pywikibot team, 2015-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
from pywikibot.tools import formatter
from pywikibot.tools import UnicodeMixin
from tests.aspects import unittest, TestCase
class TestListOutputter(TestCase):

    """Exercise formatter.SequenceOutputter list rendering."""

    net = False

    def test_SequenceOutputter(self):
        """Test format method."""
        entries = ['foo', 'bar']
        seq_out = formatter.SequenceOutputter(entries)
        # Default rendering: one " <index> - <item>" line per entry.
        self.assertEqual(seq_out.format_list(), '\n 1 - foo\n 2 - bar\n')
        # A custom format string exposes index, width and item fields.
        seq_out.format_string = '({index} {width} {item})'
        self.assertEqual(seq_out.format_list(), '\n(1 1 foo)\n(2 1 bar)\n')
        seq_out.format_string = '{item}'
        self.assertEqual(seq_out.format_list(), '\nfoo\nbar\n')
# TODO: add tests for background colors.
class TestColorFormat(TestCase):

    """Test color_format function in bot module."""

    class DummyUnicode(UnicodeMixin):

        """Dummy class that __unicode__ returns a non-ascii unicode value."""

        def __unicode__(self):
            """Return ä."""
            return 'ä'

    net = False

    def assert_format(self, format_string, expected, *args, **kwargs):
        """Assert that color_format returns the expected string and type."""
        result = formatter.color_format(format_string, *args, **kwargs)
        self.assertEqual(result, expected)
        self.assertIsInstance(result, type(expected))

    def test_no_colors(self):
        """Test without colors in template string."""
        self.assert_format('', '')
        self.assert_format('42', '42')
        self.assert_format('{0}', '42', 42)
        self.assert_format('before {0} after', 'before 42 after', 42)
        self.assert_format('{ans}', '42', ans=42)

    def test_colors(self):
        """Test with colors in template string."""
        # Color fields are emitted as "\03{<name>}" control sequences.
        self.assert_format('{0}{black}', '42\03{black}', 42)
        self.assert_format('{ans}{black}', '42\03{black}', ans=42)
        # Conversions (!r) and format specs (:03) are rejected on colors.
        self.assertRaisesRegex(
            ValueError, r'.*conversion.*', formatter.color_format,
            '{0}{black!r}', 42)
        self.assertRaisesRegex(
            ValueError, r'.*format spec.*', formatter.color_format,
            '{0}{black:03}', 42)

    def test_marker(self):
        r"""Test that the \03 marker is only allowed in front of colors."""
        self.assert_format('{0}\03{black}', '42\03{black}', 42)
        # literal before a normal field
        self.assertRaisesRegex(
            ValueError, r'.*\\03', formatter.color_format,
            '\03{0}{black}', 42)
        # literal before a color field
        self.assertRaisesRegex(
            ValueError, r'.*\\03', formatter.color_format,
            '{0}\03before{black}', 42)

    def test_color_kwargs(self):
        """Test with a color as keyword argument."""
        # Supplying a value under a color's own name must fail.
        self.assertRaises(ValueError,
                          formatter.color_format, '{aqua}{black}', aqua=42)

    def test_non_ascii(self):
        """Test non-ASCII replacements."""
        self.assert_format('{0}', 'ä', 'ä')
        self.assert_format('{black}{0}', '\03{black}ä', 'ä')
        self.assert_format('{0}', 'ä', self.DummyUnicode())
        self.assert_format('{black}{0}', '\03{black}ä', self.DummyUnicode())

    def test_bytes_format(self):
        """Test that using `bytes` is not allowed."""
        self.assertRaises(TypeError, formatter.color_format, b'{0}', 'a')
        self.assertRaises(TypeError, formatter.color_format, b'{black}{0}',
                          'a')

    def test_variant_colors(self):
        """Test variant colors with {color} parameter."""
        self.assert_format('{0}{color}', '42\03{black}', 42, color='black')
        self.assert_format('{ans}{color}', '42\03{black}', ans=42,
                           color='black')
        # {color} bound to a non-color value falls back to substitution.
        self.assert_format('{color}', '42', color=42)
if __name__ == '__main__':  # pragma: no cover
    # unittest.main() calls sys.exit(); swallow the SystemExit so running
    # this file directly terminates quietly.
    try:
        unittest.main()
    except SystemExit:
        pass
| {
"content_hash": "6daa731a1258cea1f3edb1031b15ff9f",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 77,
"avg_line_length": 36.09649122807018,
"alnum_prop": 0.5927095990279465,
"repo_name": "PersianWikipedia/pywikibot-core",
"id": "4a4ebcd3f77bd96ca711993ed8a7a1bd06759439",
"size": "4147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tools_formatter_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "4021871"
}
],
"symlink_target": ""
} |
##
## Copyright (c) Microsoft. All rights reserved.
## Licensed under the MIT license. See LICENSE file in the project root for full license information.
##
#
#USAGE:
#Add Events: modify <root>src/vm/ClrEtwAll.man
#Look at the Code in <root>/src/inc/genXplatLttng.py for using subroutines in this file
#
import os
import xml.dom.minidom as DOM
from sets import Set
stdprolog="""
//
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
/******************************************************************
DO NOT MODIFY. AUTOGENERATED FILE.
This file is generated using the logic from <root>/src/scripts/genXplatEventing.py
******************************************************************/
"""
stdprolog_cmake="""
#
#
#******************************************************************
#DO NOT MODIFY. AUTOGENERATED FILE.
#This file is generated using the logic from <root>/src/scripts/genXplatEventing.py
#******************************************************************
"""
lindent = " ";
palDataTypeMapping ={
#constructed types
"win:null" :" ",
"win:Int64" :"const __int64",
"win:ULong" :"const ULONG",
"win:count" :"*",
"win:Struct" :"const void",
#actual spec
"win:GUID" :"const GUID",
"win:AnsiString" :"LPCSTR",
"win:UnicodeString" :"PCWSTR",
"win:Double" :"const double",
"win:Int32" :"const signed int",
"win:Boolean" :"const BOOL",
"win:UInt64" :"const unsigned __int64",
"win:UInt32" :"const unsigned int",
"win:UInt16" :"const unsigned short",
"win:UInt8" :"const unsigned char",
"win:Pointer" :"const void*",
"win:Binary" :"const BYTE"
}
# A Template represents an ETW template and can contain one or more
# AbstractTemplates; each AbstractTemplate holds a FunctionSignature, which
# consists of FunctionParameter objects, one per parameter in the signature.
class AbstractTemplate:
    """One named chunk of an ETW template plus its signature frame."""

    def __init__(self, abstractTemplateName, abstractFnFrame):
        # Attribute names are read directly by Template and the generator
        # functions, so they are part of the informal interface.
        self.abstractTemplateName = abstractTemplateName
        self.AbstractFnFrame = abstractFnFrame
class Template:
    """An ETW template: an ordered set of AbstractTemplate chunks."""

    def __init__(self, templateName):
        self.template = templateName
        self.allAbstractTemplateTypes = []  # chunk names, insertion order
        self.allAbstractTemplateLUT = {}    # chunk name -> AbstractTemplate

    def append(self, abstractTemplateName, abstractFnFrame):
        """Record a new abstract-template chunk under this template."""
        self.allAbstractTemplateLUT[abstractTemplateName] = AbstractTemplate(
            abstractTemplateName, abstractFnFrame)
        self.allAbstractTemplateTypes.append(abstractTemplateName)

    def getFnFrame(self, abstractTemplateName):
        """Return the FunctionSignature frame stored for a chunk name."""
        return self.allAbstractTemplateLUT[abstractTemplateName].AbstractFnFrame

    def getAbstractVarProps(self, abstractTemplateName):
        # NOTE(review): AbstractTemplate never sets AbstractVarProps, so
        # calling this raises AttributeError; no caller in this file uses it.
        return self.allAbstractTemplateLUT[abstractTemplateName].AbstractVarProps

    def getFnParam(self, name):
        """Search every chunk's frame for a parameter called *name*."""
        for chunkName in self.allAbstractTemplateTypes:
            param = self.getFnFrame(chunkName).getParam(name)
            if param:
                return param
        return None
class FunctionSignature:
    """Ordered parameter collection for one generated function."""

    def __init__(self):
        self.LUT = {}        # parameter name -> FunctionParameter
        self.paramlist = []  # parameter names, in declaration order

    def append(self, variable, fnparam):
        """Add a parameter at the end of the signature."""
        self.paramlist.append(variable)
        self.LUT[variable] = fnparam

    def getParam(self, variable):
        """Return the FunctionParameter for *variable*, or None."""
        return self.LUT.get(variable)

    def getLength(self):
        """Number of parameters currently in the signature."""
        return len(self.paramlist)
class FunctionParameter:
    """One parameter of a generated eventing function."""

    def __init__(self, winType, name, count, prop):
        self.winType = winType  # ETW type as given in the manifest
        self.name = name        # parameter name as given in the manifest
        self.prop = prop        # special property from manifest/developer
        # self.count collapses to a two-value flag: "win:count" marks a
        # pointer/array parameter, "win:null" a scalar.  GUIDs are treated
        # as structs (pointers) even without an explicit count.  (The
        # original's 'count == "win:count"' test was redundant: any truthy
        # count already selects the pointer branch.)
        is_pointer = count != "win:null" and bool(count or winType == "win:GUID")
        self.count = "win:count" if is_pointer else "win:null"
def getTopLevelElementsByTagName(Node,tag):
    """Like Node.getElementsByTagName(tag) but keep only *direct* children
    of Node (getElementsByTagName itself returns all descendants)."""
    return [element for element in Node.getElementsByTagName(tag)
            if element.parentNode == Node]
def bucketizeAbstractTemplates(template,fnPrototypes,var_Dependecies):
    """Split a template's full parameter list into AbstractTemplate chunks
    of at most 10 parameters (the abstract-template limit), keeping each
    parameter together with its dependencies (e.g. its count variable).
    Returns a populated Template object."""
    # At this point we have the complete argument list, now break them into chunks of 10
    # As Abstract Template supports a maximum of 10 arguments
    abstractTemplateName = template;
    subevent_cnt = 1;
    templateProp = Template(template)
    abstractFnFrame = FunctionSignature()
    for variable in fnPrototypes.paramlist:
        # Pull the variable and anything it depends on into the frame.
        for dependency in var_Dependecies[variable]:
            if not abstractFnFrame.getParam(dependency):
                abstractFnFrame.append(dependency,fnPrototypes.getParam(dependency))
        frameCount = abstractFnFrame.getLength()
        if frameCount == 10:
            # Frame is full: emit it as one chunk named <template>_<n>.
            templateProp.append(abstractTemplateName,abstractFnFrame)
            abstractTemplateName = template + "_" + str(subevent_cnt)
            subevent_cnt += 1
            if len(var_Dependecies[variable]) > 1:
                #check if the frame's dependencies are all present
                depExists = True
                for depends in var_Dependecies[variable]:
                    if not abstractFnFrame.getParam(depends):
                        depExists = False
                        break
                if not depExists:
                    raise ValueError('Abstract Template: '+ abstractTemplateName+ ' does not have all its dependecies in the frame, write required Logic here and test it out, the parameter whose dependency is missing is :'+ variable)
                    #pseudo code:
                    # 1. add a missing dependecies to the frame of the current parameter
                    # 2. Check if the frame has enough space, if there is continue adding missing dependencies
                    # 3. Else Save the current Frame and start a new frame and follow step 1 and 2
                    # 4. Add the current parameter and proceed
            #create a new fn frame
            abstractFnFrame = FunctionSignature()
    #subevent_cnt == 1 represents argumentless templates
    if abstractFnFrame.getLength() > 0 or subevent_cnt == 1:
        templateProp.append(abstractTemplateName,abstractFnFrame)
    return templateProp
# Manifest <data> attributes split into ones the generator consumes and ones
# it knowingly skips; parseTemplateNodes rejects any attribute in neither
# set.  (Set comes from the Python 2 'sets' module imported above.)
ignoredXmlTemplateAttribes = Set(["map","outType"])
usedXmlTemplateAttribes = Set(["name","inType","count", "length"])
def parseTemplateNodes(templateNodes):
    """Parse manifest <template> nodes into a dict mapping template id
    ('tid') to a Template produced by bucketizeAbstractTemplates.  Scalar
    <data> fields become one parameter each; <struct> fields become a
    length + pointer parameter pair."""
    #return values
    allTemplates = {}
    for templateNode in templateNodes:
        template = templateNode.getAttribute('tid')
        var_Dependecies = {}
        fnPrototypes = FunctionSignature()
        dataNodes = getTopLevelElementsByTagName(templateNode,'data')
        # Validate that no new attributes has been added to manifest
        for dataNode in dataNodes:
            nodeMap = dataNode.attributes
            for attrib in nodeMap.values():
                attrib_name = attrib.name
                if attrib_name not in ignoredXmlTemplateAttribes and attrib_name not in usedXmlTemplateAttribes:
                    raise ValueError('unknown attribute: '+ attrib_name + ' in template:'+ template)
        for dataNode in dataNodes:
            variable = dataNode.getAttribute('name')
            wintype = dataNode.getAttribute('inType')
            #count and length are the same
            wincount = dataNode.getAttribute('count')
            winlength = dataNode.getAttribute('length');
            var_Props = None
            var_dependency = [variable]
            if winlength:
                if wincount:
                    raise Exception("both count and length property found on: " + variable + "in template: " + template)
                wincount = winlength
            # A count of exactly 1 is the same as a scalar field.
            if (wincount.isdigit() and int(wincount) ==1):
                wincount = ''
            if wincount:
                if (wincount.isdigit()):
                    # Literal element count.
                    var_Props = wincount
                elif fnPrototypes.getParam(wincount):
                    # Count is another (earlier) parameter: record it as a
                    # dependency so both land in the same chunk.
                    var_Props = wincount
                    var_dependency.insert(0,wincount)
            #construct the function signature
            if wintype == "win:GUID":
                var_Props = "sizeof(GUID)/sizeof(int)"
            var_Dependecies[variable] = var_dependency
            fnparam = FunctionParameter(wintype,variable,wincount,var_Props)
            fnPrototypes.append(variable,fnparam)
        structNodes = getTopLevelElementsByTagName(templateNode,'struct')
        count = 0;
        for structToBeMarshalled in structNodes:
            # Each struct is marshalled as a synthetic (length, pointer)
            # parameter pair named Arg<n>_Struct_Len_/Arg<n>_Struct_Pointer_.
            struct_len = "Arg"+ str(count) + "_Struct_Len_"
            struct_pointer = "Arg"+ str(count) + "_Struct_Pointer_"
            count += 1
            #populate the Property- used in codegen
            structname = structToBeMarshalled.getAttribute('name')
            countVarName = structToBeMarshalled.getAttribute('count')
            if not countVarName:
                raise ValueError('Struct '+ structname+ ' in template:'+ template + 'does not have an attribute count')
            var_Props = countVarName + "*" + struct_len + "/sizeof(int)"
            var_Dependecies[struct_len] = [struct_len]
            var_Dependecies[struct_pointer] = [countVarName,struct_len,struct_pointer]
            fnparam_len = FunctionParameter("win:ULong",struct_len,"win:null",None)
            fnparam_pointer = FunctionParameter("win:Struct",struct_pointer,"win:count",var_Props)
            fnPrototypes.append(struct_len,fnparam_len)
            fnPrototypes.append(struct_pointer,fnparam_pointer)
        allTemplates[template] = bucketizeAbstractTemplates(template,fnPrototypes,var_Dependecies)
    return allTemplates
def generateClrallEvents(eventNodes,allTemplates):
    """Emit the VM-side header text: for every event, an inline
    EventEnabled<Name>() wrapper and an inline FireEtw<Name>(...) that
    forwards its arguments to the PAL's FireEtXplat<Name>(...)."""
    clrallEvents = []
    for eventNode in eventNodes:
        eventName = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')
        #generate EventEnabled
        clrallEvents.append("inline BOOL EventEnabled")
        clrallEvents.append(eventName)
        clrallEvents.append("() {return XplatEventLogger::IsEventLoggingEnabled() && EventXplatEnabled")
        clrallEvents.append(eventName+"();}\n\n")
        #generate FireEtw functions
        fnptype = []
        fnbody = []
        fnptype.append("inline ULONG FireEtw")
        fnptype.append(eventName)
        fnptype.append("(\n")
        fnbody.append(lindent)
        fnbody.append("if (!EventEnabled")
        fnbody.append(eventName)
        fnbody.append("()) {return ERROR_SUCCESS;}\n")
        line = []
        fnptypeline = []
        if templateName:
            # Parameters come from every abstract-template chunk, in order.
            for subTemplate in allTemplates[templateName].allAbstractTemplateTypes:
                fnSig = allTemplates[templateName].getFnFrame(subTemplate)
                for params in fnSig.paramlist:
                    fnparam = fnSig.getParam(params)
                    wintypeName = fnparam.winType
                    typewName = palDataTypeMapping[wintypeName]
                    winCount = fnparam.count
                    countw = palDataTypeMapping[winCount]
                    fnptypeline.append(lindent)
                    fnptypeline.append(typewName)
                    fnptypeline.append(countw)
                    fnptypeline.append(" ")
                    fnptypeline.append(fnparam.name)
                    fnptypeline.append(",\n")
                #fnsignature
                for params in fnSig.paramlist:
                    fnparam = fnSig.getParam(params)
                    line.append(fnparam.name)
                    line.append(",")
        #remove trailing commas
        if len(line) > 0:
            del line[-1]
        if len(fnptypeline) > 0:
            del fnptypeline[-1]
        fnptype.extend(fnptypeline)
        fnptype.append("\n)\n{\n")
        fnbody.append(lindent)
        fnbody.append("return FireEtXplat")
        fnbody.append(eventName)
        fnbody.append("(")
        fnbody.extend(line)
        fnbody.append(");\n")
        fnbody.append("}\n\n")
        clrallEvents.extend(fnptype)
        clrallEvents.extend(fnbody)
    return ''.join(clrallEvents)
def generateClrXplatEvents(eventNodes, allTemplates):
    """Emit extern "C" declarations (EventXplatEnabled<Name>() and
    FireEtXplat<Name>(...)) for every event, for the PAL-side header."""
    clrallEvents = []
    for eventNode in eventNodes:
        eventName = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')
        #generate EventEnabled
        clrallEvents.append("extern \"C\" BOOL EventXplatEnabled")
        clrallEvents.append(eventName)
        clrallEvents.append("();\n")
        #generate FireEtw functions
        fnptype = []
        fnptypeline = []
        fnptype.append("extern \"C\" ULONG FireEtXplat")
        fnptype.append(eventName)
        fnptype.append("(\n")
        if templateName:
            # Declaration parameters come from every chunk, in order.
            for subTemplate in allTemplates[templateName].allAbstractTemplateTypes:
                fnSig = allTemplates[templateName].getFnFrame(subTemplate)
                for params in fnSig.paramlist:
                    fnparam = fnSig.getParam(params)
                    wintypeName = fnparam.winType
                    typewName = palDataTypeMapping[wintypeName]
                    winCount = fnparam.count
                    countw = palDataTypeMapping[winCount]
                    fnptypeline.append(lindent)
                    fnptypeline.append(typewName)
                    fnptypeline.append(countw)
                    fnptypeline.append(" ")
                    fnptypeline.append(fnparam.name)
                    fnptypeline.append(",\n")
        #remove trailing commas
        if len(fnptypeline) > 0:
            del fnptypeline[-1]
        fnptype.extend(fnptypeline)
        fnptype.append("\n);\n")
        clrallEvents.extend(fnptype)
    return ''.join(clrallEvents)
#generates the dummy header file which is used by the VM as entry point to the logging Functions
def generateclrEtwDummy(eventNodes,allTemplates):
    """Emit one '#define FireEtw<Name>(args...) 0' macro per event, so
    callers compile to a no-op when real eventing is absent."""
    clretmEvents = []
    for eventNode in eventNodes:
        eventName = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')
        fnptype = []
        #generate FireEtw functions
        fnptype.append("#define FireEtw")
        fnptype.append(eventName)
        fnptype.append("(");
        line = []
        if templateName:
            # Macro argument names mirror the real function's parameters.
            for subTemplate in allTemplates[templateName].allAbstractTemplateTypes:
                fnSig = allTemplates[templateName].getFnFrame(subTemplate)
                for params in fnSig.paramlist:
                    fnparam = fnSig.getParam(params)
                    line.append(fnparam.name)
                    line.append(", ")
        #remove trailing commas
        if len(line) > 0:
            del line[-1]
        fnptype.extend(line)
        fnptype.append(") 0\n")
        clretmEvents.extend(fnptype)
    return ''.join(clretmEvents)
def generateClralltestEvents(sClrEtwAllMan):
    """Emit the body of the PAL sanity test: one EventXplatEnabled<Name>()
    + FireEtXplat<Name>(...) call per event, with canned argument values
    chosen by parameter type (see the globals emitted by
    generateSanityTest's C++ prolog, e.g. var11/win_Binary)."""
    tree = DOM.parse(sClrEtwAllMan)
    clrtestEvents = []
    for providerNode in tree.getElementsByTagName('provider'):
        templateNodes = providerNode.getElementsByTagName('template')
        allTemplates = parseTemplateNodes(templateNodes)
        eventNodes = providerNode.getElementsByTagName('event')
        for eventNode in eventNodes:
            eventName = eventNode.getAttribute('symbol')
            templateName = eventNode.getAttribute('template')
            clrtestEvents.append(" EventXplatEnabled" + eventName + "();\n")
            clrtestEvents.append("Error |= FireEtXplat" + eventName + "(\n")
            line =[]
            if templateName :
                for subTemplate in allTemplates[templateName].allAbstractTemplateTypes:
                    fnSig = allTemplates[templateName].getFnFrame(subTemplate)
                    for params in fnSig.paramlist:
                        argline =''
                        fnparam = fnSig.getParam(params)
                        if fnparam.name.lower() == 'count':
                            # Count parameters get a small fixed value.
                            argline = '2'
                        else:
                            if fnparam.winType == "win:Binary":
                                argline = 'win_Binary'
                            elif fnparam.winType == "win:Pointer" and fnparam.count == "win:count":
                                argline = "(const void**)&var11"
                            elif fnparam.winType == "win:Pointer" :
                                argline = "(const void*)var11"
                            elif fnparam.winType =="win:AnsiString":
                                argline = '" Testing AniString "'
                            elif fnparam.winType =="win:UnicodeString":
                                argline = 'W(" Testing UnicodeString ")'
                            else:
                                # Default: pass the global named after the
                                # type (win:UInt32 -> win_UInt32), by
                                # address for pointer-style parameters.
                                if fnparam.count == "win:count":
                                    line.append("&")
                                argline = fnparam.winType.replace(":","_")
                        line.append(argline)
                        line.append(",\n")
                #remove trailing commas
                if len(line) > 0:
                    del line[-1]
                    line.append("\n")
            line.append(");\n")
            clrtestEvents.extend(line)
    return ''.join(clrtestEvents)
def generateSanityTest(sClrEtwAllMan,testDir):
    """Generate the PAL eventing sanity test under *testDir*: a
    CMakeLists.txt, a clralltestevents.cpp that fires every event once
    (body from generateClralltestEvents), and a testinfo.dat.
    (Python 2 file: uses 'print >>file' redirection.)"""
    if not os.path.exists(testDir):
        os.makedirs(testDir)
    cmake_file = testDir + "/CMakeLists.txt"
    test_cpp = testDir + "/clralltestevents.cpp"
    testinfo = testDir + "/testinfo.dat"
    Cmake_file = open(cmake_file,'w')
    Test_cpp = open(test_cpp,'w')
    Testinfo = open(testinfo,'w')
    #CMake File:
    print >>Cmake_file, stdprolog_cmake
    print >>Cmake_file, """
cmake_minimum_required(VERSION 2.8.12.2)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(SOURCES
"""
    print >>Cmake_file, test_cpp
    print >>Cmake_file, """
)
include_directories($ENV{__GeneratedIntermediatesDir}/inc)
include_directories(${COREPAL_SOURCE_DIR}/inc/rt)
add_executable(eventprovidertest
${SOURCES}
)
set(EVENT_PROVIDER_DEPENDENCIES "")
set(EVENT_PROVIDER_LINKER_OTPTIONS "")
if(CMAKE_SYSTEM_NAME STREQUAL Linux)
add_definitions(-DFEATURE_EVENT_TRACE=1)
list(APPEND EVENT_PROVIDER_DEPENDENCIES
coreclrtraceptprovider
eventprovider
)
list(APPEND EVENT_PROVIDER_LINKER_OTPTIONS
${EVENT_PROVIDER_DEPENDENCIES}
)
endif(CMAKE_SYSTEM_NAME STREQUAL Linux)
add_dependencies(eventprovidertest ${EVENT_PROVIDER_DEPENDENCIES} coreclrpal)
target_link_libraries(eventprovidertest
coreclrpal
${EVENT_PROVIDER_LINKER_OTPTIONS}
)
"""
    print >>Testinfo, """
Copyright (c) Microsoft Corporation. All rights reserved.
#
Version = 1.0
Section = EventProvider
Function = EventProvider
Name = PAL test for FireEtW* and EventEnabled* functions
TYPE = DEFAULT
EXE1 = eventprovidertest
Description
=This is a sanity test to check that there are no crashes in Xplat eventing
"""
    #Test.cpp
    print >>Test_cpp, stdprolog
    # Fixed prolog declares one canned global per ETW type; the generated
    # event calls (below) reference these by name.
    print >>Test_cpp, """
/*=====================================================================
**
** Source: clralltestevents.cpp
**
** Purpose: Ensure Correctness of Eventing code
**
**
**===================================================================*/
#include <palsuite.h>
#include <clrxplatevents.h>
typedef struct _Struct1 {
ULONG Data1;
unsigned short Data2;
unsigned short Data3;
unsigned char Data4[8];
} Struct1;
Struct1 var21[2] = { { 245, 13, 14, "deadbea" }, { 542, 0, 14, "deadflu" } };
Struct1* var11 = var21;
Struct1* win_Struct = var21;
GUID win_GUID ={ 245, 13, 14, "deadbea" };
double win_Double =34.04;
ULONG win_ULong = 34;
BOOL win_Boolean = FALSE;
unsigned __int64 win_UInt64 = 114;
unsigned int win_UInt32 = 4;
unsigned short win_UInt16 = 12;
unsigned char win_UInt8 = 9;
int win_Int32 = 12;
BYTE* win_Binary =(BYTE*)var21 ;
int __cdecl main(int argc, char **argv)
{
/* Initialize the PAL.
*/
if(0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
ULONG Error = ERROR_SUCCESS;
#if defined(FEATURE_EVENT_TRACE)
Trace("\\n Starting functional eventing APIs tests \\n");
"""
    print >>Test_cpp, generateClralltestEvents(sClrEtwAllMan)
    print >>Test_cpp,"""
/* Shutdown the PAL.
*/
if (Error != ERROR_SUCCESS)
{
Fail("One or more eventing Apis failed\\n ");
return FAIL;
}
Trace("\\n All eventing APIs were fired succesfully \\n");
#endif //defined(FEATURE_EVENT_TRACE)
PAL_Terminate();
return PASS;
}
"""
    Cmake_file.close()
    Test_cpp.close()
    Testinfo.close()
def generateEtmDummyHeader(sClrEtwAllMan,clretwdummy):
    """Write the dummy header at *clretwdummy*: no-op
    '#define FireEtw<Name>(...) 0' macros for every event in the manifest
    (see generateclrEtwDummy), creating the directory if needed."""
    tree = DOM.parse(sClrEtwAllMan)
    incDir = os.path.dirname(os.path.realpath(clretwdummy))
    if not os.path.exists(incDir):
        os.makedirs(incDir)
    Clretwdummy = open(clretwdummy,'w')
    print >>Clretwdummy, stdprolog
    for providerNode in tree.getElementsByTagName('provider'):
        templateNodes = providerNode.getElementsByTagName('template')
        allTemplates = parseTemplateNodes(templateNodes)
        eventNodes = providerNode.getElementsByTagName('event')
        #pal: create etmdummy.h
        print >>Clretwdummy,generateclrEtwDummy(eventNodes,allTemplates)
    Clretwdummy.close()
def generatePlformIndependentFiles(sClrEtwAllMan,incDir,etmDummyFile, testDir):
    """Generate every platform-independent artifact: the sanity test
    (generateSanityTest), the dummy macro header (generateEtmDummyHeader),
    the VM header clretwallmain.h and the PAL header clrxplatevents.h.
    (Function name typo 'Plform' is kept: callers use it.)"""
    tree = DOM.parse(sClrEtwAllMan)
    if not os.path.exists(incDir):
        os.makedirs(incDir)
    generateSanityTest(sClrEtwAllMan,testDir)
    generateEtmDummyHeader(sClrEtwAllMan,etmDummyFile)
    clrallevents = incDir + "/clretwallmain.h"
    clrxplatevents = incDir + "/clrxplatevents.h"
    Clrallevents = open(clrallevents,'w')
    Clrxplatevents = open(clrxplatevents,'w')
    print >>Clrallevents, stdprolog
    print >>Clrxplatevents, stdprolog
    print >>Clrallevents, "\n#include \"clrxplatevents.h\"\n"
    for providerNode in tree.getElementsByTagName('provider'):
        templateNodes = providerNode.getElementsByTagName('template')
        allTemplates = parseTemplateNodes(templateNodes)
        eventNodes = providerNode.getElementsByTagName('event')
        #vm header:
        print >>Clrallevents,generateClrallEvents(eventNodes,allTemplates)
        #pal: create clrallevents.h
        print >>Clrxplatevents, generateClrXplatEvents(eventNodes,allTemplates)
    Clrxplatevents.close()
    Clrallevents.close()
class EventExclusions:
    """Exclusion categories parsed from the exclusion-list file (see
    parseExclusionList); each set holds "provider:task:symbol" entries
    where empty fields were normalized to the "*" wildcard."""
    def __init__(self):
        self.nostack = Set()        # entries from "nostack:" lines
        self.explicitstack = Set()  # entries from "stack:" lines
        self.noclrinstance = Set()  # entries from "noclrinstanceid:" lines
def parseExclusionList(exclusionListFile):
    """Parse the exclusion-list file into an EventExclusions instance.

    Each non-comment, non-"nomac" line has exactly 5 ':'-separated fields:
        category:task:provider:<unused>:symbol
    Empty task/provider/symbol fields are normalized to the "*" wildcard
    and the entry is stored as "provider:task:symbol" in the set matching
    its category ("nostack", "stack" or "noclrinstanceid").

    Raises Exception for lines that do not have exactly 5 fields.
    """
    exclusionInfo = EventExclusions()
    ExclusionFile = open(exclusionListFile,'r')
    try:
        for line in ExclusionFile:
            line = line.strip()
            #remove comments
            if not line or line.startswith('#'):
                continue
            tokens = line.split(':')
            #entries starting with nomac are ignored
            if "nomac" in tokens:
                continue
            # BUG FIX: the original only rejected lines with MORE than 5
            # fields; shorter lines crashed below with IndexError.  Require
            # exactly 5 so malformed lines fail with a clear message.
            if len(tokens) != 5:
                raise Exception("Invalid Entry " + line + " in " + exclusionListFile)
            eventProvider = tokens[2]
            eventTask = tokens[1]
            eventSymbol = tokens[4]
            # Empty fields act as wildcards.
            if eventProvider == '':
                eventProvider = "*"
            if eventTask == '':
                eventTask = "*"
            if eventSymbol == '':
                eventSymbol = "*"
            entry = eventProvider + ":" + eventTask + ":" + eventSymbol
            if tokens[0].lower() == "nostack":
                exclusionInfo.nostack.add(entry)
            if tokens[0].lower() == "stack":
                exclusionInfo.explicitstack.add(entry)
            if tokens[0].lower() == "noclrinstanceid":
                exclusionInfo.noclrinstance.add(entry)
    finally:
        # Close even when a malformed line raises (the original leaked the
        # handle on that path).
        ExclusionFile.close()
    return exclusionInfo
def getStackWalkBit(eventProvider, taskName, eventSymbol, stackSet):
    """Return False when (provider, task, symbol) matches any
    "provider:task:symbol" entry in *stackSet* ("*" wildcards a field);
    True when no entry matches.  Raises on malformed entries."""
    actual = (eventProvider, taskName, eventSymbol)
    for entry in stackSet:
        patterns = entry.split(':')
        if len(patterns) != 3:
            raise Exception("Error, possible error in the script which introduced the enrty "+ entry)
        if all(pat == value or pat == "*" for pat, value in zip(patterns, actual)):
            return False
    return True
#Add the miscelaneous checks here
def checkConsistency(sClrEtwAllMan,exclusionListFile):
    """Validate the manifest against the exclusion list:

    - every event NOT on the noclrinstanceid list must carry a
      ClrInstanceID field of type win:UInt16 in its template;
    - for a given event value, either all or none of its versions must
      state a stack preference (nostack/stack lists).
    Raises Exception on the first violation."""
    tree = DOM.parse(sClrEtwAllMan)
    exclusionInfo = parseExclusionList(exclusionListFile)
    for providerNode in tree.getElementsByTagName('provider'):
        stackSupportSpecified = {}
        eventNodes = providerNode.getElementsByTagName('event')
        templateNodes = providerNode.getElementsByTagName('template')
        eventProvider = providerNode.getAttribute('name')
        allTemplates = parseTemplateNodes(templateNodes)
        for eventNode in eventNodes:
            taskName = eventNode.getAttribute('task')
            eventSymbol = eventNode.getAttribute('symbol')
            eventTemplate = eventNode.getAttribute('template')
            eventValue = int(eventNode.getAttribute('value'))
            # getStackWalkBit returns False when the event IS on the list.
            clrInstanceBit = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.noclrinstance)
            sLookupFieldName = "ClrInstanceID"
            sLookupFieldType = "win:UInt16"
            if clrInstanceBit and allTemplates.get(eventTemplate):
                # check for the event template and look for a field named ClrInstanceId of type win:UInt16
                fnParam = allTemplates[eventTemplate].getFnParam(sLookupFieldName)
                if not(fnParam and fnParam.winType == sLookupFieldType):
                    raise Exception(exclusionListFile + ":No " + sLookupFieldName + " field of type " + sLookupFieldType + " for event symbol " + eventSymbol)
            # If some versions of an event are on the nostack/stack lists,
            # and some versions are not on either the nostack or stack list,
            # then developer likely forgot to specify one of the versions
            eventStackBitFromNoStackList = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.nostack)
            eventStackBitFromExplicitStackList = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.explicitstack)
            sStackSpecificityError = exclusionListFile + ": Error processing event :" + eventSymbol + "(ID" + str(eventValue) + "): This file must contain either ALL versions of this event or NO versions of this event. Currently some, but not all, versions of this event are present\n"
            # NOTE(review): .get() returns None both for "never seen" and
            # for a recorded False, so an event first recorded with no
            # preference re-enters this branch on later sightings instead
            # of the else - confirm this conflation is intended.
            if not stackSupportSpecified.get(eventValue):
                # Haven't checked this event before. Remember whether a preference is stated
                if ( not eventStackBitFromNoStackList) or ( not eventStackBitFromExplicitStackList):
                    stackSupportSpecified[eventValue] = True
                else:
                    stackSupportSpecified[eventValue] = False
            else:
                # We've checked this event before.
                if stackSupportSpecified[eventValue]:
                    # When we last checked, a preference was previously specified, so it better be specified here
                    if eventStackBitFromNoStackList and eventStackBitFromExplicitStackList:
                        raise Exception(sStackSpecificityError)
                else:
                    # When we last checked, a preference was not previously specified, so it better not be specified here
                    if ( not eventStackBitFromNoStackList) or ( not eventStackBitFromExplicitStackList):
                        raise Exception(sStackSpecificityError)
import argparse
import sys
def main(argv):
    """Command-line driver: validate the manifest against the exclusion
    list, then generate all platform-independent files.

    Returns 0 on success and 1 when unknown arguments are supplied
    (the caller passes this to sys.exit)."""
    #parse the command line
    parser = argparse.ArgumentParser(description="Generates the Code required to instrument LTTtng logging mechanism")
    required = parser.add_argument_group('required arguments')
    required.add_argument('--man', type=str, required=True,
                          help='full path to manifest containig the description of events')
    required.add_argument('--exc', type=str, required=True,
                          help='full path to exclusion list')
    required.add_argument('--inc', type=str, required=True,
                          help='full path to directory where the header files will be generated')
    required.add_argument('--dummy', type=str, required=True,
                          help='full path to file that will have dummy definitions of FireEtw functions')
    required.add_argument('--testdir', type=str, required=True,
                          help='full path to directory where the test assets will be deployed')
    args, unknown = parser.parse_known_args(argv)
    if unknown:
        print('Unknown argument(s): ', ', '.join(unknown))
        # BUG FIX: the original returned const.UnknownArguments, but no
        # 'const' module is imported anywhere in this file, so reaching
        # this branch raised NameError.  Return a plain nonzero exit code.
        return 1
    sClrEtwAllMan = args.man
    exclusionListFile = args.exc
    incdir = args.inc
    etmDummyFile = args.dummy
    testDir = args.testdir
    checkConsistency(sClrEtwAllMan, exclusionListFile)
    generatePlformIndependentFiles(sClrEtwAllMan,incdir,etmDummyFile,testDir)
    # Explicit success code (sys.exit(0) == sys.exit(None), so this stays
    # backward compatible with the previous implicit None return).
    return 0
if __name__ == '__main__':
    # Surface main()'s return code as the process exit status (a None
    # return maps to exit code 0).
    return_code = main(sys.argv[1:])
    sys.exit(return_code)
| {
"content_hash": "39d0886137c53e8d5dac150561b8ce2e",
"timestamp": "",
"source": "github",
"line_count": 807,
"max_line_length": 285,
"avg_line_length": 38.712515489467165,
"alnum_prop": 0.589545789187286,
"repo_name": "taylorjonl/coreclr",
"id": "0f4033e18f219c6b8f0e8987fd908a0e3483ca9a",
"size": "31241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/scripts/genXplatEventing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "942827"
},
{
"name": "Awk",
"bytes": "5737"
},
{
"name": "Batchfile",
"bytes": "39533"
},
{
"name": "C",
"bytes": "5629044"
},
{
"name": "C#",
"bytes": "127735352"
},
{
"name": "C++",
"bytes": "67860202"
},
{
"name": "CMake",
"bytes": "546739"
},
{
"name": "Groff",
"bytes": "529523"
},
{
"name": "Groovy",
"bytes": "22817"
},
{
"name": "HTML",
"bytes": "16196"
},
{
"name": "Makefile",
"bytes": "2527"
},
{
"name": "Objective-C",
"bytes": "223938"
},
{
"name": "Perl",
"bytes": "23359"
},
{
"name": "PowerShell",
"bytes": "4805"
},
{
"name": "Python",
"bytes": "69512"
},
{
"name": "Shell",
"bytes": "61628"
},
{
"name": "Smalltalk",
"bytes": "1359502"
},
{
"name": "SuperCollider",
"bytes": "1621"
},
{
"name": "Yacc",
"bytes": "157302"
}
],
"symlink_target": ""
} |
from swgpy.object import *

def create(kernel):
	"""Build the shared_web_03 static-structure template object."""
	obj = Static()
	obj.template = "object/static/structure/general/shared_web_03.iff"
	obj.attribute_template_id = -1
	obj.stfName("obj_n","unknown_object")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return obj
"content_hash": "931b8cae171c9ccede39ea02324df4b7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 23,
"alnum_prop": 0.6889632107023411,
"repo_name": "anhstudios/swganh",
"id": "5e9f8887b4c8717f9db3ac0bed4bef2794f568c7",
"size": "444",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/structure/general/shared_web_03.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
""" utility functions with "chemical know-how"
"""
from __future__ import print_function
import re,os
from rdkit import RDConfig
from rdkit.six.moves import xrange
# Location of the atomic-data source: a file-based database shipped in the
# RDKit data directory by default, or the "::RDData" alias for PgSQL builds
# (presumably resolved by the rdkit.Dbase layer -- confirm).
if not RDConfig.usePgSQL:
  _atomDbName = os.path.join(RDConfig.RDDataDir,'atomdb.gdb')
else:
  _atomDbName = "::RDData"
def GetAtomicData(atomDict,descriptorsDesired,dBase=_atomDbName,
                  table='atomic_data',where='',
                  user='sysdba',password='masterkey',
                  includeElCounts=0):
  """ pulls atomic data from a database

    **Arguments**

      - atomDict: the dictionary to populate

      - descriptorsDesired: the descriptors to pull for each atom

      - dBase: the DB to use

      - table: the DB table to use

      - where: the SQL where clause

      - user: the user name to use with the DB

      - password: the password to use with the DB

      - includeElCounts: if nonzero, valence electron count fields are added to
        the _atomDict_

  """
  extraFields =['NVAL','NVAL_NO_FULL_F','NVAL_NO_FULL_D','NVAL_NO_FULL']
  from rdkit.Dbase import DbModule
  cn = DbModule.connect(dBase,user,password)
  c = cn.cursor()
  descriptorsDesired = [s.upper() for s in descriptorsDesired]
  if 'NAME' not in descriptorsDesired:
    descriptorsDesired.append('NAME')
  if includeElCounts and 'CONFIG' not in descriptorsDesired:
    descriptorsDesired.append('CONFIG')
  # the electron-count fields are derived below, not stored in the DB,
  # so they must not appear in the SELECT list
  for field in extraFields:
    if field in descriptorsDesired:
      descriptorsDesired.remove(field)
  toPull = ','.join(descriptorsDesired)
  # BUGFIX: the table name used to be hard-coded to 'atomic_data', silently
  # ignoring the `table` argument; it is now honored (default is unchanged).
  command = 'select %s from %s %s' % (toPull, table, where)
  try:
    c.execute(command)
  except Exception:
    # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate;
    # the best-effort print-and-return behavior is preserved
    print('Problems executing command:',command)
    return
  res = c.fetchall()
  for atom in res:
    tDict = dict(zip(descriptorsDesired, atom))
    name = tDict['NAME']
    atomDict[name] = tDict
    if includeElCounts:
      config = atomDict[name]['CONFIG']
      atomDict[name]['NVAL'] = ConfigToNumElectrons(config)
      atomDict[name]['NVAL_NO_FULL_F'] = ConfigToNumElectrons(config,ignoreFullF=1)
      atomDict[name]['NVAL_NO_FULL_D'] = ConfigToNumElectrons(config,ignoreFullD=1)
      atomDict[name]['NVAL_NO_FULL'] = ConfigToNumElectrons(config,ignoreFullF=1,
                                                            ignoreFullD=1)
def SplitComposition(compStr):
  """ Break a simple chemical composition string into (element, count) pairs.

    e.g. 'Fe3Al' -> [('Fe',3.0),('Al',1)]

    **Arguments**

      - compStr: the composition string to be processed

    **Returns**

      - a list of (symbol, count) tuples; a missing count defaults to 1

    **Note**

      - this is deliberately naive and does no validation of the symbols
        it extracts, so be gentle.

  """
  # an element symbol (capital + optional lowercase letter) followed by an
  # optional, possibly fractional, count
  pattern = re.compile(r'([A-Z][a-z]?)([0-9\.]*)')
  return [(sym, float(cnt) if cnt else 1)
          for sym, cnt in pattern.findall(compStr)]
def ConfigToNumElectrons(config,ignoreFullD=0,ignoreFullF=0):
  """ Count the electrons appearing in a configuration string.

    **Arguments**

      - config: the configuration string (e.g. '[Xe] 4f^14 6s^2')

      - ignoreFullD: toggles not counting full d shells

      - ignoreFullF: toggles not counting full f shells

    **Returns**

      the number of valence electrons

    **Note**

      the first space-delimited term (e.g. the '[Xe]' core) is never counted.

  """
  terms = config.split(' ')
  # full d/f shells are only droppable when more than one shell term is present
  canSkipFull = len(terms) > 2
  total = 0
  for term in terms[1:]:
    parts = term.split('^')
    shell = parts[0]
    n = int(parts[1])
    if ignoreFullF and n == 14 and 'f' in shell and canSkipFull:
      n = 0
    if ignoreFullD and n == 10 and 'd' in shell and canSkipFull:
      n = 0
    total += n
  return total
if __name__ == '__main__':
  # quick smoke demonstration of the two public helpers
  for formula in ('Fe', 'Fe3Al', 'Fe99PdAl', 'TiNiSiSO12P'):
    print(SplitComposition(formula))
  temp = ['[Xe] 4f^12 6s^2','[Xe] 4f^14 5d^6 6s^2','[Xe] 4f^14 5d^10 6s^2',
          '[Xe] 4f^14 5d^10 6s^2 6p^1', '[Xe] 5d^10']
  for label, dFlag, fFlag in (('ignore all', 1, 1), ('ignore d', 1, 0),
                              ('ignore f', 0, 1), ('ignore None', 0, 0)):
    print(label)
    for entry in temp:
      print(entry, '\t\t\t\t', ConfigToNumElectrons(entry, ignoreFullD=dFlag,
                                                    ignoreFullF=fFlag))
| {
"content_hash": "1a792d3a4429318d09915b2860a68717",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 85,
"avg_line_length": 28.93167701863354,
"alnum_prop": 0.6358952340060111,
"repo_name": "strets123/rdkit",
"id": "1c9afa109173fe9d3d1eb73c99c1d0ca88abb039",
"size": "4698",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rdkit/utils/chemutils.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "203078"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7068170"
},
{
"name": "CMake",
"bytes": "584702"
},
{
"name": "CSS",
"bytes": "4742"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "65468"
},
{
"name": "Java",
"bytes": "248620"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "27271"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15431"
},
{
"name": "Objective-C",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3033212"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "8899"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49170"
}
],
"symlink_target": ""
} |
import sublime
from sublime_plugin import TextCommand, WindowCommand
import os
import re
from ..git_command import GitCommand
from ...common import util
# Character used to draw commit nodes in the rendered graph view.
COMMIT_NODE_CHAR = "●"
COMMIT_NODE_CHAR_OPTIONS = "●*"
# Matches a graph line and captures the abbreviated commit hash.  Raw string
# avoids the invalid "\|"/"\-" escape sequences, which emit a
# DeprecationWarning (a SyntaxWarning on newer Pythons); the pattern bytes
# are unchanged.
COMMIT_LINE = re.compile(r"[%s][ /_\|\-.]*([a-z0-9]{3,})" % COMMIT_NODE_CHAR_OPTIONS)
class GsCompareCommitCommand(WindowCommand, GitCommand):

    """
    Open a scratch view showing the commits that differ between two refs.
    """

    def run(self, base_commit, target_commit=None, file_path=None, title=None):
        self.base_commit = base_commit
        self.target_commit = target_commit if target_commit else "HEAD"
        self._file_path = file_path
        self.title = title if title else "COMMIT COMPARISON"
        sublime.set_timeout_async(self.run_async)

    def run_async(self):
        view = util.view.get_scratch_view(self, "compare_commit", read_only=True)
        settings = view.settings()
        for key, value in (
                ("git_savvy.git_graph_args", self.get_graph_args()),
                ("git_savvy.compare_commit_view.base_commit", self.base_commit),
                ("git_savvy.compare_commit_view.target_commit", self.target_commit),
                ("git_savvy.repo_path", self.repo_path),
                ("git_savvy.compare_commit_view.file_path", self._file_path),
                ("word_wrap", False)):
            settings.set(key, value)
        view.set_syntax_file("Packages/GitSavvy/syntax/graph.sublime-syntax")
        view.set_name(self.title)
        view.sel().clear()
        view.run_command("gs_compare_commit_refresh")

    def get_graph_args(self):
        savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
        graph_args = savvy_settings.get("git_graph_args")
        if self._file_path:
            # git wants the path relative to the repo root
            rel_path = os.path.realpath(self._file_path)[len(self.repo_path) + 1:]
            graph_args = graph_args + ["--", rel_path]
        return graph_args
class GsCompareCommitRefreshCommand(TextCommand, GitCommand):

    """
    Refresh view of all commits diff between branches.
    """

    def run(self, edit):
        diff_contents = self.get_commit_branch_string()
        self.view.run_command("gs_replace_view_text", {"text": diff_contents})

    def get_commit_branch_string(self):
        # Build the full text of the comparison view: an optional file header
        # followed by one graph section per direction of the symmetric diff.
        base_commit = self.view.settings().get("git_savvy.compare_commit_view.base_commit")
        target_commit = self.view.settings().get("git_savvy.compare_commit_view.target_commit")
        file_path = self.view.settings().get("git_savvy.compare_commit_view.file_path")
        if file_path:
            diff_contents = "File: {}\n\n".format(file_path)
        else:
            diff_contents = ""
        diff_contents += "Commits on {} and not on {}\n".format(base_commit, target_commit)
        args = self.view.settings().get("git_savvy.git_graph_args")
        # "target..base" limits the log to commits reachable only from base
        args.insert(1, "{}..{}".format(target_commit, base_commit))
        diff_contents += self.git(*args)
        # git log --graph draws nodes as "*"; swap in the bullet glyph
        diff_contents = diff_contents.replace("*", COMMIT_NODE_CHAR)
        diff_contents += "\n\nCommits on {} and not on {}\n".format(target_commit, base_commit)
        # reverse the range for the second section, reusing the same args list
        args[1] = "{}..{}".format(base_commit, target_commit)
        diff_contents += self.git(*args)
        diff_contents = diff_contents.replace("*", COMMIT_NODE_CHAR)
        return diff_contents
class GsCompareCommitShowDiffCommand(TextCommand, GitCommand):

    """
    Open a diff view for the two commits recorded on this compare view.
    """

    def run(self, edit):
        sublime.set_timeout_async(self.run_async)

    def run_async(self):
        settings = self.view.settings()
        base = settings.get("git_savvy.compare_commit_view.base_commit")
        target = settings.get("git_savvy.compare_commit_view.target_commit")
        self.view.window().run_command("gs_diff", {
            "base_commit": base,
            "target_commit": target,
            "file_path": settings.get("git_savvy.compare_commit_view.file_path"),
            "disable_stage": True,
            "title": "DIFF: {}..{}".format(base, target)
        })
class GsCompareAgainstReferenceCommand(WindowCommand, GitCommand):

    """
    Prompt for an arbitrary ref and compare the target commit against it.
    """

    def run(self, target_commit=None, file_path=None):
        self._file_path = file_path
        self._target_commit = target_commit
        sublime.set_timeout_async(self.run_async)

    def run_async(self):
        self.window.show_input_panel("Ref:", "", self.show_diff, None, self.on_cancel)

    def show_diff(self, ref):
        self.window.run_command("gs_compare_commit", {
            "base_commit": ref,
            "target_commit": self._target_commit,
            "file_path": self._file_path
        })

    def on_cancel(self):
        # Return to the compare-against menu when the input panel is dismissed.
        self.window.run_command("gs_compare_against", {
            "target_commit": self._target_commit,
            "file_path": self._file_path
        })
class GsCompareAgainstBranchCommand(WindowCommand, GitCommand):

    """
    Let the user pick a branch from a quick panel and compare the target
    commit against it.
    """

    def run(self, target_commit=None, file_path=None):
        self._file_path = file_path
        self._target_commit = target_commit
        sublime.set_timeout_async(self.run_async)

    def run_async(self):
        self.all_branches = [branch.name_with_remote for branch in self.get_branches()]
        # Pre-select the branch chosen last time, falling back to the current one.
        last_choice = getattr(self, '_selected_branch', None)
        if last_choice in self.all_branches:
            pre_selected_index = self.all_branches.index(last_choice)
        else:
            pre_selected_index = self.all_branches.index(self.get_current_branch_name())
        self.window.show_quick_panel(
            self.all_branches,
            self.on_branch_selection,
            flags=sublime.MONOSPACE_FONT,
            selected_index=pre_selected_index
        )

    def on_branch_selection(self, index):
        if index == -1:
            # Panel dismissed: return to the compare-against menu.
            self.window.run_command("gs_compare_against", {
                "target_commit": self._target_commit,
                "file_path": self._file_path
            })
            return
        self._selected_branch = self.all_branches[index]
        self.window.run_command("gs_compare_commit", {
            "file_path": self._file_path,
            "base_commit": self._selected_branch,
            "target_commit": self._target_commit
        })
class GsCompareAgainstCommand(WindowCommand, GitCommand):

    """
    Show a quick panel of compare-against choices (branch or arbitrary ref)
    and dispatch to the matching command.
    """

    def run(self, target_commit=None, file_path=None, current_file=False):
        self._file_path = self.file_path if current_file else file_path
        self._target_commit = target_commit
        sublime.set_timeout_async(self.run_async)

    def run_async(self):
        self.window.show_quick_panel(
            ["Branch", "Reference"],
            self.on_option_selection,
            flags=sublime.MONOSPACE_FONT,
            selected_index=self.quick_panel_compare_against_idx
        )

    def on_option_selection(self, index):
        if index == -1:
            return
        # Remember the choice so the panel re-opens on the same entry.
        self.quick_panel_compare_against_idx = index
        command = {
            0: "gs_compare_against_branch",
            1: "gs_compare_against_reference",
        }.get(index)
        if command:
            self.window.run_command(command, {
                "target_commit": self._target_commit,
                "file_path": self._file_path
            })
| {
"content_hash": "7e71ed31e8b236f522899b50bb7051ef",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 95,
"avg_line_length": 37.39593908629442,
"alnum_prop": 0.6135468983303923,
"repo_name": "asfaltboy/GitSavvy",
"id": "38bbf73af4cd31e4d2a9ad5d5cd1e62cf826e4a2",
"size": "7371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/commands/commit_compare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "118"
},
{
"name": "HTML",
"bytes": "5477"
},
{
"name": "JavaScript",
"bytes": "84855"
},
{
"name": "Python",
"bytes": "375976"
}
],
"symlink_target": ""
} |
import sys
import os
from UcsSdk import *

# This script acknowledge all the existing UCSM faults via UCS Manager method "FaultAckFaults".

# Connection settings -- replace with the target UCS Manager's address and
# credentials before running.
ucsm_ip = '0.0.0.0'
user = 'username'
password = 'password'

try:
    # Log in, then collect the id of every FaultInst object currently present.
    handle = UcsHandle()
    handle.Login(ucsm_ip,user, password)

    idSet = IdSet()
    getRsp = handle.GetManagedObject(None, FaultInst.ClassId())
    for mo in getRsp:
        id = Id()  # NOTE: shadows the 'id' builtin; harmless in this short script
        id.Value = mo.Id
        idSet.AddChild(id)

    # Acknowledge every collected fault in a single API call, then log out.
    handle.FaultAckFaults(idSet)
    handle.Logout()

except Exception, err:  # Python 2 syntax -- this script predates Python 3
    print "Exception:", str(err)
    import traceback, sys
    print '-'*60
    traceback.print_exc(file=sys.stdout)
    print '-'*60
| {
"content_hash": "028ce191afe3ba1b8c34b6104cc0faa2",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 95,
"avg_line_length": 19.484848484848484,
"alnum_prop": 0.6749611197511665,
"repo_name": "Havate/havate-openstack",
"id": "9e4cfb6ed12148e90e8186382794929d506f6cd6",
"size": "662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proto-build/gui/UcsSdk-0.5/samples/methodFaultsAckFaults.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "407618"
},
{
"name": "HTML",
"bytes": "507406"
},
{
"name": "JavaScript",
"bytes": "25322"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "21665856"
},
{
"name": "Shell",
"bytes": "62617"
}
],
"symlink_target": ""
} |
"""
This module contains a Google Cloud Vertex AI hook.
.. spelling::
aiplatform
au
codepoints
milli
mae
quantile
quantiles
Quantiles
rmse
rmsle
rmspe
wape
prc
roc
Jetson
forecasted
Struct
sentimentMax
TrainingPipeline
targetColumn
optimizationObjective
"""
import warnings
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform import (
AutoMLForecastingTrainingJob,
AutoMLImageTrainingJob,
AutoMLTabularTrainingJob,
AutoMLTextTrainingJob,
AutoMLVideoTrainingJob,
datasets,
models,
)
from google.cloud.aiplatform_v1 import JobServiceClient, PipelineServiceClient
from google.cloud.aiplatform_v1.services.pipeline_service.pagers import ListTrainingPipelinesPager
from google.cloud.aiplatform_v1.types import TrainingPipeline
from airflow import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class AutoMLHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Auto ML APIs."""
    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        """
        :param gcp_conn_id: Airflow connection id holding the GCP credentials.
        :param delegate_to: Account to impersonate using domain-wide delegation
            of authority (forwarded to :class:`GoogleBaseHook`).
        :param impersonation_chain: Optional service account (or chain of
            accounts) to impersonate using short-term credentials.
        """
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        # The most recently created AutoML training job; stored on the hook so
        # that cancel_auto_ml_job() can cancel it later.
        self._job: Optional[
            Union[
                AutoMLForecastingTrainingJob,
                AutoMLImageTrainingJob,
                AutoMLTabularTrainingJob,
                AutoMLTextTrainingJob,
                AutoMLVideoTrainingJob,
            ]
        ] = None
def get_pipeline_service_client(
self,
region: Optional[str] = None,
) -> PipelineServiceClient:
"""Returns PipelineServiceClient."""
if region and region != 'global':
client_options = ClientOptions(api_endpoint=f'{region}-aiplatform.googleapis.com:443')
else:
client_options = ClientOptions()
return PipelineServiceClient(
credentials=self._get_credentials(), client_info=self.client_info, client_options=client_options
)
def get_job_service_client(
self,
region: Optional[str] = None,
) -> JobServiceClient:
"""Returns JobServiceClient"""
if region and region != 'global':
client_options = ClientOptions(api_endpoint=f'{region}-aiplatform.googleapis.com:443')
else:
client_options = ClientOptions()
return JobServiceClient(
credentials=self._get_credentials(), client_info=self.client_info, client_options=client_options
)
def get_auto_ml_tabular_training_job(
self,
display_name: str,
optimization_prediction_type: str,
optimization_objective: Optional[str] = None,
column_specs: Optional[Dict[str, str]] = None,
column_transformations: Optional[List[Dict[str, Dict[str, str]]]] = None,
optimization_objective_recall_value: Optional[float] = None,
optimization_objective_precision_value: Optional[float] = None,
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
) -> AutoMLTabularTrainingJob:
"""Returns AutoMLTabularTrainingJob object"""
return AutoMLTabularTrainingJob(
display_name=display_name,
optimization_prediction_type=optimization_prediction_type,
optimization_objective=optimization_objective,
column_specs=column_specs,
column_transformations=column_transformations,
optimization_objective_recall_value=optimization_objective_recall_value,
optimization_objective_precision_value=optimization_objective_precision_value,
project=project,
location=location,
credentials=self._get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_forecasting_training_job(
self,
display_name: str,
optimization_objective: Optional[str] = None,
column_specs: Optional[Dict[str, str]] = None,
column_transformations: Optional[List[Dict[str, Dict[str, str]]]] = None,
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
) -> AutoMLForecastingTrainingJob:
"""Returns AutoMLForecastingTrainingJob object"""
return AutoMLForecastingTrainingJob(
display_name=display_name,
optimization_objective=optimization_objective,
column_specs=column_specs,
column_transformations=column_transformations,
project=project,
location=location,
credentials=self._get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_image_training_job(
self,
display_name: str,
prediction_type: str = "classification",
multi_label: bool = False,
model_type: str = "CLOUD",
base_model: Optional[models.Model] = None,
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
) -> AutoMLImageTrainingJob:
"""Returns AutoMLImageTrainingJob object"""
return AutoMLImageTrainingJob(
display_name=display_name,
prediction_type=prediction_type,
multi_label=multi_label,
model_type=model_type,
base_model=base_model,
project=project,
location=location,
credentials=self._get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_text_training_job(
self,
display_name: str,
prediction_type: str,
multi_label: bool = False,
sentiment_max: int = 10,
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
) -> AutoMLTextTrainingJob:
"""Returns AutoMLTextTrainingJob object"""
return AutoMLTextTrainingJob(
display_name=display_name,
prediction_type=prediction_type,
multi_label=multi_label,
sentiment_max=sentiment_max,
project=project,
location=location,
credentials=self._get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_video_training_job(
self,
display_name: str,
prediction_type: str = "classification",
model_type: str = "CLOUD",
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
) -> AutoMLVideoTrainingJob:
"""Returns AutoMLVideoTrainingJob object"""
return AutoMLVideoTrainingJob(
display_name=display_name,
prediction_type=prediction_type,
model_type=model_type,
project=project,
location=location,
credentials=self._get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
@staticmethod
def extract_model_id(obj: Dict) -> str:
"""Returns unique id of the Model."""
return obj["name"].rpartition("/")[-1]
def wait_for_operation(self, operation: Operation, timeout: Optional[float] = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def cancel_auto_ml_job(self) -> None:
"""Cancel Auto ML Job for training pipeline"""
if self._job:
self._job.cancel()
    @GoogleBaseHook.fallback_to_default_project_id
    def create_auto_ml_tabular_training_job(
        self,
        project_id: str,
        region: str,
        display_name: str,
        dataset: datasets.TabularDataset,
        target_column: str,
        optimization_prediction_type: str,
        optimization_objective: Optional[str] = None,
        column_specs: Optional[Dict[str, str]] = None,
        column_transformations: Optional[List[Dict[str, Dict[str, str]]]] = None,
        optimization_objective_recall_value: Optional[float] = None,
        optimization_objective_precision_value: Optional[float] = None,
        labels: Optional[Dict[str, str]] = None,
        training_encryption_spec_key_name: Optional[str] = None,
        model_encryption_spec_key_name: Optional[str] = None,
        training_fraction_split: Optional[float] = None,
        validation_fraction_split: Optional[float] = None,
        test_fraction_split: Optional[float] = None,
        predefined_split_column_name: Optional[str] = None,
        timestamp_split_column_name: Optional[str] = None,
        weight_column: Optional[str] = None,
        budget_milli_node_hours: int = 1000,
        model_display_name: Optional[str] = None,
        model_labels: Optional[Dict[str, str]] = None,
        disable_early_stopping: bool = False,
        export_evaluated_data_items: bool = False,
        export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
        export_evaluated_data_items_override_destination: bool = False,
        sync: bool = True,
    ) -> models.Model:
        """
        Create an AutoML Tabular Training Job.

        :param project_id: Required. Project to run training in.
        :param region: Required. Location to run training in.
        :param display_name: Required. The user-defined name of this TrainingPipeline.
        :param dataset: Required. The dataset within the same Project from which data will be used to train
            the Model. The Dataset must use schema compatible with Model being trained, and what is
            compatible should be described in the used TrainingPipeline's [training_task_definition]
            [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular
            Datasets, all their data is exported to training, to pick and choose from.
        :param target_column: Required. The name of the column values of which the Model is to predict.
        :param optimization_prediction_type: The type of prediction the Model is to produce.
            "classification" - Predict one out of multiple target values is picked for each row.
            "regression" - Predict a value based on its relation to other values. This type is available only
            to columns that contain semantically numeric values, i.e. integers or floating point number, even
            if stored as e.g. strings.
        :param optimization_objective: Optional. Objective function the Model is to be optimized towards.
            The training task creates a Model that maximizes/minimizes the value of the objective function
            over the validation set.
            The supported optimization objectives depend on the prediction type, and in the case of
            classification also the number of distinct values in the target column (two distinct values
            -> binary, 3 or more distinct values -> multi class). If the field is not set, the default
            objective function is used.
            Classification (binary):
            "maximize-au-roc" (default) - Maximize the area under the receiver operating characteristic (ROC)
            curve.
            "minimize-log-loss" - Minimize log loss.
            "maximize-au-prc" - Maximize the area under the precision-recall curve.
            "maximize-precision-at-recall" - Maximize precision for a specified recall value.
            "maximize-recall-at-precision" - Maximize recall for a specified precision value.
            Classification (multi class):
            "minimize-log-loss" (default) - Minimize log loss.
            Regression:
            "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE).
            "minimize-mae" - Minimize mean-absolute error (MAE).
            "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
        :param column_specs: Optional. Alternative to column_transformations where the keys of the dict are
            column names and their respective values are one of AutoMLTabularTrainingJob.column_data_types.
            When creating transformation for BigQuery Struct column, the column should be flattened using "."
            as the delimiter. Only columns with no child should have a transformation. If an input column has
            no transformations on it, such a column is ignored by the training, except for the targetColumn,
            which should have no transformations defined on. Only one of column_transformations or
            column_specs should be passed.
        :param column_transformations: Optional. Transformations to apply to the input columns (i.e. columns
            other than the targetColumn). Each transformation may produce multiple result values from the
            column's value, and all are used for training. When creating transformation for BigQuery Struct
            column, the column should be flattened using "." as the delimiter. Only columns with no child
            should have a transformation. If an input column has no transformations on it, such a column is
            ignored by the training, except for the targetColumn, which should have no transformations
            defined on. Only one of column_transformations or column_specs should be passed. Consider using
            column_specs as column_transformations will be deprecated eventually.
        :param optimization_objective_recall_value: Optional. Required when maximize-precision-at-recall
            optimizationObjective was picked, represents the recall value at which the optimization is done.
            The minimum value is 0 and the maximum is 1.0.
        :param optimization_objective_precision_value: Optional. Required when maximize-recall-at-precision
            optimizationObjective was picked, represents the precision value at which the optimization is
            done.
            The minimum value is 0 and the maximum is 1.0.
        :param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
            keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
            lowercase letters, numeric characters, underscores and dashes. International characters are
            allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
        :param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
            managed encryption key used to protect the training pipeline. Has the form:
            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
            in the same region as where the compute resource is created. If set, this TrainingPipeline will
            be secured by this key.
            Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
            is not set separately.
        :param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
            managed encryption key used to protect the model. Has the form:
            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
            in the same region as where the compute resource is created. If set, the trained Model will be
            secured by this key.
        :param training_fraction_split: Optional. The fraction of the input data that is to be used to train
            the Model. This is ignored if Dataset is not provided.
        :param validation_fraction_split: Optional. The fraction of the input data that is to be used to
            validate the Model. This is ignored if Dataset is not provided.
        :param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
            the Model. This is ignored if Dataset is not provided.
        :param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
            columns. The value of the key (either the label's value or value in the column) must be one of
            {``training``, ``validation``, ``test``}, and it defines to which set the given piece of data is
            assigned. If for a piece of data the key is not present or has an invalid value, that piece is
            ignored by the pipeline. Supported only for tabular and time series Datasets.
        :param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data columns.
            The value of the key values of the key (the values in the column) must be in RFC 3339 `date-time`
            format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the
            key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only
            for tabular and time series Datasets. This parameter must be used with training_fraction_split,
            validation_fraction_split and test_fraction_split.
        :param weight_column: Optional. Name of the column that should be used as the weight column. Higher
            values in this column give more importance to the row during Model training. The column must have
            numeric values between 0 and 10000 inclusively, and 0 value means that the row is ignored. If the
            weight column field is not set, then all rows are assumed to have equal weight of 1.
        :param budget_milli_node_hours: Optional. The train budget of creating this Model, expressed in
            milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model
            will not exceed this budget. The final cost will be attempted to be close to the budget, though
            may end up being (even) noticeably smaller - at the backend's discretion. This especially may
            happen when further model training ceases to provide any improvements. If the budget is set to a
            value known to be insufficient to train a Model for the given training set, the training won't be
            attempted and will error. The minimum value is 1000 and the maximum is 72000.
        :param model_display_name: Optional. If the script produces a managed Vertex AI Model. The display
            name of the Model. The name can be up to 128 characters long and can be consist of any UTF-8
            characters. If not provided upon creation, the job's display_name is used.
        :param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
            keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
            lowercase letters, numeric characters, underscores and dashes. International characters are
            allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
        :param disable_early_stopping: Required. If true, the entire budget is used. This disables the early
            stopping feature. By default, the early stopping feature is enabled, which means that training
            might stop before the entire training budget has been used, if further training does no longer
            brings significant improvement to the model.
        :param export_evaluated_data_items: Whether to export the test set predictions to a BigQuery table.
            If False, then the export is not performed.
        :param export_evaluated_data_items_bigquery_destination_uri: Optional. URI of desired destination
            BigQuery table for exported test set predictions.
            Expected format: ``bq://<project_id>:<dataset_id>:<table>``
            If not specified, then results are exported to the following auto-created BigQuery table:
            ``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>
            .evaluated_examples``
            Applies only if [export_evaluated_data_items] is True.
        :param export_evaluated_data_items_override_destination: Whether to override the contents of
            [export_evaluated_data_items_bigquery_destination_uri], if the table exists, for exported test
            set predictions. If False, and the table exists, then the training job will fail. Applies only if
            [export_evaluated_data_items] is True and [export_evaluated_data_items_bigquery_destination_uri]
            is specified.
        :param sync: Whether to execute this method synchronously. If False, this method will be executed in
            concurrent Future and any downstream object will be immediately returned and synced when the
            Future has completed.
        """
        if column_transformations:
            # Mirror the SDK's deprecation guidance for the legacy parameter.
            warnings.warn(
                "Consider using column_specs as column_transformations will be deprecated eventually.",
                DeprecationWarning,
                stacklevel=2,
            )
        # Build the training job; it is stored on the hook (self._job) so that
        # cancel_auto_ml_job() can reach it while training is in flight.
        self._job = self.get_auto_ml_tabular_training_job(
            project=project_id,
            location=region,
            display_name=display_name,
            optimization_prediction_type=optimization_prediction_type,
            optimization_objective=optimization_objective,
            column_specs=column_specs,
            column_transformations=column_transformations,
            optimization_objective_recall_value=optimization_objective_recall_value,
            optimization_objective_precision_value=optimization_objective_precision_value,
            labels=labels,
            training_encryption_spec_key_name=training_encryption_spec_key_name,
            model_encryption_spec_key_name=model_encryption_spec_key_name,
        )
        if not self._job:
            raise AirflowException("AutoMLTabularTrainingJob was not created")
        model = self._job.run(
            dataset=dataset,
            target_column=target_column,
            training_fraction_split=training_fraction_split,
            validation_fraction_split=validation_fraction_split,
            test_fraction_split=test_fraction_split,
            predefined_split_column_name=predefined_split_column_name,
            timestamp_split_column_name=timestamp_split_column_name,
            weight_column=weight_column,
            budget_milli_node_hours=budget_milli_node_hours,
            model_display_name=model_display_name,
            model_labels=model_labels,
            disable_early_stopping=disable_early_stopping,
            export_evaluated_data_items=export_evaluated_data_items,
            export_evaluated_data_items_bigquery_destination_uri=(
                export_evaluated_data_items_bigquery_destination_uri
            ),
            export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination,
            sync=sync,
        )
        # NOTE(review): wait() appears to block until the model is ready even
        # when sync=False -- confirm this is intended before relying on async.
        model.wait()
        return model
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_forecasting_training_job(
self,
project_id: str,
region: str,
display_name: str,
dataset: datasets.TimeSeriesDataset,
target_column: str,
time_column: str,
time_series_identifier_column: str,
unavailable_at_forecast_columns: List[str],
available_at_forecast_columns: List[str],
forecast_horizon: int,
data_granularity_unit: str,
data_granularity_count: int,
optimization_objective: Optional[str] = None,
column_specs: Optional[Dict[str, str]] = None,
column_transformations: Optional[List[Dict[str, Dict[str, str]]]] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
predefined_split_column_name: Optional[str] = None,
weight_column: Optional[str] = None,
time_series_attribute_columns: Optional[List[str]] = None,
context_window: Optional[int] = None,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
export_evaluated_data_items_override_destination: bool = False,
quantiles: Optional[List[float]] = None,
validation_options: Optional[str] = None,
budget_milli_node_hours: int = 1000,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
sync: bool = True,
) -> models.Model:
"""
Create an AutoML Forecasting Training Job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param dataset: Required. The dataset within the same Project from which data will be used to train
the Model. The Dataset must use schema compatible with Model being trained, and what is
compatible should be described in the used TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For time series
Datasets, all their data is exported to training, to pick and choose from.
:param target_column: Required. Name of the column that the Model is to predict values for.
:param time_column: Required. Name of the column that identifies time order in the time series.
:param time_series_identifier_column: Required. Name of the column that identifies the time series.
:param unavailable_at_forecast_columns: Required. Column names of columns that are unavailable at
forecast. Each column contains information for the given entity (identified by the
[time_series_identifier_column]) that is unknown before the forecast (e.g. population of a city
in a given year, or weather on a given day).
:param available_at_forecast_columns: Required. Column names of columns that are available at
forecast. Each column contains information for the given entity (identified by the
[time_series_identifier_column]) that is known at forecast.
:param forecast_horizon: Required. The amount of time into the future for which forecasted values for
the target are returned. Expressed in number of units defined by the [data_granularity_unit] and
[data_granularity_count] field. Inclusive.
:param data_granularity_unit: Required. The data granularity unit. Accepted values are ``minute``,
``hour``, ``day``, ``week``, ``month``, ``year``.
:param data_granularity_count: Required. The number of data granularity units between data points in
the training data. If [data_granularity_unit] is `minute`, can be 1, 5, 10, 15, or 30. For all
other values of [data_granularity_unit], must be 1.
:param optimization_objective: Optional. Objective function the model is to be optimized towards. The
training process creates a Model that optimizes the value of the objective function over the
validation set. The supported optimization objectives:
"minimize-rmse" (default) - Minimize root-mean-squared error (RMSE).
"minimize-mae" - Minimize mean-absolute error (MAE).
"minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
"minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE).
"minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and
mean-absolute-error (MAE).
"minimize-quantile-loss" - Minimize the quantile loss at the defined quantiles. (Set this
objective to build quantile forecasts.)
:param column_specs: Optional. Alternative to column_transformations where the keys of the dict are
column names and their respective values are one of AutoMLTabularTrainingJob.column_data_types.
When creating transformation for BigQuery Struct column, the column should be flattened using "."
as the delimiter. Only columns with no child should have a transformation. If an input column has
no transformations on it, such a column is ignored by the training, except for the targetColumn,
which should have no transformations defined on. Only one of column_transformations or
column_specs should be passed.
:param column_transformations: Optional. Transformations to apply to the input columns (i.e. columns
other than the targetColumn). Each transformation may produce multiple result values from the
column's value, and all are used for training. When creating transformation for BigQuery Struct
column, the column should be flattened using "." as the delimiter. Only columns with no child
should have a transformation. If an input column has no transformations on it, such a column is
ignored by the training, except for the targetColumn, which should have no transformations
defined on. Only one of column_transformations or column_specs should be passed. Consider using
column_specs as column_transformations will be deprecated eventually.
:param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created. If set, this TrainingPipeline will
be secured by this key.
Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created.
If set, the trained Model will be secured by this key.
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or value in the column) must be one of
{``TRAIN``, ``VALIDATE``, ``TEST``}, and it defines to which set the given piece of data is
assigned. If for a piece of data the key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param weight_column: Optional. Name of the column that should be used as the weight column. Higher
values in this column give more importance to the row during Model training. The column must have
numeric values between 0 and 10000 inclusively, and 0 value means that the row is ignored. If the
weight column field is not set, then all rows are assumed to have equal weight of 1.
:param time_series_attribute_columns: Optional. Column names that should be used as attribute
columns. Each column is constant within a time series.
:param context_window: Optional. The amount of time into the past training and prediction data is
used for model training and prediction respectively. Expressed in number of units defined by the
[data_granularity_unit] and [data_granularity_count] fields. When not provided uses the default
value of 0 which means the model sets each series context window to be 0 (also known as "cold
start"). Inclusive.
:param export_evaluated_data_items: Whether to export the test set predictions to a BigQuery table.
If False, then the export is not performed.
:param export_evaluated_data_items_bigquery_destination_uri: Optional. URI of desired destination
BigQuery table for exported test set predictions. Expected format:
``bq://<project_id>:<dataset_id>:<table>``
If not specified, then results are exported to the following auto-created BigQuery table:
``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>
.evaluated_examples``
Applies only if [export_evaluated_data_items] is True.
:param export_evaluated_data_items_override_destination: Whether to override the contents of
[export_evaluated_data_items_bigquery_destination_uri], if the table exists, for exported test
set predictions. If False, and the table exists, then the training job will fail.
Applies only if [export_evaluated_data_items] is True and
[export_evaluated_data_items_bigquery_destination_uri] is specified.
:param quantiles: Quantiles to use for the `minizmize-quantile-loss`
[AutoMLForecastingTrainingJob.optimization_objective]. This argument is required in this case.
Accepts up to 5 quantiles in the form of a double from 0 to 1, exclusive. Each quantile must be
unique.
:param validation_options: Validation options for the data validation component. The available
options are: "fail-pipeline" - (default), will validate against the validation and fail the
pipeline if it fails. "ignore-validation" - ignore the results of the validation and continue the
pipeline
:param budget_milli_node_hours: Optional. The train budget of creating this Model, expressed in milli
node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will
not exceed this budget. The final cost will be attempted to be close to the budget, though may
end up being (even) noticeably smaller - at the backend's discretion. This especially may happen
when further model training ceases to provide any improvements. If the budget is set to a value
known to be insufficient to train a Model for the given training set, the training won't be
attempted and will error. The minimum value is 1000 and the maximum is 72000.
:param model_display_name: Optional. If the script produces a managed Vertex AI Model. The display
name of the Model. The name can be up to 128 characters long and can be consist of any UTF-8
characters. If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
"""
if column_transformations:
warnings.warn(
"Consider using column_specs as column_transformations will be deprecated eventually.",
DeprecationWarning,
stacklevel=2,
)
self._job = self.get_auto_ml_forecasting_training_job(
project=project_id,
location=region,
display_name=display_name,
optimization_objective=optimization_objective,
column_specs=column_specs,
column_transformations=column_transformations,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
if not self._job:
raise AirflowException("AutoMLForecastingTrainingJob was not created")
model = self._job.run(
dataset=dataset,
target_column=target_column,
time_column=time_column,
time_series_identifier_column=time_series_identifier_column,
unavailable_at_forecast_columns=unavailable_at_forecast_columns,
available_at_forecast_columns=available_at_forecast_columns,
forecast_horizon=forecast_horizon,
data_granularity_unit=data_granularity_unit,
data_granularity_count=data_granularity_count,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
predefined_split_column_name=predefined_split_column_name,
weight_column=weight_column,
time_series_attribute_columns=time_series_attribute_columns,
context_window=context_window,
export_evaluated_data_items=export_evaluated_data_items,
export_evaluated_data_items_bigquery_destination_uri=(
export_evaluated_data_items_bigquery_destination_uri
),
export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination,
quantiles=quantiles,
validation_options=validation_options,
budget_milli_node_hours=budget_milli_node_hours,
model_display_name=model_display_name,
model_labels=model_labels,
sync=sync,
)
model.wait()
return model
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_image_training_job(
self,
project_id: str,
region: str,
display_name: str,
dataset: datasets.ImageDataset,
prediction_type: str = "classification",
multi_label: bool = False,
model_type: str = "CLOUD",
base_model: Optional[models.Model] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
budget_milli_node_hours: Optional[int] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
disable_early_stopping: bool = False,
sync: bool = True,
) -> models.Model:
"""
Create an AutoML Image Training Job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param dataset: Required. The dataset within the same Project from which data will be used to train
the Model. The Dataset must use schema compatible with Model being trained, and what is
compatible should be described in the used TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular
Datasets, all their data is exported to training, to pick and choose from.
:param prediction_type: The type of prediction the Model is to produce, one of:
"classification" - Predict one out of multiple target values is picked for each row.
"object_detection" - Predict a value based on its relation to other values. This type is
available only to columns that contain semantically numeric values, i.e. integers or floating
point number, even if stored as e.g. strings.
:param multi_label: Required. Default is False. If false, a single-label (multi-class) Model will be
trained (i.e. assuming that for each image just up to one annotation may be applicable). If true,
a multi-label Model will be trained (i.e. assuming that for each image multiple annotations may
be applicable).
This is only applicable for the "classification" prediction_type and will be ignored otherwise.
:param model_type: Required. One of the following:
"CLOUD" - Default for Image Classification. A Model best tailored to be used within Google Cloud,
and which cannot be exported.
"CLOUD_HIGH_ACCURACY_1" - Default for Image Object Detection. A model best tailored to be used
within Google Cloud, and which cannot be exported. Expected to have a higher latency, but should
also have a higher prediction quality than other cloud models.
"CLOUD_LOW_LATENCY_1" - A model best tailored to be used within Google Cloud, and which cannot be
exported. Expected to have a low latency, but may have lower prediction quality than other cloud
models.
"MOBILE_TF_LOW_LATENCY_1" - A model that, in addition to being available within Google Cloud, can
also be exported as TensorFlow or Core ML model and used on a mobile or edge device afterwards.
Expected to have low latency, but may have lower prediction quality than other mobile models.
"MOBILE_TF_VERSATILE_1" - A model that, in addition to being available within Google Cloud, can
also be exported as TensorFlow or Core ML model and used on a mobile or edge device with
afterwards.
"MOBILE_TF_HIGH_ACCURACY_1" - A model that, in addition to being available within Google Cloud,
can also be exported as TensorFlow or Core ML model and used on a mobile or edge device
afterwards. Expected to have a higher latency, but should also have a higher prediction quality
than other mobile models.
:param base_model: Optional. Only permitted for Image Classification models. If it is specified, the
new model will be trained based on the `base` model. Otherwise, the new model will be trained
from scratch. The `base` model must be in the same Project and Location as the new Model to
train, and have the same model_type.
:param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created. If set, this TrainingPipeline will
be secured by this key.
Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, the trained Model will be secured by this key.
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this
filter are used to test the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param budget_milli_node_hours: Optional. The train budget of creating this Model, expressed in milli
node hours i.e. 1,000 value in this field means 1 node hour.
Defaults by `prediction_type`:
`classification` - For Cloud models the budget must be: 8,000 - 800,000 milli node hours
(inclusive). The default value is 192,000 which represents one day in wall time, assuming 8 nodes
are used.
`object_detection` - For Cloud models the budget must be: 20,000 - 900,000 milli node hours
(inclusive). The default value is 216,000 which represents one day in wall time, assuming 9 nodes
are used.
The training cost of the model will not exceed this budget. The final cost will be attempted to
be close to the budget, though may end up being (even) noticeably smaller - at the backend's
discretion. This especially may happen when further model training ceases to provide any
improvements. If the budget is set to a value known to be insufficient to train a Model for the
given training set, the training won't be attempted and will error.
:param model_display_name: Optional. The display name of the managed Vertex AI Model. The name can be
up to 128 characters long and can be consist of any UTF-8 characters. If not provided upon
creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param disable_early_stopping: Required. If true, the entire budget is used. This disables the early
stopping feature. By default, the early stopping feature is enabled, which means that training
might stop before the entire training budget has been used, if further training does no longer
brings significant improvement to the model.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
"""
self._job = self.get_auto_ml_image_training_job(
project=project_id,
location=region,
display_name=display_name,
prediction_type=prediction_type,
multi_label=multi_label,
model_type=model_type,
base_model=base_model,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
if not self._job:
raise AirflowException("AutoMLImageTrainingJob was not created")
model = self._job.run(
dataset=dataset,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
budget_milli_node_hours=budget_milli_node_hours,
model_display_name=model_display_name,
model_labels=model_labels,
disable_early_stopping=disable_early_stopping,
sync=sync,
)
model.wait()
return model
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_text_training_job(
self,
project_id: str,
region: str,
display_name: str,
dataset: datasets.TextDataset,
prediction_type: str,
multi_label: bool = False,
sentiment_max: int = 10,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
sync: bool = True,
) -> models.Model:
"""
Create an AutoML Text Training Job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param dataset: Required. The dataset within the same Project from which data will be used to train
the Model. The Dataset must use schema compatible with Model being trained, and what is
compatible should be described in the used TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
:param prediction_type: The type of prediction the Model is to produce, one of:
"classification" - A classification model analyzes text data and returns a list of categories
that apply to the text found in the data. Vertex AI offers both single-label and multi-label text
classification models.
"extraction" - An entity extraction model inspects text data for known entities referenced in the
data and labels those entities in the text.
"sentiment" - A sentiment analysis model inspects text data and identifies the prevailing
emotional opinion within it, especially to determine a writer's attitude as positive, negative,
or neutral.
:param multi_label: Required and only applicable for text classification task. If false, a
single-label (multi-class) Model will be trained (i.e. assuming that for each text snippet just
up to one annotation may be applicable). If true, a multi-label Model will be trained (i.e.
assuming that for each text snippet multiple annotations may be applicable).
:param sentiment_max: Required and only applicable for sentiment task. A sentiment is expressed as an
integer ordinal, where higher value means a more positive sentiment. The range of sentiments that
will be used is between 0 and sentimentMax (inclusive on both ends), and all the values in the
range must be represented in the dataset before a model can be created. Only the Annotations with
this sentimentMax will be used for training. sentimentMax value must be between 1 and 10
(inclusive).
:param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, the trained Model will be secured by this key.
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this
filter are used to test the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param model_display_name: Optional. The display name of the managed Vertex AI Model. The name can be
up to 128 characters long and can consist of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
"""
self._job = self.get_auto_ml_text_training_job(
project=project_id,
location=region,
display_name=display_name,
prediction_type=prediction_type,
multi_label=multi_label,
sentiment_max=sentiment_max,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
if not self._job:
raise AirflowException("AutoMLTextTrainingJob was not created")
model = self._job.run(
dataset=dataset,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
model_display_name=model_display_name,
model_labels=model_labels,
sync=sync,
)
model.wait()
return model
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_video_training_job(
self,
project_id: str,
region: str,
display_name: str,
dataset: datasets.VideoDataset,
prediction_type: str = "classification",
model_type: str = "CLOUD",
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
training_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
sync: bool = True,
) -> models.Model:
"""
Create an AutoML Video Training Job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param dataset: Required. The dataset within the same Project from which data will be used to train
the Model. The Dataset must use schema compatible with Model being trained, and what is
compatible should be described in the used TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular
Datasets, all their data is exported to training, to pick and choose from.
:param prediction_type: The type of prediction the Model is to produce, one of:
"classification" - A video classification model classifies shots and segments in your videos
according to your own defined labels.
"object_tracking" - A video object tracking model detects and tracks multiple objects in shots
and segments. You can use these models to track objects in your videos according to your own
pre-defined, custom labels.
"action_recognition" - A video action recognition model pinpoints the location of actions with
short temporal durations (~1 second).
:param model_type: Required. One of the following:
"CLOUD" - available for "classification", "object_tracking" and "action_recognition" A Model best
tailored to be used within Google Cloud, and which cannot be exported.
"MOBILE_VERSATILE_1" - available for "classification", "object_tracking" and "action_recognition"
A model that, in addition to being available within Google Cloud, can also be exported (see
ModelService.ExportModel) as a TensorFlow or TensorFlow Lite model and used on a mobile or edge
device with afterwards.
"MOBILE_CORAL_VERSATILE_1" - available only for "object_tracking" A versatile model that is meant
to be exported (see ModelService.ExportModel) and used on a Google Coral device.
"MOBILE_CORAL_LOW_LATENCY_1" - available only for "object_tracking" A model that trades off
quality for low latency, to be exported (see ModelService.ExportModel) and used on a Google Coral
device.
"MOBILE_JETSON_VERSATILE_1" - available only for "object_tracking" A versatile model that is
meant to be exported (see ModelService.ExportModel) and used on an NVIDIA Jetson device.
"MOBILE_JETSON_LOW_LATENCY_1" - available only for "object_tracking" A model that trades off
quality for low latency, to be exported (see ModelService.ExportModel) and used on an NVIDIA
Jetson device.
:param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, the trained Model will be secured by this key.
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this
filter are used to test the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param model_display_name: Optional. The display name of the managed Vertex AI Model. The name can be
up to 128 characters long and can be consist of any UTF-8 characters. If not provided upon
creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
"""
self._job = self.get_auto_ml_video_training_job(
project=project_id,
location=region,
display_name=display_name,
prediction_type=prediction_type,
model_type=model_type,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
if not self._job:
raise AirflowException("AutoMLVideoTrainingJob was not created")
model = self._job.run(
dataset=dataset,
training_fraction_split=training_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
test_filter_split=test_filter_split,
model_display_name=model_display_name,
model_labels=model_labels,
sync=sync,
)
model.wait()
return model
@GoogleBaseHook.fallback_to_default_project_id
def delete_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Operation:
"""
Deletes a TrainingPipeline.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
result = client.delete_training_pipeline(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> TrainingPipeline:
"""
Gets a TrainingPipeline.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
result = client.get_training_pipeline(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_training_pipelines(
self,
project_id: str,
region: str,
page_size: Optional[int] = None,
page_token: Optional[str] = None,
filter: Optional[str] = None,
read_mask: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ListTrainingPipelinesPager:
"""
Lists TrainingPipelines in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. The standard list filter. Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
Some examples of using the filter are:
- ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
- ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
- ``NOT display_name="my_pipeline"``
- ``state="PIPELINE_STATE_FAILED"``
:param page_size: Optional. The standard list page size.
:param page_token: Optional. The standard list page token. Typically obtained via
[ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token]
of the previous
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]
call.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_training_pipelines(
request={
'parent': parent,
'page_size': page_size,
'page_token': page_token,
'filter': filter,
'read_mask': read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
| {
"content_hash": "a83555313ca4b46e2f151bf2dad409fc",
"timestamp": "",
"source": "github",
"line_count": 1257,
"max_line_length": 133,
"avg_line_length": 60.17661097852029,
"alnum_prop": 0.6709103408159488,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "12c480a28d34cb3b7a66de43bbba53aeca5687b6",
"size": "76431",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/hooks/vertex_ai/auto_ml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
import os
from pymatgen.io.feff.outputs import LDos, Xmu
# Shared fixture locations: pymatgen's top-level test_files tree and the
# reciprocal-space FEFF DOS subdirectory inside it.
_module_dir = os.path.dirname(__file__)
test_dir = os.path.join(_module_dir, "..", "..", "..", "..", 'test_files')
test_dir_reci = os.path.join(test_dir, 'feff_reci_dos')
class FeffLdosTest(unittest.TestCase):
    """Parsing and round-trip tests for LDos built from real-space and
    reciprocal-space FEFF output."""

    # Fixtures are parsed once at class-definition time, as in the original.
    ldos = LDos.from_file(os.path.join(test_dir, 'feff.inp'),
                          os.path.join(test_dir, 'ldos'))
    reci_dos = LDos.from_file(os.path.join(test_dir_reci, 'feff.inp'),
                              os.path.join(test_dir_reci, 'ldos'))

    def test_init(self):
        self.assertEqual(self.ldos.complete_dos.efermi, -11.430,
                         "Did not read correct Fermi energy from ldos file")

    def test_complete_dos(self):
        spd_s = self.ldos.complete_dos.as_dict()['spd_dos']["s"]
        self.assertEqual(spd_s['efermi'], -11.430,
                         "Failed to construct complete_dos dict properly")

    def test_as_dict_and_from_dict(self):
        before = self.ldos.charge_transfer_to_string()
        after = LDos.from_dict(self.ldos.as_dict()).charge_transfer_to_string()
        self.assertEqual(before, after,
                         "Feffldos to and from dict does not match")

    def test_reci_init(self):
        self.assertEqual(self.reci_dos.complete_dos.efermi, -9.672,
                         "Did not read correct Fermi energy from ldos file")

    def test_reci_complete_dos(self):
        spd_s = self.reci_dos.complete_dos.as_dict()['spd_dos']["s"]
        self.assertEqual(spd_s['efermi'], -9.672,
                         "Failed to construct complete_dos dict properly")

    def test_reci_charge(self):
        charge_transfer = self.reci_dos.charge_transfer
        self.assertEqual(charge_transfer['0']['Na']['s'], 0.241)
        self.assertEqual(charge_transfer['1']['O']['tot'], -0.594)
class XmuTest(unittest.TestCase):
    """Parsing and round-trip tests for Xmu built from xmu.dat."""

    @staticmethod
    def _load():
        # Parse the shared xmu.dat / feff.inp fixtures.
        return Xmu.from_file(os.path.join(test_dir, 'xmu.dat'),
                             os.path.join(test_dir, 'feff.inp'))

    def test_init(self):
        xmu = self._load()
        self.assertEqual(xmu.absorbing_atom, 'O',
                         "failed to read xmu.dat file properly")

    def test_as_dict_and_from_dict(self):
        xmu = self._load()
        roundtrip = Xmu.from_dict(xmu.as_dict())
        self.assertEqual(xmu.data.tolist(), roundtrip.data.tolist(),
                         "Xmu to and from dict does not match")
# Allow running this test module directly: ``python test_outputs.py``.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "26370457d588e8e7849e6e3b749babc7",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 36.75,
"alnum_prop": 0.5891156462585034,
"repo_name": "nisse3000/pymatgen",
"id": "1a3bdf93fae5c1a870325a4c2db918dca0a8a4de",
"size": "3050",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pymatgen/io/feff/tests/test_outputs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6934548"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
} |
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout

app = QApplication(sys.argv)

# Make widgets
window = QWidget()
btn1 = QPushButton("One")
btn2 = QPushButton("Two")
btn3 = QPushButton("Three")

# Set the layout. addStretch(1) inserts a variable-width spacer that adjusts
# with the window width, so the buttons stay evenly spread out in the pattern:
# stretch, button, stretch, button, stretch, button, stretch.
hbox = QHBoxLayout()
for button in (btn1, btn2, btn3):
    hbox.addStretch(1)
    hbox.addWidget(button)
hbox.addStretch(1)
hbox.setContentsMargins(0, 0, 0, 0)  # spacing around the layout (in pixels)
hbox.setSpacing(0)                   # spacing between elements (in pixels)
window.setLayout(hbox)

# Show
window.show()

# The mainloop of the application; event handling starts from this point.
# exec_() has a trailing underscore because "exec" was a reserved word in
# Python 2. sys.exit() ensures a clean exit and informs the environment how
# the application ended.
sys.exit(app.exec_())
| {
"content_hash": "2a8e8d7a6b1e3526977ee425ee2ab63d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 150,
"avg_line_length": 36.8780487804878,
"alnum_prop": 0.7473544973544973,
"repo_name": "jeremiedecock/snippets",
"id": "e8c2520ef0e099305dc3a71437fbb3a0a795a077",
"size": "1568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyqt/pyqt5/layout_QHBoxLayout_with_stretch.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "4294"
},
{
"name": "Batchfile",
"bytes": "6779"
},
{
"name": "C",
"bytes": "102107"
},
{
"name": "C++",
"bytes": "320943"
},
{
"name": "CMake",
"bytes": "11424"
},
{
"name": "CSS",
"bytes": "21121"
},
{
"name": "Cython",
"bytes": "21"
},
{
"name": "Dockerfile",
"bytes": "1818"
},
{
"name": "Fortran",
"bytes": "633"
},
{
"name": "Gnuplot",
"bytes": "39999"
},
{
"name": "Go",
"bytes": "3166"
},
{
"name": "Groovy",
"bytes": "3009"
},
{
"name": "HTML",
"bytes": "138995"
},
{
"name": "IDL",
"bytes": "43"
},
{
"name": "Java",
"bytes": "120221"
},
{
"name": "JavaScript",
"bytes": "32342"
},
{
"name": "Jinja",
"bytes": "206"
},
{
"name": "Jupyter Notebook",
"bytes": "95991"
},
{
"name": "Lua",
"bytes": "200"
},
{
"name": "M4",
"bytes": "111"
},
{
"name": "MATLAB",
"bytes": "31972"
},
{
"name": "Makefile",
"bytes": "81307"
},
{
"name": "OpenSCAD",
"bytes": "14995"
},
{
"name": "PHP",
"bytes": "94"
},
{
"name": "Perl",
"bytes": "46"
},
{
"name": "Processing",
"bytes": "208"
},
{
"name": "Prolog",
"bytes": "454"
},
{
"name": "Python",
"bytes": "1685966"
},
{
"name": "R",
"bytes": "76"
},
{
"name": "Raku",
"bytes": "43"
},
{
"name": "Ruby",
"bytes": "42"
},
{
"name": "Scheme",
"bytes": "649"
},
{
"name": "Shell",
"bytes": "52865"
},
{
"name": "Smalltalk",
"bytes": "55"
},
{
"name": "TeX",
"bytes": "1189"
},
{
"name": "Vue",
"bytes": "49445"
},
{
"name": "XSLT",
"bytes": "1816"
}
],
"symlink_target": ""
} |
import glob
import sys
import os
import subprocess
import re
########################################################################
#######################################################################
# Check for dependencies
#
# Is there a way to do this more elegantly?
# 1. Run "pip install numpy"
# 2. Wrap inside functions (works for numpy/pysam, but not cython)
#
# Fail early, with an actionable message, when a build-time dependency
# is missing.
try:
    import numpy
except ImportError:
    raise ImportError(
        "the CGAT code collection requires numpy to be installed "
        "before running setup.py (pip install numpy)")

try:
    import Cython
except ImportError:
    raise ImportError(
        "the CGAT code collection requires cython to "
        "be installed before running setup.py (pip install cython)")

try:
    import pysam
except ImportError:
    raise ImportError(
        "the CGAT code collection requires pysam to "
        "be installed before running setup.py (pip install pysam)")
########################################################################
########################################################################
# Import setuptools
# Use existing setuptools, otherwise try ez_setup.
try:
    import setuptools
except ImportError:
    # try to get via ez_setup
    # ez_setup did not work on all machines tested as
    # it uses curl with https protocol, which is not
    # enabled in ScientificLinux
    import ez_setup
    ez_setup.use_setuptools()

from setuptools import setup, find_packages, Extension

from distutils.version import LooseVersion
if LooseVersion(setuptools.__version__) < LooseVersion('1.1'):
    # Print the detected version as separate arguments; the previous doubled
    # parentheses (a 2to3 leftover) printed a tuple repr instead.
    print("Version detected:", LooseVersion(setuptools.__version__))
    raise ImportError(
        "the CGAT code collection requires setuptools 1.1 or higher")

from Cython.Distutils import build_ext
########################################################################
########################################################################
# True when building on macOS (Darwin).
IS_OSX = sys.platform == 'darwin'

########################################################################
########################################################################
# collect CGAT version
# The version module lives in scripts/, which is not on sys.path by default.
sys.path.insert(0, "scripts")
import version

# Rebind the module name to the version string it exports.
version = version.__version__
###############################################################
###############################################################
# Check for external dependencies
#
# Not exhaustive, simply execute a representative tool from a toolkit.
external_dependencies = (
    ("wigToBigWig", "UCSC tools", 255),
    ("bedtools", "bedtools", 0),
)

# Resolve a null sink once, outside the loop: py3k provides
# subprocess.DEVNULL, python 2 needs an explicit handle on os.devnull.
try:
    from subprocess import DEVNULL
except ImportError:
    DEVNULL = open(os.devnull, 'wb')

for tool, toolkit, expected in external_dependencies:
    try:
        retcode = subprocess.call(tool, shell=True,
                                  stdout=DEVNULL, stderr=DEVNULL)
    except OSError as msg:
        print("WARNING: dependency check for %s failed: %s" % (toolkit, msg))
        # Without this continue, retcode was unbound (or stale from the
        # previous iteration) in the comparison below.
        continue
    # UCSC tools return 255 when called without arguments
    if retcode != expected:
        print("WARNING: dependency check for %s(%s) failed, error %i" %
              (toolkit, tool, retcode))
###############################################################
###############################################################
# Define dependencies
#
# Perform a CGAT Code Collection Installation
INSTALL_CGAT_CODE_COLLECTION = True

# sys.version_info unpacks as (major, minor, micro, releaselevel, serial).
major, minor1, minor2, s, tmp = sys.version_info

# Refuse to install on anything older than Python 2.7.
if (major == 2 and minor1 < 7) or major < 2:
    raise SystemExit("""CGAT requires Python 2.7 or later.""")
#####################################################################
#####################################################################
# Code to install dependencies from a repository
#####################################################################
# Modified from http://stackoverflow.com/a/9125399
#####################################################################
def which(program):
    """Return the full path of *program* if it is installed, else None.

    If *program* contains a directory component it is checked directly;
    otherwise every entry of the PATH environment variable is searched.

    Thanks to http://stackoverflow.com/a/377028/70191
    """
    def _is_executable(candidate):
        return os.path.exists(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # An explicit path was given: accept it only if it is executable.
        return program if _is_executable(program) else None
    for directory in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if _is_executable(candidate):
            return candidate
    return None
# ``-e <vcs>+...#egg=<package>-<version>``: an editable VCS checkout.
REPO_REQUIREMENT = re.compile(
    r'^-e (?P<link>(?P<vcs>git|svn|hg|bzr).+#egg=(?P<package>.+)-(?P<version>\d(?:\.\d)*))$')

# ``-e <link>#<package>-<version>``: an editable https requirement.
HTTPS_REQUIREMENT = re.compile(
    r'^-e (?P<link>.*).+#(?P<package>.+)-(?P<version>\d(?:\.\d)*)$')

install_requires = []
dependency_links = []

# Translate each non-comment line of requires.txt into setuptools-style
# ``install_requires`` / ``dependency_links`` entries.
for requirement in (
        l.strip() for l in open('requires.txt') if not l.startswith("#")):
    match = REPO_REQUIREMENT.match(requirement)
    if match:
        # A VCS checkout can only be installed if the VCS client is present.
        assert which(match.group('vcs')) is not None, \
            ("VCS '%(vcs)s' must be installed in order to "
             "install %(link)s" % match.groupdict())
        install_requires.append("%(package)s==%(version)s" % match.groupdict())
        dependency_links.append(match.group('link'))
        continue
    # Direct https requirements are passed through unchanged.
    if requirement.startswith("https"):
        install_requires.append(requirement)
        continue
    match = HTTPS_REQUIREMENT.match(requirement)
    if match:
        install_requires.append("%(package)s>=%(version)s" % match.groupdict())
        dependency_links.append(match.group('link'))
        continue
    # Plain ``package<specifier>`` requirement.
    install_requires.append(requirement)
# Python 2 needs a few py2-only packages in addition to requires.txt.
if major == 2:
    install_requires.extend(['web.py>=0.37',
                             'xlwt>=0.7.4',
                             'matplotlib-venn>=0.5'])
elif major == 3:
    pass

# Select which packages to ship depending on the installation mode.
if INSTALL_CGAT_CODE_COLLECTION:
    cgat_packages = find_packages(exclude=["CGATPipelines*", "scripts*"])
else:
    cgat_packages = find_packages(exclude=["scripts*"])
# rename scripts to CGATScripts
cgat_packages.append("CGATScripts")

# Map package names onto the source directories they are built from.
cgat_package_dirs = {'CGAT': 'CGAT',
                     'CGATScripts': 'scripts',
                     'CGATPipelines': 'CGATPipelines'}
##########################################################
##########################################################
# Classifiers
# Trove classifiers advertised on PyPI; blank lines are filtered out when
# this string is split for setup().
classifiers = """
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
setup(
    # package information
    name='CGATPipelines',
    version=version,
    description='CGAT : the Computational Genomics Analysis Toolkit',
    author='Andreas Heger',
    author_email='andreas.heger@gmail.com',
    license="MIT",
    platforms=["any"],
    keywords="computational genomics",
    long_description='CGAT : the Computational Genomics Analysis Toolkit',
    # drop the blank lines from the triple-quoted classifiers block
    classifiers=[_f for _f in classifiers.split("\n") if _f],
    url="http://www.cgat.org/cgat/Tools/",
    # package contents
    packages=cgat_packages,
    package_dir=cgat_package_dirs,
    include_package_data=True,
    entry_points={
        'console_scripts': ['cgatflow = CGATPipelines.cgatflow:main']
    },
    # dependencies
    install_requires=install_requires,
    dependency_links=dependency_links,
    # extension modules (compiled with Cython's build_ext)
    ext_modules=[],
    cmdclass={'build_ext': build_ext},
    # other options
    zip_safe=False,
    test_suite="tests",
)
| {
"content_hash": "a21999b12f4e7fde97b8bb20739085c5",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 93,
"avg_line_length": 32.5948275862069,
"alnum_prop": 0.5462840518381381,
"repo_name": "CGATOxford/CGATPipelines",
"id": "fb7cfce00a7c7f9a1e9c8bbca51263f6b37ae5f0",
"size": "7562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4360"
},
{
"name": "HTML",
"bytes": "40732"
},
{
"name": "JavaScript",
"bytes": "302029"
},
{
"name": "Jupyter Notebook",
"bytes": "4393775"
},
{
"name": "Makefile",
"bytes": "45084"
},
{
"name": "Python",
"bytes": "5357820"
},
{
"name": "R",
"bytes": "62312"
},
{
"name": "Shell",
"bytes": "67312"
}
],
"symlink_target": ""
} |
"""
TCP-Server通道类
通过调用管理类对象的process_data函数实现信息的发送。
"""
import logging
from libs.base_channel import BaseChannel
logger = logging.getLogger('plugin')
class TCPServerChannel(BaseChannel):
    """TCP server channel: publishes the known devices' info over MQTT."""

    def __init__(self, channel_params, devices_file_name, protocol, mqtt_client, network_name):
        BaseChannel.__init__(self, channel_params, devices_file_name, protocol, mqtt_client, network_name)

    @staticmethod
    def check_config(channel_params):
        """Validate the channel configuration; delegates to the base class."""
        return BaseChannel.check_config(channel_params)

    def run(self):
        """Publish one message per known device describing it (empty data)."""
        # First of all, report the device data.
        for device_id in self.devices_info_dict:
            device_info = self.devices_info_dict[device_id]
            device_msg = {
                "device_id": device_info["device_id"],
                "device_type": device_info["device_type"],
                "device_addr": device_info["device_addr"],
                "device_port": device_info["device_port"],
                "protocol": self.protocol.protocol_type,
                "data": ""
            }
            self.mqtt_client.publish_data(device_msg)

    def process_cmd(self, device_cmd_msg):
        """Handle a device command message (intentionally a no-op here)."""
"content_hash": "1685607a55e32c0a042ea8aac23ec4fc",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 106,
"avg_line_length": 29.894736842105264,
"alnum_prop": 0.6109154929577465,
"repo_name": "lianwutech/plugin_xxx_yyy",
"id": "d026a044d8c4044b7b76f7f6abe30d81521e7c50",
"size": "1244",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "channels/tcpserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93912"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``bar.outsidetextfont.size`` property."""

    def __init__(self, plotly_name="size", parent_name="bar.outsidetextfont", **kwargs):
        # Fill in this property's defaults without clobbering caller overrides,
        # then hand everything to the generic number validator.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("min", 1)
        kwargs.setdefault("role", "style")
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
"content_hash": "ca6cc5c7e29be0b4a87c1ff376996b5d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 88,
"avg_line_length": 38.142857142857146,
"alnum_prop": 0.5880149812734082,
"repo_name": "plotly/python-api",
"id": "880154a6e75cad6088276243e1f6ad889853498a",
"size": "534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/bar/outsidetextfont/_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import logging
from json import dumps
from xml.etree.ElementTree import ElementTree
from cms.utils.compat.dj import is_installed
from dateutil.parser import parse
from django.utils.timezone import make_aware
from ..conf import settings
# Define create_redirect() in terms of django.contrib.redirects when that app
# is installed; otherwise fall back to a no-op so callers need not care.
if is_installed('django.contrib.redirects'):
    from django.contrib.redirects.models import Redirect

    try:
        # py3
        from urllib.parse import urlparse
    except ImportError:
        # py2 fallback
        from urlparse import urlparse

    def create_redirect(old_url, new_url):
        # Record a redirect so the old URL keeps resolving after content
        # moved. Skipped for the site root and when the path is unchanged.
        old_path = urlparse(old_url).path
        new_path = urlparse(new_url).path
        if old_path != '/' and new_path != old_path:
            # NOTE(review): old_path is already a path, so the inner
            # urlparse() here is redundant but harmless — confirm.
            redirect = Redirect.objects.get_or_create(
                site_id=settings.SITE_ID,
                old_path=urlparse(old_path).path,
            )[0]
            redirect.new_path = new_path
            redirect.save()
            return redirect
else:
    def create_redirect(old_url, new_url):
        # No-op when django.contrib.redirects is not installed.
        pass
def import_wordpress(xmlfile):
    """Import authors, categories and items from a WordPress WXR export.

    Per-record parsing/saving failures are logged and collected rather than
    aborting the whole import.

    :param xmlfile: path or file object of the WordPress eXtended RSS dump
    :return: dict with counts of imported ``authors``, ``categories`` and
             ``items`` plus the list of collected ``errors``
    :raises Exception: only if the file itself cannot be parsed at all
    """
    from .models import Author, Category, Item

    # XML namespaces used by the WXR 1.2 format.
    WP = '{http://wordpress.org/export/1.2/}'
    DC = '{http://purl.org/dc/elements/1.1/}'
    CONTENT = '{http://purl.org/rss/1.0/modules/content/}'
    EXCERPT = '{http://wordpress.org/export/1.2/excerpt/}'

    try:
        rss = ElementTree(file=xmlfile).getroot()
        # explicit check instead of ``assert`` (asserts are stripped under -O)
        if rss.tag != 'rss':
            raise ValueError('unexpected root element {}'.format(rss.tag))
    except Exception as e:
        raise Exception('Failed to parse file {}: {}'.format(xmlfile, e))
    imported_items = 0
    errors = []

    # import authors
    authors = {}
    for author in rss.findall('*/' + WP + 'author'):
        author_id = 'unknown'
        try:
            # first of all try to parse author_id for use in potential error messages
            author_id = int(author.find(WP + 'author_id').text)
            login = author.find(WP + 'author_login').text
            email = author.find(WP + 'author_email').text
            first_name = author.find(WP + 'author_first_name').text
            last_name = author.find(WP + 'author_last_name').text
        except Exception as e:
            error = 'Failed to parse author with author_id {}: {}'.format(author_id, e)
            logging.warning(error)
            errors.append(error)
            continue
        try:
            author = Author.objects.get_or_create(
                author_id=author_id,
                login=login,
                email=email,
                first_name=first_name,
                last_name=last_name,
            )[0]
        except Exception as e:
            error = 'Failed to save author with author_id {}: {}'.format(author_id, e)
            logging.warning(error)
            errors.append(error)
            continue
        authors[login] = author

    # import categories
    categories = {}
    for category in rss.findall('*/' + WP + 'category'):
        term_id = 'unknown'
        try:
            # first of all try to parse term_id for use in potential error messages
            term_id = int(category.find(WP + 'term_id').text)
            name = category.find(WP + 'cat_name').text
            slug = category.find(WP + 'category_nicename').text
            parent = category.find(WP + 'category_parent').text
        except Exception as e:
            error = 'Failed to parse category with term_id {}: {}'.format(term_id, e)
            logging.warning(error)
            errors.append(error)
            # BUG FIX: a stray ``raise`` here aborted the whole import on the
            # first malformed category, unlike the author/item loops which
            # collect the error and carry on; removed for consistency.
            continue
        try:
            category = Category.objects.get_or_create(
                term_id=term_id,
                name=name,
                slug=slug,
                parent=parent,
            )[0]
        except Exception as e:
            error = 'Failed to save category with term_id {}: {}'.format(term_id, e)
            logging.warning(error)
            errors.append(error)
            continue
        categories[slug] = category

    # import items
    for item in rss.findall('*/item'):
        post_id = 'unknown'
        try:
            # first of all try to parse post_id for use in potential error messages
            post_id = int(item.find(WP + 'post_id').text)
            title = item.find('title').text or ''
            link = item.find('link').text or ''
            pub_date = parse(item.find('pubDate').text)
            created_by = item.find(DC + 'creator').text
            guid = item.find('guid').text
            description = item.find('description').text or ''
            content = item.find(CONTENT + 'encoded').text or ''
            excerpt = item.find(EXCERPT + 'encoded').text or ''
            post_date = make_aware(parse(item.find(WP + 'post_date').text))
            post_name = item.find(WP + 'post_name').text or ''
            status = item.find(WP + 'status').text
            post_parent = int(item.find(WP + 'post_parent').text)
            post_type = item.find(WP + 'post_type').text
            # serialize all postmeta key/value pairs as a JSON object
            postmeta = dumps(dict(
                (
                    pm.find(WP + 'meta_key').text,
                    pm.find(WP + 'meta_value').text,
                )
                for pm in item.findall(WP + 'postmeta')
            ))
            # map the item's category slugs to already-imported categories,
            # silently skipping those that failed to import above
            cats = [
                categories[cat.attrib['nicename']]
                for cat in item.findall('category')
                if cat.attrib['nicename'] in categories
            ]
        except Exception as e:
            error = 'Failed to parse item with post_id {}: {}'.format(post_id, e)
            logging.warning(error)
            errors.append(error)
            continue
        try:
            item = Item.objects.create(
                title=title,
                link=link,
                pub_date=pub_date,
                created_by=authors[created_by],
                guid=guid,
                description=description,
                content=content,
                excerpt=excerpt,
                post_id=post_id,
                post_date=post_date,
                post_name=post_name,
                status=status,
                post_parent=post_parent,
                post_type=post_type,
                postmeta=postmeta,
            )
            item.categories = cats
        except Exception as e:
            error = 'Failed to save item with post_id {}: {}'.format(post_id, e)
            logging.warning(error)
            errors.append(error)
            continue
        imported_items += 1
    return {
        'authors': len(authors),
        'categories': len(categories),
        'items': imported_items,
        'errors': errors,
    }
| {
"content_hash": "ead57b1098c8afed96b3053fbbb3397a",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 104,
"avg_line_length": 40.41279069767442,
"alnum_prop": 0.5535894115954539,
"repo_name": "misli/django-cms-articles",
"id": "4734d43bc7ea056dd4fde3f499dc8b0b14f87678",
"size": "6951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms_articles/import_wordpress/utils.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "717"
},
{
"name": "HTML",
"bytes": "12453"
},
{
"name": "JavaScript",
"bytes": "2262"
},
{
"name": "Python",
"bytes": "171047"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutControlStatements(Koan):
    """Koan exercises covering Python control-flow statements.

    Each ``__`` placeholder must be replaced by the learner with the value
    that makes the assertion pass.
    """
    def test_if_then_else_statements(self):
        if True:
            result = 'true value'
        else:
            result = 'false value'
        self.assertEqual(__, result)
    def test_if_then_statements(self):
        result = 'default value'
        if True:
            result = 'true value'
        self.assertEqual(__, result)
    def test_if_then_elif_else_statements(self):
        if False:
            result = 'first value'
        elif True:
            result = 'true value'
        else:
            result = 'default value'
        self.assertEqual(__, result)
    def test_while_statement(self):
        i = 1
        result = 1
        while i <= 10:
            result = result * i  # accumulates the product 1*2*...*10 (10!)
            i += 1
        self.assertEqual(__, result)
    def test_break_statement(self):
        i = 1
        result = 1
        while True:
            if i > 10: break  # same product as above, exit via break
            result = result * i
            i += 1
        self.assertEqual(__, result)
    def test_continue_statement(self):
        i = 0
        result = []
        while i < 10:
            i += 1
            if (i % 2) == 0: continue  # skip even numbers
            result.append(i)
        self.assertEqual(__, result)
    def test_for_statement(self):
        phrase = ["fish", "and", "chips"]
        result = []
        for item in phrase:
            result.append(item.upper())
        self.assertEqual([__, __, __], result)
    def test_for_statement_with_tuples(self):
        round_table = [
            ("Lancelot", "Blue"),
            ("Galahad", "I don't know!"),
            ("Robin", "Blue! I mean Green!"),
            ("Arthur", "Is that an African Swallow or Amazonian Swallow?")
        ]
        result = []
        # tuple unpacking in the for target splits each pair into two names
        for knight, answer in round_table:
            result.append("Contestant: '" + knight + "' Answer: '" + answer + "'")
        text = __
        self.assertRegex(result[2], text)
        self.assertNotRegex(result[0], text)
        self.assertNotRegex(result[1], text)
        self.assertNotRegex(result[3], text)
| {
"content_hash": "18351213f1e3114b6534fc4b74b67c62",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 84,
"avg_line_length": 27.181818181818183,
"alnum_prop": 0.4992833253702819,
"repo_name": "bordeltabernacle/python_koans",
"id": "2ed357bcb3a1fda5ae61e6ba43ce99bebad02fef",
"size": "2140",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python3/koans/about_control_statements.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1633"
},
{
"name": "Python",
"bytes": "329252"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
} |
import win32ui
from pywin.mfc import docview
from pywin import default_scintilla_encoding
import scintillacon
import win32con
import string
import os
import codecs
import re
# EOL byte sequences used to sniff a file's line-ending convention.
crlf_bytes = "\r\n".encode("ascii")
lf_bytes = "\n".encode("ascii")
# re from pep263 - but we use it both on bytes and strings.
re_encoding_bytes = re.compile("coding[:=]\s*([-\w.]+)".encode("ascii"))
re_encoding_text = re.compile("coding[:=]\s*([-\w.]+)")
# Alias so the base class can be referenced by one name throughout.
ParentScintillaDocument=docview.Document
class CScintillaDocument(ParentScintillaDocument):
    "A SyntEdit document. "
    def __init__(self, *args):
        self.bom = None # the BOM, if any, read from the file.
        # the encoding we detected from the source. Might have
        # detected via the BOM or an encoding decl. Note that in
        # the latter case (ie, while self.bom is None), it can't be
        # trusted - the user may have edited the encoding decl between
        # open and save.
        self.source_encoding = None
        ParentScintillaDocument.__init__(self, *args)
    def DeleteContents(self):
        # Nothing to do - the Scintilla control owns the text buffer.
        pass
    def OnOpenDocument(self, filename):
        """MFC open hook: load *filename* into the view.

        Returns 1 on success, 0 on failure (after showing a message box).
        """
        # init data members
        #print "Opening", filename
        self.SetPathName(filename) # Must set this early!
        try:
            # load the text as binary we can get smart
            # about detecting any existing EOL conventions.
            f = open(filename, 'rb')
            try:
                self._LoadTextFromFile(f)
            finally:
                f.close()
        except IOError:
            win32ui.MessageBox("Could not load the file from %s" % filename)
            return 0
        return 1
    def SaveFile(self, fileName):
        """Save via the first view; mark a Scintilla save-point on success."""
        view = self.GetFirstView()
        ok = view.SaveTextFile(fileName)
        if ok:
            view.SCISetSavePoint()
        return ok
    def ApplyFormattingStyles(self):
        # Re-apply formatting on every view that implements it.
        self._ApplyOptionalToViews("ApplyFormattingStyles")
    # #####################
    # File related functions
    # Helper to transfer text from the MFC document to the control.
    def _LoadTextFromFile(self, f):
        """Read binary file object *f*, detect EOL mode and source encoding
        (BOM first, then pep263 declaration), then hand the text - re-encoded
        to Scintilla's encoding - to the first view."""
        # detect EOL mode - we don't support \r only - so find the
        # first '\n' and guess based on the char before.
        l = f.readline()
        l2 = f.readline()
        # If line ends with \r\n or has no line ending, use CRLF.
        if l.endswith(crlf_bytes) or not l.endswith(lf_bytes):
            eol_mode = scintillacon.SC_EOL_CRLF
        else:
            eol_mode = scintillacon.SC_EOL_LF
        # Detect the encoding - first look for a BOM, and if not found,
        # look for a pep263 encoding declaration.
        for bom, encoding in (
            (codecs.BOM_UTF8, "utf8"),
            (codecs.BOM_UTF16_LE, "utf_16_le"),
            (codecs.BOM_UTF16_BE, "utf_16_be"),
            ):
            if l.startswith(bom):
                self.bom = bom
                self.source_encoding = encoding
                l = l[len(bom):] # remove it.
                break
        else:
            # no bom detected - look for pep263 encoding decl.
            for look in (l, l2):
                # Note we are looking at raw bytes here: so
                # both the re itself uses bytes and the result
                # is bytes - but we need the result as a string.
                match = re_encoding_bytes.search(look)
                if match is not None:
                    self.source_encoding = match.group(1).decode("ascii")
                    break
        # reading by lines would be too slow? Maybe we can use the
        # incremental encoders? For now just stick with loading the
        # entire file in memory.
        text = l + l2 + f.read()
        # Translate from source encoding to UTF-8 bytes for Scintilla
        source_encoding = self.source_encoding
        # If we don't know an encoding, just use latin-1 to treat
        # it as bytes...
        if source_encoding is None:
            source_encoding = 'latin1'
        # we could optimize this by avoiding utf8 to-ing and from-ing,
        # but then we would lose the ability to handle invalid utf8
        # (and even then, the use of encoding aliases makes this tricky)
        # To create an invalid utf8 file:
        # >>> open(filename, "wb").write(codecs.BOM_UTF8+"bad \xa9har\r\n")
        try:
            dec = text.decode(source_encoding)
        except UnicodeError:
            print "WARNING: Failed to decode bytes from %r encoding - treating as latin1" % source_encoding
            dec = text.decode('latin1')
        # and put it back as utf8 - this shouldn't fail.
        text = dec.encode(default_scintilla_encoding)
        view = self.GetFirstView()
        if view.IsWindow():
            # Turn off undo collection while loading
            view.SendScintilla(scintillacon.SCI_SETUNDOCOLLECTION, 0, 0)
            # Make sure the control isnt read-only
            view.SetReadOnly(0)
            view.SendScintilla(scintillacon.SCI_CLEARALL)
            view.SendMessage(scintillacon.SCI_ADDTEXT, text)
            view.SendScintilla(scintillacon.SCI_SETUNDOCOLLECTION, 1, 0)
            view.SendScintilla(win32con.EM_EMPTYUNDOBUFFER, 0, 0)
        # set EOL mode
        view.SendScintilla(scintillacon.SCI_SETEOLMODE, eol_mode)
    def _SaveTextToFile(self, view, f):
        """Encode the view's text (honouring a saved BOM or a pep263
        declaration found in the first lines) and write it to file object *f*."""
        s = view.GetTextRange() # already decoded from scintilla's encoding
        source_encoding = None
        if self.bom:
            f.write(self.bom)
            source_encoding = self.source_encoding
        else:
            # no BOM - look for an encoding.
            bits = re.split("[\r\n]*", s, 3)
            for look in bits[:-1]:
                match = re_encoding_text.search(look)
                if match is not None:
                    source_encoding = match.group(1)
                    self.source_encoding = source_encoding
                    break
        if source_encoding is None:
            source_encoding = 'latin1'
        f.write(s.encode(source_encoding))
        self.SetModifiedFlag(0)
    def FinalizeViewCreation(self, view):
        # Hook for subclasses; nothing to do here.
        pass
    def HookViewNotifications(self, view):
        """Wire Scintilla notifications from *view*'s parent frame to the
        per-view / per-document delegate handlers."""
        parent = view.GetParentFrame()
        parent.HookNotify(ViewNotifyDelegate(self, "OnBraceMatch"), scintillacon.SCN_CHECKBRACE)
        parent.HookNotify(ViewNotifyDelegate(self, "OnMarginClick"), scintillacon.SCN_MARGINCLICK)
        parent.HookNotify(ViewNotifyDelegate(self, "OnNeedShown"), scintillacon.SCN_NEEDSHOWN)
        parent.HookNotify(DocumentNotifyDelegate(self, "OnSavePointReached"), scintillacon.SCN_SAVEPOINTREACHED)
        parent.HookNotify(DocumentNotifyDelegate(self, "OnSavePointLeft"), scintillacon.SCN_SAVEPOINTLEFT)
        parent.HookNotify(DocumentNotifyDelegate(self, "OnModifyAttemptRO"), scintillacon.SCN_MODIFYATTEMPTRO)
        # Tell scintilla what characters should abort auto-complete.
        view.SCIAutoCStops(string.whitespace+"()[]:;+-/*=\\?'!#@$%^&,<>\"'|" )
        if view != self.GetFirstView():
            view.SCISetDocPointer(self.GetFirstView().SCIGetDocPointer())
    def OnSavePointReached(self, std, extra):
        # Buffer now matches the saved file - clear the modified flag.
        self.SetModifiedFlag(0)
    def OnSavePointLeft(self, std, extra):
        # Buffer diverged from the saved file - set the modified flag.
        self.SetModifiedFlag(1)
    def OnModifyAttemptRO(self, std, extra):
        # User typed into a read-only document - try to make it writable.
        self.MakeDocumentWritable()
    # All Marker functions are 1 based.
    def MarkerAdd( self, lineNo, marker ):
        self.GetEditorView().SCIMarkerAdd(lineNo-1, marker)
    def MarkerCheck(self, lineNo, marker ):
        """Return whether *marker* is set on 1-based line *lineNo*."""
        v = self.GetEditorView()
        lineNo = lineNo - 1 # Make 0 based
        markerState = v.SCIMarkerGet(lineNo)
        return markerState & (1<<marker) != 0
    def MarkerToggle( self, lineNo, marker ):
        """Flip *marker* on 1-based line *lineNo*."""
        v = self.GetEditorView()
        if self.MarkerCheck(lineNo, marker):
            v.SCIMarkerDelete(lineNo-1, marker)
        else:
            v.SCIMarkerAdd(lineNo-1, marker)
    def MarkerDelete( self, lineNo, marker ):
        self.GetEditorView().SCIMarkerDelete(lineNo-1, marker)
    def MarkerDeleteAll( self, marker ):
        self.GetEditorView().SCIMarkerDeleteAll(marker)
    def MarkerGetNext(self, lineNo, marker):
        # SCIMarkerNext takes a bit mask (hence the shift); +1 back to 1-based.
        return self.GetEditorView().SCIMarkerNext( lineNo-1, 1 << marker )+1
    def MarkerAtLine(self, lineNo, marker):
        # NOTE: unlike MarkerCheck, returns the raw masked int, not a bool.
        markerState = self.GetEditorView().SCIMarkerGet(lineNo-1)
        return markerState & (1<<marker)
    # Helper for reflecting functions to views.
    def _ApplyToViews(self, funcName, *args):
        for view in self.GetAllViews():
            func = getattr(view, funcName)
            func(*args)
    def _ApplyOptionalToViews(self, funcName, *args):
        # Like _ApplyToViews, but silently skips views lacking the method.
        for view in self.GetAllViews():
            func = getattr(view, funcName, None)
            if func is not None:
                func(*args)
    def GetEditorView(self):
        # Find the first frame with a view,
        # then ask it to give the editor view
        # as it knows which one is "active"
        try:
            frame_gev = self.GetFirstView().GetParentFrame().GetEditorView
        except AttributeError:
            return self.GetFirstView()
        return frame_gev()
# Delegate to the correct view, based on the control that sent it.
class ViewNotifyDelegate:
    """Dispatch a Scintilla notification to whichever view sent it.

    Looks up the sending view by its window handle and invokes the named
    handler on that view; returns None when no view matches.
    """

    def __init__(self, doc, name):
        self.doc = doc
        self.name = name

    def __call__(self, std, extra):
        hwnd_from, _id_from, _code = std
        for view in self.doc.GetAllViews():
            if view.GetSafeHwnd() != hwnd_from:
                continue
            handler = getattr(view, self.name)
            return handler(std, extra)
# Delegate to the document, but only from a single view (as each view sends it seperately)
class DocumentNotifyDelegate:
    """Forward a notification to the document, but only when it comes from
    the single "editor" view (each view sends the notification separately)."""

    def __init__(self, doc, name):
        self.doc = doc
        # Bind the document handler once, up front.
        self.delegate = getattr(doc, name)

    def __call__(self, std, extra):
        hwnd_from, _id_from, _code = std
        editor_hwnd = self.doc.GetEditorView().GetSafeHwnd()
        if editor_hwnd == hwnd_from:
            self.delegate(std, extra)
| {
"content_hash": "148a607ee16aa7591451129eb5b8fb74",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 106,
"avg_line_length": 33.87698412698413,
"alnum_prop": 0.7083284526180157,
"repo_name": "slozier/ironpython2",
"id": "d472aea2e6768bb5516b71c7e6ea576d4ccec94e",
"size": "8537",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Src/StdLib/Lib/site-packages/pythonwin/pywin/scintilla/document.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "Batchfile",
"bytes": "4080"
},
{
"name": "C",
"bytes": "20290"
},
{
"name": "C#",
"bytes": "12157325"
},
{
"name": "C++",
"bytes": "69156"
},
{
"name": "HTML",
"bytes": "13181412"
},
{
"name": "JavaScript",
"bytes": "1656"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "67035"
},
{
"name": "Python",
"bytes": "26565467"
},
{
"name": "Roff",
"bytes": "21"
},
{
"name": "Shell",
"bytes": "193"
},
{
"name": "Smalltalk",
"bytes": "3"
},
{
"name": "VBScript",
"bytes": "974"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.