text stringlengths 4 1.02M | meta dict |
|---|---|
import os
import sys
def _django_entry_point():
    """Return Django's ``execute_from_command_line``, with a helpful error
    message when Django itself is not importable."""
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    return execute_from_command_line


if __name__ == "__main__":
    # Point Django at the project settings before any Django import runs.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "radiotrack.settings")
    _django_entry_point()(sys.argv)
| {
"content_hash": "f65bc19b9c3d9d16da77375ea55248a4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.42857142857143,
"alnum_prop": 0.6221374045801527,
"repo_name": "clede/Radiotrack",
"id": "b75b2175bf995a3cb4e1e4a21fa6fea2d50c76ab",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7157"
},
{
"name": "Python",
"bytes": "118949"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from profiles.models import Profile
from rest_framework import viewsets, response, permissions
from .serializers import UserSerializer, ProfileSerializer
from django.http import HttpResponse, HttpResponseRedirect
def home(request):
    # Root URL handler: send visitors straight to the browsable API index.
    return HttpResponseRedirect("/api/")
class UsersViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Django ``User`` records.

    Requires an authenticated caller; all standard ModelViewSet actions
    (list/retrieve/create/update/destroy) are inherited unchanged.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = (permissions.IsAuthenticated,)
    # NOTE: the previous `retrieve` override only delegated to super() with
    # no extra behavior, so it was removed; the inherited implementation is
    # called with the same arguments and returns the same response.
class ProfilesViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for ``Profile`` records.

    Requires an authenticated caller; all standard ModelViewSet actions
    (list/retrieve/create/update/destroy) are inherited unchanged.
    """
    queryset = Profile.objects.all()
    serializer_class = ProfileSerializer
    permission_classes = (permissions.IsAuthenticated,)
    # NOTE: the previous `retrieve` override only delegated to super() with
    # no extra behavior, so it was removed; the inherited implementation is
    # called with the same arguments and returns the same response.
def current_user(request):
    """Serialize and return the requesting user.

    Bug fix: the original referenced the bare name ``Response``, which was
    never imported (only the ``response`` module is imported at the top of
    the file), so this view always raised ``NameError`` at runtime.
    """
    serializer = UserSerializer(request.user)
    return response.Response(serializer.data)
| {
"content_hash": "3c8b3d849d5fda518aa2533bf170d17d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 65,
"avg_line_length": 31.28125,
"alnum_prop": 0.7662337662337663,
"repo_name": "jparicka/django-react-auth",
"id": "e352abd1b404ccbdb9d22379e9b1928698b51ea3",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4933"
},
{
"name": "JavaScript",
"bytes": "1129110"
},
{
"name": "Python",
"bytes": "15827"
},
{
"name": "Shell",
"bytes": "39"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SparkSessionOperations:
    """SparkSessionOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.synapse.spark.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE(review): this looks like auto-generated (autorest-style) SDK code;
    # keep edits mechanical and regenerate rather than hand-patch if possible.
    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def get_spark_sessions(
        self,
        from_parameter: Optional[int] = None,
        size: Optional[int] = None,
        detailed: Optional[bool] = None,
        **kwargs
    ) -> "models.SparkSessionCollection":
        """List all spark sessions which are running under a particular spark pool.

        :param from_parameter: Optional param specifying which index the list should begin from.
        :type from_parameter: int
        :param size: Optional param specifying the size of the returned list.
         By default it is 20 and that is the maximum.
        :type size: int
        :param detailed: Optional query param specifying whether detailed response is returned beyond
         plain livy.
        :type detailed: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SparkSessionCollection, or the result of cls(response)
        :rtype: ~azure.synapse.spark.models.SparkSessionCollection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkSessionCollection"]
        # Map well-known HTTP statuses onto typed azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Construct URL
        url = self.get_spark_sessions.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
            'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (only include the query params the caller set)
        query_parameters = {}  # type: Dict[str, Any]
        if from_parameter is not None:
            query_parameters['from'] = self._serialize.query("from_parameter", from_parameter, 'int')
        if size is not None:
            query_parameters['size'] = self._serialize.query("size", size, 'int')
        if detailed is not None:
            query_parameters['detailed'] = self._serialize.query("detailed", detailed, 'bool')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is an error for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('SparkSessionCollection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_spark_sessions.metadata = {'url': '/sessions'}  # type: ignore

    async def create_spark_session(
        self,
        spark_session_options: "models.SparkSessionOptions",
        detailed: Optional[bool] = None,
        **kwargs
    ) -> "models.SparkSession":
        """Create new spark session.

        :param spark_session_options: Livy compatible batch job request payload.
        :type spark_session_options: ~azure.synapse.spark.models.SparkSessionOptions
        :param detailed: Optional query param specifying whether detailed response is returned beyond
         plain livy.
        :type detailed: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SparkSession, or the result of cls(response)
        :rtype: ~azure.synapse.spark.models.SparkSession
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkSession"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self.create_spark_session.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
            'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if detailed is not None:
            query_parameters['detailed'] = self._serialize.query("detailed", detailed, 'bool')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'

        # Serialize the request body from the model object.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(spark_session_options, 'SparkSessionOptions')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('SparkSession', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_spark_session.metadata = {'url': '/sessions'}  # type: ignore

    async def get_spark_session(
        self,
        session_id: int,
        detailed: Optional[bool] = None,
        **kwargs
    ) -> "models.SparkSession":
        """Gets a single spark session.

        :param session_id: Identifier for the session.
        :type session_id: int
        :param detailed: Optional query param specifying whether detailed response is returned beyond
         plain livy.
        :type detailed: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SparkSession, or the result of cls(response)
        :rtype: ~azure.synapse.spark.models.SparkSession
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkSession"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Construct URL
        url = self.get_spark_session.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
            'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
            'sessionId': self._serialize.url("session_id", session_id, 'int'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if detailed is not None:
            query_parameters['detailed'] = self._serialize.query("detailed", detailed, 'bool')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('SparkSession', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_spark_session.metadata = {'url': '/sessions/{sessionId}'}  # type: ignore

    async def cancel_spark_session(
        self,
        session_id: int,
        **kwargs
    ) -> None:
        """Cancels a running spark session.

        :param session_id: Identifier for the session.
        :type session_id: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Construct URL
        url = self.cancel_spark_session.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
            'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
            'sessionId': self._serialize.url("session_id", session_id, 'int'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        # Cancellation is expressed as an HTTP DELETE on the session resource.
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})
    cancel_spark_session.metadata = {'url': '/sessions/{sessionId}'}  # type: ignore

    async def reset_spark_session_timeout(
        self,
        session_id: int,
        **kwargs
    ) -> None:
        """Sends a keep alive call to the current session to reset the session timeout.

        :param session_id: Identifier for the session.
        :type session_id: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Construct URL
        url = self.reset_spark_session_timeout.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
            'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
            'sessionId': self._serialize.url("session_id", session_id, 'int'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.put(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        if cls:
            return cls(pipeline_response, None, {})
    reset_spark_session_timeout.metadata = {'url': '/sessions/{sessionId}/reset-timeout'}  # type: ignore

    async def get_spark_statements(
        self,
        session_id: int,
        **kwargs
    ) -> "models.SparkStatementCollection":
        """Gets a list of statements within a spark session.

        :param session_id: Identifier for the session.
        :type session_id: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SparkStatementCollection, or the result of cls(response)
        :rtype: ~azure.synapse.spark.models.SparkStatementCollection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkStatementCollection"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Construct URL
        url = self.get_spark_statements.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
            'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
            'sessionId': self._serialize.url("session_id", session_id, 'int'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('SparkStatementCollection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_spark_statements.metadata = {'url': '/sessions/{sessionId}/statements'}  # type: ignore

    async def create_spark_statement(
        self,
        session_id: int,
        spark_statement_options: "models.SparkStatementOptions",
        **kwargs
    ) -> "models.SparkStatement":
        """Create statement within a spark session.

        :param session_id: Identifier for the session.
        :type session_id: int
        :param spark_statement_options: Livy compatible batch job request payload.
        :type spark_statement_options: ~azure.synapse.spark.models.SparkStatementOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SparkStatement, or the result of cls(response)
        :rtype: ~azure.synapse.spark.models.SparkStatement
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkStatement"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self.create_spark_statement.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
            'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
            'sessionId': self._serialize.url("session_id", session_id, 'int'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'

        # Serialize the request body from the model object.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(spark_statement_options, 'SparkStatementOptions')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('SparkStatement', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_spark_statement.metadata = {'url': '/sessions/{sessionId}/statements'}  # type: ignore

    async def get_spark_statement(
        self,
        session_id: int,
        statement_id: int,
        **kwargs
    ) -> "models.SparkStatement":
        """Gets a single statement within a spark session.

        :param session_id: Identifier for the session.
        :type session_id: int
        :param statement_id: Identifier for the statement.
        :type statement_id: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SparkStatement, or the result of cls(response)
        :rtype: ~azure.synapse.spark.models.SparkStatement
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkStatement"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Construct URL
        url = self.get_spark_statement.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
            'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
            'sessionId': self._serialize.url("session_id", session_id, 'int'),
            'statementId': self._serialize.url("statement_id", statement_id, 'int'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('SparkStatement', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_spark_statement.metadata = {'url': '/sessions/{sessionId}/statements/{statementId}'}  # type: ignore

    async def cancel_spark_statement(
        self,
        session_id: int,
        statement_id: int,
        **kwargs
    ) -> "models.SparkStatementCancellationResult":
        """Kill a statement within a session.

        :param session_id: Identifier for the session.
        :type session_id: int
        :param statement_id: Identifier for the statement.
        :type statement_id: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SparkStatementCancellationResult, or the result of cls(response)
        :rtype: ~azure.synapse.spark.models.SparkStatementCancellationResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SparkStatementCancellationResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Construct URL
        url = self.cancel_spark_statement.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
            'sparkPoolName': self._serialize.url("self._config.spark_pool_name", self._config.spark_pool_name, 'str', skip_quote=True),
            'sessionId': self._serialize.url("session_id", session_id, 'int'),
            'statementId': self._serialize.url("statement_id", statement_id, 'int'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        # Cancellation is a POST to the statement's /cancel sub-resource.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('SparkStatementCancellationResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    cancel_spark_statement.metadata = {'url': '/sessions/{sessionId}/statements/{statementId}/cancel'}  # type: ignore
| {
"content_hash": "fe1e6c60d5745538d020c1cf998d8a66",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 138,
"avg_line_length": 47.44632768361582,
"alnum_prop": 0.6496388028895769,
"repo_name": "Azure/azure-sdk-for-python",
"id": "624bc2ae298ff66ab1d917c2fa486b3be5454952",
"size": "25661",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/synapse/azure-synapse/azure/synapse/spark/aio/operations_async/_spark_session_operations_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
# Directory names used by the test fixtures.
# NOTE(review): presumably resolved relative to the test working directory —
# confirm against the code that reads these.
BLENDS_DIR_NAME = 'blends'
EXPORT_DIRNAME = 'export_tmpdir'

# The environment variable names (credentials/endpoint for the Previz API).
ENV_PREVIZ_API_ROOT = 'PREVIZ_API_ROOT'
ENV_PREVIZ_TEAM_UUID = 'PREVIZ_TEAM_UUID'
ENV_PREVIZ_API_TOKEN = 'PREVIZ_API_TOKEN'
| {
"content_hash": "96448ae234325e17ac1e67d9ba801fad",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 41,
"avg_line_length": 31.142857142857142,
"alnum_prop": 0.7431192660550459,
"repo_name": "Previz-app/io_scene_previz",
"id": "3cd2964c6a98c39cd19f8ae7c57ff807268995ca",
"size": "261",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "181"
},
{
"name": "Python",
"bytes": "63741"
},
{
"name": "Shell",
"bytes": "133"
}
],
"symlink_target": ""
} |
"""
Tests for epoll wrapper.
"""
import socket
import errno
import time
import select
import unittest
from test import support
# Skip the whole module when the platform has no epoll support at all.
if not hasattr(select, "epoll"):
    raise unittest.SkipTest("test works only on Linux 2.6")

# select.epoll may exist at build time but the running kernel may still
# lack the syscall (ENOSYS); probe once and skip in that case.
try:
    select.epoll()
except IOError as e:
    if e.errno == errno.ENOSYS:
        raise unittest.SkipTest("kernel doesn't support epoll()")
    raise
class TestEPoll(unittest.TestCase):
    """Exercise the select.epoll wrapper: creation, registration, polling,
    modification and unregistration against real localhost sockets."""

    def setUp(self):
        # One listening server socket per test; every socket opened during
        # the test is tracked in self.connections for tearDown cleanup.
        self.serverSocket = socket.socket()
        self.serverSocket.bind(('127.0.0.1', 0))
        self.serverSocket.listen(1)
        self.connections = [self.serverSocket]

    def tearDown(self):
        for skt in self.connections:
            skt.close()

    def _connected_pair(self):
        """Return a (client, server) pair of connected TCP sockets."""
        client = socket.socket()
        client.setblocking(False)
        try:
            # Non-blocking connect to a local listener must raise EINPROGRESS.
            client.connect(('127.0.0.1', self.serverSocket.getsockname()[1]))
        except socket.error as e:
            self.assertEqual(e.args[0], errno.EINPROGRESS)
        else:
            raise AssertionError("Connect should have raised EINPROGRESS")
        server, addr = self.serverSocket.accept()
        self.connections.extend((client, server))
        return client, server

    def test_create(self):
        # Basic lifecycle: create, fileno() valid, close, fileno() raises.
        try:
            ep = select.epoll(16)
        except OSError as e:
            raise AssertionError(str(e))
        self.assertTrue(ep.fileno() > 0, ep.fileno())
        self.assertTrue(not ep.closed)
        ep.close()
        self.assertTrue(ep.closed)
        self.assertRaises(ValueError, ep.fileno)

    def test_badcreate(self):
        # Constructor rejects wrong arity and non-integer sizehints.
        self.assertRaises(TypeError, select.epoll, 1, 2, 3)
        self.assertRaises(TypeError, select.epoll, 'foo')
        self.assertRaises(TypeError, select.epoll, None)
        self.assertRaises(TypeError, select.epoll, ())
        self.assertRaises(TypeError, select.epoll, ['foo'])
        self.assertRaises(TypeError, select.epoll, {})

    def test_add(self):
        server, client = self._connected_pair()

        # Registering by raw file descriptor works.
        ep = select.epoll(2)
        try:
            ep.register(server.fileno(), select.EPOLLIN | select.EPOLLOUT)
            ep.register(client.fileno(), select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()

        # adding by object w/ fileno works, too.
        ep = select.epoll(2)
        try:
            ep.register(server, select.EPOLLIN | select.EPOLLOUT)
            ep.register(client, select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()

        ep = select.epoll(2)
        try:
            # TypeError: argument must be an int, or have a fileno() method.
            self.assertRaises(TypeError, ep.register, object(),
                select.EPOLLIN | select.EPOLLOUT)
            self.assertRaises(TypeError, ep.register, None,
                select.EPOLLIN | select.EPOLLOUT)
            # ValueError: file descriptor cannot be a negative integer (-1)
            self.assertRaises(ValueError, ep.register, -1,
                select.EPOLLIN | select.EPOLLOUT)
            # IOError: [Errno 9] Bad file descriptor
            self.assertRaises(IOError, ep.register, 10000,
                select.EPOLLIN | select.EPOLLOUT)
            # registering twice also raises an exception
            ep.register(server, select.EPOLLIN | select.EPOLLOUT)
            self.assertRaises(IOError, ep.register, server,
                select.EPOLLIN | select.EPOLLOUT)
        finally:
            ep.close()

    def test_fromfd(self):
        # Two epoll objects built on the same fd share one kernel epoll set.
        server, client = self._connected_pair()
        ep = select.epoll(2)
        ep2 = select.epoll.fromfd(ep.fileno())

        ep2.register(server.fileno(), select.EPOLLIN | select.EPOLLOUT)
        ep2.register(client.fileno(), select.EPOLLIN | select.EPOLLOUT)

        events = ep.poll(1, 4)
        events2 = ep2.poll(0.9, 4)
        self.assertEqual(len(events), 2)
        self.assertEqual(len(events2), 2)

        # Closing the original fd invalidates the fromfd() duplicate too.
        ep.close()
        try:
            ep2.poll(1, 4)
        except IOError as e:
            self.assertEqual(e.args[0], errno.EBADF, e)
        else:
            self.fail("epoll on closed fd didn't raise EBADF")

    def test_control_and_wait(self):
        client, server = self._connected_pair()

        ep = select.epoll(16)
        ep.register(server.fileno(),
            select.EPOLLIN | select.EPOLLOUT | select.EPOLLET)
        ep.register(client.fileno(),
            select.EPOLLIN | select.EPOLLOUT | select.EPOLLET)

        # Fresh sockets are immediately writable, so poll returns at once.
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.1, then - now)

        events.sort()
        expected = [(client.fileno(), select.EPOLLOUT),
            (server.fileno(), select.EPOLLOUT)]
        expected.sort()

        self.assertEqual(events, expected)
        self.assertFalse(then - now > 0.01, then - now)

        # Edge-triggered: no new events until new data arrives, so this
        # poll runs into its full timeout.
        now = time.time()
        events = ep.poll(timeout=2.1, maxevents=4)
        then = time.time()
        self.assertFalse(events)

        client.send(b"Hello!")
        server.send(b"world!!!")

        # After the sends, both ends are readable and writable again.
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.01)

        events.sort()
        expected = [(client.fileno(), select.EPOLLIN | select.EPOLLOUT),
            (server.fileno(), select.EPOLLIN | select.EPOLLOUT)]
        expected.sort()

        self.assertEqual(events, expected)

        # unregister() removes a descriptor; modify() narrows the event mask.
        ep.unregister(client.fileno())
        ep.modify(server.fileno(), select.EPOLLOUT)
        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.01)

        expected = [(server.fileno(), select.EPOLLOUT)]
        self.assertEqual(events, expected)

    def test_errors(self):
        # Negative sizehint and negative fd are rejected with ValueError.
        self.assertRaises(ValueError, select.epoll, -2)
        self.assertRaises(ValueError, select.epoll().register, -1,
            select.EPOLLIN)

    def test_unregister_closed(self):
        # Unregistering a descriptor whose socket was already closed must
        # not raise.
        server, client = self._connected_pair()
        fd = server.fileno()
        ep = select.epoll(16)
        ep.register(server)

        now = time.time()
        events = ep.poll(1, 4)
        then = time.time()
        self.assertFalse(then - now > 0.01)

        server.close()
        ep.unregister(fd)
def test_main():
    # Entry point used by the stdlib regression-test runner.
    support.run_unittest(TestEPoll)

if __name__ == "__main__":
    test_main()
| {
"content_hash": "2006b7e85624a8aa863b38b98e81c800",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 77,
"avg_line_length": 31.603960396039604,
"alnum_prop": 0.5834899749373433,
"repo_name": "wdv4758h/ZipPy",
"id": "083fd7f79d97643b64c1b2864635943c06e734a6",
"size": "7498",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "lib-python/3/test/test_epoll.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "9447"
},
{
"name": "C",
"bytes": "106932"
},
{
"name": "CSS",
"bytes": "32004"
},
{
"name": "Groff",
"bytes": "27753"
},
{
"name": "HTML",
"bytes": "721863"
},
{
"name": "Java",
"bytes": "1550721"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Makefile",
"bytes": "16156"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "33672733"
},
{
"name": "R",
"bytes": "1959"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "3119"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import uuid
import hashlib
from .. import constants
# Default number of digits used by round_off() below.
ROUND = constants.DEFAULT_PRECISION
def bit_mask(flags):
    """Build an integer bit mask from a dict of boolean flags.

    Each key of ``constants.MASK`` maps a flag name to a bit position;
    positions whose flag is truthy are set, the rest are cleared.

    :type flags: dict
    :return: int
    """
    mask = 0
    for name, position in constants.MASK.items():
        if flags.get(name):
            mask |= (1 << position)
        else:
            mask &= ~(1 << position)
    return mask
def hash(value):
    """Return the hex MD5 digest of ``repr(value)``.

    Note: intentionally shadows the builtin ``hash`` — the name is part
    of this module's public API.

    :param value: any value with a stable ``repr``
    :rtype: str
    """
    digest = hashlib.md5(repr(value).encode('utf8'))
    return digest.hexdigest()
def id():
    """Generate a random, upper-cased UUID4 string.

    Note: intentionally shadows the builtin ``id`` — the name is part
    of this module's public API.

    :rtype: str
    """
    generated = uuid.uuid4()
    return str(generated).upper()
def id_from_name(name):
    """Deterministically derive an upper-cased UUID3 from *name*.

    Uses the DNS namespace, so the same name always yields the same id.

    :type name: str
    :rtype: str
    """
    namespaced = uuid.uuid3(uuid.NAMESPACE_DNS, name)
    return str(namespaced).upper()
def rgb2int(rgb):
    """Pack an RGB triple of floats in [0, 1] into a single 0xRRGGBB int.

    :type rgb: list|tuple
    :rtype: int
    """
    red, green, blue = [int(channel * 255) for channel in rgb[:3]]
    return (red << 16) + (green << 8) + blue
def round_off(value, ndigits=ROUND):
    """Round off a scalar or each element of a sequence to *ndigits*.

    :param value: value(s) to round off
    :param ndigits: precision limit (Default value = ROUND)
    :type value: float|list|tuple
    :return: the same data type that was passed
    :rtype: float|list|tuple
    """
    as_tuple = isinstance(value, tuple)
    as_list = isinstance(value, list)

    items = list(value) if (as_tuple or as_list) else [value]
    rounded = [round(item, ndigits) for item in items]

    if as_tuple:
        return tuple(rounded)
    if as_list:
        return rounded
    return rounded[0]
def rounding(options):
    """Report whether precision rounding is enabled and its value.

    :type options: dict
    :return: (enabled, precision) — precision is None when disabled
    :rtype: bool, int
    """
    enabled = options.get(constants.ENABLE_PRECISION)
    precision = options[constants.PRECISION] if enabled else None
    return (enabled, precision)
| {
"content_hash": "53df057615200637901effa5df1e07a5",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 19.876106194690266,
"alnum_prop": 0.6077471059661621,
"repo_name": "archilogic-com/three.js",
"id": "87e773cfaf7a2c3ad2a12fa96bcf30d309f50fa6",
"size": "2246",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "utils/exporters/blender/addons/io_three/exporter/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "139"
},
{
"name": "C",
"bytes": "80088"
},
{
"name": "C++",
"bytes": "116991"
},
{
"name": "CSS",
"bytes": "18564"
},
{
"name": "GLSL",
"bytes": "91388"
},
{
"name": "HTML",
"bytes": "38393"
},
{
"name": "JavaScript",
"bytes": "4714692"
},
{
"name": "MAXScript",
"bytes": "75494"
},
{
"name": "Python",
"bytes": "422248"
},
{
"name": "Shell",
"bytes": "9783"
}
],
"symlink_target": ""
} |
# Module metadata consumed by Ansible's documentation/build tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_instance_facts
short_description: Gathering facts from the API of instances from Apache CloudStack based clouds.
description:
- Gathering facts from the API of an instance.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- Name or display name of the instance.
required: true
domain:
description:
- Domain the instance is related to.
required: false
default: null
account:
description:
- Account the instance is related to.
required: false
default: null
project:
description:
- Project the instance is related to.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- cs_instance_facts:
name: web-vm-1
delegate_to: localhost
- debug:
var: cloudstack_instance
'''
RETURN = '''
---
cloudstack_instance.id:
description: UUID of the instance.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
cloudstack_instance.name:
description: Name of the instance.
returned: success
type: string
sample: web-01
cloudstack_instance.display_name:
description: Display name of the instance.
returned: success
type: string
sample: web-01
cloudstack_instance.group:
description: Group name of the instance is related.
returned: success
type: string
sample: web
cloudstack_instance.created:
  description: Date the instance was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
cloudstack_instance.password_enabled:
description: True if password setting is enabled.
returned: success
type: boolean
sample: true
cloudstack_instance.password:
description: The password of the instance if exists.
returned: success
type: string
sample: Ge2oe7Do
cloudstack_instance.ssh_key:
description: Name of SSH key deployed to instance.
returned: success
type: string
sample: key@work
cloudstack_instance.domain:
description: Domain the instance is related to.
returned: success
type: string
sample: example domain
cloudstack_instance.account:
description: Account the instance is related to.
returned: success
type: string
sample: example account
cloudstack_instance.project:
description: Name of project the instance is related to.
returned: success
type: string
sample: Production
cloudstack_instance.default_ip:
description: Default IP address of the instance.
returned: success
type: string
sample: 10.23.37.42
cloudstack_instance.public_ip:
description: Public IP address with instance via static NAT rule.
returned: success
type: string
sample: 1.2.3.4
cloudstack_instance.iso:
description: Name of ISO the instance was deployed with.
returned: success
type: string
sample: Debian-8-64bit
cloudstack_instance.template:
description: Name of template the instance was deployed with.
returned: success
type: string
sample: Debian-8-64bit
cloudstack_instance.service_offering:
description: Name of the service offering the instance has.
returned: success
type: string
sample: 2cpu_2gb
cloudstack_instance.zone:
description: Name of zone the instance is in.
returned: success
type: string
sample: ch-gva-2
cloudstack_instance.state:
description: State of the instance.
returned: success
type: string
sample: Running
cloudstack_instance.security_groups:
description: Security groups the instance is in.
returned: success
type: list
sample: '[ "default" ]'
cloudstack_instance.affinity_groups:
description: Affinity groups the instance is in.
returned: success
type: list
sample: '[ "webservers" ]'
cloudstack_instance.tags:
description: List of resource tags associated with the instance.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
cloudstack_instance.hypervisor:
description: Hypervisor related to this instance.
returned: success
type: string
sample: KVM
cloudstack_instance.instance_name:
description: Internal name of the instance (ROOT admin only).
returned: success
type: string
sample: i-44-3992-VM
cloudstack_instance.volumes:
description: List of dictionaries of the volumes attached to the instance.
returned: success
type: list
sample: '[ { name: "ROOT-1369", type: "ROOT", size: 10737418240 }, { name: "data01, type: "DATADISK", size: 10737418240 } ]'
'''
import base64
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackInstanceFacts(AnsibleCloudStack):
    """Gather facts about a single CloudStack instance."""

    def __init__(self, module):
        super(AnsibleCloudStackInstanceFacts, self).__init__(module)
        self.instance = None
        # API field name -> fact name translation table used by get_result().
        self.returns = {
            'group': 'group',
            'hypervisor': 'hypervisor',
            'instancename': 'instance_name',
            'publicip': 'public_ip',
            'passwordenabled': 'password_enabled',
            'password': 'password',
            'serviceofferingname': 'service_offering',
            'isoname': 'iso',
            'templatename': 'template',
            'keypair': 'ssh_key',
        }
        self.facts = {
            'cloudstack_instance': None,
        }

    def get_instance(self):
        """Look up the VM by name/display name/uuid; result is cached."""
        if not self.instance:
            wanted = self.module.params.get('name')
            args = {
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'projectid': self.get_project(key='id'),
            }
            # Do not pass zoneid, as the instance name must be unique across zones.
            res = self.cs.listVirtualMachines(**args)
            if res:
                for vm in res['virtualmachine']:
                    if wanted.lower() in [vm['name'].lower(), vm['displayname'].lower(), vm['id']]:
                        self.instance = vm
                        break
        return self.instance

    def get_volumes(self, instance):
        """Return a list of {size, type, name} dicts for the VM's volumes."""
        volume_details = []
        if instance:
            args = {
                'account': instance.get('account'),
                'domainid': instance.get('domainid'),
                'projectid': instance.get('projectid'),
                'virtualmachineid': instance['id'],
            }
            res = self.cs.listVolumes(**args)
            if res:
                volume_details = [
                    {'size': vol['size'], 'type': vol['type'], 'name': vol['name']}
                    for vol in res['volume']
                ]
        return volume_details

    def run(self):
        """Resolve the instance and populate the facts dict; fail if absent."""
        instance = self.get_instance()
        if not instance:
            self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name'))
        self.facts['cloudstack_instance'] = self.get_result(instance)
        return self.facts

    def get_result(self, instance):
        """Extend the base-class result with groups, default IP and volumes."""
        super(AnsibleCloudStackInstanceFacts, self).get_result(instance)
        if instance:
            if 'securitygroup' in instance:
                self.result['security_groups'] = [
                    sg['name'] for sg in instance['securitygroup']
                ]
            if 'affinitygroup' in instance:
                self.result['affinity_groups'] = [
                    ag['name'] for ag in instance['affinitygroup']
                ]
            if 'nic' in instance:
                for nic in instance['nic']:
                    if nic['isdefault'] and 'ipaddress' in nic:
                        self.result['default_ip'] = nic['ipaddress']
            volumes = self.get_volumes(instance)
            if volumes:
                self.result['volumes'] = volumes
        return self.result
def main():
    """Module entry point: parse arguments, gather facts, exit with result."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        domain=dict(default=None),
        account=dict(default=None),
        project=dict(default=None),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # Fact gathering never changes state, so changed is always False.
    cs_instance_facts = AnsibleCloudStackInstanceFacts(module=module).run()
    module.exit_json(changed=False, ansible_facts=cs_instance_facts)
# Star import of the AnsibleModule machinery, per the (legacy) module
# boilerplate convention of this Ansible version.
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| {
"content_hash": "906850d5c5dfd96d4574b61b52629920",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 126,
"avg_line_length": 31.209964412811388,
"alnum_prop": 0.6380843785632839,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "dde43d707745af8881b25561df2fab44eff09b38",
"size": "9513",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/cloudstack/cs_instance_facts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
"""
:mod:`Crossovers` -- crossover methods module
=====================================================================
In this module we have the genetic operators of crossover (or recombination) for each chromosome representation.
"""
from random import randint as rand_randint, choice as rand_choice
from random import random as rand_random
import math
import Util
import Consts
#############################
## 1D Binary String ##
#############################
def G1DBinaryStringXSinglePoint(genome, **args):
    """ The crossover of 1D Binary String, Single Point

    .. warning:: You can't use this crossover method for binary strings with length of 1.

    """
    gMom, gDad = args["mom"], args["dad"]

    if len(gMom) == 1:
        Util.raiseException("The Binary String have one element, can't use the Single Point Crossover method !", TypeError)

    cut = rand_randint(1, len(gMom) - 1)
    sister = brother = None

    if args["count"] >= 1:
        # Sister: mother's head, father's tail.
        sister = gMom.clone()
        sister.resetStats()
        sister[cut:] = gDad[cut:]

    if args["count"] == 2:
        # Brother: father's head, mother's tail.
        brother = gDad.clone()
        brother.resetStats()
        brother[cut:] = gMom[cut:]

    return (sister, brother)
def G1DBinaryStringXTwoPoint(genome, **args):
    """ The 1D Binary String crossover, Two Point

    .. warning:: You can't use this crossover method for binary strings with length of 1.

    """
    gMom, gDad = args["mom"], args["dad"]

    if len(gMom) == 1:
        Util.raiseException("The Binary String have one element, can't use the Two Point Crossover method !", TypeError)

    point_a = rand_randint(1, len(gMom) - 1)
    point_b = rand_randint(1, len(gMom) - 1)
    lo, hi = min(point_a, point_b), max(point_a, point_b)

    sister = brother = None

    if args["count"] >= 1:
        # Sister: mother's genes with the father's middle segment.
        sister = gMom.clone()
        sister.resetStats()
        sister[lo:hi] = gDad[lo:hi]

    if args["count"] == 2:
        # Brother: father's genes with the mother's middle segment.
        brother = gDad.clone()
        brother.resetStats()
        brother[lo:hi] = gMom[lo:hi]

    return (sister, brother)
def G1DBinaryStringXUniform(genome, **args):
    """ The G1DList Uniform Crossover """
    gMom, gDad = args["mom"], args["dad"]

    sister = gMom.clone()
    brother = gDad.clone()
    sister.resetStats()
    brother.resetStats()

    # Exchange each position independently with a fixed probability.
    for i in xrange(len(gMom)):
        if Util.randomFlipCoin(Consts.CDefG1DBinaryStringUniformProb):
            sister[i], brother[i] = brother[i], sister[i]

    return (sister, brother)
####################
## 1D List ##
####################
def G1DListCrossoverSinglePoint(genome, **args):
    """ The crossover of G1DList, Single Point

    .. warning:: You can't use this crossover method for lists with just one element.

    """
    gMom, gDad = args["mom"], args["dad"]

    if len(gMom) == 1:
        Util.raiseException("The 1D List have one element, can't use the Single Point Crossover method !", TypeError)

    cut = rand_randint(1, len(gMom) - 1)
    sister = brother = None

    if args["count"] >= 1:
        # Sister: mother's head, father's tail.
        sister = gMom.clone()
        sister.resetStats()
        sister[cut:] = gDad[cut:]

    if args["count"] == 2:
        # Brother: father's head, mother's tail.
        brother = gDad.clone()
        brother.resetStats()
        brother[cut:] = gMom[cut:]

    return (sister, brother)
def G1DListCrossoverTwoPoint(genome, **args):
    """ The G1DList crossover, Two Point

    .. warning:: You can't use this crossover method for lists with just one element.

    """
    gMom, gDad = args["mom"], args["dad"]

    if len(gMom) == 1:
        Util.raiseException("The 1D List have one element, can't use the Two Point Crossover method !", TypeError)

    point_a = rand_randint(1, len(gMom) - 1)
    point_b = rand_randint(1, len(gMom) - 1)
    lo, hi = min(point_a, point_b), max(point_a, point_b)

    sister = brother = None

    if args["count"] >= 1:
        # Sister: mother's genes with the father's middle segment.
        sister = gMom.clone()
        sister.resetStats()
        sister[lo:hi] = gDad[lo:hi]

    if args["count"] == 2:
        # Brother: father's genes with the mother's middle segment.
        brother = gDad.clone()
        brother.resetStats()
        brother[lo:hi] = gMom[lo:hi]

    return (sister, brother)
def G1DBinaryStringXGeneConversion(genome, **args):
    """ The 1D Binary String Gene Conversion crossover.

    A random two-point segment of the mother is copied over the father
    (one-directional transfer: the sister, when produced, is an
    unmodified clone of the mother).

    .. warning:: You can't use this crossover method for binary strings with length of 1.

    """
    sister = None
    brother = None
    gMom = args["mom"]
    gDad = args["dad"]

    if len(gMom) == 1:
        Util.raiseException("The Binary String have one element, can't use the Two Point Crossover method !", TypeError)

    cuts = [rand_randint(1, len(gMom)-1), rand_randint(1, len(gMom)-1)]

    # Normalize so cuts[0] <= cuts[1].
    if cuts[0] > cuts[1]:
        Util.listSwapElement(cuts, 0, 1)

    if args["count"] >= 1:
        sister = gMom.clone()
        sister.resetStats()

    if args["count"] == 2:
        brother = gDad.clone()
        brother.resetStats()
        brother[cuts[0]:cuts[1]] = gMom[cuts[0]:cuts[1]]

    return (sister, brother)
def G1DBinaryStringXPMXCrossover(genome, **args):
    """ PMX-style (partially matched) crossover for 1D genomes.

    Performs a two-point segment exchange followed by a repair pass that
    rewrites genes outside the segment which collide with genes inside it.

    .. warning:: You can't use this crossover method for genomes with length of 1.

    """
    sister = None
    brother = None
    gMom = args["mom"]
    gDad = args["dad"]

    if len(gMom) == 1:
        Util.raiseException("The Binary String have one element, can't use the Two Point Crossover method !", TypeError)

    cuts = [rand_randint(1, len(gMom)-1), rand_randint(1, len(gMom)-1)]

    # Normalize so cuts[0] <= cuts[1].
    if cuts[0] > cuts[1]:
        Util.listSwapElement(cuts, 0, 1)

    offset = cuts[0]
    segment_len = cuts[1] - cuts[0]

    if args["count"] >= 1:
        sister = gMom.clone()
        sister.resetStats()
        sister[cuts[0]:cuts[1]] = gDad[cuts[0]:cuts[1]]
        # Repair pass was previously run even when sister was None
        # (count == 0); it is now guarded by the count check.
        for i in xrange(len(sister)):
            for j in xrange(segment_len):
                if sister[i] == sister[offset+j] and i != (offset+j):
                    sister[i] = gMom[offset+j]

    if args["count"] == 2:
        brother = gDad.clone()
        brother.resetStats()
        brother[cuts[0]:cuts[1]] = gMom[cuts[0]:cuts[1]]
        # BUG FIX: the original sliced with ``brother[1]`` instead of
        # ``cuts[1]`` and crashed with len(None) when count == 1.
        for i in xrange(len(brother)):
            for j in xrange(segment_len):
                if brother[i] == brother[offset+j] and i != (offset+j):
                    brother[i] = gDad[offset+j]

    return (sister, brother)
def G1DListCrossoverUniform(genome, **args):
    """ The G1DList Uniform Crossover """
    gMom, gDad = args["mom"], args["dad"]

    sister = gMom.clone()
    brother = gDad.clone()
    sister.resetStats()
    brother.resetStats()

    # Exchange each position independently with a fixed probability.
    for i in xrange(len(gMom)):
        if Util.randomFlipCoin(Consts.CDefG1DListCrossUniformProb):
            sister[i], brother[i] = brother[i], sister[i]

    return (sister, brother)
def G1DListCrossoverOX(genome, **args):
    """ The OX Crossover for G1DList  (order crossover)

    Exchanges a middle segment and refills the remainder of each child
    with the other parent's genes in their rotational order.
    """
    sister = None
    brother = None
    gMom = args["mom"]
    gDad = args["dad"]
    listSize = len(gMom)

    c1, c2 = [rand_randint(1, len(gMom)-1), rand_randint(1, len(gMom)-1)]

    while c1 == c2:
        c2 = rand_randint(1, len(gMom)-1)

    if c1 > c2:
        h = c1
        c1 = c2
        c2 = h

    if args["count"] >= 1:
        sister = gMom.clone()
        sister.resetStats()
        P1 = [c for c in gMom[c2:] + gMom[:c2] if c not in gDad[c1:c2]]
        sister.genomeList = P1[listSize - c2:] + gDad[c1:c2] + P1[:listSize - c2]
        # BUG FIX: the asserts were unconditional; with count == 1 the
        # brother is None and len(None) raised TypeError.
        assert listSize == len(sister)

    if args["count"] == 2:
        brother = gDad.clone()
        brother.resetStats()
        P2 = [c for c in gDad[c2:] + gDad[:c2] if c not in gMom[c1:c2]]
        brother.genomeList = P2[listSize - c2:] + gMom[c1:c2] + P2[:listSize - c2]
        assert listSize == len(brother)

    return (sister, brother)
def G1DListCrossoverEdge(genome, **args):
    """ The Edge Recombination crossover for G1DList (widely used for TSP problem)

    See more information in the `Edge Recombination Operator <http://en.wikipedia.org/wiki/Edge_recombination_operator>`_
    Wikipedia entry.

    """
    gMom, sisterl = args["mom"], []
    gDad, brotherl = args["dad"], []
    mom_edges, dad_edges, merge_edges = Util.G1DListGetEdgesComposite(gMom, gDad)

    # Build each child by walking the adjacency information, preferring
    # edges that appear in both parents (merge_edges).
    for c, u in (sisterl, set(gMom)), (brotherl, set(gDad)):
        curr = None
        for i in xrange(len(gMom)):
            # Start (or restart after a dead end) from a random unused gene.
            curr = rand_choice(tuple(u)) if not curr else curr
            c.append(curr)
            u.remove(curr)
            # Prefer a shared edge; otherwise fall back to any parent edge.
            d = [v for v in merge_edges.get(curr, []) if v in u]
            if d: curr = rand_choice(d)
            else:
                s = [v for v in mom_edges.get(curr, []) if v in u]
                s += [v for v in dad_edges.get(curr, []) if v in u]
                curr = rand_choice(s) if s else None

    sister = gMom.clone()
    brother = gDad.clone()
    sister.resetStats()
    brother.resetStats()
    sister.genomeList = sisterl
    brother.genomeList = brotherl
    return (sister, brother)
def G1DListCrossoverCutCrossfill(genome, **args):
    """ The crossover of G1DList, Cut and crossfill, for permutations

    Keeps the head of one parent and refills the tail with the other
    parent's genes, in their order, skipping duplicates.
    """
    sister = None
    brother = None
    gMom = args["mom"]
    gDad = args["dad"]

    if len(gMom) == 1:
        Util.raiseException("The 1D List have one element, can't use the Single Point Crossover method !", TypeError)

    cut = rand_randint(1, len(gMom)-1)

    if args["count"] >= 1:
        sister = gMom.clone()
        mother_part = gMom[0:cut]
        sister.resetStats()
        # i = number of tail slots to fill; x = slots filled so far.
        i = (len(sister) - cut)
        x = 0
        for v in gDad:
            if v in mother_part: continue
            if x >= i: break
            sister[cut+x] = v
            x += 1

    if args["count"] == 2:
        brother = gDad.clone()
        father_part = gDad[0:cut]
        brother.resetStats()
        i = (len(brother) - cut)
        x = 0
        for v in gMom:
            if v in father_part: continue
            if x >= i: break
            brother[cut+x] = v
            x += 1

    return (sister, brother)
def G1DListCrossoverRealSBX(genome, **args):
    """ Experimental SBX Implementation - Follows the implementation in NSGA-II (Deb, et.al)

    Some implementation `reference <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.

    .. warning:: This crossover method is Data Type Dependent, which means that
                 must be used for 1D genome of real values.

    NOTE(review): this operator swaps values *in place* on the parent
    genomes while it works — confirm callers pass clones if the parents
    must stay untouched.
    """
    EPS = Consts.CDefG1DListSBXEPS
    # Crossover distribution index
    eta_c = Consts.CDefG1DListSBXEtac

    gMom = args["mom"]
    gDad = args["dad"]

    # Get the variable bounds ('gDad' could have been used; but I love Mom:-))
    lb = gMom.getParam("rangemin", Consts.CDefRangeMin)
    ub = gMom.getParam("rangemax", Consts.CDefRangeMax)

    sister = gMom.clone()
    brother = gDad.clone()

    sister.resetStats()
    brother.resetStats()

    for i in range(0,len(gMom)):
        if math.fabs(gMom[i]-gDad[i]) > EPS:
            # Ensure gMom[i] <= gDad[i] for the spread-factor formulas below.
            if gMom[i] > gDad[i]:
                #swap
                temp = gMom[i]
                gMom[i] = gDad[i]
                gDad[i] = temp

            #random number between 0 & 1
            u = rand_random()

            # Spread factor toward the lower bound -> brother child.
            beta = 1.0 + 2*(gMom[i] - lb)/(1.0*(gDad[i]-gMom[i]))
            alpha = 2.0 - beta**(-(eta_c+1.0))

            if u <= (1.0/alpha):
                beta_q = (u*alpha)**(1.0/((eta_c + 1.0)*1.0))
            else:
                beta_q = (1.0/(2.0-u*alpha))**(1.0/(1.0*(eta_c + 1.0)))

            brother[i] = 0.5*((gMom[i] + gDad[i]) - beta_q*(gDad[i]-gMom[i]))

            # Spread factor toward the upper bound -> sister child.
            beta = 1.0 + 2.0*(ub - gDad[i])/(1.0*(gDad[i]-gMom[i]))
            alpha = 2.0 - beta**(-(eta_c+1.0))

            if u <= (1.0/alpha):
                beta_q = (u*alpha)**(1.0/((eta_c + 1)*1.0))
            else:
                beta_q = (1.0/(2.0-u*alpha))**(1.0/(1.0*(eta_c + 1.0)))

            sister[i] = 0.5*((gMom[i] + gDad[i]) + beta_q*(gDad[i]-gMom[i]))

            # Clamp both children into [lb, ub].
            if brother[i] > ub: brother[i] = ub
            if brother[i] < lb: brother[i] = lb

            if sister[i] > ub: sister[i] = ub
            if sister[i] < lb: sister[i] = lb

            if rand_random() > 0.5:
                # Swap
                temp = sister[i]
                sister[i] = brother[i]
                brother[i] = temp
        else:
            sister[i] = gMom[i]
            brother[i] = gDad[i]

    return (sister, brother)
####################
## 2D List ##
####################
def G2DListCrossoverUniform(genome, **args):
    """ The G2DList Uniform Crossover """
    gMom, gDad = args["mom"], args["dad"]

    sister = gMom.clone()
    brother = gDad.clone()
    sister.resetStats()
    brother.resetStats()

    height, width = gMom.getSize()

    # Exchange each cell independently with a fixed probability.
    for i in xrange(height):
        for j in xrange(width):
            if Util.randomFlipCoin(Consts.CDefG2DListCrossUniformProb):
                swapped = sister.getItem(i, j)
                sister.setItem(i, j, brother.getItem(i, j))
                brother.setItem(i, j, swapped)

    return (sister, brother)
def G2DListCrossoverSingleVPoint(genome, **args):
    """ The crossover of G2DList, Single Vertical Point """
    gMom, gDad = args["mom"], args["dad"]
    sister = brother = None

    cut = rand_randint(1, gMom.getWidth() - 1)

    if args["count"] >= 1:
        # Sister: mother's left columns, father's right columns.
        sister = gMom.clone()
        sister.resetStats()
        for row in xrange(sister.getHeight()):
            sister[row][cut:] = gDad[row][cut:]

    if args["count"] == 2:
        # Brother: father's left columns, mother's right columns.
        brother = gDad.clone()
        brother.resetStats()
        for row in xrange(brother.getHeight()):
            brother[row][cut:] = gMom[row][cut:]

    return (sister, brother)
def G2DListCrossoverSingleHPoint(genome, **args):
    """ The crossover of G2DList, Single Horizontal Point

    Rows below a random horizontal cut are exchanged between the parents.
    """
    sister = None
    brother = None
    gMom = args["mom"]
    gDad = args["dad"]

    cut = rand_randint(1, gMom.getHeight()-1)

    if args["count"] >= 1:
        sister = gMom.clone()
        sister.resetStats()
        for i in xrange(cut, sister.getHeight()):
            sister[i][:] = gDad[i][:]

    if args["count"] == 2:
        brother = gDad.clone()
        brother.resetStats()
        # BUG FIX: the original iterated over ALL rows, which overwrote the
        # whole brother with the mother's rows (a clone, not a crossover).
        for i in xrange(cut, brother.getHeight()):
            brother[i][:] = gMom[i][:]

    return (sister, brother)
#############################
## 2D Binary String ##
#############################
def G2DBinaryStringXUniform(genome, **args):
    """ The G2DBinaryString Uniform Crossover

    .. versionadded:: 0.6
       The *G2DBinaryStringXUniform* function
    """
    gMom, gDad = args["mom"], args["dad"]

    sister = gMom.clone()
    brother = gDad.clone()
    sister.resetStats()
    brother.resetStats()

    height, width = gMom.getSize()

    # Exchange each cell independently with a fixed probability.
    for i in xrange(height):
        for j in xrange(width):
            if Util.randomFlipCoin(Consts.CDefG2DBinaryStringUniformProb):
                swapped = sister.getItem(i, j)
                sister.setItem(i, j, brother.getItem(i, j))
                brother.setItem(i, j, swapped)

    return (sister, brother)
def G2DBinaryStringXSingleVPoint(genome, **args):
    """ The crossover of G2DBinaryString, Single Vertical Point

    .. versionadded:: 0.6
       The *G2DBinaryStringXSingleVPoint* function
    """
    gMom, gDad = args["mom"], args["dad"]
    sister = brother = None

    cut = rand_randint(1, gMom.getWidth() - 1)

    if args["count"] >= 1:
        # Sister: mother's left columns, father's right columns.
        sister = gMom.clone()
        sister.resetStats()
        for row in xrange(sister.getHeight()):
            sister[row][cut:] = gDad[row][cut:]

    if args["count"] == 2:
        # Brother: father's left columns, mother's right columns.
        brother = gDad.clone()
        brother.resetStats()
        for row in xrange(brother.getHeight()):
            brother[row][cut:] = gMom[row][cut:]

    return (sister, brother)
def G2DBinaryStringXSingleHPoint(genome, **args):
    """ The crossover of G2DBinaryString, Single Horizontal Point

    Rows below a random horizontal cut are exchanged between the parents.

    .. versionadded:: 0.6
       The *G2DBinaryStringXSingleHPoint* function
    """
    sister = None
    brother = None
    gMom = args["mom"]
    gDad = args["dad"]

    cut = rand_randint(1, gMom.getHeight()-1)

    if args["count"] >= 1:
        sister = gMom.clone()
        sister.resetStats()
        for i in xrange(cut, sister.getHeight()):
            sister[i][:] = gDad[i][:]

    if args["count"] == 2:
        brother = gDad.clone()
        brother.resetStats()
        # BUG FIX: the original iterated over ALL rows, which overwrote the
        # whole brother with the mother's rows (a clone, not a crossover).
        for i in xrange(cut, brother.getHeight()):
            brother[i][:] = gMom[i][:]

    return (sister, brother)
#############################
## Tree ##
#############################
def GTreeCrossoverSinglePoint(genome, **args):
    """ The crossover for GTree, Single Point

    Picks one non-root node from each parent tree and swaps the two
    subtrees; both children are built from clones of the parents.
    """
    sister = None
    brother = None
    gMom = args["mom"].clone()
    gDad = args["dad"].clone()

    gMom.resetStats()
    gDad.resetStats()

    # Parallel traversal stacks; all_*_nodes collect every node except the
    # roots, pairing mom/dad nodes by traversal order.
    node_mom_stack = []
    all_mom_nodes = []
    node_mom_tmp = None

    node_dad_stack = []
    all_dad_nodes = []
    node_dad_tmp = None

    node_mom_stack.append(gMom.getRoot())
    node_dad_stack.append(gDad.getRoot())

    while (len(node_mom_stack) > 0) and (len(node_dad_stack) > 0):
        node_mom_tmp = node_mom_stack.pop()
        node_dad_tmp = node_dad_stack.pop()

        if node_mom_tmp != gMom.getRoot():
            all_mom_nodes.append(node_mom_tmp)
            all_dad_nodes.append(node_dad_tmp)

        node_mom_stack.extend(node_mom_tmp.getChilds())
        node_dad_stack.extend(node_dad_tmp.getChilds())

    # Nothing to exchange (single-node trees): return untouched clones.
    if len(all_mom_nodes)==0 or len(all_dad_nodes)==0:
        return (gMom, gDad)

    if len(all_dad_nodes) == 1: nodeDad = all_dad_nodes[0]
    else: nodeDad = rand_choice(all_dad_nodes)

    if len(all_mom_nodes) == 1: nodeMom = all_mom_nodes[0]
    else: nodeMom = rand_choice(all_mom_nodes)

    nodeMom_parent = nodeMom.getParent()
    nodeDad_parent = nodeDad.getParent()

    # Sister
    if args["count"] >= 1:
        sister = gMom
        nodeDad.setParent(nodeMom_parent)
        nodeMom_parent.replaceChild(nodeMom, nodeDad)
        sister.processNodes()

    # Brother
    if args["count"] == 2:
        brother = gDad
        nodeMom.setParent(nodeDad_parent)
        nodeDad_parent.replaceChild(nodeDad, nodeMom)
        brother.processNodes()

    return (sister, brother)
def GTreeCrossoverSinglePointStrict(genome, **args):
    """ The crossover of Tree, Strict Single Point

    .. note:: This crossover method creates offspring with restriction of the
              *max_depth* parameter.

    Accepts the *max_attempt* parameter, *max_depth* (required), and
    the distr_leaf (>= 0.0 and <= 1.0), which represents the probability
    of leaf selection when finding random nodes for crossover.

    """
    sister = None
    brother = None

    gMom = args["mom"].clone()
    gDad = args["dad"].clone()

    gMom.resetStats()
    gDad.resetStats()

    max_depth = gMom.getParam("max_depth", None)
    max_attempt = gMom.getParam("max_attempt", 10)
    distr_leaf = gMom.getParam("distr_leaf", None)

    if max_depth is None:
        Util.raiseException("You must specify the max_depth genome parameter !", ValueError)

    if max_depth < 0:
        Util.raiseException("The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !", ValueError)

    momRandom = None
    dadRandom = None

    # Retry until a pair of crossover points keeps both children within
    # max_depth, or the attempt budget is exhausted.
    for i in xrange(max_attempt):

        if distr_leaf is None:
            dadRandom = gDad.getRandomNode()
            momRandom = gMom.getRandomNode()
        else:
            # distr_leaf biases the choice between leaves (1) and
            # internal nodes (2).
            if Util.randomFlipCoin(distr_leaf):
                momRandom = gMom.getRandomNode(1)
            else:
                momRandom = gMom.getRandomNode(2)

            if Util.randomFlipCoin(distr_leaf):
                dadRandom = gDad.getRandomNode(1)
            else:
                dadRandom = gDad.getRandomNode(2)

        assert momRandom is not None
        assert dadRandom is not None

        # Optimize here
        mH = gMom.getNodeHeight(momRandom)
        dH = gDad.getNodeHeight(dadRandom)

        mD = gMom.getNodeDepth(momRandom)
        dD = gDad.getNodeDepth(dadRandom)

        # The depth of the crossover is greater than the max_depth
        if (dD+mH <= max_depth) and (mD+dH <= max_depth):
            break

    if i == (max_attempt-1):
        # Budget exhausted: return untouched clones.
        # NOTE(review): this also triggers if the *last* attempt found a
        # valid point (break on i == max_attempt-1) — confirm intended.
        assert gMom.getHeight() <= max_depth
        return (gMom, gDad)
    else:
        nodeMom, nodeDad = momRandom, dadRandom

    nodeMom_parent = nodeMom.getParent()
    nodeDad_parent = nodeDad.getParent()

    # Sister
    if args["count"] >= 1:
        sister = gMom
        nodeDad.setParent(nodeMom_parent)

        if nodeMom_parent is None:
            sister.setRoot(nodeDad)
        else:
            nodeMom_parent.replaceChild(nodeMom, nodeDad)
        sister.processNodes()
        assert sister.getHeight() <= max_depth

    # Brother
    if args["count"] == 2:
        brother = gDad
        nodeMom.setParent(nodeDad_parent)

        if nodeDad_parent is None:
            brother.setRoot(nodeMom)
        else:
            nodeDad_parent.replaceChild(nodeDad, nodeMom)
        brother.processNodes()
        assert brother.getHeight() <= max_depth

    return (sister, brother)
#############################################################################
################# GTreeGP Crossovers ######################################
#############################################################################
def GTreeGPCrossoverSinglePoint(genome, **args):
    """ The crossover of the GTreeGP, Single Point for Genetic Programming

    .. note:: This crossover method creates offspring with restriction of the
              *max_depth* parameter.

    Accepts the *max_attempt* parameter, *max_depth* (required).

    """
    sister = None
    brother = None

    gMom = args["mom"].clone()
    gDad = args["dad"].clone()

    gMom.resetStats()
    gDad.resetStats()

    max_depth = gMom.getParam("max_depth", None)
    max_attempt = gMom.getParam("max_attempt", 15)

    if max_depth is None:
        Util.raiseException("You must specify the max_depth genome parameter !", ValueError)

    if max_depth < 0:
        Util.raiseException("The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !", ValueError)

    momRandom = None
    dadRandom = None

    # Retry until a pair of same-kind nodes keeps both children within
    # max_depth, or the attempt budget is exhausted.
    for i in xrange(max_attempt):

        dadRandom = gDad.getRandomNode()

        # Match the mom node kind (terminal/non-terminal) to the dad node.
        if dadRandom.getType() == Consts.nodeType["TERMINAL"]:
            momRandom = gMom.getRandomNode(1)
        elif dadRandom.getType() == Consts.nodeType["NONTERMINAL"]:
            momRandom = gMom.getRandomNode(2)

        mD = gMom.getNodeDepth(momRandom)
        dD = gDad.getNodeDepth(dadRandom)

        # Two nodes are root
        if mD==0 and dD==0: continue

        mH = gMom.getNodeHeight(momRandom)
        if dD+mH > max_depth: continue

        dH = gDad.getNodeHeight(dadRandom)
        if mD+dH > max_depth: continue

        break

    if i==(max_attempt-1):
        # Budget exhausted: return untouched clones.
        # NOTE(review): this also triggers if the *last* attempt found a
        # valid point (break on i == max_attempt-1) — confirm intended.
        assert gMom.getHeight() <= max_depth
        return (gMom, gDad)
    else:
        nodeMom, nodeDad = momRandom, dadRandom

    nodeMom_parent = nodeMom.getParent()
    nodeDad_parent = nodeDad.getParent()

    # Sister
    if args["count"] >= 1:
        sister = gMom
        nodeDad.setParent(nodeMom_parent)

        if nodeMom_parent is None:
            sister.setRoot(nodeDad)
        else:
            nodeMom_parent.replaceChild(nodeMom, nodeDad)
        sister.processNodes()
        assert sister.getHeight() <= max_depth

    # Brother
    if args["count"] == 2:
        brother = gDad
        nodeMom.setParent(nodeDad_parent)

        if nodeDad_parent is None:
            brother.setRoot(nodeMom)
        else:
            nodeDad_parent.replaceChild(nodeDad, nodeMom)
        brother.processNodes()
        assert brother.getHeight() <= max_depth

    return (sister, brother)
| {
"content_hash": "979dae53451bff613e7e9ad8faf1c7d9",
"timestamp": "",
"source": "github",
"line_count": 858,
"max_line_length": 131,
"avg_line_length": 27.06177156177156,
"alnum_prop": 0.585727206167363,
"repo_name": "tapomayukh/projects_in_python",
"id": "b0da6ad06f0ec1e81e096734cc08054cd27d05a0",
"size": "23219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox_tapo/src/AI/Genetic Algorithm/Crossovers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "4903"
},
{
"name": "Python",
"bytes": "4451912"
}
],
"symlink_target": ""
} |
"""
Network.
Copyright 2014 Stanford University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import scipy.stats
from .layer import InputLayer
class Network(object):
    """A feed-forward network composed of one or more layers.

    The network owns all plumbing between layers: weight/bias
    initialization, forward propagation, and backpropagation of errors.

    Parameters
    ----------
    layers : list
        Layers (an InputLayer is prepended automatically).
    input_dim : int
        Input dimensionality.
    trainer : Trainer, optional
        Trainer.
    """
    #TODO don't require explicit weights (we don't know layers[0] dim)
    #TODO get input_dim from dataset?
    def __init__(self, layers, input_dim, trainer=None):
        self.layers = np.concatenate(([InputLayer(input_dim)], layers))
        self.trainer = trainer
        self.setup()

    def setup(self):
        """Wire up the network: initialize any missing weights and biases."""
        # Hidden layers: random-normal weights and zero biases. Layer i's
        # weight matrix maps its activations to layer i+1's input space.
        for i, layer in enumerate(self.layers[:-1]):
            fan_out = self.layers[i + 1].size
            if layer.weights is None:
                # NOTE(review): layer.scale is passed as the distribution's
                # first positional argument (loc, not scale) — confirm that
                # this is the intended parameterization.
                layer.weights = np.asmatrix(
                    scipy.stats.norm(layer.scale).rvs(
                        (fan_out, layer.size)))
            if layer.biases is None:
                layer.biases = np.asmatrix(np.zeros((fan_out, 1)))

        # Output layer defaults to an identity transform with zero biases.
        output_layer = self.layers[-1]
        if output_layer.weights is None:
            output_layer.weights = np.asmatrix(
                np.identity(output_layer.size))
        if output_layer.biases is None:
            output_layer.biases = np.asmatrix(
                np.zeros((output_layer.size, 1)))

    def set_trainer(self, trainer):
        """Attach a trainer and point it back at this network.

        Parameters
        ----------
        trainer : Trainer
            Trainer.
        """
        self.trainer = trainer
        self.trainer.set_network(self)

    def fit(self, X, y=None, n_epochs=500):
        """Train the network using the attached trainer.

        Parameters
        ----------
        X : array_like
            Training examples.
        y : array_like, optional
            Training labels.
        n_epochs : int
            Number of training epochs.
        """
        self.trainer.fit(X, y, n_epochs=n_epochs)

    def predict(self, X):
        """Predict labels for examples.

        Parameters
        ----------
        X : array_like
            Input values (one example per row).
        """
        return self.forward(np.asmatrix(X).T)

    def forward(self, z):
        """Run forward propagation and return the final activation.

        Parameters
        ----------
        z : array_like
            Transformed input.
        """
        activation = None
        for layer in self.layers:
            activation = layer.activate(z)
            z = layer.transform(activation)
        return activation

    def get_activations_and_gradients(self, z):
        """Return per-layer activations and gradients.

        Parameters
        ----------
        z : array_like
            Transformed input.
        """
        activations, gradients = [], []
        for layer in self.layers:
            activation, gradient = layer.get_activations_and_gradient(z)
            activations.append(activation)
            gradients.append(gradient)
            z = layer.transform(activation)
        return activations, gradients

    def backpropagate_errors(self, output_error, gradients):
        """Backpropagate errors from the output layer toward the input.

        Parameters
        ----------
        output_error : array_like
            Output error (cost gradient).
        gradients : list
            Gradients for each layer.

        Returns
        -------
        list
            Per-layer errors, ordered input-to-output.
        """
        error = output_error
        errors = [error]
        # Walk the layers backwards; each step mixes the downstream error
        # through the transposed weights and the local gradient.
        for i in reversed(range(len(self.layers) - 1)):
            error = np.multiply(self.layers[i].weights.T * error,
                                gradients[i])
            errors.append(error)
        errors.reverse()
        return errors
| {
"content_hash": "5bcf80b17ae6b6749b437821f835e3ba",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 27.743589743589745,
"alnum_prop": 0.5566081330868762,
"repo_name": "skearnes/neural-network",
"id": "9bf1307d5ab2a9b13cfa630269d9dd5628d297d7",
"size": "4328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neural_network/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15933"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Simple example of setting up a multi-agent policy mapping.
Control the number of agents and policies via --num-agents and --num-policies.
This works with hundreds of agents and policies, but note that initializing
many TF policies will take some time.
Also, TF evals might slow down with large numbers of policies. To debug TF
execution, set the TF_TIMELINE_DIR environment variable.
"""
import argparse
import gym
import random
import ray
from ray import tune
from ray.rllib.models import Model, ModelCatalog
from ray.rllib.tests.test_multi_agent_env import MultiCartpole
from ray.tune.registry import register_env
from ray.rllib.utils import try_import_tf
# `tf` is the TensorFlow module, or None if TensorFlow is not installed.
tf = try_import_tf()

parser = argparse.ArgumentParser()
# Number of independent cartpole agents combined into one env.
parser.add_argument("--num-agents", type=int, default=4)
# Number of distinct policies shared among the agents.
parser.add_argument("--num-policies", type=int, default=2)
# Training iterations before tune stops the run.
parser.add_argument("--num-iters", type=int, default=20)
# Use the simple (single-process) optimizer instead of the default one.
parser.add_argument("--simple", action="store_true")
class CustomModel1(Model):
    def _build_layers_v2(self, input_dict, num_outputs, options):
        """Two 64-unit ReLU layers plus a linear head, in a shared scope.

        The 'fc1'/'fc2'/'fc_out' variables are created inside a global
        variable scope called 'shared', entered explicitly with AUTO_REUSE
        (outside the policy's normal variable scope), so other policies
        entering the same scope reuse these weights.
        """
        shared_scope = tf.VariableScope(tf.AUTO_REUSE, "shared")
        with tf.variable_scope(
                shared_scope,
                reuse=tf.AUTO_REUSE,
                auxiliary_name_scope=False):
            hidden = input_dict["obs"]
            for layer_name in ("fc1", "fc2"):
                hidden = tf.layers.dense(
                    hidden, 64, activation=tf.nn.relu, name=layer_name)
            logits = tf.layers.dense(
                hidden, num_outputs, activation=None, name="fc_out")
        return logits, hidden
class CustomModel2(Model):
    def _build_layers_v2(self, input_dict, num_outputs, options):
        """Same architecture as CustomModel1; variables are shared with it.

        Entering the global 'shared' scope with AUTO_REUSE makes the
        'fc1'/'fc2'/'fc_out' names resolve to the variables CustomModel1
        created (or creates them if this model builds first).
        """
        scope = tf.VariableScope(tf.AUTO_REUSE, "shared")
        with tf.variable_scope(
                scope, reuse=tf.AUTO_REUSE, auxiliary_name_scope=False):
            fc1 = tf.layers.dense(
                input_dict["obs"], 64, activation=tf.nn.relu, name="fc1")
            fc2 = tf.layers.dense(
                fc1, 64, activation=tf.nn.relu, name="fc2")
            out = tf.layers.dense(
                fc2, num_outputs, activation=None, name="fc_out")
        return out, fc2
if __name__ == "__main__":
    args = parser.parse_args()
    ray.init()

    # Simple environment with `num_agents` independent cartpole entities
    register_env("multi_cartpole", lambda _: MultiCartpole(args.num_agents))
    ModelCatalog.register_custom_model("model1", CustomModel1)
    ModelCatalog.register_custom_model("model2", CustomModel2)
    # A throwaway single-agent env, used only to read the per-agent
    # observation/action spaces (all agents share the same spaces).
    single_env = gym.make("CartPole-v0")
    obs_space = single_env.observation_space
    act_space = single_env.action_space

    # Each policy can have a different configuration (including custom model)
    def gen_policy(i):
        """Builds the (policy_cls, obs_space, act_space, config) tuple for
        policy `i`; alternates custom models and randomizes gamma."""
        config = {
            "model": {
                "custom_model": ["model1", "model2"][i % 2],
            },
            "gamma": random.choice([0.95, 0.99]),
        }
        # None -> use the algorithm's default policy class (PPO's).
        return (None, obs_space, act_space, config)

    # Setup PPO with an ensemble of `num_policies` different policies
    policies = {
        "policy_{}".format(i): gen_policy(i)
        for i in range(args.num_policies)
    }
    policy_ids = list(policies.keys())

    tune.run(
        "PPO",
        stop={"training_iteration": args.num_iters},
        config={
            "env": "multi_cartpole",
            "log_level": "DEBUG",
            "simple_optimizer": args.simple,
            "num_sgd_iter": 10,
            "multiagent": {
                "policies": policies,
                # Each agent is (re)assigned a random policy per episode.
                "policy_mapping_fn": tune.function(
                    lambda agent_id: random.choice(policy_ids)),
            },
        },
    )
| {
"content_hash": "804d84611512d0a3cc90daad9c455f50",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 36.530434782608694,
"alnum_prop": 0.6308021899547727,
"repo_name": "atumanov/ray",
"id": "275c54390f9708ebd118009d9671b334f8f84dcb",
"size": "4201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/rllib/examples/multiagent_cartpole.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20715"
},
{
"name": "C++",
"bytes": "1036803"
},
{
"name": "CSS",
"bytes": "9262"
},
{
"name": "Dockerfile",
"bytes": "3411"
},
{
"name": "HTML",
"bytes": "32704"
},
{
"name": "Java",
"bytes": "517715"
},
{
"name": "JavaScript",
"bytes": "8178"
},
{
"name": "Jupyter Notebook",
"bytes": "1610"
},
{
"name": "Python",
"bytes": "3081422"
},
{
"name": "Ruby",
"bytes": "956"
},
{
"name": "Shell",
"bytes": "76928"
},
{
"name": "Smarty",
"bytes": "955"
}
],
"symlink_target": ""
} |
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import device_context
from tensorflow.python.util import deprecation
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
if value is None:
value = [1]
elif not isinstance(value, collections_abc.Sized):
value = [value]
current_n = len(value)
if current_n == n + 2:
return value
elif current_n == 1:
value = list((value[0],) * n)
elif current_n == n:
value = list(value)
else:
raise ValueError("{} should be of length 1, {} or {} but was {}".format(
name, n, n + 2, current_n))
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
def _non_atrous_convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    data_format=None,  # pylint: disable=redefined-builtin
    strides=None,
    name=None):
  """Computes sums of N-D convolutions (actually cross correlation).

  Thin functional wrapper over `_NonAtrousConvolution`: converts the
  arguments to tensors, captures their static shapes, and immediately
  applies the configured convolution. Requires 1 <= N <= 3. The more
  general `convolution` extends this interface with a `dilation_rate`
  parameter.

  Args:
    input: Rank N+2 tensor of shape
      `[batch_size] + input_spatial_shape + [in_channels]`, or with the
      channel dimension second if `data_format` starts with `"NC"`.
    filter: Rank N+2 tensor of shape
      `filter_spatial_shape + [in_channels, out_channels]`. The rank of
      either `input` or `filter` must be statically known.
    padding: Padding method to use, must be either "VALID" or "SAME".
    data_format: A string or None. For N=1 the valid values are "NWC"
      (default) and "NCW"; for N=2, "NHWC" (default) and "NCHW"; for N=3,
      "NDHWC" (default) and "NCDHW".
    strides: Sequence of N positive integers, defaults to `[1] * N`.
    name: Name prefix to use.

  Returns:
    Rank N+2 tensor of the same type as `input`; the spatial output shape
    follows the usual "SAME"/"VALID" conventions.

  Raises:
    ValueError: if ranks are incompatible.
  """
  with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
    input_tensor = ops.convert_to_tensor(input, name="input")
    filter_tensor = ops.convert_to_tensor(filter, name="filter")
    conv = _NonAtrousConvolution(
        input_tensor.get_shape(),
        filter_shape=filter_tensor.get_shape(),
        padding=padding,
        data_format=data_format,
        strides=strides,
        name=scope)
    return conv(input_tensor, filter_tensor)
class _NonAtrousConvolution(object):
  """Helper class for _non_atrous_convolution.

  Precomputes the data-format-specific strides and selects the matching
  convolution primitive (conv1d adapter / conv2d / conv3d) at construction
  time, so that `__call__` is a cheap dispatch.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments:
    input_shape: static input shape, i.e. input.get_shape().
    filter_shape: static filter shape, i.e. filter.get_shape().
    padding: see _non_atrous_convolution.
    data_format: see _non_atrous_convolution.
    strides: see _non_atrous_convolution.
    name: see _non_atrous_convolution.
  """

  def __init__(
      self,
      input_shape,
      filter_shape,  # pylint: disable=redefined-builtin
      padding,
      data_format=None,
      strides=None,
      name=None):
    # Propagate static rank information both ways: either shape may have
    # an unknown rank as long as the other one is known.
    filter_shape = filter_shape.with_rank(input_shape.ndims)
    self.padding = padding
    self.name = name
    input_shape = input_shape.with_rank(filter_shape.ndims)
    if input_shape.ndims is None:
      raise ValueError("Rank of convolution must be known")
    if input_shape.ndims < 3 or input_shape.ndims > 5:
      raise ValueError(
          "`input` and `filter` must have rank at least 3 and at most 5")
    # Spatial rank N: full rank minus the batch and channel dimensions.
    conv_dims = input_shape.ndims - 2
    if strides is None:
      strides = [1] * conv_dims
    elif len(strides) != conv_dims:
      raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
                                                              conv_dims))
    if conv_dims == 1:
      # conv1d uses the 2-d data format names
      if data_format is None:
        data_format = "NWC"
      elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
        raise ValueError("data_format must be \"NWC\" or \"NCW\".")
      # conv1d takes a single scalar stride, not a per-dimension list.
      self.strides = strides[0]
      self.data_format = data_format
      self.conv_op = self._conv1d
    elif conv_dims == 2:
      # Pad strides with 1s at the batch/channel positions for conv2d.
      if data_format is None or data_format == "NHWC":
        data_format = "NHWC"
        strides = [1] + list(strides) + [1]
      elif data_format == "NCHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
      self.strides = strides
      self.data_format = data_format
      self.conv_op = conv2d
    elif conv_dims == 3:
      if data_format is None or data_format == "NDHWC":
        strides = [1] + list(strides) + [1]
      elif data_format == "NCDHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
                         % data_format)
      self.strides = strides
      self.data_format = data_format
      self.conv_op = gen_nn_ops.conv3d

  # Note that we need this adapter since argument names for conv1d don't match
  # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
  # pylint: disable=redefined-builtin
  def _conv1d(self, input, filter, strides, padding, data_format, name):
    """Adapter mapping the generic conv argument names onto `conv1d`."""
    return conv1d(
        value=input,
        filters=filter,
        stride=strides,
        padding=padding,
        data_format=data_format,
        name=name)
  # pylint: enable=redefined-builtin

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    """Applies the preconfigured convolution to `inp` and `filter`."""
    return self.conv_op(
        input=inp,
        filter=filter,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        name=self.name)
@tf_export("nn.dilation2d", v1=[])
def dilation2d_v2(
    input,  # pylint: disable=redefined-builtin
    filters,  # pylint: disable=redefined-builtin
    strides,
    padding,
    data_format,
    dilations,
    name=None):
  """Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.

  The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
  input channel is processed independently of the others with its own
  structuring function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
  tensor depend on the `padding` algorithm. We currently only support the
  default "NHWC" `data_format`.

  In detail, the grayscale morphological 2-D dilation is the max-sum correlation
  (for consistency with `conv2d`, we use unmirrored filters):

      output[b, y, x, c] =
         max_{dy, dx} input[b,
                            strides[1] * y + rates[1] * dy,
                            strides[2] * x + rates[2] * dx,
                            c] +
                      filters[dy, dx, c]

  Max-pooling is a special case when the filter has size equal to the pooling
  kernel size and contains all zeros.

  Note on duality: The dilation of `input` by the `filters` is equal to the
  negation of the erosion of `-input` by the reflected `filters`.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
      `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the input
      tensor. Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`.
      The input stride for atrous morphological dilation. Must be:
      `[1, rate_height, rate_width, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # The underlying kernel only implements channels-last layout.
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")

  # Map the v2 argument names (filters/dilations) onto the generated op's
  # v1-era argument names (filter/rates).
  return gen_nn_ops.dilation2d(input=input,
                               filter=filters,
                               strides=strides,
                               rates=dilations,
                               padding=padding,
                               name=name)
@tf_export(v1=["nn.dilation2d"])
def dilation2d_v1(  # pylint: disable=missing-docstring
    input,  # pylint: disable=redefined-builtin
    filter=None,  # pylint: disable=redefined-builtin
    strides=None,
    rates=None,
    padding=None,
    name=None,
    filters=None,
    dilations=None):
  # Accept both the deprecated v1 names (filter/rates) and the v2 names
  # (filters/dilations); the lookup warns when the deprecated spelling is
  # used and returns whichever argument was actually supplied.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  rates = deprecated_argument_lookup("dilations", dilations, "rates", rates)
  return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name)

# Reuse the generated op's docstring for the v1 endpoint.
dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
    input,  # pylint: disable=redefined-builtin
    dilation_rate,
    padding,
    op,
    filter_shape=None,
    spatial_dims=None,
    data_format=None):
  """Performs `op` on the space-to-batch representation of `input`.

  This has the effect of transforming sliding window operations into the
  corresponding "atrous" operation in which the input is sampled at the
  specified `dilation_rate`.

  In the special case that `dilation_rate` is uniformly 1, this simply returns:

    op(input, num_spatial_dims, padding)

  Otherwise, it returns:

    batch_to_space_nd(
      op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
         num_spatial_dims,
         "VALID")
      adjusted_dilation_rate,
      adjusted_crops),

  where:

    adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
    adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]

  defined as follows:

  We first define two int64 tensors `paddings` and `crops` of shape
  `[num_spatial_dims, 2]` based on the value of `padding` and the spatial
  dimensions of the `input`:

  If `padding = "VALID"`, then:

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate)

  If `padding = "SAME"`, then:

    dilated_filter_shape =
      filter_shape + (filter_shape - 1) * (dilation_rate - 1)

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate,
      [(dilated_filter_shape - 1) // 2,
       dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])

  Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
  dimensions are contiguous starting at the second dimension, but the specified
  `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
  `crops` in order to be usable with these operations.  For a given dimension,
  if the block size is 1, and both the starting and ending padding and crop
  amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
  which is what is needed for dimensions not part of `spatial_dims`.
  Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
  efficiently for any number of leading and trailing dimensions.

  For 0 <= i < len(spatial_dims), we assign:

    adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
    adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
    adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]

  All unassigned values of `adjusted_dilation_rate` default to 1, while all
  unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.

  Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
  padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
  `[1]*N`.

  Advanced usage. Note the following optimization: A sequence of
  `with_space_to_batch` operations with identical (not uniformly 1)
  `dilation_rate` parameters and "VALID" padding

    net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "VALID", op_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "VALID")
      ...
      result = op_k(result, num_spatial_dims, "VALID")
      return result

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
  `batch_to_space_nd`.

  Similarly, a sequence of `with_space_to_batch` operations with identical (not
  uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
  dimensions

    net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "SAME")
      ...
      result = op_k(result, num_spatial_dims, "SAME")
      return result

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  Args:
    input: Tensor of rank > max(spatial_dims).
    dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
    padding: str constant equal to "VALID" or "SAME"
    op: Function that maps (input, num_spatial_dims, padding) -> output
    filter_shape: If padding = "SAME", specifies the shape of the convolution
      kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
      If padding = "VALID", filter_shape is ignored and need not be specified.
    spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
      integers (which are >= 1) specifying the spatial dimensions of `input`
      and output.  Defaults to: `range(1, num_spatial_dims+1)`.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".

  Returns:
    The output Tensor as described above, dimensions will vary based on the op
    provided.

  Raises:
    ValueError: if `padding` is invalid or the arguments are incompatible.
    ValueError: if `spatial_dims` are invalid.
  """
  input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
  input_shape = input.get_shape()

  def build_op(num_spatial_dims, padding):
    # Adapt the user-supplied `op(input, num_spatial_dims, padding)` to the
    # (input, filter) -> output interface that _WithSpaceToBatch expects;
    # the filter argument is unused here, hence the `_` and the `None` below.
    return lambda inp, _: op(inp, num_spatial_dims, padding)

  new_op = _WithSpaceToBatch(
      input_shape,
      dilation_rate,
      padding,
      build_op,
      filter_shape=filter_shape,
      spatial_dims=spatial_dims,
      data_format=data_format)
  return new_op(input, None)
class _WithSpaceToBatch(object):
  """Helper class for with_space_to_batch.

  Validates the arguments once at construction time and, where possible,
  constant-folds the paddings so that `__call__` only performs the
  space_to_batch / op / batch_to_space dance.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments
    input_shape: static shape of input. i.e. input.get_shape().
    dilation_rate: see with_space_to_batch
    padding: see with_space_to_batch
    build_op: Function that maps (num_spatial_dims, paddings) -> (function that
      maps (input, filter) -> output).
    filter_shape: see with_space_to_batch
    spatial_dims: see with_space_to_batch
    data_format: see with_space_to_batch
  """

  def __init__(self,
               input_shape,
               dilation_rate,
               padding,
               build_op,
               filter_shape=None,
               spatial_dims=None,
               data_format=None):
    """Helper class for _with_space_to_batch."""
    dilation_rate = ops.convert_to_tensor(
        dilation_rate, dtypes.int32, name="dilation_rate")
    # The number of spatial dims is read off the (required-static) shape of
    # dilation_rate.
    try:
      rate_shape = dilation_rate.get_shape().with_rank(1)
    except ValueError:
      raise ValueError("rate must be rank 1")

    if not dilation_rate.get_shape().is_fully_defined():
      raise ValueError("rate must have known shape")

    num_spatial_dims = rate_shape.dims[0].value

    # Channels-second ("NC...") layouts shift the first spatial dim to 2.
    if data_format is not None and data_format.startswith("NC"):
      starting_spatial_dim = 2
    else:
      starting_spatial_dim = 1

    if spatial_dims is None:
      spatial_dims = range(starting_spatial_dim,
                           num_spatial_dims + starting_spatial_dim)
    orig_spatial_dims = list(spatial_dims)
    spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
    # Sorting/deduplicating must be a no-op, otherwise the caller's sequence
    # was not strictly increasing.
    if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
      raise ValueError(
          "spatial_dims must be a monotonically increasing sequence of "
          "positive integers")

    if data_format is not None and data_format.startswith("NC"):
      expected_input_rank = spatial_dims[-1]
    else:
      expected_input_rank = spatial_dims[-1] + 1

    try:
      input_shape.with_rank_at_least(expected_input_rank)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d at least" % (expected_input_rank))

    const_rate = tensor_util.constant_value(dilation_rate)
    rate_or_const_rate = dilation_rate
    if const_rate is not None:
      rate_or_const_rate = const_rate
      if np.any(const_rate < 1):
        raise ValueError("dilation_rate must be positive")
      if np.all(const_rate == 1):
        # Uniform rate of 1: no space-to-batch transform is needed at all;
        # dispatch directly to the wrapped op.
        self.call = build_op(num_spatial_dims, padding)
        return

    padding, explicit_paddings = convert_padding(padding)

    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.

    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      if filter_shape is None:
        raise ValueError("filter_shape must be specified for SAME padding")
      filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
      const_filter_shape = tensor_util.constant_value(filter_shape)
      if const_filter_shape is not None:
        filter_shape = const_filter_shape
        self.base_paddings = _with_space_to_batch_base_paddings(
            const_filter_shape, num_spatial_dims, rate_or_const_rate)
      else:
        # Filter shape only known at run time: defer base_paddings
        # computation to __call__ (see _with_space_to_batch_call).
        self.num_spatial_dims = num_spatial_dims
        self.rate_or_const_rate = rate_or_const_rate
        self.base_paddings = None
    elif padding == "VALID":
      self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
    elif padding == "EXPLICIT":
      base_paddings = (np.array(explicit_paddings)
                       .reshape([num_spatial_dims + 2, 2]))
      # Remove batch and channel dimensions
      if data_format is not None and data_format.startswith("NC"):
        self.base_paddings = base_paddings[2:]
      else:
        self.base_paddings = base_paddings[1:-1]
    else:
      raise ValueError("Invalid padding method %r" % padding)

    self.input_shape = input_shape
    self.spatial_dims = spatial_dims
    self.dilation_rate = dilation_rate
    self.data_format = data_format
    # The wrapped op always runs in "VALID" mode; the padding above already
    # accounts for the requested mode.
    self.op = build_op(num_spatial_dims, "VALID")
    self.call = self._with_space_to_batch_call

  def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
    """Call functionality for with_space_to_batch."""
    # Handle input whose shape is unknown during graph creation.
    input_spatial_shape = None
    input_shape = self.input_shape
    spatial_dims = self.spatial_dims
    if input_shape.ndims is not None:
      input_shape_list = input_shape.as_list()
      input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
    if input_spatial_shape is None or None in input_spatial_shape:
      # Fall back to the dynamic shape for any statically-unknown dim.
      input_shape_tensor = array_ops.shape(inp)
      input_spatial_shape = array_ops.stack(
          [input_shape_tensor[i] for i in spatial_dims])
    base_paddings = self.base_paddings
    if base_paddings is None:
      # base_paddings could not be computed at build time since static filter
      # shape was not fully defined.
      filter_shape = array_ops.shape(filter)
      base_paddings = _with_space_to_batch_base_paddings(
          filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
    paddings, crops = array_ops.required_space_to_batch_paddings(
        input_shape=input_spatial_shape,
        base_paddings=base_paddings,
        block_shape=self.dilation_rate)
    # Expand rate/paddings/crops from the spatial dims to all leading dims,
    # filling non-spatial positions with identity values (1 and 0).
    dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                                spatial_dims)
    paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
    crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
    input_converted = array_ops.space_to_batch_nd(
        input=inp, block_shape=dilation_rate, paddings=paddings)

    result = self.op(input_converted, filter)

    result_converted = array_ops.batch_to_space_nd(
        input=result, block_shape=dilation_rate, crops=crops)

    # Recover channel information for output shape if channels are not last.
    if self.data_format is not None and self.data_format.startswith("NC"):
      if not result_converted.shape.dims[1].value and filter is not None:
        output_shape = result_converted.shape.as_list()
        output_shape[1] = filter.shape[-1]
        result_converted.set_shape(output_shape)

    return result_converted

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # `self.call` is either the direct op (uniform rate 1) or the full
    # space-to-batch wrapper, selected in __init__.
    return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
                                       rate_or_const_rate):
  """Computes base_paddings for the "SAME"-to-"VALID" reduction.

  Given the spatial filter shape and the dilation rate, returns the
  [num_spatial_dims, 2] start/end paddings that make a "VALID" convolution
  on the padded input equivalent to a "SAME" atrous convolution.
  """
  spatial_shape = filter_shape[:num_spatial_dims]
  # Effective filter extent once (rate - 1) zeros are inserted between
  # consecutive filter taps.
  dilated_shape = (
      spatial_shape + (spatial_shape - 1) * (rate_or_const_rate - 1))
  total_pad = dilated_shape - 1
  # Match conv2d's convention: when the total padding is odd, the extra
  # element goes at the end.
  pad_start = total_pad // 2
  pad_end = total_pad - pad_start
  return array_ops.stack(
      [[pad_start[d], pad_end[d]] for d in range(num_spatial_dims)])
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
  """Returns an `adjusted` version of `orig` based on `spatial_dims`.

  Tensor of the same type as `orig` and with shape
  `[max(spatial_dims), ...]` where:

    adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]

  for 0 <= i < len(spatial_dims), and

    adjusted[j, ...] = fill_value

  for j != spatial_dims[i] - 1 for some i.

  If `orig` is a constant value, then the result will be a constant value.

  Args:
    orig: Tensor of rank > max(spatial_dims).
    fill_value: Numpy scalar (of same data type as `orig) specifying the fill
      value for non-spatial dimensions.
    spatial_dims: See with_space_to_batch.

  Returns:
    `adjusted` tensor.
  """
  fill_dims = orig.get_shape().as_list()[1:]
  dtype = orig.dtype.as_numpy_dtype
  parts = []
  # If `orig` is statically known, slice the numpy value instead of the
  # tensor so that the result is itself a constant.
  const_orig = tensor_util.constant_value(orig)
  const_or_orig = const_orig if const_orig is not None else orig
  prev_spatial_dim = 0
  i = 0
  while i < len(spatial_dims):
    start_i = i
    start_spatial_dim = spatial_dims[i]
    if start_spatial_dim > 1:
      # Fill in any gap from the previous spatial dimension (or dimension 1 if
      # this is the first spatial dimension) with `fill_value`.
      parts.append(
          np.full(
              [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
              fill_value,
              dtype=dtype))
    # Find the largest value of i such that:
    #   [spatial_dims[start_i], ..., spatial_dims[i]]
    #     == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
    # i.e. the end of a contiguous group of spatial dimensions.
    while (i + 1 < len(spatial_dims) and
           spatial_dims[i + 1] == spatial_dims[i] + 1):
      i += 1
    # Copy the whole contiguous run from `orig` with a single slice.
    parts.append(const_or_orig[start_i:i + 1])
    prev_spatial_dim = spatial_dims[i]
    i += 1
  if const_orig is not None:
    return np.concatenate(parts)
  else:
    return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
def convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    strides=None,
    dilation_rate=None,
    name=None,
    data_format=None,
    filters=None,
    dilations=None):  # pylint: disable=g-doc-args
  """Computes sums of N-D convolutions (actually cross-correlation).

  This also supports either output striding via the optional `strides` parameter
  or atrous convolution (also known as convolution with holes or dilated
  convolution, based on the French word "trous" meaning holes in English) via
  the optional `dilation_rate` parameter. Currently, however, output striding
  is not supported for atrous convolutions.

  Specifically, in the case that `data_format` does not start with "NC", given
  a rank (N+2) `input` Tensor of shape

    [num_batches,
     input_spatial_shape[0],
     ...,
     input_spatial_shape[N-1],
     num_input_channels],

  a rank (N+2) `filter` Tensor of shape

    [spatial_filter_shape[0],
     ...,
     spatial_filter_shape[N-1],
     num_input_channels,
     num_output_channels],

  an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
  specifying the filter upsampling/input downsampling rate, and an optional list
  of N `strides` (defaulting [1]*N), this computes for each N-D spatial output
  position (x[0], ..., x[N-1]):

  ```
  output[b, x[0], ..., x[N-1], k] =
      sum_{z[0], ..., z[N-1], q}
          filter[z[0], ..., z[N-1], q, k] *
          padded_input[b,
                       x[0]*strides[0] + dilation_rate[0]*z[0],
                       ...,
                       x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
                       q]
  ```

  where b is the index into the batch, k is the output channel number, q is the
  input channel number, and z is the N-D spatial offset within the filter. Here,
  `padded_input` is obtained by zero padding the input using an effective
  spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
  output striding `strides` as described in the
  [comment here](https://tensorflow.org/api_guides/python/nn#Convolution).

  In the case that `data_format` does start with `"NC"`, the `input` and output
  (but not the `filter`) are simply transposed as follows:

    convolution(input, data_format, **kwargs) =
      tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
                               **kwargs),
                   [0, N+1] + range(1, N+1))

  It is required that 1 <= N <= 3.

  Args:
    input: An (N+2)-D `Tensor` of type `T`, of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, in_channels] + input_spatial_shape` if data_format starts
      with "NC".
    filter: An (N+2)-D `Tensor` with the same type as `input` and shape
      `spatial_filter_shape + [in_channels, out_channels]`.
    padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
    strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
      Defaults to [1]*N. If any value of strides is > 1, then all values of
      dilation_rate must be 1.
    dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter
      upsampling/input downsampling rate. In the literature, the same parameter
      is sometimes called `input stride` or `dilation`. The effective filter
      size used for the convolution will be `spatial_filter_shape +
      (spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
      (dilation_rate[i]-1) zeros between consecutive elements of the original
      filter in each spatial dimension i. If any value of dilation_rate is > 1,
      then all values of strides must be 1.
    name: Optional name for the returned tensor.
    data_format: A string or None. Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC"). For N=1, the valid values are "NWC" (default) and
      "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    filters: Alias of `filter`.
    dilations: Alias of `dilation_rate`.

  Returns:
    A `Tensor` with the same type as `input` of shape

        `[batch_size] + output_spatial_shape + [out_channels]`

    if data_format is None or does not start with "NC", or

        `[batch_size, out_channels] + output_spatial_shape`

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of `padding`.

    If padding == "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding == "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] -
              (spatial_filter_shape[i]-1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: If input/output depth does not match `filter` shape, if padding
      is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
  """
  # Resolve the alias argument names onto the canonical v1 names before
  # delegating to the rank-agnostic implementation.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  dilation_rate = deprecated_argument_lookup(
      "dilations", dilations, "dilation_rate", dilation_rate)
  return convolution_internal(
      input,
      filter,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilations=dilation_rate,
      name=name)
@tf_export("nn.convolution", v1=[])
def convolution_v2(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  # Thin v2 endpoint: forward every argument by keyword to the rank-agnostic
  # implementation shared with the v1 `convolution` wrapper.  (The docstring
  # is installed at module load time from the v1 docstring.)
  return convolution_internal(
      input=input,  # pylint: disable=redefined-builtin
      filters=filters,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilations=dilations,
      name=name)
# The v2 endpoint reuses the v1 `convolution` docstring, with the deprecated
# argument names rewritten to their v2 equivalents:
# "dilation_rate" -> "dilations" and "filter" -> "filters".
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        convolution.__doc__, "dilation_rate", "dilations"),
    "filter", "filters")
def convolution_internal(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None,
    call_from_convolution=True):
  """Internal function which performs rank agnostic convolution.

  Dispatches directly to `conv1d`/`conv2d`/`conv3d` when on TPU or when all
  dilations are 1, and otherwise to the `Convolution` helper (which implements
  dilation via space-to-batch).

  Args:
    input: Rank N+2 input `Tensor`, with 1 <= N <= 3 spatial dimensions.
    filters: Rank N+2 filter `Tensor` of the same type as `input`.
    strides: Optional int or sequence of ints; output stride(s).
    padding: A string, either `"VALID"` or `"SAME"`.
    data_format: A string or None; channels are the second dimension iff it
      starts with "NC", otherwise the last.
    dilations: Optional int or sequence of ints; dilation rate(s).
    name: Optional name for the returned tensor.
    call_from_convolution: True when invoked via the public `convolution`
      wrapper; within this function it only influences the name scope chosen
      on TPU.

  Returns:
    A `Tensor` with the same type as `input`.

  Raises:
    ValueError: if neither `input`'s nor `filters`' rank is statically known,
      or the deduced spatial rank N is not 1, 2 or 3.
  """
  # Deduce N from whichever of input/filters has a statically known rank.
  # NOTE(review): the non-TensorShape branches presumably handle shape
  # objects that are plain len()-able sequences -- confirm which tensor
  # types reach them.
  if isinstance(input.shape, tensor_shape.TensorShape) and \
      input.shape.rank is not None:
    n = len(input.shape) - 2
  elif not isinstance(input.shape, tensor_shape.TensorShape) and \
      input.shape is not None:
    n = len(input.shape) - 2
  elif isinstance(filters.shape, tensor_shape.TensorShape) and \
      filters.shape.rank is not None:
    n = len(filters.shape) - 2
  elif not isinstance(filters.shape, tensor_shape.TensorShape) and \
      filters.shape is not None:
    n = len(filters.shape) - 2
  else:
    raise ValueError("rank of input or filter must be known")

  if not 1 <= n <= 3:
    raise ValueError(
        "Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))

  if data_format is None:
    channel_index = n + 1
  else:
    channel_index = 1 if data_format.startswith("NC") else n + 1

  # Expand scalars / spatial-only sequences into full rank-(N+2) lists with
  # 1s in the batch and channel positions.
  strides = _get_sequence(strides, n, channel_index, "strides")
  dilations = _get_sequence(dilations, n, channel_index, "dilations")

  # On TPU (when not re-entered from Convolution.__call__), use an
  # op-specific scope name instead of the generic "convolution".
  scopes = {1: "conv1d", 2: "Conv2D", 3: "Conv3D"}
  if not call_from_convolution and device_context.enclosing_tpu_context(
  ) is not None:
    scope = scopes[n]
  else:
    scope = "convolution"

  with ops.name_scope(name, scope, [input, filters]) as name:
    conv_ops = {1: conv1d, 2: gen_nn_ops.conv2d, 3: gen_nn_ops.conv3d}

    if device_context.enclosing_tpu_context() is not None or all(
        i == 1 for i in dilations):
      # fast path for TPU or if no dilation as gradient only supported on GPU
      # for dilations
      op = conv_ops[n]
      return op(
          input,
          filters,
          strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations,
          name=name)
    else:
      # The Convolution helper expects spatial-only strides/dilations; strip
      # the batch and channel entries added by _get_sequence above.
      if channel_index == 1:
        strides = strides[2:]
        dilations = dilations[2:]
      else:
        strides = strides[1:-1]
        dilations = dilations[1:-1]

      op = Convolution(
          tensor_shape.as_shape(input.shape),
          tensor_shape.as_shape(filters.shape),
          padding,
          strides=strides,
          dilation_rate=dilations,
          name=name,
          data_format=data_format)
      return op(input, filters)
class Convolution(object):
  """Helper class for convolution.

  Note that this class assumes that shapes of input and filter passed to
  `__call__` are compatible with `input_shape` and `filter_shape` passed to
  the constructor.

  Arguments:
    input_shape: static shape of input. i.e. input.get_shape().
    filter_shape: static shape of the filter. i.e. filter.get_shape().
    padding: see convolution.
    strides: see convolution.
    dilation_rate: see convolution.
    name: see convolution.
    data_format: see convolution.
  """

  def __init__(self,
               input_shape,
               filter_shape,
               padding,
               strides=None,
               dilation_rate=None,
               name=None,
               data_format=None):
    """Helper function for convolution."""
    # Deduce the total rank (N+2) from the filter shape if statically known,
    # otherwise from the input shape.
    num_total_dims = filter_shape.ndims
    if num_total_dims is None:
      num_total_dims = input_shape.ndims
    if num_total_dims is None:
      raise ValueError("rank of input or filter must be known")

    num_spatial_dims = num_total_dims - 2

    try:
      input_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d" % (num_spatial_dims + 2))

    try:
      filter_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "filter tensor must have rank %d" % (num_spatial_dims + 2))

    # Locate the channel dimension and the spatial dimensions according to
    # the data format (channels-last unless it starts with "NC").
    if data_format is None or not data_format.startswith("NC"):
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_spatial_dims + 1)
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
      spatial_dims = range(2, num_spatial_dims + 2)

    if not input_channels_dim.is_compatible_with(
        filter_shape[num_spatial_dims]):
      raise ValueError(
          "number of input channels does not match corresponding dimension of "
          "filter, {} != {}".format(input_channels_dim,
                                    filter_shape[num_spatial_dims]))

    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.data_format = data_format
    self.strides = strides
    self.padding = padding
    self.name = name
    self.dilation_rate = dilation_rate
    # Wrap the actual convolution in space-to-batch/batch-to-space handling
    # so dilation_rate > 1 is supported.
    self.conv_op = _WithSpaceToBatch(
        input_shape,
        dilation_rate=dilation_rate,
        padding=padding,
        build_op=self._build_op,
        filter_shape=filter_shape,
        spatial_dims=spatial_dims,
        data_format=data_format)

  def _build_op(self, _, padding):
    # Called by _WithSpaceToBatch to build the inner, non-atrous convolution.
    return _NonAtrousConvolution(
        self.input_shape,
        filter_shape=self.filter_shape,
        padding=padding,
        data_format=self.data_format,
        strides=self.strides,
        name=self.name)

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # TPU convolution supports dilations greater than 1.
    if device_context.enclosing_tpu_context() is not None:
      return convolution_internal(
          inp,
          filter,
          strides=self.strides,
          padding=self.padding,
          data_format=self.data_format,
          dilations=self.dilation_rate,
          name=self.name,
          call_from_convolution=False)
    else:
      return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
def pool(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    padding,
    dilation_rate=None,
    strides=None,
    name=None,
    data_format=None,
    dilations=None):
  """Performs an N-D pooling operation.

  In the case that `data_format` does not start with "NC", computes for
      0 <= b < batch_size,
      0 <= x[i] < output_spatial_shape[i],
      0 <= c < num_channels:

  ```
    output[b, x[0], ..., x[N-1], c] =
      REDUCE_{z[0], ..., z[N-1]}
        input[b,
              x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
              ...
              x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
              c],
  ```

  where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution` for details.
  The reduction never includes out-of-bounds positions.

  In the case that `data_format` starts with `"NC"`, the `input` and output are
  simply transposed as follows:

  ```
    pool(input, data_format, **kwargs) =
      tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
                        **kwargs),
                   [0, N+1] + range(1, N+1))
  ```

  Args:
    input: Tensor of rank N+2, of shape
      `[batch_size] + input_spatial_shape + [num_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC".  Pooling happens over the spatial dimensions only.
    window_shape: Sequence of N ints >= 1.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    padding: The padding algorithm, must be "SAME" or "VALID".
      See the "returns" section of `tf.nn.convolution` for details.
    dilation_rate: Optional.  Dilation rate.  List of N ints >= 1.
      Defaults to [1]*N.  If any value of dilation_rate is > 1, then all values
      of strides must be 1.
    strides: Optional.  Sequence of N ints >= 1.  Defaults to [1]*N.
      If any value of strides is > 1, then all values of dilation_rate must be
      1.
    name: Optional. Name of the op.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: Alias for dilation_rate

  Returns:
    Tensor of rank N+2, of shape
      [batch_size] + output_spatial_shape + [num_channels]

    if data_format is None or does not start with "NC", or

      [batch_size, num_channels] + output_spatial_shape

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of padding:

    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: if arguments are invalid.
  """
  # Resolve the deprecated-name alias before anything else.
  dilation_rate = deprecated_argument_lookup(
      "dilations", dilations, "dilation_rate", dilation_rate)
  # pylint: enable=line-too-long
  with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
                      [input]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin

    num_spatial_dims = len(window_shape)
    if num_spatial_dims < 1 or num_spatial_dims > 3:
      raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")

    input.get_shape().with_rank(num_spatial_dims + 2)

    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    if padding == "SAME" and np.any(dilation_rate > 1):
      raise ValueError(
          "pooling with SAME padding is not implemented for dilation_rate > 1")

    if np.any(strides > window_shape):
      raise ValueError(
          "strides > window_shape not supported due to inconsistency between "
          "CPU and GPU implementations")

    # 1-D and 2-D pooling share the same 2-D kernels; only 3-D has its own.
    pooling_ops = {
        ("MAX", 1): max_pool,
        ("MAX", 2): max_pool,
        ("MAX", 3): max_pool3d,  # pylint: disable=undefined-variable
        ("AVG", 1): avg_pool,
        ("AVG", 2): avg_pool,
        ("AVG", 3): avg_pool3d,  # pylint: disable=undefined-variable
    }
    op_key = (pooling_type, num_spatial_dims)
    if op_key not in pooling_ops:
      raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
                                                              op_key[0]))

    # Expand window_shape/strides with 1s in the batch and channel positions,
    # according to the data format.
    if data_format is None or not data_format.startswith("NC"):
      adjusted_window_shape = [1] + list(window_shape) + [1]
      adjusted_strides = [1] + list(strides) + [1]
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      adjusted_window_shape = [1, 1] + list(window_shape)
      adjusted_strides = [1, 1] + list(strides)
      spatial_dims = range(2, num_spatial_dims + 2)

    if num_spatial_dims == 1:
      # 1-D pooling is emulated with the 2-D op on a dummy height dimension;
      # map the 1-D data formats onto their 2-D counterparts and prepend an
      # extra 1 for the dummy dimension.
      if data_format is None or data_format == "NWC":
        data_format_kwargs = dict(data_format="NHWC")
      elif data_format == "NCW":
        data_format_kwargs = dict(data_format="NCHW")
      else:
        raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
      adjusted_window_shape = [1] + adjusted_window_shape
      adjusted_strides = [1] + adjusted_strides
    else:
      data_format_kwargs = dict(data_format=data_format)

    def op(converted_input, _, converted_padding):  # pylint: disable=missing-docstring
      # Core pooling callback invoked by with_space_to_batch (which handles
      # any dilation).  For 1-D, insert/remove the dummy height dimension
      # around the 2-D pooling op.
      if num_spatial_dims == 1:
        converted_input = array_ops.expand_dims(converted_input,
                                                spatial_dims[0])
      result = pooling_ops[op_key](
          converted_input,
          adjusted_window_shape,
          adjusted_strides,
          converted_padding,
          name=scope,
          **data_format_kwargs)
      if num_spatial_dims == 1:
        result = array_ops.squeeze(result, [spatial_dims[0]])
      return result

    return with_space_to_batch(
        input=input,
        dilation_rate=dilation_rate,
        padding=padding,
        op=op,
        spatial_dims=spatial_dims,
        filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
def pool_v2(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  # pylint: disable=line-too-long
  """Performs an N-D pooling operation.

  In the case that `data_format` does not start with "NC", computes for
      0 <= b < batch_size,
      0 <= x[i] < output_spatial_shape[i],
      0 <= c < num_channels:

  ```
    output[b, x[0], ..., x[N-1], c] =
      REDUCE_{z[0], ..., z[N-1]}
        input[b,
              x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
              ...
              x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
              c],
  ```

  where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution` for details.
  The reduction never includes out-of-bounds positions.

  In the case that `data_format` starts with `"NC"`, the `input` and output are
  simply transposed as follows:

  ```
    pool(input, data_format, **kwargs) =
      tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
                        **kwargs),
                   [0, N+1] + range(1, N+1))
  ```

  Args:
    input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
      [num_channels]` if data_format does not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC".  Pooling happens over the spatial dimensions only.
    window_shape: Sequence of N ints >= 1.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    strides: Optional. Sequence of N ints >= 1.  Defaults to [1]*N. If any value of
      strides is > 1, then all values of dilation_rate must be 1.
    padding: The padding algorithm, must be "SAME" or "VALID". Defaults to
      "VALID".
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW". For
      N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: Optional.  Dilation rate.  List of N ints >= 1.  Defaults to
      [1]*N.  If any value of dilation_rate is > 1, then all values of strides
      must be 1.
    name: Optional. Name of the op.

  Returns:
    Tensor of rank N+2, of shape
      [batch_size] + output_spatial_shape + [num_channels]

    if data_format is None or does not start with "NC", or

      [batch_size, num_channels] + output_spatial_shape

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of padding:

    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: if arguments are invalid.
  """
  # The v2 endpoint differs from v1 only in argument order and in naming the
  # dilation argument `dilations`; delegate to the v1 implementation.
  return pool(
      input=input,
      window_shape=window_shape,
      pooling_type=pooling_type,
      padding=padding,
      dilation_rate=dilations,
      strides=strides,
      name=name,
      data_format=data_format)
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
  """Atrous convolution (a.k.a. convolution with holes or dilated convolution).

  This function is a simpler wrapper around the more general
  `tf.nn.convolution`, and exists only for backwards compatibility: it
  computes a 2-D atrous convolution given 4-D `value` and `filters` tensors.
  With `rate` equal to one it performs regular 2-D convolution; with
  `rate` greater than one it samples the input every `rate` pixels along the
  `height` and `width` dimensions, which is equivalent to convolving with
  filters upsampled by inserting `rate - 1` zeros between consecutive filter
  values along both spatial dimensions (hence "atrous", from the French word
  "trous" meaning holes).

  More specifically:

  ```
  output[batch, height, width, out_channel] =
      sum_{dheight, dwidth, in_channel} (
          filters[dheight, dwidth, in_channel, out_channel] *
          value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
      )
  ```

  Atrous convolution allows explicit control of how densely feature responses
  are computed in fully convolutional networks, and effectively enlarges the
  filters' field of view without increasing the number of parameters or the
  amount of computation; see the references below for applications such as
  semantic segmentation and dense prediction.

  The implementation here reduces

  ```python
      atrous_conv2d(value, filters, rate, padding=padding)
  ```

  to the sequence `space_to_batch(block_size=rate)` -> `conv2d(strides=1,
  padding="VALID")` -> `batch_to_space(block_size=rate)`.  Because a
  consecutive `space_to_batch`/`batch_to_space` pair with the same
  `block_size` and identical `paddings`/`crops` cancels out, a chain of
  `atrous_conv2d` ops sharing one `rate`, 'SAME' padding, and odd filter
  sizes can be computed more cheaply by lifting one
  space-to-batch/batch-to-space pair around the whole chain of plain
  `conv2d(padding="SAME")` ops.

  Args:
    value: A 4-D `Tensor` of type `float`. It needs to be in the default
      "NHWC" format. Its shape is `[batch, in_height, in_width, in_channels]`.
    filters: A 4-D `Tensor` with the same type as `value` and shape
      `[filter_height, filter_width, in_channels, out_channels]`. `filters`'
      `in_channels` dimension must match that of `value`. Atrous convolution
      is equivalent to standard convolution with upsampled filters with
      effective height `filter_height + (filter_height - 1) * (rate - 1)` and
      effective width `filter_width + (filter_width - 1) * (rate - 1)`.
    rate: A positive int32. The stride with which we sample input values
      across the `height` and `width` dimensions. Equivalently, the rate by
      which we upsample the filter values by inserting zeros. In the
      literature, the same parameter is sometimes called `input stride` or
      `dilation`.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.
    Output shape with `'VALID'` padding is:
        [batch, height - 2 * (filter_width - 1),
         width - 2 * (filter_height - 1), out_channels].
    Output shape with `'SAME'` padding is:
        [batch, height, width, out_channels].

  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.

  References:
    Multi-Scale Context Aggregation by Dilated Convolutions:
      [Yu et al., 2016](https://arxiv.org/abs/1511.07122)
    Semantic Image Segmentation with Deep Convolutional Nets and Fully
    Connected CRFs:
      [Chen et al., 2015](http://arxiv.org/abs/1412.7062)
    OverFeat - Integrated Recognition, Localization and Detection using
    Convolutional Networks:
      [Sermanet et al., 2014](https://arxiv.org/abs/1312.6229)
    Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks:
      [Giusti et al., 2013]
      (https://ieeexplore.ieee.org/abstract/document/6738831)
  """
  # The single scalar `rate` applies to both spatial dimensions.
  per_dim_rate = np.broadcast_to(rate, (2,))
  return convolution(
      input=value,
      filter=filters,
      padding=padding,
      dilation_rate=per_dim_rate,
      name=name)
def convert_padding(padding):
  """Converts Python padding to C++ padding for ops which take EXPLICIT padding.

  Args:
    padding: the `padding` argument for a Python op which supports EXPLICIT
      padding.

  Returns:
    (padding, explicit_paddings) pair, which should be passed as attributes to
    a C++ op.

  Raises:
    ValueError: If padding is invalid.
  """
  explicit_paddings = []
  if padding == "EXPLICIT":
    # Reject the bare string form with a targeted message: callers must pass
    # the explicit paddings as a list, not the string "EXPLICIT".
    raise ValueError('"EXPLICIT" is not a valid value for the padding '
                     "parameter. To use explicit padding, the padding "
                     "parameter must be a list.")
  if not isinstance(padding, (list, tuple)):
    # "SAME"/"VALID" (or anything else non-sequence) passes through unchanged
    # with no explicit paddings.
    return padding, explicit_paddings
  # List/tuple form: flatten [[before, after], ...] into a single flat list,
  # validating each per-dimension pair as we go.
  for index, pair in enumerate(padding):
    if not isinstance(pair, (list, tuple)):
      raise ValueError("When padding is a list, each element of padding must "
                       "be a list/tuple of size 2. Element with index %d of "
                       "padding is not a list/tuple" % index)
    if len(pair) != 2:
      raise ValueError("When padding is a list, each element of padding must "
                       "be a list/tuple of size 2. Element with index %d of "
                       "padding has size %d" % (index, len(pair)))
    explicit_paddings.extend(pair)
  # The per-element check runs first (matching historical error ordering);
  # only then is the overall length validated.
  if len(padding) != 4:
    raise ValueError("When padding is a list, it must be of size 4. Got "
                     "padding of size: %d" % len(padding))
  return "EXPLICIT", explicit_paddings
@tf_export(v1=["nn.conv1d"])
@deprecation.deprecated_arg_values(
    None,
    "`NCHW` for data_format is deprecated, use `NCW` instead",
    warn_once=True,
    data_format="NCHW")
@deprecation.deprecated_arg_values(
    None,
    "`NHWC` for data_format is deprecated, use `NWC` instead",
    warn_once=True,
    data_format="NHWC")
def conv1d(
    value=None,
    filters=None,
    stride=None,
    padding=None,
    use_cudnn_on_gpu=None,
    data_format=None,
    name=None,
    input=None,  # pylint: disable=redefined-builtin
    dilations=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Given an input tensor of shape
    [batch, in_width, in_channels]
  if data_format is "NWC", or
    [batch, in_channels, in_width]
  if data_format is "NCW",
  and a filter / kernel tensor of shape
  [filter_width, in_channels, out_channels], this op reshapes
  the arguments to pass them to conv2d to perform the equivalent
  convolution operation.

  Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
  For example, if `data_format` does not start with "NC", a tensor of shape
    [batch, in_width, in_channels]
  is reshaped to
    [batch, 1, in_width, in_channels],
  and the filter is reshaped to
    [1, filter_width, in_channels, out_channels].
  The result is then reshaped back to
    [batch, out_width, out_channels]
  \(where out_width is a function of the stride and padding as in conv2d\) and
  returned to the caller.

  Args:
    value: A 3D `Tensor`.  Must be of type `float16`, `float32`, or `float64`.
    filters: A 3D `Tensor`.  Must have the same type as `value`.
    stride: An int or list of `ints` that has length `1` or `3`.  The number of
      entries by which the filter is moved right at each step.
    padding: 'SAME' or 'VALID'
    use_cudnn_on_gpu: An optional `bool`.  Defaults to `True`.
    data_format: An optional `string` from `"NWC", "NCW"`.  Defaults to `"NWC"`,
      the data is stored in the order of [batch, in_width, in_channels].  The
      `"NCW"` format stores data as [batch, in_channels, in_width].
    name: A name for the operation (optional).
    input: Alias for value.
    dilations: An int or list of `ints` that has length `1` or `3` which
      defaults to 1. The dilation factor for each dimension of input. If set to
      k > 1, there will be k-1 skipped cells between each filter element on that
      dimension. Dilations in the batch and depth dimensions must be 1.

  Returns:
    A `Tensor`.  Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  # Resolve the deprecated-name alias: `input` is preferred over `value`.
  value = deprecation.deprecated_argument_lookup("input", input, "value", value)
  with ops.name_scope(name, "conv1d", [value, filters]) as name:
    # Reshape the input tensor to [batch, 1, in_width, in_channels]
    # Map the 1-D data formats onto their 2-D counterparts, and record where
    # the dummy spatial dimension gets inserted.
    if data_format is None or data_format == "NHWC" or data_format == "NWC":
      data_format = "NHWC"
      spatial_start_dim = 1
      channel_index = 2
    elif data_format == "NCHW" or data_format == "NCW":
      data_format = "NCHW"
      spatial_start_dim = 2
      channel_index = 1
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    # Prepend a 1 for the inserted dummy dimension to obtain rank-4
    # strides/dilations for conv2d.
    strides = [1] + _get_sequence(stride, 1, channel_index, "stride")
    dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")

    value = array_ops.expand_dims(value, spatial_start_dim)
    filters = array_ops.expand_dims(filters, 0)
    result = gen_nn_ops.conv2d(
        value,
        filters,
        strides,
        padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format,
        dilations=dilations,
        name=name)
    # Drop the dummy spatial dimension to return a 3-D result.
    return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
def conv1d_v2(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
data_format="NWC",
dilations=None,
name=None):
r"""Computes a 1-D convolution given 3-D input and filter tensors.
Given an input tensor of shape
[batch, in_width, in_channels]
if data_format is "NWC", or
[batch, in_channels, in_width]
if data_format is "NCW",
and a filter / kernel tensor of shape
[filter_width, in_channels, out_channels], this op reshapes
the arguments to pass them to conv2d to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with "NC", a tensor of shape
[batch, in_width, in_channels]
is reshaped to
[batch, 1, in_width, in_channels],
and the filter is reshaped to
[1, filter_width, in_channels, out_channels].
The result is then reshaped back to
[batch, out_width, out_channels]
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
input: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.
filters: A 3D `Tensor`. Must have the same type as `input`.
stride: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: 'SAME' or 'VALID'
data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
the data is stored in the order of [batch, in_width, in_channels]. The
`"NCW"` format stores data as [batch, in_channels, in_width].
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
return conv1d(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
name=name,
dilations=dilations)
@tf_export("nn.conv1d_transpose")
def conv1d_transpose(
input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NWC",
dilations=None,
name=None):
"""The transpose of `conv1d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d`
rather than an actual deconvolution.
Args:
input: A 3-D `Tensor` of type `float` and shape
`[batch, in_width, in_channels]` for `NWC` data format or
`[batch, in_channels, in_width]` for `NCW` data format.
filters: A 3-D `Tensor` with the same type as `value` and shape
`[filter_width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor`, containing three elements, representing the
output shape of the deconvolution op.
strides: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. `'NWC'` and `'NCW'` are supported.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, if
`output_shape` is not at 3-element vector, if `padding` is other than
`'VALID'` or `'SAME'`, or if `data_format` is invalid.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv1d_transpose",
[input, filters, output_shape]) as name:
# The format could be either NWC or NCW, map to NHWC or NCHW
if data_format is None or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = 1
channel_index = 2
elif data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = 2
channel_index = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
# Reshape the input tensor to [batch, 1, in_width, in_channels]
strides = [1] + _get_sequence(strides, 1, channel_index, "stride")
dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
input = array_ops.expand_dims(input, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
output_shape = list(output_shape) if not isinstance(
output_shape, ops.Tensor) else output_shape
output_shape = array_ops.concat([output_shape[: spatial_start_dim], [1],
output_shape[spatial_start_dim:]], 0)
result = gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
return array_ops.squeeze(result, spatial_start_dim)
@tf_export("nn.conv2d", v1=[])
def conv2d_v2(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
# pylint: disable=line-too-long
r"""Computes a 2-D convolution given 4-D `input` and `filters` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Usage Example:
>>> x_in = np.array([[
... [[2], [1], [2], [0], [1]],
... [[1], [3], [2], [2], [3]],
... [[1], [1], [3], [3], [0]],
... [[2], [2], [0], [1], [1]],
... [[0], [0], [3], [1], [2]], ]])
>>> kernel_in = np.array([
... [ [[2, 0.1]], [[3, 0.2]] ],
... [ [[0, 0.3]],[[1, 0.4]] ], ])
>>> x = tf.constant(x_in, dtype=tf.float32)
>>> kernel = tf.constant(kernel_in, dtype=tf.float32)
>>> tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
<tf.Tensor: shape=(1, 4, 4, 2), dtype=float32, numpy=..., dtype=float32)>
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filters: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
defaults to 1. The dilation factor for each dimension of`input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
# pylint: enable=line-too-long
return conv2d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d"])
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
* filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
defaults to 1. The dilation factor for each dimension of`input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `input`.
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
return gen_nn_ops.conv2d(input, # pylint: disable=redefined-builtin
filter,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_filter(
input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_backprop_input"])
def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter=None,
out_backprop=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `filter`.
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
def conv2d_transpose(
value=None,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d`
rather than an actual deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 0. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
input: Alias for value.
filters: Alias for filter.
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
defaults to 1. The dilation factor for each dimension of`input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
value = deprecated_argument_lookup("input", input, "value", value)
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
return conv2d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv2d_transpose", v1=[])
def conv2d_transpose_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
dilations=None,
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
`atrous_conv2d` rather than an actual deconvolution.
Args:
input: A 4-D `Tensor` of type `float` and shape `[batch, height, width,
in_channels]` for `NHWC` data format or `[batch, in_channels, height,
width]` for `NCHW` data format.
filters: A 4-D `Tensor` with the same type as `input` and shape `[height,
width, output_channels, in_channels]`. `filter`'s `in_channels` dimension
must match that of `input`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 0. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
dilations: An int or list of `ints` that has length `1`, `2` or `4`,
defaults to 1. The dilation factor for each dimension of`input`. If a
single value is given it is replicated in the `H` and `W` dimension. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details. Dilations in the batch and depth dimensions if a 4-d tensor
must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv2d_transpose",
[input, filter, output_shape]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
"""The transpose of `atrous_conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
`atrous_conv2d` rather than an actual deconvolution.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, out_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
output_shape: A 1-D `Tensor` of shape representing the output shape of the
deconvolution op.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
than one, or if the output_shape is not a tensor with 4 elements.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(
tensor_shape.TensorShape([4])):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, tuple):
output_shape = list(output_shape)
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch is just the extra padding
# component.
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
# The crops argument to batch_to_space includes both padding components.
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export(v1=["nn.depthwise_conv2d_native"])
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native")
def depthwise_conv2d_native( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes a 2-D depthwise convolution.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
a different filter to each input channel (expanding from 1 channel to
`channel_multiplier` channels for each), then concatenates the results
together. Thus, the output has `in_channels * channel_multiplier` channels.
```
for k in 0..in_channels-1
for q in 0..channel_multiplier-1
output[b, i, j, k * channel_multiplier + q] =
sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
filter[di, dj, k, q]
```
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`.
filter: A `Tensor`. Must have the same type as `input`.
strides: A list of `ints`. 1-D of length 4. The stride of the sliding
window for each dimension of `input`.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native(
input,
filter,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(
    "nn.depthwise_conv2d_backprop_input",
    v1=[
        "nn.depthwise_conv2d_native_backprop_input",
        "nn.depthwise_conv2d_backprop_input"
    ])
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_input")
def depthwise_conv2d_native_backprop_input(  # pylint: disable=redefined-builtin,dangerous-default-value
    input_sizes,
    filter,
    out_backprop,
    strides,
    padding,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes the gradient of depthwise convolution w.r.t. the input.

  Args:
    input_sizes: An `int32` `Tensor`; a vector giving the shape of `input`,
      interpreted according to `data_format`. For example, for `'NHWC'` it is
      a 4-D `[batch, height, width, channels]` shape.
    filter: A `Tensor` of type `half`, `bfloat16`, `float32` or `float64`;
      4-D with shape
      `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
    out_backprop: A `Tensor` with the same type as `filter`, holding the
      gradients w.r.t. the convolution output; 4-D, laid out per
      `data_format` (e.g. `[batch, out_height, out_width, out_channels]`
      for `'NHWC'`).
    strides: A list of `ints`; the stride of the sliding window for each
      dimension of the convolution input.
    padding: Either the string `"SAME"` or `"VALID"` selecting the padding
      algorithm, or a list of explicit per-dimension paddings:
      `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]` when
      `data_format` is `"NHWC"`, or `[[0, 0], [0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right]]` when it is `"NCHW"`.
    data_format: Optional `string`, `"NHWC"` (default) or `"NCHW"`; the
      layout of the input and output data. `"NHWC"` stores data as
      [batch, height, width, channels]; `"NCHW"` as
      [batch, channels, height, width].
    dilations: Optional list of 4 `ints`, default `[1, 1, 1, 1]`; the
      dilation factor for each dimension of `input`, ordered per
      `data_format`. With a factor k > 1 there are k-1 skipped cells between
      filter elements on that dimension. Batch and depth dilations must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `filter`.
  """
  # Normalize `padding` into an algorithm string plus any explicit paddings
  # before handing off to the generated op.
  padding, explicit_paddings = convert_padding(padding)
  return gen_nn_ops.depthwise_conv2d_native_backprop_input(
      input_sizes, filter, out_backprop, strides, padding,
      explicit_paddings=explicit_paddings, data_format=data_format,
      dilations=dilations, name=name)
@tf_export(
    "nn.depthwise_conv2d_backprop_filter",
    v1=[
        "nn.depthwise_conv2d_native_backprop_filter",
        "nn.depthwise_conv2d_backprop_filter"
    ])
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_filter")
def depthwise_conv2d_native_backprop_filter(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter_sizes,
    out_backprop,
    strides,
    padding,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes the gradient of depthwise convolution w.r.t. the filter.

  Args:
    input: A `Tensor` of type `half`, `bfloat16`, `float32` or `float64`;
      4-D, laid out per `data_format` (e.g. `[batch, in_height, in_width,
      in_channels]` for `'NHWC'`).
    filter_sizes: An `int32` `Tensor`; a vector giving the shape of `filter`,
      where `filter` is a 4-D
      `[filter_height, filter_width, in_channels, depthwise_multiplier]`
      tensor.
    out_backprop: A `Tensor` with the same type as `input`, holding the
      gradients w.r.t. the convolution output; 4-D, laid out per
      `data_format` (e.g. `[batch, out_height, out_width, out_channels]`
      for `'NHWC'`).
    strides: A list of `ints`; the stride of the sliding window for each
      dimension of the convolution input.
    padding: Either the string `"SAME"` or `"VALID"` selecting the padding
      algorithm, or a list of explicit per-dimension paddings:
      `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]` when
      `data_format` is `"NHWC"`, or `[[0, 0], [0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right]]` when it is `"NCHW"`.
    data_format: Optional `string`, `"NHWC"` (default) or `"NCHW"`; the
      layout of the input and output data. `"NHWC"` stores data as
      [batch, height, width, channels]; `"NCHW"` as
      [batch, channels, height, width].
    dilations: Optional list of 4 `ints`, default `[1, 1, 1, 1]`; the
      dilation factor for each dimension of `input`, ordered per
      `data_format`. With a factor k > 1 there are k-1 skipped cells between
      filter elements on that dimension. Batch and depth dilations must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `input`.
  """
  # Normalize `padding` into an algorithm string plus any explicit paddings
  # before handing off to the generated op.
  padding, explicit_paddings = convert_padding(padding)
  return gen_nn_ops.depthwise_conv2d_native_backprop_filter(
      input, filter_sizes, out_backprop, strides, padding,
      explicit_paddings=explicit_paddings, data_format=data_format,
      dilations=dilations, name=name)
@tf_export("nn.conv3d", v1=[])
def conv3d_v2(input,  # pylint: disable=redefined-builtin,missing-docstring
              filters,
              strides,
              padding,
              data_format="NDHWC",
              dilations=None,
              name=None):
  # The docstring is copied from the generated op below (see the
  # `conv3d_v2.__doc__` assignment), hence `missing-docstring` is disabled.
  # `None` dilations mean no dilation along any of the 5 dimensions.
  dilations = [1] * 5 if dilations is None else dilations
  return gen_nn_ops.conv3d(
      input, filters, strides, padding, data_format=data_format,
      dilations=dilations, name=name)
@tf_export(v1=["nn.conv3d"])
def conv3d_v1(  # pylint: disable=missing-docstring,dangerous-default-value
    input,  # pylint: disable=redefined-builtin
    filter=None,  # pylint: disable=redefined-builtin
    strides=None,
    padding=None,
    data_format="NDHWC",
    dilations=[1, 1, 1, 1, 1],
    name=None,
    filters=None):
  # `filters` is the preferred spelling; `filter` remains for compatibility.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  return gen_nn_ops.conv3d(
      input, filter, strides, padding, data_format, dilations, name)


# Reuse the generated op's documentation for both endpoints; the v2 endpoint
# renames the `filter` argument to `filters`.
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
    gen_nn_ops.conv3d.__doc__, "filter", "filters")
conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__
@tf_export(v1=["nn.conv3d_transpose"])
def conv3d_transpose(
    value,
    filter=None,  # pylint: disable=redefined-builtin
    output_shape=None,
    strides=None,
    padding="SAME",
    data_format="NDHWC",
    name=None,
    input=None,  # pylint: disable=redefined-builtin
    filters=None,
    dilations=None):
  """The transpose of `conv3d`.

  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but it is really the transpose (gradient) of
  `conv3d` rather than an actual deconvolution.

  Args:
    value: A 5-D `Tensor` of type `float` and shape
      `[batch, depth, height, width, in_channels]`.
    filter: A 5-D `Tensor` with the same type as `value` and shape
      `[depth, height, width, output_channels, in_channels]`; its
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` giving the output shape of the
      deconvolution op.
    strides: A list of ints; the stride of the sliding window for each
      dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`, selecting the padding
      algorithm. See the "returns" section of `tf.nn.convolution` for
      details.
    data_format: A string, either `'NDHWC'` (default) or `'NCDHW'`,
      specifying the layout of the input and output tensors.
    name: Optional name for the returned tensor.
    input: Alias of value.
    filters: Alias of filter.
    dilations: An int, or a list of `ints` of length `1`, `3` or `5`
      (defaults to 1); the dilation factor for each dimension of `input`. A
      single value is replicated over the `D`, `H` and `W` dimensions; the
      `N` and `C` dilations default to 1. With a factor k > 1 there are k-1
      skipped cells between filter elements on that dimension. The order is
      determined by `data_format`; for a 5-d value the batch and depth
      dilations must be 1.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.

  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
  # Resolve the deprecated argument aliases, then delegate to the v2 API.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  value = deprecated_argument_lookup("input", input, "value", value)
  return conv3d_transpose_v2(
      value, filter, output_shape, strides, padding=padding,
      data_format=data_format, dilations=dilations, name=name)
@tf_export("nn.conv3d_transpose", v1=[])
def conv3d_transpose_v2(input,  # pylint: disable=redefined-builtin
                        filters,
                        output_shape,
                        strides,
                        padding="SAME",
                        data_format="NDHWC",
                        dilations=None,
                        name=None):
  """The transpose of `conv3d`.

  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
  rather than an actual deconvolution.

  Args:
    input: A 5-D `Tensor` of type `float` and shape `[batch, depth, height,
      width, in_channels]` for `NDHWC` data format or `[batch, in_channels,
      depth, height, width]` for `NCDHW` data format.
    filters: A 5-D `Tensor` with the same type as `input` and shape `[depth,
      height, width, output_channels, in_channels]`. `filters`' `in_channels`
      dimension must match that of `input`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: An int or list of `ints` that has length `1`, `3` or `5`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `D`, `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. The dimension order is
      determined by the value of `data_format`, see below for details.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NDHWC' and 'NCDHW' are supported.
    dilations: An int or list of `ints` that has length `1`, `3` or `5`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `D`, `H` and `W` dimension.
      By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details. Dilations in the batch and depth dimensions if a 5-d tensor
      must be 1.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `input`.

  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
  # Bug fix: the values list previously referenced the Python builtin
  # `filter` (this function's parameter is named `filters`).
  with ops.name_scope(name, "conv3d_transpose",
                      [input, filters, output_shape]) as name:
    if data_format is None:
      data_format = "NDHWC"
    channel_index = 1 if data_format.startswith("NC") else 4

    strides = _get_sequence(strides, 3, channel_index, "strides")
    dilations = _get_sequence(dilations, 3, channel_index, "dilations")

    # conv3d_transpose is implemented as the gradient of conv3d with respect
    # to its input, so `input` is fed in as the backpropagated output.
    return gen_nn_ops.conv3d_backprop_input_v2(
        input_sizes=output_shape,
        filter=filters,
        out_backprop=input,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
# Rank-indexed dispatch table used by `conv_transpose`: entry i-1 handles a
# transpose convolution with i spatial dimensions (1-D, 2-D, 3-D).
CONV_TRANSPOSE_OPS = (
    conv1d_transpose,
    conv2d_transpose_v2,
    conv3d_transpose_v2,
    )
@tf_export("nn.conv_transpose")
def conv_transpose(input,  # pylint: disable=redefined-builtin
                   filters,
                   output_shape,
                   strides,
                   padding="SAME",
                   data_format=None,
                   dilations=None,
                   name=None):
  """The transpose of `convolution`.

  This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is really the transpose (gradient) of
  `convolution` rather than an actual deconvolution.

  Args:
    input: An N+2 dimensional `Tensor` of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, in_channels] + input_spatial_shape` if data_format starts
      with "NC". It must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
    filters: An N+2 dimensional `Tensor` with the same type as `input` and
      shape `spatial_filter_shape + [in_channels, out_channels]`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the spatial dimensions. By default
      the `N` and `C` dimensions are set to 1. The dimension order is
      determined by the value of `data_format`, see below for details.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: A string or None. Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC"). For N=1, the valid values are "NWC" (default) and
      "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: An int or list of `ints` that has length `1`, `N` or `N+2`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the spatial dimensions. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see above
      for details.
    name: A name for the operation (optional). If not specified "conv_transpose"
      is used.

  Returns:
    A `Tensor` with the same type as `input`.

  References:
    Deconvolutional Networks:
      [Zeiler et al., 2010]
      (https://ieeexplore.ieee.org/abstract/document/5539957)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
  """
  # `collections.Sized` was removed in Python 3.10; the ABC lives in
  # `collections.abc`.
  from collections.abc import Sized  # pylint: disable=g-import-not-at-top

  # Bug fix: the values list previously referenced the Python builtin
  # `filter` (this function's parameter is named `filters`).
  with ops.name_scope(name, "conv_transpose",
                      [input, filters, output_shape]) as name:
    # Infer the number of spatial dimensions N from `output_shape`, which has
    # N+2 entries (batch and channel included).
    if tensor_util.is_tensor(output_shape):
      n = output_shape.shape[0] - 2
    elif isinstance(output_shape, Sized):
      n = len(output_shape) - 2
    else:
      raise ValueError("output_shape must be a tensor or sized collection.")

    if not 1 <= n <= 3:
      raise ValueError(
          "output_shape must be of length 3, 4 or 5 but was {}.".format(n + 2))

    # Dispatch to the rank-specific transpose op (1-D, 2-D or 3-D).
    op = CONV_TRANSPOSE_OPS[n-1]
    return op(
        input,
        filters,
        output_shape,
        strides,
        padding=padding,
        data_format=data_format,
        dilations=dilations,
        name=name)
def _tf_deterministic_ops():
  """Returns whether the TF_DETERMINISTIC_OPS env var requests determinism.

  The environment variable is read at most once; the parsed result is cached
  on the function object. Returns `True` when the variable is set to "true"
  or "1" (case-insensitive), `False` when it is set to anything else, and
  `None` when it is unset.
  """
  if _tf_deterministic_ops.value is None:
    env_value = os.environ.get("TF_DETERMINISTIC_OPS")
    if env_value is not None:
      # Any value other than "true"/"1" (case-insensitive) disables it.
      _tf_deterministic_ops.value = env_value.lower() in ("true", "1")
  return _tf_deterministic_ops.value


# Cache slot for the parsed environment variable; `None` means "not read yet".
_tf_deterministic_ops.value = None
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
  """Adds `bias` to `value`.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to
  1-D. Broadcasting is supported, so `value` may have any number of
  dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from
  `value` in the case where both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` whose size matches the channel dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    data_format: A string, 'N...C' or 'NC...'. If `None` (the default),
      'N..C' is assumed.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError if data format is unrecognized, if `value` has less than two
      dimensions when `data_format` is 'N..C'/`None` or `value` has less
      then three dimensions when `data_format` is `NC..`, if `bias` does not
      have exactly one dimension (is a vector), or if the size of `bias`
      does not match the size of the channel dimension of `value`.
  """
  with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
    if data_format is not None:
      # Collapse the layout-string family down to the two canonical forms.
      if data_format.startswith("NC"):
        data_format = "NCHW"
      elif data_format.startswith("N") and data_format.endswith("C"):
        data_format = "NHWC"
      else:
        raise ValueError("data_format must be of the form `N...C` or `NC...`")

    if not context.executing_eagerly():
      value = ops.convert_to_tensor(value, name="input")
      bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")

    if not _tf_deterministic_ops():
      return gen_nn_ops.bias_add(
          value, bias, data_format=data_format, name=name)

    # Deterministic mode: emulate BiasAdd with a broadcasting add.
    # TODO(duncanriach): Implement deterministic functionality at CUDA kernel
    #   level.
    # Note that this code does not implement the same error checks as the
    # pre-existing C++ ops.
    if data_format == "NCHW":
      # Reshape `bias` to [1, C, 1, ..., 1] so the add broadcasts over the
      # channel dimension of the NC... layout.
      head = [1, array_ops.size(bias)]
      tail = array_ops.ones(array_ops.rank(value) - 2, dtype=dtypes.int32)
      bcast_shape = array_ops.concat([head, tail], 0)
      return math_ops.add(
          value, array_ops.reshape(bias, bcast_shape), name=name)
    # data_format == 'NHWC' or data_format is None: plain trailing-axis
    # broadcast suffices.
    return math_ops.add(value, bias, name=name)
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value`.

  This is a deprecated version of bias_add and will soon be removed.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to
  1-D. Broadcasting is supported, so `value` may have any number of
  dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from
  `value` in the case where both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` whose size matches the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    value = ops.convert_to_tensor(value, name="input")
    # `bias` must be converted with `value`'s dtype so the op sees one type.
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops.bias_add_v1(value, bias, name=name)
@tf_export(v1=["nn.crelu"])
def crelu(features, name=None, axis=-1):
  """Computes Concatenated ReLU.

  Concatenates a ReLU which selects only the positive part of the activation
  with a ReLU which selects only the *negative* part of the activation. As a
  consequence, this non-linearity doubles the depth of the activations.
  Source: [Understanding and Improving Convolutional Neural Networks via
  Concatenated Rectified Linear Units. W. Shang, et
  al.](https://arxiv.org/abs/1603.05201)

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`,
      `uint8`, `int16`, or `int8`.
    name: A name for the operation (optional).
    axis: The axis that the output values are concatenated along. Default is
      -1.

  Returns:
    A `Tensor` with the same type as `features`.

  References:
    Understanding and Improving Convolutional Neural Networks via Concatenated
    Rectified Linear Units:
      [Shang et al., 2016](http://proceedings.mlr.press/v48/shang16)
      ([pdf](http://proceedings.mlr.press/v48/shang16.pdf))
  """
  with ops.name_scope(name, "CRelu", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    # Concatenate the activations with their negation, then apply one ReLU:
    # the two halves keep the positive and negative parts respectively.
    both = array_ops.concat([features, -features], axis, name=name)
    return gen_nn_ops.relu(both)
@tf_export("nn.crelu", v1=[])
def crelu_v2(features, axis=-1, name=None):
  # Identical to the v1 endpoint; only the argument order differs.
  return crelu(features, axis=axis, name=name)


crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
def relu6(features, name=None):
  """Computes Rectified Linear 6: `min(max(features, 0), 6)`.

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`,
      `uint8`, `int16`, or `int8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `features`.

  References:
    Convolutional Deep Belief Networks on CIFAR-10:
      Krizhevsky et al., 2010
      ([pdf](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf))
  """
  with ops.name_scope(name, "Relu6", [features]) as name:
    # Convert up front so non-Tensor inputs (lists, numpy arrays) work.
    features = ops.convert_to_tensor(features, name="features")
    return gen_nn_ops.relu6(features, name=name)
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
  """Compute the Leaky ReLU activation function.

  Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models.
  AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013]
  (https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf).

  Args:
    features: A `Tensor` representing preactivation values. Must be one of
      the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
    alpha: Slope of the activation function at x < 0.
    name: A name for the operation (optional).

  Returns:
    The activation value.

  References:
    Rectifier Nonlinearities Improve Neural Network Acoustic Models:
      [Maas et al., 2013]
      (http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.693.1422)
      ([pdf]
      (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.1422&rep=rep1&type=pdf))
  """
  with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
    features = ops.convert_to_tensor(features, name="features")
    if features.dtype.is_integer:
      # Integer inputs are promoted to float32 before applying the op.
      features = math_ops.cast(features, dtypes.float32)
    if isinstance(alpha, np.ndarray):
      # Unwrap numpy scalars so `alpha` is passed as a plain Python number.
      alpha = alpha.item()
    return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
def _flatten_outer_dims(logits):
  """Flattens logits' outer dimensions and keeps its last dimension."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))

  if not context.executing_eagerly():
    # Propagate a static shape when every leading dimension is known.
    shape = logits.get_shape()
    if shape is not None and shape.dims is not None:
      dims = shape.as_list()
      leading = dims[:-1]
      if all(d is not None for d in leading):
        flattened = 1
        for d in leading:
          flattened *= d
        output.set_shape([flattened, dims[-1]])
  return output
def _softmax(logits, compute_op, dim=-1, name=None):
  """Helper function for softmax and log_softmax.

  It reshapes and transposes the input logits into a 2-D Tensor and then invokes
  the tf.nn._softmax or tf.nn._log_softmax function. The output would be
  transposed and reshaped back.

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax
    dim: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
      dimension of `logits`.
  """

  def _swap_axis(logits, dim_index, last_index, name=None):
    """Swaps logits's dim_index and last_index."""
    # Builds the permutation [0..dim_index-1, last_index,
    # dim_index+1..last_index-1, dim_index]: a transposition of the two axes
    # that leaves every other axis in place.
    return array_ops.transpose(
        logits,
        array_ops.concat([
            math_ops.range(dim_index), [last_index],
            math_ops.range(dim_index + 1, last_index), [dim_index]
        ], 0),
        name=name)

  logits = ops.convert_to_tensor(logits)

  # We need its original shape for shape inference.
  shape = logits.get_shape()
  is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
  if is_last_dim:
    # Fast path: the kernel operates on the last dimension natively.
    return compute_op(logits, name=name)

  # Validate `dim` statically when its value is known at graph-build time.
  dim_val = dim
  if isinstance(dim, ops.Tensor):
    dim_val = tensor_util.constant_value(dim)
  if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims:
    raise errors_impl.InvalidArgumentError(
        None, None,
        "Dimension (%d) must be in the range [%d, %d) where %d is the number of"
        " dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
                                       shape.ndims))

  # If dim is not the last dimension, we have to do a transpose so that we can
  # still perform softmax on its last dimension.

  # In case dim is negative (and is not last dimension -1), add shape.ndims
  ndims = array_ops.rank(logits)
  if not isinstance(dim, ops.Tensor):
    if dim < 0:
      dim += ndims
  else:
    # `dim` is a Tensor: normalize it in the graph instead of in Python.
    dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)

  # Swap logits' dimension of dim and its last dimension.
  input_rank = array_ops.rank(logits)
  dim_axis = dim % shape.ndims
  logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))

  # Do the actual softmax on its last dimension.
  output = compute_op(logits)

  # Undo the axis swap so the output layout matches the input's.
  output = _swap_axis(
      output, dim_axis, math_ops.subtract(input_rank, 1), name=name)

  # Make shape inference work since transpose may erase its static shape.
  output.set_shape(shape)

  return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
  """Computes softmax activations.

  This function performs the equivalent of

      softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)

  See: https://en.wikipedia.org/wiki/Softmax_function

  Example usage:

  >>> tf.nn.softmax([-1, 0., 1.])
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)>

  Args:
    logits: A non-empty `Tensor`, or an object whose type has a registered
      `Tensor` conversion function. Must be one of the following types:
      `half`,`float32`, `float64`. See also `convert_to_tensor`
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for `axis`.

  Returns:
    A `Tensor`. Has the same type and shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
    TypeError: If no conversion function is registered for `logits` to
      Tensor.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Honor the deprecated `dim` alias, then default to the last dimension.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  return _softmax(logits, gen_nn_ops.softmax,
                  -1 if axis is None else axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
def softmax_v2(logits, axis=None, name=None):
  """Computes softmax activations.

  This function performs the equivalent of

      softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type and shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # An unspecified axis means the last dimension.
  return _softmax(logits, gen_nn_ops.softmax,
                  -1 if axis is None else axis, name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
  """Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), axis))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for `axis`.

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # Honor the deprecated `dim` alias, then default to the last dimension.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  return _softmax(logits, gen_nn_ops.log_softmax,
                  -1 if axis is None else axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
def log_softmax_v2(logits, axis=None, name=None):
  """Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), axis))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # An unspecified axis means the last dimension.
  return _softmax(logits, gen_nn_ops.log_softmax,
                  -1 if axis is None else axis, name)
def _ensure_xent_args(name, sentinel, labels, logits):
  """Validates the keyword-only calling convention of the xent functions.

  Args:
    name: Name of the public function, used in the error message.
    sentinel: Positional-argument guard; anything other than `None` means the
      caller passed arguments positionally.
    labels: The caller's `labels` argument; must not be `None`.
    logits: The caller's `logits` argument; must not be `None`.

  Raises:
    ValueError: If `sentinel` was supplied, or either input is missing.
  """
  if sentinel is not None:
    raise ValueError("Only call `%s` with "
                     "named arguments (labels=..., logits=..., ...)" % name)
  if labels is None or logits is None:
    raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which
  the classes are mutually exclusive (each entry is in exactly one class).
  For example, each CIFAR-10 image is labeled with one and only one label: an
  image can be a dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be. All that is required is that each row of `labels` is a valid
  probability distribution. If they are not, the computation of the gradient
  will be incorrect.

  If using exclusive `labels` (wherein one and only one class is true at a
  time), see `sparse_softmax_cross_entropy_with_logits`.

  Usage:

  >>> logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]]
  >>> labels = [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]]
  >>> tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([0.16984604, 0.82474494], dtype=float32)>

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the output
  of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with the
  `axis` argument specifying the class dimension.

  `logits` and `labels` must have the same dtype (either `float16`,
  `float32`, or `float64`).

  Backpropagation will happen into both `logits` and `labels`. To disallow
  backpropagation into `labels`, pass label tensors through
  `tf.stop_gradient` before feeding it to this function.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    labels: Each vector along the class dimension should hold a valid
      probability distribution e.g. for the case in which labels are of shape
      `[batch_size, num_classes]`, each row of `labels[i]` must be a valid
      probability distribution.
    logits: Per-label activations, typically a linear output. These
      activation energies are interpreted as unnormalized log probabilities.
    axis: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it
    does not have the last dimension of `labels`.
  """
  # The actual computation lives in the shared v1/v2 helper.
  return softmax_cross_entropy_with_logits_v2_helper(
      labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
    labels, logits, axis=None, name=None, dim=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be. All that is required is that each row of `labels` is
  a valid probability distribution. If they are not, the computation of the
  gradient will be incorrect.

  If using exclusive `labels` (wherein one and only
  one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with
  the `axis` argument specifying the class dimension.

  `logits` and `labels` must have the same dtype (either `float16`, `float32`,
  or `float64`).

  Backpropagation will happen into both `logits` and `labels`. To disallow
  backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
  before feeding it to this function.

  **Note that to avoid confusion, it is required to pass only named arguments to
  this function.**

  Args:
    labels: Each vector along the class dimension should hold a valid
      probability distribution e.g. for the case in which labels are of shape
      `[batch_size, num_classes]`, each row of `labels[i]` must be a valid
      probability distribution.
    logits: Unscaled log probabilities.
    axis: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for axis.

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it does
    not have the last dimension of `labels`.
  """
  # TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
  # could break users who call this with bad labels, but disregard the bad
  # results.
  # Fold the deprecated `dim` alias into `axis`; raises if both were given.
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  del dim
  if axis is None:
    axis = -1
  with ops.name_scope(name, "softmax_cross_entropy_with_logits",
                      [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    # Half-precision inputs are upcast to float32 for the kernel and cast
    # back at the end, to avoid precision loss in the softmax/log.
    convert_to_float32 = (
        logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
    precise_logits = math_ops.cast(
        logits, dtypes.float32) if convert_to_float32 else logits
    # labels and logits must be of the same type
    labels = math_ops.cast(labels, precise_logits.dtype)
    input_rank = array_ops.rank(precise_logits)
    # For shape inference.
    shape = logits.get_shape()
    # Move the dim to the end if dim is not the last dimension.
    if axis != -1:
      # Builds a permutation that rotates `dim_index` to the last position
      # while keeping every other dimension in order.
      def _move_dim_to_end(tensor, dim_index, rank):
        return array_ops.transpose(
            tensor,
            array_ops.concat([
                math_ops.range(dim_index),
                math_ops.range(dim_index + 1, rank), [dim_index]
            ], 0))
      precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
      labels = _move_dim_to_end(labels, axis, input_rank)
    input_shape = array_ops.shape(precise_logits)
    # Make precise_logits and labels into matrices, since the kernel only
    # handles the rank-2 `[outer, num_classes]` case.
    precise_logits = _flatten_outer_dims(precise_logits)
    labels = _flatten_outer_dims(labels)
    # Do the actual op computation.
    # The second output tensor contains the gradients. We use it in
    # CrossEntropyGrad() in nn_grad but not here.
    cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
        precise_logits, labels, name=name)
    # The output cost shape should be the input minus axis.
    output_shape = array_ops.slice(input_shape, [0],
                                   [math_ops.subtract(input_rank, 1)])
    cost = array_ops.reshape(cost, output_shape)
    # Make shape inference work since reshape and transpose may erase its static
    # shape.
    if not context.executing_eagerly(
    ) and shape is not None and shape.dims is not None:
      shape = shape.as_list()
      del shape[axis]
      cost.set_shape(shape)
    if convert_to_float32:
      # Restore the caller's original half-precision dtype.
      return math_ops.cast(cost, logits.dtype)
    else:
      return cost
# Deprecation notice for the v1 `softmax_cross_entropy_with_logits` endpoint
# below; surfaced to users by its `@deprecation.deprecated` decorator.
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    dim=-1,
    name=None,
    axis=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks where each
  entry belongs to exactly one class (e.g. a CIFAR-10 image is a dog or a
  truck, never both). The classes are exclusive, but their probabilities need
  not be: each row of `labels` only has to be a valid probability
  distribution; if it is not, the gradient computation will be incorrect.

  For hard (index-valued) labels, see
  `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op applies `softmax` to `logits` internally for
  efficiency, so it expects unscaled logits. Feeding it the output of
  `softmax` produces wrong results.

  Logits and labels are typically shaped `[batch_size, num_classes]`; higher
  ranks are supported with `dim` selecting the class dimension.

  Unlike `tf.nn.softmax_cross_entropy_with_logits_v2`, gradients flow only
  into `logits`, never into `labels`.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: Each vector along the class dimension should hold a valid
      probability distribution, e.g. each row of `[batch_size, num_classes]`
      labels must be a valid probability distribution.
    logits: Per-label activations, typically a linear output, interpreted as
      unnormalized log probabilities.
    dim: The class dimension. Defaults to -1, the last dimension.
    name: A name for the operation (optional).
    axis: Alias for dim.

  Returns:
    A `Tensor` of the same type as `logits`, shaped like `labels` minus its
    class dimension, containing the softmax cross entropy loss.
  """
  # Resolve the `axis` alias onto the canonical `dim` parameter.
  dim = deprecated_argument_lookup("axis", axis, "dim", dim)
  _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
                    logits)
  with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
                      [logits, labels]) as name:
    # Blocking the gradient on `labels` is the only behavioral difference
    # from the v2 endpoint, which this call then delegates to.
    frozen_labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
    return softmax_cross_entropy_with_logits_v2(
        labels=frozen_labels, logits=logits, axis=dim, name=name)
@tf_export(v1=["nn.sparse_softmax_cross_entropy_with_logits"])
def sparse_softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** For this operation, the probability of a given label is considered
  exclusive. That is, soft classes are not allowed, and the `labels` vector
  must provide a single specific index for the true class for each row of
  `logits` (each minibatch entry). For soft softmax classification with
  a probability distribution for each entry, see
  `softmax_cross_entropy_with_logits_v2`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits of shape
  `[batch_size, num_classes]` and have labels of shape
  `[batch_size]`, but higher dimensions are supported, in which
  case the `dim`-th dimension is assumed to be of size `num_classes`.
  `logits` must have the dtype of `float16`, `float32`, or `float64`, and
  `labels` must have the dtype of `int32` or `int64`.

  **Note that to avoid confusion, it is required to pass only named arguments to
  this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
      `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
      must be an index in `[0, num_classes)`. Other values will raise an
      exception when this op is run on CPU, and return `NaN` for corresponding
      loss and gradient rows on GPU.
    logits: Per-label activations (typically a linear output) of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
      `float64`. These activation energies are interpreted as unnormalized log
      probabilities.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `labels` and of the same type as `logits`
    with the softmax cross entropy loss.

  Raises:
    ValueError: If logits are scalars (need to have rank >= 1) or if the rank
      of the labels is not equal to the rank of the logits minus one.
  """
  _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
                    labels, logits)
  # TODO(pcmurray) Raise an error when the label is not an index in
  # [0, num_classes). Note: This could break users who call this with bad
  # labels, but disregard the bad results.
  # Reshape logits and labels to rank 2.
  with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
                      [labels, logits]):
    labels = ops.convert_to_tensor(labels)
    logits = ops.convert_to_tensor(logits)
    # float16 logits are upcast to float32 for the kernel; the loss is cast
    # back to float16 before returning.
    precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
        logits.dtype) == dtypes.float16) else logits
    # Store label shape for result later.
    labels_static_shape = labels.get_shape()
    labels_shape = array_ops.shape(labels)
    static_shapes_fully_defined = (
        labels_static_shape.is_fully_defined() and
        logits.get_shape()[:-1].is_fully_defined())
    # Static (graph-construction-time) validation where shapes are known.
    if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
      raise ValueError(
          "Logits cannot be scalars - received shape %s." % logits.get_shape())
    if logits.get_shape().ndims is not None and (
        labels_static_shape.ndims is not None and
        labels_static_shape.ndims != logits.get_shape().ndims - 1):
      raise ValueError("Rank mismatch: Rank of labels (received %s) should "
                       "equal rank of logits minus 1 (received %s)." %
                       (labels_static_shape.ndims, logits.get_shape().ndims))
    if (static_shapes_fully_defined and
        labels_static_shape != logits.get_shape()[:-1]):
      raise ValueError("Shape mismatch: The shape of labels (received %s) "
                       "should equal the shape of logits except for the last "
                       "dimension (received %s)." % (labels_static_shape,
                                                     logits.get_shape()))
    # Check if no reshapes are required.
    if logits.get_shape().ndims == 2:
      # Fast path: already `[batch, num_classes]` / `[batch]`.
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
    # Perform a check of the dynamic shapes if the static shapes are not fully
    # defined.
    shape_checks = []
    if not static_shapes_fully_defined:
      shape_checks.append(
          check_ops.assert_equal(
              array_ops.shape(labels),
              array_ops.shape(logits)[:-1]))
    with ops.control_dependencies(shape_checks):
      # Reshape logits to 2 dim, labels to 1 dim.
      num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
      precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
      labels = array_ops.reshape(labels, [-1])
      # The second output tensor contains the gradients. We use it in
      # _CrossEntropyGrad() in nn_grad but not here.
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      # Restore the caller's label shape (dynamic and, where known, static).
      cost = array_ops.reshape(cost, labels_shape)
      cost.set_shape(labels_static_shape)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
@tf_export("nn.sparse_softmax_cross_entropy_with_logits", v1=[])
def sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks where each
  entry belongs to exactly one class (e.g. a CIFAR-10 image is a dog or a
  truck, never both).

  **NOTE:** Soft classes are not allowed here: for each row of `logits`
  (each minibatch entry), `labels` must supply the single index of the true
  class. For a full probability distribution per entry, use
  `softmax_cross_entropy_with_logits_v2`.

  **WARNING:** This op applies `softmax` to `logits` internally for
  efficiency, so it expects unscaled logits. Feeding it the output of
  `softmax` produces wrong results.

  Logits of shape `[batch_size, num_classes]` with labels of shape
  `[batch_size]` is the common case; higher ranks are supported, with the
  last logits dimension assumed to be of size `num_classes`. `logits` must be
  `float16`, `float32`, or `float64`; `labels` must be `int32` or `int64`.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is the
      rank of `labels` and of the result), dtype `int32` or `int64`. Each
      entry must be an index in `[0, num_classes)`; other values raise on CPU
      and yield `NaN` loss/gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]`, dtype `float16`, `float32`, or
      `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the shape of `labels` and the dtype of `logits`, holding
    the softmax cross entropy loss.

  Raises:
    ValueError: If logits are scalars (need to have rank >= 1) or if the rank
      of the labels is not equal to the rank of the logits minus one.
  """
  # The v2 endpoint is a pure rename of the v1 implementation: it drops the
  # positional sentinel and mandates keyword arguments, nothing else.
  loss = sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name=name)
  return loss
@tf_export("nn.avg_pool", v1=["nn.avg_pool_v2"])
def avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None):  # pylint: disable=redefined-builtin
  """Performs average pooling on the input.

  Every entry of the output is the mean of the corresponding `ksize` window
  of the input.

  Args:
    input: Tensor of rank N+2, laid out as `[batch_size] +
      input_spatial_shape + [num_channels]` when `data_format` does not start
      with "NC" (the default), or `[batch_size, num_channels] +
      input_spatial_shape` when it does. Pooling is applied over the spatial
      dimensions only.
    ksize: An int or list of `ints` of length `1`, `N` or `N+2` — the pooling
      window size for each dimension of the input tensor.
    strides: An int or list of `ints` of length `1`, `N` or `N+2` — the
      sliding-window stride for each dimension of the input tensor.
    padding: Either `'VALID'` or `'SAME'`; see the "returns" section of
      `tf.nn.convolution` for the exact semantics.
    data_format: A string selecting the channel dimension: "NWC" (default) or
      "NCW" for N=1, "NHWC" (default) or "NCHW" for N=2, "NDHWC" (default) or
      "NCDHW" for N=3.
    name: Optional name for the operation.

  Returns:
    A `Tensor` in the layout given by `data_format` — the average pooled
    output tensor.
  """
  # Determine the number of spatial dimensions, preferring the input's known
  # rank and falling back to the data-format string.
  if input.shape is not None:
    num_spatial_dims = len(input.shape) - 2
  elif data_format is not None:
    num_spatial_dims = len(data_format) - 2
  else:
    raise ValueError(
        "The input must have a rank or a data format must be given.")
  if not 1 <= num_spatial_dims <= 3:
    raise ValueError(
        "Input tensor must be of rank 3, 4 or 5 but was {}.".format(
            num_spatial_dims + 2))
  # Channels sit at index 1 for "NC*" layouts, otherwise last.
  if data_format is not None and data_format.startswith("NC"):
    channel_index = 1
  else:
    channel_index = num_spatial_dims + 1
  ksize = _get_sequence(ksize, num_spatial_dims, channel_index, "ksize")
  strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")
  # Dispatch on dimensionality; the 1-D case goes through the reshaping
  # wrapper, 2-D and 3-D hit the kernels directly.
  pooling_fn = {
      1: avg_pool1d,
      2: gen_nn_ops.avg_pool,
      3: gen_nn_ops.avg_pool3d,
  }[num_spatial_dims]
  return pooling_fn(
      input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
@tf_export(v1=["nn.avg_pool", "nn.avg_pool2d"])
def avg_pool(value, ksize, strides, padding, data_format="NHWC",
             name=None, input=None):  # pylint: disable=redefined-builtin
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size
      of the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.
    input: Alias for value.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  # Resolve the deprecated `input` alias before opening the name scope, so
  # the scope is created over the tensor actually being pooled; this also
  # matches the ordering used by `max_pool`.
  value = deprecation.deprecated_argument_lookup(
      "input", input, "value", value)
  with ops.name_scope(name, "AvgPool", [value]) as name:
    if data_format is None:
      data_format = "NHWC"
    # Channels are at index 1 for "NC*" layouts, otherwise last (index 3).
    channel_index = 1 if data_format.startswith("NC") else 3
    ksize = _get_sequence(ksize, 2, channel_index, "ksize")
    strides = _get_sequence(strides, 2, channel_index, "strides")
    return gen_nn_ops.avg_pool(
        value,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
@tf_export("nn.avg_pool2d", v1=[])
def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.

  Args:
    input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
      the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `input`. The average pooled output tensor.
  """
  # NOTE: The original docstring referred to a parameter `value`; this v2
  # endpoint names it `input`.
  with ops.name_scope(name, "AvgPool2D", [input]) as name:
    if data_format is None:
      data_format = "NHWC"
    # Channels are at index 1 for "NC*" layouts, otherwise last (index 3).
    channel_index = 1 if data_format.startswith("NC") else 3
    ksize = _get_sequence(ksize, 2, channel_index, "ksize")
    strides = _get_sequence(strides, 2, channel_index, "strides")
    return gen_nn_ops.avg_pool(
        input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
@tf_export("nn.avg_pool1d")
def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):  # pylint: disable=redefined-builtin
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.

  Note internally this op reshapes and uses the underlying 2d operation.

  Args:
    input: A 3-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1` or `3`. The size of the
      window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1` or `3`. The stride of
      the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of format specified by `data_format`.
    The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool1D", [input]) as name:
    if data_format is None:
      data_format = "NWC"
    channel_index = 1 if data_format.startswith("NC") else 2
    # Prepend 1 for the height dimension that the 2-D kernel expects.
    ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
    strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
    # Insert a unit spatial dimension, run 2-D average pooling, then squeeze
    # the inserted dimension back out.
    expanding_dim = 1 if data_format == "NWC" else 2
    data_format = "NHWC" if data_format == "NWC" else "NCHW"
    input = array_ops.expand_dims_v2(input, expanding_dim)
    result = gen_nn_ops.avg_pool(
        input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
    return array_ops.squeeze(result, expanding_dim)
@tf_export("nn.avg_pool3d")
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):  # pylint: disable=redefined-builtin
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.

  Args:
    input: A 5-D `Tensor` of shape `[batch, depth, height, width, channels]`
      and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
      the window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1`, `3` or `5`. The
      stride of the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NDHWC' and 'NCDHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `input`. The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool3D", [input]) as name:
    if data_format is None:
      data_format = "NDHWC"
    # BUG FIX: for channels-last "NDHWC" the channel dimension is index 4
    # (N=0, D=1, H=2, W=3, C=4), not 3. The previous value of 3 made
    # `_get_sequence` place the window/stride 1s at a spatial position and a
    # pooling value at W, e.g. a scalar ksize k expanded to [1, k, k, 1, k]
    # instead of [1, k, k, k, 1]. This now matches `max_pool3d`.
    channel_index = 1 if data_format.startswith("NC") else 4
    ksize = _get_sequence(ksize, 3, channel_index, "ksize")
    strides = _get_sequence(strides, 3, channel_index, "strides")
    return gen_nn_ops.avg_pool3d(
        input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool", v1=["nn.max_pool_v2"])
def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None):
  """Performs max pooling on the input.

  Args:
    input: Tensor of rank N+2, laid out as `[batch_size] +
      input_spatial_shape + [num_channels]` when `data_format` does not start
      with "NC" (the default), or `[batch_size, num_channels] +
      input_spatial_shape` when it does. Pooling is applied over the spatial
      dimensions only.
    ksize: An int or list of `ints` of length `1`, `N` or `N+2` — the pooling
      window size for each dimension of the input tensor.
    strides: An int or list of `ints` of length `1`, `N` or `N+2` — the
      sliding-window stride for each dimension of the input tensor.
    padding: Either `'VALID'` or `'SAME'`; see the "returns" section of
      `tf.nn.convolution` for the exact semantics.
    data_format: A string selecting the channel dimension: "NWC" (default) or
      "NCW" for N=1, "NHWC" (default) or "NCHW" for N=2, "NDHWC" (default) or
      "NCDHW" for N=3.
    name: Optional name for the operation.

  Returns:
    A `Tensor` in the layout given by `data_format` — the max pooled output
    tensor.
  """
  # Determine the number of spatial dimensions, preferring the input's known
  # rank and falling back to the data-format string.
  if input.shape is not None:
    num_spatial_dims = len(input.shape) - 2
  elif data_format is not None:
    num_spatial_dims = len(data_format) - 2
  else:
    raise ValueError(
        "The input must have a rank or a data format must be given.")
  if not 1 <= num_spatial_dims <= 3:
    raise ValueError(
        "Input tensor must be of rank 3, 4 or 5 but was {}.".format(
            num_spatial_dims + 2))
  # Channels sit at index 1 for "NC*" layouts, otherwise last.
  if data_format is not None and data_format.startswith("NC"):
    channel_index = 1
  else:
    channel_index = num_spatial_dims + 1
  ksize = _get_sequence(ksize, num_spatial_dims, channel_index, "ksize")
  strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")
  # Dispatch on dimensionality; the 1-D case goes through the reshaping
  # wrapper, 2-D and 3-D hit the kernels directly.
  pooling_fn = {
      1: max_pool1d,
      2: gen_nn_ops.max_pool,
      3: gen_nn_ops.max_pool3d,
  }[num_spatial_dims]
  return pooling_fn(
      input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin
@tf_export(v1=["nn.max_pool"])
def max_pool(value,
             ksize,
             strides,
             padding,
             data_format="NHWC",
             name=None,
             input=None):  # pylint: disable=redefined-builtin
  """Performs max pooling on the input.

  Args:
    value: A 4-D `Tensor` in the layout given by `data_format`.
    ksize: An int or list of `ints` of length `1`, `2` or `4` — the pooling
      window size for each dimension of the input tensor.
    strides: An int or list of `ints` of length `1`, `2` or `4` — the
      sliding-window stride for each dimension of the input tensor.
    padding: Either `'VALID'` or `'SAME'`; see the "returns" section of
      `tf.nn.convolution` for the exact semantics.
    data_format: A string; 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
    name: Optional name for the operation.
    input: Alias for value.

  Returns:
    A `Tensor` in the layout given by `data_format` — the max pooled output
    tensor.
  """
  # Fold the deprecated `input` alias into `value`.
  value = deprecation.deprecated_argument_lookup("input", input, "value", value)
  with ops.name_scope(name, "MaxPool", [value]) as name:
    if data_format is None:
      data_format = "NHWC"
    # Channels are at index 1 for "NC*" layouts, otherwise last (index 3).
    if data_format.startswith("NC"):
      channel_index = 1
    else:
      channel_index = 3
    ksize = _get_sequence(ksize, 2, channel_index, "ksize")
    strides = _get_sequence(strides, 2, channel_index, "strides")
    # Reject degenerate (zero-sized) pooling windows up front.
    zero_scalar = np.isscalar(ksize) and ksize == 0
    zero_entry = (isinstance(ksize, (list, tuple, np.ndarray)) and
                  any(v == 0 for v in ksize))
    if zero_scalar or zero_entry:
      raise ValueError("ksize cannot be zero.")
    return gen_nn_ops.max_pool(
        value,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool1d")
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
  """Performs max pooling on a 3-D input.

  Internally the input is expanded to 4-D so that the underlying 2-D max
  pooling kernel can be reused; the extra dimension is squeezed away before
  returning.

  Args:
    input: A 3-D `Tensor` in the layout given by `data_format`.
    ksize: An int or list of `ints` of length `1` or `3` — the pooling window
      size for each dimension of the input tensor.
    strides: An int or list of `ints` of length `1` or `3` — the
      sliding-window stride for each dimension of the input tensor.
    padding: Either `'VALID'` or `'SAME'`; see the "returns" section of
      `tf.nn.convolution` for the exact semantics.
    data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` in the layout given by `data_format` — the max pooled output
    tensor.
  """
  with ops.name_scope(name, "MaxPool1d", [input]) as name:
    if data_format is None:
      data_format = "NWC"
    channel_index = 1 if data_format.startswith("NC") else 2
    # Prepend 1 for the unit height dimension the 2-D kernel expects.
    ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
    strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
    # Map the 1-D layout onto its 2-D counterpart and remember where the
    # dummy spatial dimension was inserted.
    if data_format == "NWC":
      expanding_dim, data_format = 1, "NHWC"
    else:
      expanding_dim, data_format = 2, "NCHW"
    expanded = array_ops.expand_dims_v2(input, expanding_dim)
    pooled = gen_nn_ops.max_pool(
        expanded,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
    return array_ops.squeeze(pooled, expanding_dim)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool2d")
def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs max pooling on a 4-D input.

  Args:
    input: A 4-D `Tensor` in the layout given by `data_format`.
    ksize: An int or list of `ints` of length `1`, `2` or `4` — the pooling
      window size for each dimension of the input tensor.
    strides: An int or list of `ints` of length `1`, `2` or `4` — the
      sliding-window stride for each dimension of the input tensor.
    padding: Either `'VALID'` or `'SAME'`; see the "returns" section of
      `tf.nn.convolution` for the exact semantics.
    data_format: A string; 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` in the layout given by `data_format` — the max pooled output
    tensor.
  """
  with ops.name_scope(name, "MaxPool2d", [input]) as name:
    if data_format is None:
      data_format = "NHWC"
    # Channels are at index 1 for "NC*" layouts, otherwise last (index 3).
    if data_format.startswith("NC"):
      channel_index = 1
    else:
      channel_index = 3
    window = _get_sequence(ksize, 2, channel_index, "ksize")
    steps = _get_sequence(strides, 2, channel_index, "strides")
    return gen_nn_ops.max_pool(
        input,
        ksize=window,
        strides=steps,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool3d")
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
  """Performs max pooling on a 5-D input.

  Args:
    input: A 5-D `Tensor` in the layout given by `data_format`.
    ksize: An int or list of `ints` of length `1`, `3` or `5` — the pooling
      window size for each dimension of the input tensor.
    strides: An int or list of `ints` of length `1`, `3` or `5` — the
      sliding-window stride for each dimension of the input tensor.
    padding: Either `'VALID'` or `'SAME'`; see the "returns" section of
      `tf.nn.convolution` for the exact semantics.
    data_format: An optional string from: "NDHWC", "NCDHW". Defaults to
      "NDHWC". With "NDHWC" the data is stored as [batch, in_depth,
      in_height, in_width, in_channels]; with "NCDHW" it is stored as
      [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor` in the layout given by `data_format` — the max pooled output
    tensor.
  """
  with ops.name_scope(name, "MaxPool3D", [input]) as name:
    if data_format is None:
      data_format = "NDHWC"
    # Channels are at index 1 for "NC*" layouts, otherwise last (index 4 in
    # a 5-D tensor).
    if data_format.startswith("NC"):
      channel_index = 1
    else:
      channel_index = 4
    window = _get_sequence(ksize, 3, channel_index, "ksize")
    steps = _get_sequence(strides, 3, channel_index, "strides")
    return gen_nn_ops.max_pool3d(
        input,
        ksize=window,
        strides=steps,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: enable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
def max_pool_with_argmax_v2(
    input,  # pylint: disable=redefined-builtin
    ksize,
    strides,
    padding,
    data_format="NHWC",
    output_dtype=dtypes.int64,
    include_batch_in_index=False,
    name=None):
  """Performs max pooling on the input and outputs both max values and indices.

  The indices in `argmax` are flattened: a maximum at position `[b, y, x, c]`
  becomes flattened index `(y * width + x) * channels + c` when
  `include_batch_in_index` is False, and
  `((b * height + y) * width + x) * channels + c` when it is True.

  The indices returned are always in `[0, height) x [0, width)` before
  flattening, even if padding is involved and the mathematically correct
  answer is outside (either negative or too large). This is a bug, but fixing
  it is difficult to do in a safe backwards compatible way, especially due to
  flattening.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`,
      `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, height, width, channels]`. Input to pool over.
    ksize: An int or list of `ints` of length `1`, `2` or `4`. The size of the
      window for each dimension of the input tensor.
    strides: An int or list of `ints` of length `1`, `2` or `4`. The stride of
      the sliding window for each dimension of the input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
      `"NHWC"`. Specify the data format of the input and output data.
    output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
      Defaults to `tf.int64`. The dtype of the returned argmax tensor.
    include_batch_in_index: An optional `boolean`. Defaults to `False`.
      Whether to include batch dimension in flattened index of `argmax`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, argmax).
    output: A `Tensor`. Has the same type as `input`.
    argmax: A `Tensor` of type `output_dtype`.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")

  # Channel axis is fixed at 3 because only NHWC is accepted above.
  window = _get_sequence(ksize, 2, 3, "ksize")
  step = _get_sequence(strides, 2, 3, "strides")

  return gen_nn_ops.max_pool_with_argmax(
      input=input,
      ksize=window,
      strides=step,
      padding=padding,
      Targmax=output_dtype,
      include_batch_in_index=include_batch_in_index,
      name=name)
@tf_export(v1=["nn.max_pool_with_argmax"])
def max_pool_with_argmax_v1(  # pylint: disable=missing-docstring,invalid-name
    input,  # pylint: disable=redefined-builtin
    ksize,
    strides,
    padding,
    data_format="NHWC",
    Targmax=None,
    name=None,
    output_dtype=None,
    include_batch_in_index=False):
  # V1 compatibility wrapper: accepts the deprecated `Targmax` alias alongside
  # its v2 replacement `output_dtype`.
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")
  # Resolve the deprecated `Targmax` name against the preferred
  # `output_dtype` argument (at most one should be supplied by the caller).
  Targmax = deprecated_argument_lookup(
      "output_dtype", output_dtype, "Targmax", Targmax)
  # Default the argmax dtype to int64 when neither argument was given.
  if Targmax is None:
    Targmax = dtypes.int64
  return gen_nn_ops.max_pool_with_argmax(
      input=input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      Targmax=Targmax,
      include_batch_in_index=include_batch_in_index,
      name=name)
# The public docstring is inherited from the generated op wrapper.
max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__
@ops.RegisterStatistics("Conv3D", "flops")
def _calc_conv3d_flops(graph, node):
  """Returns the "flops" statistic for a Conv3D node."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_time = int(kernel_shape[0])
  kernel_height = int(kernel_shape[1])
  kernel_width = int(kernel_shape[2])
  kernel_in_depth = int(kernel_shape[3])
  # int64 accumulation avoids overflow on large shapes.
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  # One multiply plus one add per kernel element per output element.
  flops = 2 * out_count * kernel_in_depth * (
      kernel_time * kernel_height * kernel_width)
  return ops.OpStats("flops", flops)
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
  """Returns the "flops" statistic for a Conv2D node."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_height = int(kernel_shape[0])
  kernel_width = int(kernel_shape[1])
  kernel_in_depth = int(kernel_shape[2])
  # int64 accumulation avoids overflow on large shapes.
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  # One multiply plus one add per kernel element per output element.
  flops = 2 * out_count * kernel_in_depth * kernel_height * kernel_width
  return ops.OpStats("flops", flops)
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
  """Returns the "flops" statistic for a DepthwiseConv2dNative node."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_height = int(kernel_shape[0])
  kernel_width = int(kernel_shape[1])
  # int64 accumulation avoids overflow on large shapes. Depthwise conv has no
  # input-depth factor: each channel is convolved independently.
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  return ops.OpStats("flops", 2 * out_count * kernel_height * kernel_width)
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd.

  One add per element of the (fully defined) input tensor.

  Args:
    graph: Graph containing `node`.
    node: NodeDef of the BiasAdd op to measure.

  Returns:
    An `ops.OpStats` carrying the "flops" count.
  """
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  # Accumulate in int64 like the other flops calculators in this file, so
  # large shapes cannot overflow the platform-default integer dtype.
  input_count = np.prod(input_shape.as_list(), dtype=np.int64)
  return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
def xw_plus_b(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units.
    weights: a 2D tensor.  Dimensions typically: in_units, out_units.
    biases: a 1D tensor.  Dimensions: out_units.
    name: A name for the operation (optional).  If not specified
      "xw_plus_b" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # Fold the bias addition into the scope's final name.
    return bias_add(math_ops.matmul(x, weights), biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):
  """Computes matmul(x, weights) + biases.

  This is a deprecated version of that will soon be removed.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units.
    weights: a 2D tensor.  Dimensions typically: in_units, out_units.
    biases: a 1D tensor.  Dimensions: out_units.
    name: A name for the operation (optional).  If not specified
      "xw_plus_b_v1" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # Uses the v1 bias_add variant, unlike xw_plus_b.
    return bias_add_v1(math_ops.matmul(x, weights), biases, name=name)
def _get_noise_shape(x, noise_shape):
  """Resolves a dropout `noise_shape`, filling unknown dims from `x`."""
  # With no noise_shape, the mask matches x's (dynamic) shape exactly.
  if noise_shape is None:
    return array_ops.shape(x)

  try:
    # Best effort to recover a static shape from the argument.
    static_noise = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    # Not statically interpretable: hand it to the op unchanged (in eager
    # mode any problem surfaces as an exception there).
    return noise_shape

  x_dims = x.shape.dims
  if x_dims is not None and len(x_dims) == len(static_noise.dims):
    # Same rank: borrow x's known dimension sizes wherever the noise shape
    # left a dimension unspecified.
    merged = [
        x_dim.value
        if noise_dim.value is None and x_dim.value is not None
        else noise_dim.value
        for x_dim, noise_dim in zip(x_dims, static_noise.dims)
    ]
    return tensor_shape.TensorShape(merged)
  return noise_shape
@tf_export(v1=["nn.dropout"])
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):
  """Computes dropout (V1 wrapper around `dropout_v2`).

  With probability `rate`, each element of `x` is set to `0`; the remaining
  elements are scaled by `1 / (1-rate)` so the expected sum is unchanged.

  By default, each element is kept or dropped independently.  If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions.  For example, if `shape(x) = [k, l, m, n]`
  and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
  kept independently and each row and column will be kept or not kept together.

  Args:
    x: A floating point tensor.
    keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer.  Used to create random seeds.  See
      `tf.random.set_seed` for behavior.
    name: A name for this operation (optional).
    rate: A scalar `Tensor` with the same type as `x`.  The probability that
      each element of `x` is discarded.

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  if keep_prob is None:
    keep = None
  else:
    try:
      # keep_prob may be a Python number or a Tensor; both support `1. - p`.
      keep = 1. - keep_prob
    except TypeError:
      raise ValueError("keep_prob must be a floating point number or Tensor "
                       "(got %r)" % keep_prob)
  rate = deprecation.deprecated_argument_lookup(
      "rate", rate,
      "keep_prob", keep)
  if rate is None:
    raise ValueError("You must provide a rate to dropout.")
  return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
  """Computes dropout: randomly sets elements to zero to prevent overfitting.

  Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x.
  When converting 1.x code, please use named arguments to ensure behavior stays
  consistent.

  See also: `tf.keras.layers.Dropout` for a dropout layer.

  [Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN
  models. Inputs elements are randomly set to zero (and the other elements are
  rescaled). This encourages each node to be independently useful, as it cannot
  rely on the output of other nodes.

  More precisely: With probability `rate` elements of `x` are set to `0`.
  The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the
  expected value is preserved.

  >>> tf.random.set_seed(0)
  >>> x = tf.ones([3,5])
  >>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy()
  array([[2., 0., 0., 2., 2.],
         [2., 2., 2., 2., 2.],
         [2., 0., 2., 0., 2.]], dtype=float32)

  >>> tf.random.set_seed(0)
  >>> x = tf.ones([3,5])
  >>> tf.nn.dropout(x, rate = 0.8, seed = 1).numpy()
  array([[0., 0., 0., 5., 5.],
         [0., 5., 0., 5., 0.],
         [5., 0., 5., 0., 5.]], dtype=float32)

  >>> tf.nn.dropout(x, rate = 0.0) == x
  <tf.Tensor: shape=(3, 5), dtype=bool, numpy=
    array([[ True,  True,  True,  True,  True],
           [ True,  True,  True,  True,  True],
           [ True,  True,  True,  True,  True]])>

  By default, each element is kept or dropped independently.  If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions. This is useful for dropping whole
  channels from an image or sequence. For example:

  >>> tf.random.set_seed(0)
  >>> x = tf.ones([3,10])
  >>> tf.nn.dropout(x, rate = 2/3, noise_shape=[1,10], seed=1).numpy()
  array([[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
         [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
         [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.]], dtype=float32)

  Args:
    x: A floating point tensor.
    rate: A scalar `Tensor` with the same type as x. The probability
      that each element is dropped. For example, setting rate=0.1 would drop
      10% of input elements.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.
    name: A name for this operation (optional).

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point
      tensor. `rate=1` is disallowed, because the output would be all zeros,
      which is likely not what was intended.
  """
  with ops.name_scope(name, "dropout", [x]) as name:
    is_rate_number = isinstance(rate, numbers.Real)
    if is_rate_number and (rate < 0 or rate >= 1):
      raise ValueError("rate must be a scalar tensor or a float in the "
                       "range [0, 1), got %g" % rate)
    x = ops.convert_to_tensor(x, name="x")
    x_dtype = x.dtype
    if not x_dtype.is_floating:
      raise ValueError("x has to be a floating point tensor since it's going "
                       "to be scaled. Got a %s tensor instead." % x_dtype)
    is_executing_eagerly = context.executing_eagerly()
    if not tensor_util.is_tensor(rate):
      if is_rate_number:
        # Python-number rate: pre-scale x by 1/keep_prob.
        keep_prob = 1 - rate
        scale = 1 / keep_prob
        scale = ops.convert_to_tensor(scale, dtype=x_dtype)
        ret = gen_math_ops.mul(x, scale)
      else:
        raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
    else:
      # Tensor rate: validate rank/dtype, then pre-scale x by 1/(1-rate).
      rate.get_shape().assert_has_rank(0)
      rate_dtype = rate.dtype
      if rate_dtype != x_dtype:
        if not rate_dtype.is_compatible_with(x_dtype):
          # Fixed typo in the error message ("incomptaible").
          raise ValueError(
              "Tensor dtype %s is incompatible with Tensor dtype %s: %r" %
              (x_dtype.name, rate_dtype.name, rate))
        rate = gen_math_ops.cast(rate, x_dtype, name="rate")
      one_tensor = constant_op.constant(1, dtype=x_dtype)
      ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))

    noise_shape = _get_noise_shape(x, noise_shape)
    # Sample a uniform distribution on [0.0, 1.0) and select values larger
    # than rate.
    #
    # NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
    # and subtract 1.0.
    random_tensor = random_ops.random_uniform(
        noise_shape, seed=seed, dtype=x_dtype)
    # NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
    # hence a >= comparison is used.
    keep_mask = random_tensor >= rate
    ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
    if not is_executing_eagerly:
      ret.set_shape(x.get_shape())
    return ret
@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None):  # pylint: disable=redefined-builtin
  """Finds values and indices of the `k` largest entries for the last dimension.

  If the input is a vector (rank=1), finds the `k` largest entries in the
  vector and outputs their values and indices as vectors.  Thus `values[j]` is
  the `j`-th largest entry in `input`, and its index is `indices[j]`.

  For matrices (resp. higher rank input), computes the top `k` entries in each
  row (resp. vector along the last dimension).  Thus,

      values.shape = indices.shape = input.shape[:-1] + [k]

  If two elements are equal, the lower-index element appears first.

  This is a thin wrapper over the `TopKV2` kernel.

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `k`.
    k: 0-D `int32` `Tensor`.  Number of top elements to look for along the
      last dimension (along each row for matrices).
    sorted: If true the resulting `k` elements will be sorted by the values in
      descending order.
    name: Optional name for the operation.

  Returns:
    values: The `k` largest elements along each last dimensional slice.
    indices: The indices of `values` within the last dimension of `input`.
  """
  return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
def nth_element(input, n, reverse=False, name=None):  # pylint: disable=redefined-builtin
  r"""Finds values of the `n`-th smallest value for the last dimension.

  Note that n is zero-indexed.

  If the input is a vector (rank-1), finds the entry which is the nth-smallest
  value in the vector and outputs its value as a scalar tensor.

  For matrices (resp. higher rank input), computes the entries which are the
  nth-smallest value in each row (resp. vector along the last dimension).
  Thus,

      values.shape = input.shape[:-1]

  This is a thin wrapper over the `NthElement` kernel.

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `n+1`.
    n: A `Tensor` of type `int32`.
      0-D. Position of sorted vector to select along the last dimension (along
      each row for matrices). Valid range of n is `[0, input.shape[:-1])`
    reverse: An optional `bool`. Defaults to `False`.
      When set to True, find the nth-largest value in the vector and vice
      versa.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
    The `n`-th order statistic along each last dimensional slice.
  """
  return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated.  Use fractional_max_pool_v2.")
def fractional_max_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):   # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  This is a deprecated version of `fractional_max_pool`.

  Unlike regular max pooling, where the reduction factor N must be an integer,
  fractional max pooling allows a non-integer overall reduction ratio.  The
  pooling-region boundaries are drawn (pseudo)randomly but remain fairly
  uniform.  Looking at the height dimension, define:

  1. input_row_length : the number of rows from the input set
  2. output_row_length : which will be smaller than the input
  3. alpha = input_row_length / output_row_length : our reduction ratio
  4. K = floor(alpha)
  5. row_pooling_sequence : this is the result list of pool boundary rows

  The generated row_pooling_sequence satisfies:

  1. a[0] = 0 : the first value of the sequence is 0
  2. a[end] = input_row_length : the last value of the sequence is the size
  3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
  4. length(row_pooling_sequence) = output_row_length+1

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio for
      each dimension of `value`; only the row and col dimensions are currently
      supported, and each ratio must be >= 1.0.  The first and last elements
      must be 1.0 because pooling over batch and channels is not allowed, e.g.
      [1.0, 1.44, 1.73, 1.0] pools by 1.44 on height and 1.73 on width.
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`, the
      pooling sequence is generated pseudorandomly rather than randomly; see
      (Graham, 2015) for the difference.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both cells.
      For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      With pooling sequence [0, 2, 4], the value 16 at index 2 is used twice,
      giving [20, 16] for fractional max pooling.
    deterministic: An optional `bool`.  Deprecated; use
      `fractional_max_pool_v2` instead.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the random
      number generator; otherwise a random seed is used.
    seed2: An optional `int`.  Deprecated; use `fractional_max_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional max pooling.  Has the same type
      as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.

  References:
    Fractional Max-Pooling:
      [Graham, 2015](https://arxiv.org/abs/1412.6071)
      ([pdf](https://arxiv.org/pdf/1412.6071.pdf))
  """
  # Straight passthrough: all arguments, including the deprecated ones, are
  # forwarded to the kernel unchanged.
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping,
                                        deterministic=deterministic,
                                        seed=seed, seed2=seed2, name=name)
@tf_export("nn.fractional_max_pool", v1=[])
def fractional_max_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  Unlike regular max pooling, where the reduction factor N must be an integer,
  fractional max pooling allows a non-integer overall reduction ratio.  The
  pooling-region boundaries are drawn (pseudo)randomly but remain fairly
  uniform.  Looking at the height dimension, define:

  1. input_row_length : the number of rows from the input set
  2. output_row_length : which will be smaller than the input
  3. alpha = input_row_length / output_row_length : our reduction ratio
  4. K = floor(alpha)
  5. row_pooling_sequence : this is the result list of pool boundary rows

  The generated row_pooling_sequence satisfies:

  1. a[0] = 0 : the first value of the sequence is 0
  2. a[end] = input_row_length : the last value of the sequence is the size
  3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
  4. length(row_pooling_sequence) = output_row_length+1

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`.
      Pooling ratio for each dimension of `value`; only the row and col
      dimensions are currently supported, and each ratio must be >= 1.0.  The
      first and last elements must be 1.0 because pooling over batch and
      channels is not allowed, e.g. [1.0, 1.44, 1.73, 1.0] pools by 1.44 on
      height and 1.73 on width.
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`, the
      pooling sequence is generated pseudorandomly rather than randomly; see
      paper (Graham, 2015) for the difference.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both cells.
      For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      With pooling sequence [0, 2, 4], the value 16 at index 2 is used twice,
      giving [20, 16] for fractional max pooling.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the random
      number generator; otherwise a random seed is used.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional max pooling.  Has the same type
      as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.

  References:
    Fractional Max-Pooling:
      [Graham, 2015](https://arxiv.org/abs/1412.6071)
      ([pdf](https://arxiv.org/pdf/1412.6071.pdf))
  """
  # Expand scalar / short-list ratios to the full NHWC form (batch and
  # channel ratios fixed at 1).
  ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")

  if seed == 0:
    # A zero seed requests nondeterministic behavior from the kernel.
    return gen_nn_ops.fractional_max_pool(value, ratio, pseudo_random,
                                          overlapping, deterministic=False,
                                          seed=0, seed2=0, name=name)
  seed1, seed2 = random_seed.get_seed(seed)
  return gen_nn_ops.fractional_max_pool(value, ratio, pseudo_random,
                                        overlapping, deterministic=True,
                                        seed=seed1, seed2=seed2, name=name)
@tf_export(v1=["nn.fractional_avg_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated.  Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  This is a deprecated version of `fractional_avg_pool`.

  Fractional average pooling generates pooling regions exactly like fractional
  max pooling; the only difference is that each region is then reduced with a
  mean instead of a max.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio for
      each dimension of `value`; only the row and col dimensions are currently
      supported, and each ratio must be >= 1.0.  The first and last elements
      must be 1.0 because pooling over batch and channels is not allowed, e.g.
      [1.0, 1.44, 1.73, 1.0] pools by 1.44 on height and 1.73 on width.
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`, the
      pooling sequence is generated pseudorandomly rather than randomly; see
      paper (Graham, 2015) for the difference.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both cells.
      For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      With pooling sequence [0, 2, 4], the value 16 at index 2 is used twice,
      giving [20, 16] for fractional avg pooling.
    deterministic: An optional `bool`.  Deprecated; use
      `fractional_avg_pool_v2` instead.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the random
      number generator; otherwise a random seed is used.
    seed2: An optional `int`.  Deprecated; use `fractional_avg_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional avg pooling.  Has the same type
      as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.

  References:
    Fractional Max-Pooling:
      [Graham, 2015](https://arxiv.org/abs/1412.6071)
      ([pdf](https://arxiv.org/pdf/1412.6071.pdf))
  """
  # Straight passthrough: all arguments, including the deprecated ones, are
  # forwarded to the kernel unchanged.
  return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                        overlapping,
                                        deterministic=deterministic,
                                        seed=seed, seed2=seed2, name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
def fractional_avg_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  Fractional average pooling is similar to Fractional max pooling in the
  pooling region generation step. The only difference is that after pooling
  regions are generated, a mean operation is performed instead of a max
  operation in each pooling region.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`.
      Pooling ratio for each dimension of `value`, currently only supports row
      and col dimension and should be >= 1.0. For example, a valid pooling
      ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
      must be 1.0 because we don't allow pooling on batch and channels
      dimensions. 1.44 and 1.73 are pooling ratio on height and width
      dimensions respectively.
    pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
      generates the pooling sequence in a pseudorandom fashion, otherwise, in a
      random fashion. Check paper (Graham, 2015) for difference between
      pseudorandom and random.
    overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
      it means when pooling, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
      twice. The result would be [20, 16] for fractional avg pooling.
    seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
      random number generator is seeded by the given seed. Otherwise it is
      seeded by a random seed.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional avg pooling. Has the same type as
      `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.

  References:
    Fractional Max-Pooling:
      [Graham, 2015](https://arxiv.org/abs/1412.6071)
      ([pdf](https://arxiv.org/pdf/1412.6071.pdf))
  """
  # Normalize scalar / short-list ratios to the full NHWC form, mirroring
  # fractional_max_pool_v2 (batch and channel ratios are fixed at 1). Full
  # 4-element lists pass through unchanged, so this is backward compatible.
  pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")

  if seed == 0:
    # A zero seed requests nondeterministic behavior from the kernel.
    return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=False,
                                          seed=0, seed2=0, name=name)
  else:
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=True,
                                          seed=seed1, seed2=seed2, name=name)
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
  """Returns the "flops" statistic for a Dilation2D node."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_height = int(kernel_shape[0])
  kernel_width = int(kernel_shape[1])
  # int64 accumulation avoids overflow on large shapes; two ops per kernel
  # element per output element.
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  return ops.OpStats("flops", 2 * out_count * kernel_height * kernel_width)
@tf_export(v1=["nn.erosion2d"])
def erosion2d(value, kernel, strides, rates, padding, name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.

  The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
  `kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e., each
  input channel is processed independently of the others with its own
  structuring function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`; its spatial dimensions depend on
  the `padding` algorithm. Only the default "NHWC" `data_format` is currently
  supported.

  In detail, the grayscale morphological 2-D erosion is given by:

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - rates[1] * dy,
                            strides[2] * x - rates[2] * dx,
                            c] -
                      kernel[dy, dx, c]

  Duality: The erosion of `value` by the `kernel` is equal to the negation of
  the dilation of `-value` by the reflected `kernel`.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    kernel: A `Tensor`. Must have the same type as `value`.
      3-D with shape `[kernel_height, kernel_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional). If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor`. Has the same type as `value`.
    4-D with shape `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `kernel`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
    # Erosion is the dual of dilation:
    #   erode(value, kernel) == -dilate(-value, reflect(kernel))
    reflected_kernel = array_ops.reverse_v2(kernel, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_kernel,
        strides=strides,
        rates=rates,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export("nn.erosion2d", v1=[])
def erosion2d_v2(value,
                 filters,
                 strides,
                 padding,
                 data_format,
                 dilations,
                 name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.

  `value` has shape `[batch, in_height, in_width, depth]` and `filters` has
  shape `[filters_height, filters_width, depth]`: each input channel is eroded
  independently with its own structuring function.  The output has shape
  `[batch, out_height, out_width, depth]`, with spatial dimensions determined
  by the `padding` algorithm.  Only the default "NHWC" `data_format` is
  currently supported.

  In detail, the grayscale morphological 2-D erosion is given by:

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - dilations[1] * dy,
                            strides[2] * x - dilations[2] * dx,
                            c] -
                      filters[dy, dx, c]

  Duality: eroding `value` by `filters` equals negating the dilation of
  `-value` by the spatially reflected `filters`.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor`. Must have the same type as `value`.
      3-D with shape `[filters_height, filters_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    name: A name for the operation (optional). If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor`. Has the same type as `value`.
    4-D with shape `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If `data_format` is other than `"NHWC"`.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")

  with ops.name_scope(name, "erosion2d", [value, filters]) as name:
    # Erosion is the dual of dilation: erode(v, f) == -dilate(-v, reflect(f)).
    negated_input = math_ops.negative(value)
    reflected_filters = array_ops.reverse_v2(filters, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=negated_input,
        filter=reflected_filters,
        strides=strides,
        rates=dilations,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
def in_top_k(predictions, targets, k, name=None):
  r"""Says whether the targets are in the top `K` predictions.

  Outputs a `batch_size` bool array: entry `out[i]` is `True` when the
  prediction for the target class is finite (not inf, -inf, or nan) and is
  among the top `k` predictions over all predictions for example `i`.
  Unlike the `TopK` op, ties are handled inclusively: if several classes
  share a prediction value that straddles the top-`k` boundary, all of them
  are considered to be in the top `k`.

  More formally, with \\(predictions_i\\) the predictions over all classes
  for example `i`, \\(targets_i\\) the target class for example `i`, and
  \\(out_i\\) the output for example `i`:

  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$

  Args:
    predictions: A `Tensor` of type `float32`.
      A `batch_size` x `classes` tensor.
    targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `batch_size` vector of class ids.
    k: An `int`. Number of top elements to look at for computing precision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
  """
  with ops.name_scope(name, "in_top_k"):
    result = gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
  return result
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
def in_top_k_v2(targets, predictions, k, name=None):
  # The v2 API swaps the (targets, predictions) argument order; delegate to
  # the v1 implementation, which expects predictions first.
  return in_top_k(predictions=predictions, targets=targets, k=k, name=name)


in_top_k_v2.__doc__ = in_top_k.__doc__
# Re-export the generated quantized op wrappers under the v1 `tf.nn`
# namespace; these ops have no hand-written Python wrapper of their own.
tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool)
tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d)
tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x)
tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool)
| {
"content_hash": "b95e769b4a8f97454bb2b71da1d58b35",
"timestamp": "",
"source": "github",
"line_count": 5221,
"max_line_length": 116,
"avg_line_length": 40.62363531890443,
"alnum_prop": 0.6533880884127942,
"repo_name": "gunan/tensorflow",
"id": "9a22f40f8a2b3104f0e25365b6f0dbaa3cab73e6",
"size": "212785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/nn_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45924"
},
{
"name": "C",
"bytes": "774953"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "77908225"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "104215"
},
{
"name": "Go",
"bytes": "1841471"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "962443"
},
{
"name": "Jupyter Notebook",
"bytes": "556650"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1479029"
},
{
"name": "Makefile",
"bytes": "58603"
},
{
"name": "Objective-C",
"bytes": "104667"
},
{
"name": "Objective-C++",
"bytes": "297830"
},
{
"name": "PHP",
"bytes": "23994"
},
{
"name": "Pascal",
"bytes": "3739"
},
{
"name": "Pawn",
"bytes": "17039"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "39476740"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "650007"
},
{
"name": "Smarty",
"bytes": "34649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from pylab import *
import matplotlib.cbook as cbook

# Display a sample MRI slice: the data are 256x256 16-bit integers stored
# as a raw, headerless binary blob.
dfile = cbook.get_sample_data('s1045.ima.gz')
# np.frombuffer replaces the deprecated np.fromstring for binary input.
# frombuffer returns a read-only view; .astype(float) copies it into a
# writable array, so the shape assignment below is safe.
im = np.frombuffer(dfile.read(), np.uint16).astype(float)
im.shape = 256, 256

imshow(im, cmap=cm.gray)
axis('off')
show()
| {
"content_hash": "e64e4ede89c9108d3d13bb4980beedb4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 57,
"avg_line_length": 25.076923076923077,
"alnum_prop": 0.7300613496932515,
"repo_name": "lthurlow/Network-Grapher",
"id": "6fd45cf675ec5e7eb46bef463fdda6f3173ee63b",
"size": "349",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/mri_demo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6550"
}
],
"symlink_target": ""
} |
from rdr_service.config import GENOME_TYPE_ARRAY, GENOME_TYPE_WGS
from rdr_service.genomic_enums import GenomicJob
from rdr_service.genomic.genomic_job_components import GenomicFileValidator
from tests.helpers.unittest_base import BaseTestCase
class GenomicFileValidatorTest(BaseTestCase):
    def setUp(self):
        super(GenomicFileValidatorTest, self).setUp()

    def test_set_genome_type_filename(self):
        """Genome type is derived from the filename, but only for metrics ingestion."""
        gen_name = 'RDR_AoU_GEN_TestData.csv'
        seq_name = 'RDR_AoU_SEQ_TestData.csv'

        # AW1 manifest jobs never set a genome type.
        validator = GenomicFileValidator(
            filename=seq_name,
            job_id=GenomicJob.AW1_MANIFEST
        )
        self.assertIsNone(validator.genome_type)

        # "GEN" filenames resolve to the array genome type.
        validator = GenomicFileValidator(
            filename=gen_name,
            job_id=GenomicJob.METRICS_INGESTION
        )
        validator.set_genome_type()
        self.assertIsNotNone(validator.genome_type)
        self.assertEqual(validator.genome_type, GENOME_TYPE_ARRAY)

        # "SEQ" filenames resolve to the WGS genome type.
        validator = GenomicFileValidator(
            filename=seq_name,
            job_id=GenomicJob.METRICS_INGESTION
        )
        validator.set_genome_type()
        self.assertIsNotNone(validator.genome_type)
        self.assertEqual(validator.genome_type, GENOME_TYPE_WGS)

    def test_set_gc_site_id_filename(self):
        """GC site id comes from the filename prefix, except for AW4 jobs."""
        gc_file_name = 'RDR_AoU_GEN_PKG-1908-218051.csv'
        site_prefix = gc_file_name.split('/')[-1].split("_")[0].lower()

        validator = GenomicFileValidator(
            filename=gc_file_name,
            job_id=GenomicJob.AW1_MANIFEST
        )
        validator.set_gc_site_id(site_prefix)
        self.assertIsNotNone(validator.gc_site_id)
        self.assertIn(validator.gc_site_id, validator.VALID_GENOME_CENTERS)
        self.assertEqual(validator.gc_site_id, site_prefix)

        # The AW4 array workflow always resolves to the Broad DRC site id.
        validator = GenomicFileValidator(
            filename=gc_file_name,
            job_id=GenomicJob.AW4_ARRAY_WORKFLOW
        )
        validator.set_gc_site_id(site_prefix)
        self.assertIsNotNone(validator.gc_site_id)
        self.assertEqual(validator.gc_site_id, 'drc_broad')
| {
"content_hash": "06e208ecf7a3ff0cc665762084a76202",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 89,
"avg_line_length": 33.44117647058823,
"alnum_prop": 0.6706244503078276,
"repo_name": "all-of-us/raw-data-repository",
"id": "9f3043569a14a619fcdd0e9cc681687eada9f2da",
"size": "2274",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "tests/genomics_tests/test_genomic_file_validator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
# Default configuration values for thr.
# NOTE(review): unit/semantics notes below are inferred from the constant
# names only; confirm against the code that consumes them.
DEFAULT_REDIS_HOST = "127.0.0.1"
DEFAULT_REDIS_PORT = 6379
# Redis list used as the default work queue.
DEFAULT_REDIS_QUEUE = "thr:queue:default"
DEFAULT_TIMEOUT = 300
DEFAULT_BLOCKED_QUEUE_MAX_SIZE = 20
DEFAULT_HTTP_PORT = 8082
DEFAULT_MAXIMUM_LIFETIME = 300
# Presumably milliseconds, per the _MS suffix.
DEFAULT_MAXIMUM_LOCAL_QUEUE_LIFETIME_MS = 1000
# Timeout presumably passed to the Redis BRPOP blocking-pop command.
BRPOP_TIMEOUT = 5
REDIS_POOL_CLIENT_TIMEOUT = 60
| {
"content_hash": "b91ed0eaa4f65d950fda61a700a847b8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 31.1,
"alnum_prop": 0.7652733118971061,
"repo_name": "thefab/thr",
"id": "710292d38c7431e216eb9712855e0a23faa1f301",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thr/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "815"
},
{
"name": "Python",
"bytes": "106271"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
} |
u"""F5 Networks® LBaaSv2 plugin_rpc client for tempest tests."""
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from f5lbaasdriver.v2.bigip import constants_v2 as f5_const
from neutron import context
import oslo_messaging as messaging
from tempest import config
# Tempest configuration object; supplies f5_lbaasv2_driver.transport_url below.
CONF = config.CONF
class F5PluginRPCClient(object):
    """Thin wrapper around an oslo.messaging RPC client for the F5 plugin."""

    def __init__(self):
        """Build the messaging transport, target and client for the topic."""
        self.environment_prefix = 'Project'
        self.topic = '{0}_{1}'.format(f5_const.TOPIC_PROCESS_ON_HOST_V2,
                                      self.environment_prefix)
        # Transport defaults must be set before the transport is created.
        messaging.set_transport_defaults('neutron')
        self.transport = messaging.get_transport(
            CONF,
            url=CONF.f5_lbaasv2_driver.transport_url)
        self.target = messaging.Target(topic=self.topic)
        self.client = messaging.RPCClient(self.transport, self.target)
        self.context = context.get_admin_context().to_dict()

    def get_client(self):
        """Return the RPC client bound to the plugin_rpc.py API topic."""
        return self.client

    def get_context(self):
        """Return an admin context dict usable for RPC calls."""
        return self.context
| {
"content_hash": "b341f7855fae0f10330af30525aeaf2a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 76,
"avg_line_length": 35.87755102040816,
"alnum_prop": 0.6831626848691695,
"repo_name": "mattgreene/f5-openstack-lbaasv2-driver",
"id": "b4acb187f11179d0168c09e03d61791aa2bbb916",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5lbaasdriver/test/tempest/services/clients/plugin_rpc_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "15367"
},
{
"name": "Python",
"bytes": "401228"
},
{
"name": "Shell",
"bytes": "20496"
}
],
"symlink_target": ""
} |
import os
from .utils import do, trace, data_from_mime
from .version import meta, tags_to_versions
# Command used to list all Mercurial-tracked files in the working directory.
FILES_COMMAND = 'hg locate -I .'
def _hg_tagdist_normalize_tagcommit(root, tag, dist, node):
    """Normalize a (tag, distance, node) triple into version metadata.

    When the distance is 1 and the only file changed by ``node`` is
    ``.hgtags``, the current commit *is* the tagging commit, so the plain
    tagged version is reported without distance/node information.
    """
    # A trailing '+' on the node id marks a dirty working directory.
    dirty = node.endswith('+')
    node = node.strip('+')
    # Files touched by commit `node` (output of `hg status` for that change).
    st = do('hg st --no-status --change %s' % str(node), root)
    trace('normalize', locals())
    if int(dist) == 1 and st == '.hgtags' and not dirty:
        return meta(tag)
    else:
        return meta(tag, distance=dist, node=node, dirty=dirty)
def parse(root):
    """Derive version metadata for the Mercurial working copy at *root*."""
    identity = do('hg id -i -t', root).split()
    node = identity.pop(0)
    tags = tags_to_versions(identity)
    # filter tip in degraded mode on old setuptools
    tags = [tag for tag in tags if tag != 'tip']
    dirty = node.endswith('+')

    if tags:
        return meta(tags[0], dirty=dirty)

    if node.strip('+') == '0' * 12:
        trace('initial node', root)
        return meta('0.0', dirty=dirty)

    # the newline is needed for merge state, see issue 72
    cmd = 'hg parents --template "{latesttag} {latesttagdistance}\n"'
    out = do(cmd, root)
    try:
        # in merge state we assume parent 1 is fine
        tag, dist = out.splitlines()[0].split()
        if tag == 'null':
            tag = '0.0'
            dist = int(dist) + 1
        return _hg_tagdist_normalize_tagcommit(root, tag, dist, node)
    except ValueError:
        pass  # unpacking failed, old hg
def archival_to_version(data):
    """Map the key/value pairs of a ``.hg_archival.txt`` file to version meta."""
    trace('data', data)
    if 'tag' in data:
        return meta(data['tag'])
    if 'latesttag' in data:
        return meta(data['latesttag'],
                    distance=data['latesttagdistance'],
                    node=data['node'][:12])
    return meta('0.0', node=data.get('node', '')[:12])
def parse_archival(root):
    """Derive version metadata from an unpacked ``hg archive`` tree."""
    archival_path = os.path.join(root, '.hg_archival.txt')
    return archival_to_version(data_from_mime(archival_path))
| {
"content_hash": "dfa09ed21ac77893fa21027e4e936226",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 69,
"avg_line_length": 29.96825396825397,
"alnum_prop": 0.5810381355932204,
"repo_name": "esben/setuptools_scm",
"id": "e7a77e1c923af7a0705a1db2aab626e6cc35a218",
"size": "1888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setuptools_scm/hg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29521"
}
],
"symlink_target": ""
} |
"""
Netmiko SCP operations.
Supports file get and file put operations.
SCP requires a separate SSH connection for a control channel.
Currently only supports Cisco IOS and Cisco ASA.
"""
from __future__ import print_function
from __future__ import unicode_literals
import re
import os
import hashlib
import time
import io
import scp
class SCPConn(object):
    """
    Establish a secure copy channel to the remote network device.
    Must close the SCP connection to get the file to write to the remote filesystem
    """
    def __init__(self, ssh_conn):
        self.ssh_ctl_chan = ssh_conn
        self.establish_scp_conn()

    def establish_scp_conn(self):
        """Open a second SSH session and attach an SCP client to its transport."""
        connect_kwargs = self.ssh_ctl_chan._connect_params_dict()
        self.scp_conn = self.ssh_ctl_chan._build_ssh_client()
        self.scp_conn.connect(**connect_kwargs)
        self.scp_client = scp.SCPClient(self.scp_conn.get_transport())

    def scp_transfer_file(self, source_file, dest_file):
        """Put file using SCP (for backwards compatibility)."""
        self.scp_client.put(source_file, dest_file)

    def scp_get_file(self, source_file, dest_file):
        """Copy source_file from the remote device to the local dest_file."""
        self.scp_client.get(source_file, dest_file)

    def scp_put_file(self, source_file, dest_file):
        """Copy the local source_file to dest_file on the remote device."""
        self.scp_client.put(source_file, dest_file)

    def close(self):
        """Tear down the dedicated SCP SSH session."""
        self.scp_conn.close()
class FileTransfer(object):
    """Manage an SCP file transfer and its associated SSH control channel.

    Supports 'put' (local -> remote) and 'get' (remote -> local) directions
    and verifies transfers via MD5.
    """
    def __init__(self, ssh_conn, source_file, dest_file, file_system=None, direction='put'):
        """
        Args:
            ssh_conn: Established Netmiko SSH connection (control channel).
            source_file: Local path ('put') or remote path ('get') of the file.
            dest_file: Remote path ('put') or local path ('get') to write to.
            file_system: Remote file system (e.g. 'flash:'); autodetected from
                the device when not supplied.
            direction: Either 'put' or 'get'.

        Raises:
            ValueError: If direction is neither 'put' nor 'get'.
        """
        self.ssh_ctl_chan = ssh_conn
        self.source_file = source_file
        self.dest_file = dest_file
        self.direction = direction

        if not file_system:
            self.file_system = self.ssh_ctl_chan._autodetect_fs()
        else:
            self.file_system = file_system

        if direction == 'put':
            self.source_md5 = self.file_md5(source_file)
            self.file_size = os.stat(source_file).st_size
        elif direction == 'get':
            self.source_md5 = self.remote_md5(remote_file=source_file)
            self.file_size = self.remote_file_size(remote_file=source_file)
        else:
            raise ValueError("Invalid direction specified")

    def __enter__(self):
        """Context manager setup: open the SCP connection."""
        self.establish_scp_conn()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Context manager cleanup: close the SCP connection.

        Returns None (falsy) so any in-flight exception propagates with its
        original traceback.  The previous ``raise exc_type(exc_value)``
        discarded the traceback and failed outright for exception types whose
        constructor does not accept a single argument.
        """
        self.close_scp_chan()

    def establish_scp_conn(self):
        """Establish SCP connection."""
        self.scp_conn = SCPConn(self.ssh_ctl_chan)

    def close_scp_chan(self):
        """Close the SCP connection to the remote network device."""
        self.scp_conn.close()
        self.scp_conn = None

    def remote_space_available(self, search_pattern=r"bytes total \((.*) bytes free\)"):
        """Return space available (bytes) on the remote device file system."""
        remote_cmd = "dir {0}".format(self.file_system)
        remote_output = self.ssh_ctl_chan.send_command_expect(remote_cmd)
        match = re.search(search_pattern, remote_output)
        return int(match.group(1))

    def local_space_available(self):
        """Return space available (bytes) on the local filesystem."""
        destination_stats = os.statvfs(".")
        return destination_stats.f_bsize * destination_stats.f_bavail

    def verify_space_available(self, search_pattern=r"bytes total \((.*) bytes free\)"):
        """Verify sufficient space is available on destination file system (return boolean)."""
        if self.direction == 'put':
            space_avail = self.remote_space_available(search_pattern=search_pattern)
        elif self.direction == 'get':
            space_avail = self.local_space_available()
        if space_avail > self.file_size:
            return True
        return False

    def check_file_exists(self, remote_cmd=""):
        """Check if the dest_file already exists on the file system (return boolean)."""
        if self.direction == 'put':
            if not remote_cmd:
                remote_cmd = "dir {0}/{1}".format(self.file_system, self.dest_file)
            remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
            search_string = r"Directory of .*{0}".format(self.dest_file)
            if 'Error opening' in remote_out:
                return False
            elif re.search(search_string, remote_out):
                return True
            else:
                raise ValueError("Unexpected output from check_file_exists")
        elif self.direction == 'get':
            return os.path.exists(self.dest_file)

    def remote_file_size(self, remote_cmd="", remote_file=None):
        """Get the file size (bytes) of the remote file.

        Raises:
            IOError: If the file cannot be found on the remote system.
        """
        if remote_file is None:
            remote_file = self.dest_file
        if not remote_cmd:
            remote_cmd = "dir {0}/{1}".format(self.file_system, remote_file)
        remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
        # Strip out "Directory of flash:/filename" line
        remote_out = re.split(r"Directory of .*", remote_out)
        remote_out = "".join(remote_out)
        if 'Error opening' in remote_out:
            raise IOError("Unable to find file on remote system")
        # Match line containing file name
        escape_file_name = re.escape(remote_file)
        pattern = r".*({0}).*".format(escape_file_name)
        match = re.search(pattern, remote_out)
        if not match:
            # BUG FIX: previously fell through to an UnboundLocalError when
            # the file line was absent; fail with a clear message instead.
            raise IOError("Unable to find file on remote system")
        # Format will be 26  -rw-   6738  Jul 30 2016 19:49:50 -07:00  filename
        line = match.group(0)
        file_size = line.split()[2]
        return int(file_size)

    def file_md5(self, file_name):
        """Compute MD5 hash of a local file."""
        with open(file_name, "rb") as f:
            file_contents = f.read()
            file_hash = hashlib.md5(file_contents).hexdigest()
        return file_hash

    @staticmethod
    def process_md5(md5_output, pattern=r"= (.*)"):
        """
        Process the string to retrieve the MD5 hash

        Output from Cisco IOS (ASA is similar)
        .MD5 of flash:file_name Done!
        verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2

        Raises:
            ValueError: If the output does not contain an MD5 hash.
        """
        match = re.search(pattern, md5_output)
        if match:
            return match.group(1)
        else:
            raise ValueError("Invalid output from MD5 command: {0}".format(md5_output))

    def compare_md5(self, base_cmd='verify /md5'):
        """Compare md5 of file on network device to md5 of local file"""
        if self.direction == 'put':
            remote_md5 = self.remote_md5(base_cmd=base_cmd)
            return self.source_md5 == remote_md5
        elif self.direction == 'get':
            local_md5 = self.file_md5(self.dest_file)
            return self.source_md5 == local_md5

    def remote_md5(self, base_cmd='verify /md5', remote_file=None):
        """
        Calculate remote MD5 and return the checksum.

        This command can be CPU intensive on the remote device.
        """
        if remote_file is None:
            remote_file = self.dest_file
        remote_md5_cmd = "{0} {1}{2}".format(base_cmd, self.file_system, remote_file)
        dest_md5 = self.ssh_ctl_chan.send_command_expect(remote_md5_cmd, delay_factor=3.0)
        dest_md5 = self.process_md5(dest_md5)
        return dest_md5

    def transfer_file(self):
        """SCP transfer file in the direction configured at construction."""
        if self.direction == 'put':
            self.put_file()
        elif self.direction == 'get':
            self.get_file()

    def get_file(self):
        """SCP copy the file from the remote device to local system."""
        self.scp_conn.scp_get_file(self.source_file, self.dest_file)
        self.scp_conn.close()

    def put_file(self):
        """SCP copy the file from the local system to the remote device."""
        destination = "{0}{1}".format(self.file_system, self.dest_file)
        if ':' not in destination:
            raise ValueError("Invalid destination file system specified")
        self.scp_conn.scp_transfer_file(self.source_file, destination)
        # Must close the SCP connection to get the file written (flush)
        self.scp_conn.close()

    def verify_file(self):
        """Verify the file has been transferred correctly (MD5 comparison)."""
        return self.compare_md5()

    def enable_scp(self, cmd=None):
        """
        Enable SCP on remote device.

        Defaults to Cisco IOS command
        """
        if cmd is None:
            cmd = ['ip scp server enable']
        elif not hasattr(cmd, '__iter__'):
            cmd = [cmd]
        self.ssh_ctl_chan.send_config_set(cmd)

    def disable_scp(self, cmd=None):
        """
        Disable SCP on remote device.

        Defaults to Cisco IOS command
        """
        if cmd is None:
            cmd = ['no ip scp server enable']
        elif not hasattr(cmd, '__iter__'):
            cmd = [cmd]
        self.ssh_ctl_chan.send_config_set(cmd)
class InLineTransfer(FileTransfer):
    """Use TCL on Cisco IOS to directly transfer a file over the CLI channel."""
    def __init__(self, ssh_conn, source_file=None, dest_file=None, file_system=None,
                 direction='put', source_config=None):
        """
        Exactly one of source_file (a local path) or source_config (the file
        contents as a string) must be given; only direction='put' is supported.

        Raises:
            ValueError: If both source_file and source_config are given, or if
                direction is not 'put'.
        """
        if source_file and source_config:
            msg = "Invalid call to InLineTransfer both source_file and source_config specified."
            raise ValueError(msg)
        if direction != 'put':
            raise ValueError("Only put operation supported by InLineTransfer.")

        self.ssh_ctl_chan = ssh_conn
        if source_file:
            self.source_file = source_file
            self.source_config = None
            self.source_md5 = self.file_md5(source_file)
            self.file_size = os.stat(source_file).st_size
        elif source_config:
            self.source_file = None
            self.source_config = source_config
            self.source_md5 = self.config_md5(source_config)
            self.file_size = len(source_config.encode('UTF-8'))
        self.dest_file = dest_file
        self.direction = direction

        if not file_system:
            self.file_system = self.ssh_ctl_chan._autodetect_fs()
        else:
            self.file_system = file_system

    @staticmethod
    def _read_file(file_name):
        """Read a local text file as UTF-8."""
        with io.open(file_name, "rt", encoding='utf-8') as f:
            return f.read()

    @staticmethod
    def _tcl_newline_rationalize(tcl_string):
        """
        When using put inside a TCL {} section the newline is considered a new TCL
        statement and causes a missing curly-brace message. Convert "\n" to "\r". TCL
        will convert the "\r" to a "\n" i.e. you will see a "\n" inside the file on the
        Cisco IOS device.

        Raises:
            ValueError: If the string contains a curly brace (unsupported).
        """
        NEWLINE = r"\n"
        CARRIAGE_RETURN = r"\r"
        tmp_string = re.sub(NEWLINE, CARRIAGE_RETURN, tcl_string)
        if re.search(r"[{}]", tmp_string):
            msg = "Curly brace detected in string; TCL requires this be escaped."
            raise ValueError(msg)
        return tmp_string

    def __enter__(self):
        """Context manager setup: drop into tclsh mode on the device."""
        self._enter_tcl_mode()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Context manager cleanup: leave tclsh mode.

        Returns None (falsy) so any in-flight exception propagates with its
        original traceback, instead of the old ``raise exc_type(exc_value)``
        which discarded the traceback.
        """
        _ = self._exit_tcl_mode()  # noqa

    def _enter_tcl_mode(self):
        """Enter tclsh mode; raise if the device rejects the command."""
        TCL_ENTER = 'tclsh'
        cmd_failed = ['Translating "tclsh"', '% Unknown command', '% Bad IP address']
        output = self.ssh_ctl_chan.send_command(TCL_ENTER, expect_string=r"\(tcl\)#",
                                                strip_prompt=False, strip_command=False)
        for pattern in cmd_failed:
            if pattern in output:
                raise ValueError("Failed to enter tclsh mode on router: {}".format(output))
        return output

    def _exit_tcl_mode(self):
        """Leave tclsh mode if the prompt indicates we are still in it."""
        TCL_EXIT = 'tclquit'
        self.ssh_ctl_chan.write_channel("\r")
        time.sleep(1)
        output = self.ssh_ctl_chan.read_channel()
        if '(tcl)' in output:
            self.ssh_ctl_chan.write_channel(TCL_EXIT + "\r")
            time.sleep(1)
            output += self.ssh_ctl_chan.read_channel()
        return output

    def establish_scp_conn(self):
        # Not applicable: transfer happens over the existing CLI channel.
        raise NotImplementedError

    def close_scp_chan(self):
        # Not applicable: no separate SCP channel is opened.
        raise NotImplementedError

    def local_space_available(self):
        raise NotImplementedError

    def file_md5(self, file_name):
        """Compute MD5 hash of a local file (with trailing newline appended)."""
        file_contents = self._read_file(file_name)
        file_contents = file_contents + '\n'  # Cisco IOS automatically adds this
        file_contents = file_contents.encode('UTF-8')
        return hashlib.md5(file_contents).hexdigest()

    def config_md5(self, source_config):
        """Compute MD5 hash of a config string (with trailing newline appended)."""
        file_contents = source_config + '\n'  # Cisco IOS automatically adds this
        file_contents = file_contents.encode('UTF-8')
        return hashlib.md5(file_contents).hexdigest()

    def put_file(self):
        """Write the file onto the device by pasting it into a TCL `puts`."""
        curlybrace = r'{'
        TCL_FILECMD_ENTER = 'puts [open "{}{}" w+] {}'.format(self.file_system,
                                                              self.dest_file, curlybrace)
        TCL_FILECMD_EXIT = '}'

        if self.source_file:
            file_contents = self._read_file(self.source_file)
        elif self.source_config:
            file_contents = self.source_config
        file_contents = self._tcl_newline_rationalize(file_contents)

        # Try to remove any existing data
        self.ssh_ctl_chan.clear_buffer()

        self.ssh_ctl_chan.write_channel(TCL_FILECMD_ENTER)
        time.sleep(.25)
        self.ssh_ctl_chan.write_channel(file_contents)
        self.ssh_ctl_chan.write_channel(TCL_FILECMD_EXIT + "\r")

        # This operation can be slow (depends on the size of the file).
        # BUG FIX: the largest-size branch was unreachable because the
        # `>= 2500` test matched first; test the largest threshold first.
        if self.file_size >= 7500:
            max_loops = 3000
            sleep_time = 25
        elif self.file_size >= 2500:
            max_loops = 1500
            sleep_time = 12
        else:
            max_loops = 400
            sleep_time = 4

        # Initial delay
        time.sleep(sleep_time)

        # File paste and TCL_FILECMD_exit should be indicated by "router(tcl)#"
        output = self.ssh_ctl_chan._read_channel_expect(pattern=r"\(tcl\)", max_loops=max_loops)

        # The file doesn't write until tclquit
        TCL_EXIT = 'tclquit'
        self.ssh_ctl_chan.write_channel(TCL_EXIT + "\r")

        time.sleep(1)
        # Read all data remaining from the TCLSH session
        output += self.ssh_ctl_chan._read_channel_expect(max_loops=max_loops)
        return output

    def get_file(self):
        raise NotImplementedError

    def enable_scp(self, cmd=None):
        # SCP is not used by the inline (TCL) transfer mechanism.
        raise NotImplementedError

    def disable_scp(self, cmd=None):
        # SCP is not used by the inline (TCL) transfer mechanism.
        raise NotImplementedError
| {
"content_hash": "41563f89016839a332b6db09bcac2c64",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 96,
"avg_line_length": 36.896805896805894,
"alnum_prop": 0.595658253978824,
"repo_name": "michaelrosejr/pyaos6",
"id": "6a2649812d8eabd037b62b552a871726a51f8059",
"size": "15017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netmiko/scp_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186245"
}
],
"symlink_target": ""
} |
"""Implements functions for testing for Commonly Targeted Genes (CTGs)."""
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
from typing import Tuple, Optional, Iterable, Callable, Pattern
import itertools
import logging
import operator
import re
import pathlib2 as pathlib
import numpy as np
import pandas as pd
import pyfaidx
import pysam
from intervaltree import IntervalTree
from scipy.stats import poisson
from imfusion.build import Reference
from imfusion.model import Insertion
from imfusion.util.genomic import GenomicIntervalTree
from imfusion.util.tabix import GtfIterator
def test_ctgs(
        insertions,  # type: List[Insertion]
        reference,  # type: Reference
        gene_ids=None,  # type: Set[str]
        chromosomes=None,  # type: Set[str]
        pattern=None,  # type: str
        per_sample=True,  # type: bool
        window=None  # type: Tuple[int, int]
):
    """Identifies genes that are significantly enriched for insertions (CTGs).

    This function takes a DataFrame of insertions, coming from multiple
    samples, and identifies if any genes are more frequently affected by an
    insertion than would be expected by chance. These genes are called
    Commonly Targeted Genes (CTGs). CTGs are selected by comparing the number
    of insertions within the gene to the number of insertions that would be
    expected from the background insertion rate, which is modeled using a
    Poisson distribution.

    Parameters
    ----------
    insertions : List[Insertion]
        Insertions to test.
    reference : Reference
        Reference index used by the aligner to identify insertions.
    gene_ids : Set[str]
        Gene ids to test (defaults to all genes with an insertion).
    chromosomes : Set[str]
        Chromosomes to include, defaults to all chromosomes shared
        between the reference sequence and the reference gtf.
    pattern : str
        Specificity pattern of the used transposon.
    per_sample : bool
        Whether to perform the per sample test (recommended), which
        effectively collapses insertions per sample/gene combination.
        This avoids issues in which insertions that are detected
        multiple times or that may have hopped inside the gene locus
        are counted multiple times.
    window : Tuple[int, int]
        Window to include around gene (in bp). Specified as (upstream_dist,
        downstream_dist). For example: (2000, 2000) specifies a 2KB
        window around each gene.

    Returns
    -------
    pandas.DataFrame
        Results of CTG test for tested genes. Contains columns p_value,
        q_value (bonferroni-corrected p-value), gene_name and n_samples.

    Raises
    ------
    ValueError
        If no chromosomes are shared between the reference sequence and
        the reference gtf, or if an empty chromosome list is given.
    """

    # Default to shared chromosome sequences (typically drops some
    # of the more esoteric extra scaffold/patch sequences).
    if chromosomes is None:
        reference_seq = pyfaidx.Fasta(str(reference.fasta_path))
        reference_gtf = GtfIterator(reference.indexed_gtf_path)

        chromosomes = list(
            set(reference_seq.keys()) & set(reference_gtf.contigs))

        if len(chromosomes) == 0:
            # BUG FIX: this ValueError was previously constructed but never
            # raised, silently continuing with an empty chromosome list.
            raise ValueError('No chromosomes are shared between the reference '
                             'sequence and reference gtf files')

    if len(chromosomes) == 0:
        raise ValueError('At least one chromosome must be given')

    # Determine gene windows using GTF.
    logging.info('Generating gene windows')
    gene_windows = _build_gene_windows(
        reference.indexed_gtf_path, window=window, chromosomes=chromosomes)

    # Subset insertions to gene intervals.
    insertions = _subset_to_windows(insertions, gene_windows)

    if gene_ids is None:
        gene_ids = set(ins.metadata['gene_id'] for ins in insertions)

    # Collapse insertions per gene/sample (recommended).
    # Corrects for hopping/multiple detection issues.
    if per_sample:
        logging.info('Collapsing insertions')
        insertions = list(_collapse_per_sample(insertions))

    # Calculate total number of pattern occurrences within intervals.
    logging.info('Counting pattern occurrences')
    reference_seq = pyfaidx.Fasta(str(reference.fasta_path))

    total = count_total(
        reference_seq, pattern=pattern, intervals=gene_windows.values())

    # Calculate p-values for each gene.
    logging.info('Calculating significance for genes')
    insertion_trees = GenomicIntervalTree.from_objects_position(
        insertions, chrom_attr='seqname')

    p_values = {
        gene_id: test_region(
            insertions=insertions,
            reference_seq=reference_seq,
            region=gene_windows[gene_id],
            total=total,
            pattern=pattern,
            filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid],
            insertion_trees=insertion_trees)
        for gene_id in gene_ids
    }

    # Build result frame.
    result = pd.DataFrame.from_records(
        iter(p_values.items()), columns=['gene_id', 'p_value'])

    # Calculate corrected p-value using bonferroni correction.
    # NOTE(review): clip_upper is deprecated in newer pandas (use
    # clip(upper=...)); kept for compatibility with the pinned version.
    result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0)

    # Sort by q-value and p-value.
    result.sort_values(by=['q_value', 'p_value'], inplace=True)

    if len(insertions) > 0:
        # Annotate with gene_name if possible.
        if 'gene_name' in insertions[0].metadata:
            name_map = {
                ins.metadata['gene_id']: ins.metadata['gene_name']
                for ins in insertions
            }
            result.insert(1, 'gene_name', result['gene_id'].map(name_map))
        else:
            result['gene_name'] = np.nan

        # Annotate with frequency.
        frequency = (Insertion.to_frame(insertions)
                     .groupby('gene_id')['sample'].nunique()
                     .reset_index(name='n_samples'))
        result = pd.merge(result, frequency, on='gene_id', how='left')
    else:
        result['gene_name'] = np.nan
        result['n_samples'] = np.nan

    return result
def _build_gene_windows(
        gtf_path,  # type: pathlib.Path
        window=None,  # type: Optional[Tuple[int, int]]
        chromosomes=None  # type: Set[str]
):
    """Builds a mapping of gene_id to (chrom, start, end) windows from a GTF.

    Gene records are fetched per chromosome; when *chromosomes* is None,
    every contig known to the GTF index is used. Each gene interval is
    expanded by *window* via ``_apply_gene_window``.
    """
    iterator = GtfIterator(gtf_path)

    # Default to all contigs present in the GTF index.
    selected = set(iterator.contigs) if chromosomes is None else chromosomes

    windows = {}
    for chrom in selected:
        for record in iterator.fetch_genes(reference=chrom):
            windows[record['gene_id']] = _apply_gene_window(record, window)
    return windows
def _apply_gene_window(
gene, # type: pysam.ctabixproxies.GTFProxy
window=None # type: Tuple[int, int]
): # type: (...) -> Tuple[str, int, int]
if window is None:
return gene.contig, gene.start, gene.end,
else:
upstream_offset, downstream_offset = window
if gene.strand == '-':
start = gene.start - downstream_offset
end = gene.end + upstream_offset
elif gene.strand == '+':
start = gene.start - upstream_offset
end = gene.end + downstream_offset
else:
raise ValueError('Unknown value for strand')
return gene.contig, start, end
def _subset_to_windows(
        insertions,  # type: List[Insertion]
        gene_windows  # type: Dict[str, Tuple[str, int, int]]
):  # type: (...) -> List[Insertion]
    """Subsets insertions for given gene windows."""

    # Build one interval tree per chromosome from the window coordinates.
    trees = {}
    for chrom, group in itertools.groupby(
            sorted(gene_windows.values()), operator.itemgetter(0)):
        trees[chrom] = IntervalTree.from_tuples(win[1:] for win in group)

    def _overlaps(ins):
        # Insertions on chromosomes without any window never match.
        tree = trees.get(ins.seqname)
        return tree is not None and tree.overlaps(ins.position)

    # Keep insertions that both belong to a windowed gene and fall
    # inside one of the window intervals.
    return [ins for ins in insertions
            if ins.metadata['gene_id'] in gene_windows and _overlaps(ins)]
def _collapse_per_sample(insertions):
# Type: (List[Insertion]) -> Generator
def _keyfunc(insertion):
return (insertion.metadata['sample'],
str(insertion.metadata['gene_id']))
grouped = itertools.groupby(sorted(insertions, key=_keyfunc), key=_keyfunc)
for _, grp in grouped:
grp = list(grp)
if len(grp) > 1:
mean_pos = int(np.mean([ins.position for ins in grp]))
yield grp[0]._replace(position=mean_pos)
else:
yield grp[0]
def test_region(
    insertions,  # type: List[Insertion]
    reference_seq,  # type: pyfaidx.Fasta
    region,  # type: Tuple[str, int, int]
    pattern=None,  # type: Optional[str]
    intervals=None,  # type: Optional[Iterable[Tuple[str, int, int]]]
    total=None,  # type: Optional[int]
    filters=None,  # type: Optional[List[Callable]]
    insertion_trees=None  # type: GenomicIntervalTree
):  # type: (...) -> float
    """Tests a given genomic region for enrichment in insertions.

    The observed number of insertions in *region* is compared against a
    Poisson model whose expectation is the overall insertion count scaled
    by the region's share of total pattern occurrences.

    Parameters
    ----------
    insertions : List[Insertion]
        All insertions considered in the analysis.
    reference_seq : pyfaidx.Fasta
        Reference genome used for counting pattern occurrences.
    region : Tuple[str, int, int]
        Genomic region (chromosome, start, end) to test.
    pattern : str
        Nucleotide pattern to count; if None, sequence length is counted.
    intervals : Iterable[Tuple[str, int, int]]
        Intervals used to compute *total* when it is not supplied.
    total : int
        Pre-computed total pattern count (pass this to avoid re-counting
        the reference on every call).
    filters : List[Callable]
        Optional predicates further restricting the region's insertions.
    insertion_trees : GenomicIntervalTree
        Pre-built insertion lookup tree (built here when omitted).

    Returns
    -------
    float
        Poisson p-value for observing at least this many insertions.
    """

    if total is None:
        total = count_total(
            reference_seq, pattern=pattern, intervals=intervals)

    # Count pattern in region.
    region_count = count_region(reference_seq, region=region, pattern=pattern)

    # Sub-select insertions for region.
    if insertion_trees is None:
        insertion_trees = GenomicIntervalTree.from_objects_position(
            insertions, chrom_attr='seqname')

    # Tree intervals store the insertion object in slot 2.
    region_ins = set(interval[2]
                     for interval in insertion_trees.search(*region))

    # Apply additional filter functions to insertions if given
    # (such as filtering on gene name/id for example).
    if filters is not None:
        for filter_func in filters:
            region_ins = set(ins for ins in region_ins if filter_func(ins))

    # Calculate p-value.
    x = len(list(region_ins))
    mu = len(insertions) * (region_count / total)

    # Note here we use loc=1, because we are interested in
    # calculating P(X >= x), not P(X > x) (the default
    # survival function).
    p_val = poisson.sf(x, mu=mu, loc=1)  # type: float

    return p_val
def count_region(
        reference_seq,  # type: pyfaidx.Fasta
        region,  # type: Tuple[str, int, int]
        pattern=None  # type: Optional[str]
):  # type: (...) -> int
    """Counts occurrences of pattern within given genomic region.

    Parameters
    ----------
    reference_seq : pyfaidx.Fasta
        Reference to count occurrences for.
    region : Tuple[str, int, int]
        Genomic region (chromosome, start, end) to search in.
    pattern : str
        Nucleotide sequence to count occurrences for. When None, the
        length of the region is counted instead.

    Returns
    -------
    int
        Number of occurrences of the pattern within the given region.
    """
    chrom, start, end = region
    subsequence = reference_seq[chrom][int(start):int(end)]
    return _count_sequence(subsequence, regex=_build_regex(pattern))
def _build_regex(pattern):
# type: (str) -> Pattern[str]
if pattern is not None:
return re.compile(pattern + '|' + pattern[::-1])
return None
def _count_sequence(sequence, regex=None):
# type: (pyfaidx.Sequence, Pattern[str]) -> int
"""Counts occurrences of pattern in sequence.
Parameters
----------
sequence : pyfaidx.Sequence
Sequence to search.
regex : Pattern[str]
Pattern to count.
Returns
-------
int: Number of occurrences of pattern.
"""
if regex is None:
count = len(sequence)
else:
count = sum((1 for _ in regex.finditer(str(sequence))))
return count
def count_total(
        reference_seq,  # type: pyfaidx.Sequence
        pattern=None,  # type: str
        intervals=None  # type: Iterable[Tuple[str, int, int]]
):  # type: (...) -> int
    """Counts total occurrences of pattern in reference.

    Parameters
    ----------
    reference_seq : pyfaidx.Fasta
        Reference to count occurrences for.
    pattern : str
        Nucleotide sequence to search for. If None, the length of the
        sequences is counted instead of pattern occurrences.
    intervals : List[tuple(str, int, int)]
        Genomic intervals restricting the search. Overlapping intervals
        are merged first so shared bases are not double-counted. If
        None, the entire reference is used.

    Returns
    -------
    int
        Number of occurrences of the pattern within the given reference,
        or within the given intervals (if applicable).
    """
    regex = _build_regex(pattern)

    if intervals is None:
        # No restriction: count across every sequence in the reference.
        return sum(
            _count_sequence(reference_seq[name], regex=regex)
            for name in reference_seq.keys())  # yapf: disable

    # Merge overlapping intervals, then count within each merged region.
    per_interval = (
        _count_sequence(reference_seq[chrom][start:end], regex=regex)
        for chrom, start, end in merge_genomic_intervals(intervals))
    return sum(per_interval)
def merge_genomic_intervals(intervals):
    # type: (Iterable[Tuple[str, int, int]]) -> Iterable[Tuple[str, int, int]]
    """Merges overlapping genomic intervals.

    Intervals are grouped per chromosome and merged within each group
    using ``merge_intervals``.

    Parameters
    ----------
    intervals : List[tuple(str, int, int)]
        List of intervals to merge, specified as tuples of
        (chromosome, start_position, end_position) values.

    Yields:
        tuple(str, int, int)
            Next merged interval.
    """
    # Sorting orders by chromosome first, then by position, so each
    # chromosome group is already position-sorted.
    by_chrom = itertools.groupby(sorted(intervals), operator.itemgetter(0))
    for chrom, members in by_chrom:
        positions = [member[1:] for member in members]
        for low, high in merge_intervals(positions, is_sorted=True):
            yield chrom, low, high
def merge_intervals(
        intervals,  # type: Iterable[Tuple[int, int]]
        is_sorted=False  # type: Optional[bool]
):  # type: (...) -> Iterable[Tuple[int, int]]
    """Merges overlapping intervals.

    Parameters
    ----------
    intervals : List[tuple(int, int)]
        List of intervals to merge, specified as (start, end) tuples.
    is_sorted : bool
        Set to True when the intervals are already sorted by start
        position, to avoid re-sorting.

    Yields:
        tuple(int, int)
            Next merged interval.
    """
    if is_sorted:
        # Ensure intervals is a list.
        pending = list(intervals)
    else:
        pending = sorted(intervals, key=operator.itemgetter(0))

    if not pending:
        # No intervals to merge.
        return

    # current_low/current_high bound the run of intervals merged so far.
    current_low, current_high = pending[0]
    for start, end in pending[1:]:
        if start <= current_high:
            # Overlap: extend the current run.
            current_high = max(current_high, end)
        else:
            # Gap: emit the finished run and start a new one.
            yield current_low, current_high
            current_low, current_high = start, end

    yield current_low, current_high
| {
"content_hash": "3381d6aa3703366e57810913e22ddc57",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 80,
"avg_line_length": 32.163223140495866,
"alnum_prop": 0.6339692940194,
"repo_name": "jrderuiter/im-fusion",
"id": "b833d19797d1484b5835be575c0ee6bc893cd54f",
"size": "15591",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/imfusion/ctg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "295577"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
} |
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from colors import green
def greet(greetee):
    """Return a green-colored greeting addressed to *greetee*."""
    message = 'Hello, %s!' % greetee
    return green(message)
| {
"content_hash": "59eebb4e97919744fe1414846464948a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 93,
"avg_line_length": 34.22222222222222,
"alnum_prop": 0.6753246753246753,
"repo_name": "Ervii/garage-time",
"id": "00aca5350487f3526e1221e9d128322914267a2f",
"size": "455",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "garage/examples/src/python/example/hello/greet/greet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9347"
},
{
"name": "GAP",
"bytes": "4684"
},
{
"name": "HTML",
"bytes": "64603"
},
{
"name": "Java",
"bytes": "43275"
},
{
"name": "JavaScript",
"bytes": "9523"
},
{
"name": "Protocol Buffer",
"bytes": "4664"
},
{
"name": "Python",
"bytes": "2200035"
},
{
"name": "Scala",
"bytes": "6693"
},
{
"name": "Shell",
"bytes": "29352"
},
{
"name": "Thrift",
"bytes": "1946"
}
],
"symlink_target": ""
} |
import sys
from pathlib import Path
def check_if_available(prg, msg):
    """Verify that the program *prg* exists as a regular file.

    When the file is missing, *msg* is printed to stderr and the
    process terminates with exit status 1.
    """
    if Path(prg).is_file():
        return
    print(msg, file=sys.stderr)
    sys.exit(1)
| {
"content_hash": "08a80c03f60089bee32abe095e153f10",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 50,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6286764705882353,
"repo_name": "jabbalaci/Bash-Utils",
"id": "00a4973502d65e24a2b2193e21fc6079af986df3",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/fs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "67"
},
{
"name": "D",
"bytes": "80"
},
{
"name": "Go",
"bytes": "86"
},
{
"name": "Java",
"bytes": "181"
},
{
"name": "Nim",
"bytes": "197"
},
{
"name": "Perl",
"bytes": "2316"
},
{
"name": "Python",
"bytes": "166076"
},
{
"name": "Shell",
"bytes": "760"
}
],
"symlink_target": ""
} |
import os
import benchexec.tools.template
import benchexec.result as result
from benchexec.tools.template import ToolNotFoundException
class Tool(benchexec.tools.template.BaseTool2):
    """
    This class serves as tool adaptor for Map2Check (https://github.com/hbgit/Map2Check)
    """

    # Files/directories shipped with the v6 release of the tool.
    REQUIRED_PATHS_6 = [
        "__init__.py",
        "map2check.py",
        "map2check-wrapper.sh",
        "modules",
    ]
    # Files/directories shipped with v7.1 and later releases.
    REQUIRED_PATHS_7_1 = ["map2check", "map2check-wrapper.py", "bin", "include", "lib"]

    def executable(self, tool_locator):
        """Locate the wrapper script; which script exists determines the version."""
        try:
            executable = tool_locator.find_executable("map2check-wrapper.sh")
            self._version = 6
        except ToolNotFoundException:
            executable = tool_locator.find_executable("map2check-wrapper.py")
            self._version = 7
        return executable

    def program_files(self, executable):
        """
        Determine the file paths to be adopted
        """
        if self._version == 6:
            paths = self.REQUIRED_PATHS_6
        else:
            # Any version newer than 6 uses the 7.1 layout. Using `else`
            # (instead of `elif self._version > 6`) also guarantees `paths`
            # is always bound.
            paths = self.REQUIRED_PATHS_7_1
        return paths

    def working_directory(self, executable):
        """Run the tool from the directory containing the wrapper script."""
        return os.path.dirname(executable)

    def version(self, executable):
        return self._version_from_tool(executable)

    def name(self):
        return "Map2Check"

    def cmdline(self, executable, options, task, rlimits):
        """Compose the command line; the property-file flag differs per version."""
        assert task.property_file, "property file required"
        if self._version == 6:
            property_flag = "-c"
        elif self._version > 6:
            property_flag = "-p"
        else:
            # BUG FIX: previously this concatenated the int version to a
            # str, raising TypeError instead of the intended AssertionError.
            assert False, "Unexpected version " + str(self._version)
        return (
            [executable]
            + options
            + [property_flag, task.property_file, task.single_input_file]
        )

    def determine_result(self, run):
        """Map the tool's last output line to a benchexec result constant."""
        output = run.output
        if not output:
            return result.RESULT_UNKNOWN
        # Only the final line of output carries the verdict.
        output = output[-1].strip()
        status = result.RESULT_UNKNOWN
        if self._version > 6:
            # v7+ emits plain FALSE_<PROPERTY> markers.
            if output.endswith("TRUE"):
                status = result.RESULT_TRUE_PROP
            elif "FALSE" in output:
                if "FALSE_MEMTRACK" in output:
                    status = result.RESULT_FALSE_MEMTRACK
                elif "FALSE_MEMCLEANUP" in output:
                    status = result.RESULT_FALSE_MEMCLEANUP
                elif "FALSE_DEREF" in output:
                    status = result.RESULT_FALSE_DEREF
                elif "FALSE_FREE" in output:
                    status = result.RESULT_FALSE_FREE
                elif "FALSE_OVERFLOW" in output:
                    status = result.RESULT_FALSE_OVERFLOW
                else:
                    status = result.RESULT_FALSE_REACH
            elif output.endswith("UNKNOWN"):
                status = result.RESULT_UNKNOWN
            elif run.was_timeout:
                status = result.RESULT_TIMEOUT
            else:
                status = "ERROR"
        elif self._version == 6:
            # v6 uses SV-COMP style FALSE(valid-*) markers and has no
            # memcleanup/overflow categories.
            if output.endswith("TRUE"):
                status = result.RESULT_TRUE_PROP
            elif "FALSE" in output:
                if "FALSE(valid-memtrack)" in output:
                    status = result.RESULT_FALSE_MEMTRACK
                elif "FALSE(valid-deref)" in output:
                    status = result.RESULT_FALSE_DEREF
                elif "FALSE(valid-free)" in output:
                    status = result.RESULT_FALSE_FREE
            elif output.endswith("UNKNOWN"):
                status = result.RESULT_UNKNOWN
            elif run.was_timeout:
                status = result.RESULT_TIMEOUT
            else:
                status = "ERROR"
        return status
| {
"content_hash": "411d68707b32151d6c7204af16358ad9",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 88,
"avg_line_length": 33.35344827586207,
"alnum_prop": 0.543292840527268,
"repo_name": "sosy-lab/benchexec",
"id": "f0261c1e222a8af5e75192e1089d6b369d4dfb4c",
"size": "4099",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "benchexec/tools/map2check.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3210"
},
{
"name": "CSS",
"bytes": "609"
},
{
"name": "Dockerfile",
"bytes": "3164"
},
{
"name": "Gnuplot",
"bytes": "5032"
},
{
"name": "HTML",
"bytes": "1505"
},
{
"name": "JavaScript",
"bytes": "75586"
},
{
"name": "Jinja",
"bytes": "285"
},
{
"name": "PHP",
"bytes": "4241"
},
{
"name": "Python",
"bytes": "1218836"
},
{
"name": "Roff",
"bytes": "3145"
},
{
"name": "SCSS",
"bytes": "25181"
},
{
"name": "Shell",
"bytes": "7671"
},
{
"name": "TeX",
"bytes": "7458"
}
],
"symlink_target": ""
} |
import os
import sys
def _main():
    """Run Django's command-line utility for administrative tasks."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "impulse.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    _main()
| {
"content_hash": "6c63c1f396d217bfe135f21965518895",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.6206896551724138,
"repo_name": "akurihara/impulse",
"id": "60058c19e0700492f4cf066d7e50bed383caff37",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4407"
},
{
"name": "Python",
"bytes": "69185"
}
],
"symlink_target": ""
} |
import fixtures
import constants
from authomatic.providers import oauth2
conf = fixtures.get_configuration('eventbrite')
CONFIG = {
'login_xpath': ('//*[@id="authentication-container"]'
'/div/form/div[3]/div[1]/input'),
'password_xpath': ('//*[@id="authentication-container"]'
'/div/form/div[3]/div[2]/input'),
'consent_xpaths': [
'//*[@id="authentication-container"]/div/form/div[3]/div[4]/input',
'//*[@id="access_choices_allow"]',
],
'class_': oauth2.Eventbrite,
'scope': oauth2.Eventbrite.user_info_scope,
'user': {
'birth_date': None,
'city': None,
'country': None,
'email': conf.user_email,
'first_name': conf.user_first_name,
'gender': None,
'id': conf.user_id,
'last_name': conf.user_last_name,
'link': None,
'locale': None,
'name': conf.user_name,
'nickname': None,
'phone': None,
'picture': None,
'postal_code': None,
'timezone': None,
'username': None,
},
'content_should_contain': [
conf.user_email,
conf.user_first_name,
conf.user_last_name,
conf.user_name,
conf.user_id,
# User info JSON keys
'emails', 'email', 'verified', 'primary', 'id', 'name', 'first_name',
'last_name'
],
# Case insensitive
'content_should_not_contain':
conf.no_birth_date +
conf.no_gender +
conf.no_locale +
conf.no_location +
conf.no_nickname +
conf.no_phone +
conf.no_timezone
,
# True means that any thruthy value is expected
'credentials': {
'token_type': 'Bearer',
'provider_type_id': '2-17',
'_expiration_time': None,
'consumer_key': None,
'provider_id': None,
'consumer_secret': None,
'token': True,
'token_secret': None,
'_expire_in': True,
'provider_name': 'eventbrite',
'refresh_token': None,
'provider_type': 'authomatic.providers.oauth2.OAuth2',
'refresh_status': constants.CREDENTIALS_REFRESH_NOT_SUPPORTED,
},
} | {
"content_hash": "811ef0a68b8e8173f5e7224467ba53ef",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 77,
"avg_line_length": 29.77027027027027,
"alnum_prop": 0.5365410803449842,
"repo_name": "touilleMan/authomatic",
"id": "06aa840c4913a9f99506e7e2c0c142cfbf4ff98c",
"size": "2203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional_tests/expected_values/eventbrite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "565731"
},
{
"name": "CoffeeScript",
"bytes": "26033"
},
{
"name": "JavaScript",
"bytes": "4916"
},
{
"name": "Makefile",
"bytes": "5589"
},
{
"name": "Python",
"bytes": "371281"
},
{
"name": "Ruby",
"bytes": "1346"
},
{
"name": "Shell",
"bytes": "6825"
}
],
"symlink_target": ""
} |
"""Defines the output class for the adapter layers' parameters."""
from dataclasses import dataclass
import torch
@dataclass
class SamplerOutput:
    """Container for an adapter's sampled weight and bias tensors."""

    # Both fields default to None until the tensors are produced.
    weight: torch.FloatTensor = None
    bias: torch.FloatTensor = None
@dataclass
class LayerNormOutput:
    """Container for a conditional layer norm's weight and bias tensors."""

    # Both fields default to None until the tensors are produced.
    weight: torch.FloatTensor = None
    bias: torch.FloatTensor = None
@dataclass
class AdapterOutput:
    """Groups one adapter's up/down sampler outputs and its pre/post
    conditional layer norms."""

    up: SamplerOutput = None
    down: SamplerOutput = None
    pre_norm: LayerNormOutput = None
    post_norm: LayerNormOutput = None
@dataclass
class AdapterT5BlockOutput:
    """Adapter outputs for one T5 block: the feed-forward and the
    self-attention adapters."""

    feed_forward: AdapterOutput = None
    self_attention: AdapterOutput = None
| {
"content_hash": "5a8975ce681dbb93e315902a4c8d1ee1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 66,
"avg_line_length": 24.13888888888889,
"alnum_prop": 0.7111622554660529,
"repo_name": "google-research/ruse",
"id": "629b982ec54905c8f8c7e4489c6f33d2d16009d0",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "seq2seq/adapters/adapter_outputs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import unittest
import unittest.mock
import os
import cachediff
class TestAssemblyInstruction(unittest.TestCase):
    """Tests parsing of a single disassembled instruction line."""

    def test_simple(self):
        # An objdump-style line: "<address>: <bytes> <mnemonic> <operands>".
        line = '4005d6: 48 83 c4 08 add $0x8,%rsp'
        ai = cachediff.AssemblyLine(line)
        # The leading hex address is exposed as the virtual address.
        self.assertEqual(ai.get_virtual_address(), 0x4005d6)
class TestHighLine(unittest.TestCase):
    """Tests HighLine's mapping from a source line to its instruction
    addresses."""

    def setUp(self):
        lineno = 8
        # Disassembly snippet attributed to the high-level line above.
        instructions = '''
40051a: c7 45 f8 00 00 00 00 movl $0x0,-0x8(%rbp)
400521: eb 44 jmp 400567 <main+0x61>
400563: 83 45 f8 01 addl $0x1,-0x8(%rbp)
400567: 83 7d f8 63 cmpl $0x63,-0x8(%rbp)
40056b: 7e b6 jle 400523 <main+0x1d>
'''
        self.hl = cachediff.HighLine(lineno, instructions)

    def test_get_virtual_addresss(self):
        # Every instruction address in the snippet should be reported.
        expected = set([0x40051a, 0x400521, 0x400563, 0x400567,
                        0x40056b, ])
        self.assertEqual(self.hl.get_virtual_addresses(),
                         expected)

    def test_has_virtual_address(self):
        self.assertTrue(self.hl.has_virtual_address(0x40051a))
        # An address not attributed to this line must not match.
        self.assertFalse(self.hl.has_virtual_address(0x30051a))
class TestFile(unittest.TestCase):
    """Tests cachediff.File construction and address/line lookups."""

    def setUp(self):
        # Sample sources, two of which have pre-generated objdump files.
        file_path = os.path.join(os.getcwd(), 'test_samples',
                                 'test_file.c')
        file_path1 = os.path.join(os.getcwd(), 'test_samples',
                                  'qsort.c')
        dumpfile = os.path.join(os.getcwd(), 'test_samples',
                                'test_file_dump.dump')
        dumpfile1 = os.path.join(os.getcwd(), 'test_samples',
                                 'qsort_dump.dump')
        # Absolute prefix under which the dumps were originally generated,
        # so source paths recorded inside the dumps can be matched.
        test_prefix = '/home/saimadhav/cachediff/test_samples/'
        self.f = cachediff.File(file_path, dumpfile,
                                test_prefix+'test_file.c')
        self.f1 = cachediff.File(file_path)
        self.f2 = cachediff.File(file_path1, dumpfile1,
                                 test_prefix+'qsort.c')

    def test_get_high_level_lines(self):
        temp = self.f.get_high_level_lines()
        self.assertEqual(len(temp), self.f.get_line_count())
        # temp[0] corresponds to line3 in test_file_dump.dump
        self.assertTrue(temp[0].has_virtual_address(0x400506))

    def test_get_line_count(self):
        self.assertEqual(self.f.get_line_count(), 5)  # see test_file_dump.dump
        self.assertEqual(self.f2.get_line_count(), 13)

    def test_get_line(self):
        # Known address -> line mappings taken from the dump files.
        hl = self.f.get_line(0x400506)
        self.assertEqual(hl.lineno, 3)
        hl = self.f.get_line(0x400523)
        self.assertEqual(hl.lineno, 10)
        # Addresses not attributed to any line must be rejected.
        with self.assertRaises(ValueError):
            hl = self.f.get_line(0x400580)
        with self.assertRaises(ValueError):
            hl = self.f.get_line(0x123456)
        hl = self.f2.get_line(0x4006e4)
        self.assertEqual(hl.lineno, 20)
class TestRun(unittest.TestCase):
    """Fixture scaffolding around cachediff.Run.

    NOTE(review): this class defines setUp only; there are currently no
    test methods exercising ``transform_trace_file`` — presumably they
    were planned or live elsewhere. Confirm before removing.
    """

    def setUp(self):
        # A Mock stands in for a Run instance while still exposing the
        # real transform_trace_file implementation for testing.
        self.run = unittest.mock.Mock()
        self.run.transform_trace_file = cachediff.Run.transform_trace_file
        self.pintrace = os.path.join(os.getcwd(), 'test_samples',
                                     'pintrace.out')
class TestResult(unittest.TestCase):
    """Tests parsing of a dinero cache-simulator output file."""

    # Statistics expected from parsing test_samples/dinero_output,
    # asserted one by one in test_simple (same checks as before, driven
    # by data instead of repeated assertEqual statements).
    EXPECTED_RESULTS = {
        'l1_icache_instrn_fetches': 215903,
        'l1_icache_instrn_misses': 814,
        'l1_icache_instrn_miss_rate': 0.0038,
        'l1_icache_bytes_from_memory': 52096,
        'l1_dcache_data_fetches': 79157,
        'l1_dcache_read_fetches': 55538,
        'l1_dcache_write_fetches': 23619,
        'l1_dcache_data_misses': 3109,
        'l1_dcache_read_misses': 2548,
        'l1_dcache_write_misses': 561,
        'l1_dcache_bytes_from_memory': 198976,
        'l1_dcache_bytes_to_memory': 42560,
        'l2_ucache_fetches': 4588,
        'l2_ucache_instrn_fetches': 814,
        'l2_ucache_data_fetches': 3774,
        'l2_ucache_read_fetches': 3109,
        'l2_ucache_misses': 3218,
        'l2_ucache_instrn_misses': 811,
        'l2_ucache_data_misses': 2407,
        'l2_ucache_read_misses': 2407,
        'l2_ucache_write_misses': 0,
        'l2_ucache_bytes_from_memory': 205952,
        'l2_ucache_bytes_to_memory': 34496,
        'l3_ucache_fetches': 3757,
        'l3_ucache_instrn_fetches': 811,
        'l3_ucache_data_fetches': 2946,
        'l3_ucache_read_fetches': 2407,
        'l3_ucache_misses': 3218,
        'l3_ucache_instrn_misses': 811,
        'l3_ucache_data_misses': 2407,
        'l3_ucache_read_misses': 2407,
        'l3_ucache_write_misses': 0,
        'l3_ucache_bytes_from_memory': 205952,
        'l3_ucache_bytes_to_memory': 34496,
    }

    def setUp(self):
        path = os.path.join(os.getcwd(), 'test_samples',
                            'dinero_output')
        self.result = cachediff.Result(path)
        self.result1 = cachediff.Result(path)

    def test_diff(self):
        parm = ['l1_icache_instrn_fetches',
                'l1_dcache_read_fetches',
                'l1_icache_instrn_miss_rate',
                'l2_ucache_misses',
                'l3_ucache_bytes_to_memory']
        # Diffing two results parsed from the same file yields all zeros.
        diff = self.result.get_diff(self.result1, parm)
        for delta in diff.values():
            self.assertEqual(delta, 0)
        # Unknown statistic names are rejected.
        with self.assertRaises(ValueError):
            self.result.get_diff(self.result1, ['ABC'])

    def test_simple(self):
        results = self.result.results
        for key, expected in self.EXPECTED_RESULTS.items():
            self.assertEqual(results[key], expected)
class TestSingleContiguousDiff(unittest.TestCase):
    """Tests single_contiguous_diff on pairs of sample source files."""

    def setUp(self):
        cwd = os.getcwd()
        f1_path = os.path.join(cwd, 'test_samples',
                               'test_file.c')
        f2_path = os.path.join(cwd, 'test_samples',
                               'test_file_one.c')
        f3_path = os.path.join(cwd, 'test_samples',
                               'test_file_two.c')
        f5_path = os.path.join(cwd, 'test_samples',
                               'test_file_three.c')
        self.f1 = cachediff.File(f1_path)
        self.f2 = cachediff.File(f2_path)
        self.f3 = cachediff.File(f3_path)
        # f4 reuses f1's source so diff_two compares identical content.
        self.f4 = cachediff.File(f1_path)
        self.f5 = cachediff.File(f5_path)
        self.diff_one = cachediff.single_contiguous_diff(self.f1,
                                                         self.f2)
        self.diff_two = cachediff.single_contiguous_diff(self.f1,
                                                         self.f4)
        self.diff_three = cachediff.single_contiguous_diff(self.f3,
                                                           self.f5)

    def test_diff_simple(self):
        # One-line difference on each side, located at line 10.
        self.assertEqual(len(self.diff_one[0]), 1)
        self.assertEqual(len(self.diff_one[1]), 1)
        self.assertEqual(self.diff_one[0][0].lineno, 10)
        self.assertEqual(self.diff_one[1][0].lineno, 10)
        # Asymmetric diff: one line vs a three-line replacement.
        self.assertEqual(len(self.diff_three[0]), 1)
        self.assertEqual(len(self.diff_three[1]), 3)
        self.assertEqual(self.diff_three[1][1].lineno, 5)

    def test_diff_empty(self):
        # Identical files produce an empty diff on both sides.
        self.assertEqual(self.diff_two, ([], []))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "252f294b430cdc6ab4522ca8f8fe1cc0",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 79,
"avg_line_length": 43.27717391304348,
"alnum_prop": 0.5854577420570137,
"repo_name": "sahutd/cachediff",
"id": "67bae4d746d36835b86202036899d7605fb1955b",
"size": "7963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_cachediff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1780"
},
{
"name": "C++",
"bytes": "4247"
},
{
"name": "Makefile",
"bytes": "676"
},
{
"name": "Python",
"bytes": "27047"
}
],
"symlink_target": ""
} |
def cycleSort(arr):
    """Sorts *arr* in place with cycle sort and returns the write count.

    Cycle sort performs the minimum possible number of writes to the
    array: each value is moved directly to its final slot, rotating the
    rest of its cycle into place.
    """
    writes = 0
    n = len(arr)

    # Each index can start a cycle; the last element is always placed
    # by an earlier cycle.
    for start in range(n - 1):
        value = arr[start]

        # Final slot = start + number of smaller elements to the right.
        slot = start + sum(1 for j in range(start + 1, n) if arr[j] < value)

        # Value already in place: nothing to rotate.
        if slot == start:
            continue

        # Skip over duplicates so equal values keep distinct slots.
        while value == arr[slot]:
            slot += 1
        arr[slot], value = value, arr[slot]
        writes += 1

        # Rotate the remainder of the cycle until we return to start.
        while slot != start:
            slot = start + sum(
                1 for j in range(start + 1, n) if arr[j] < value)
            while value == arr[slot]:
                slot += 1
            arr[slot], value = value, arr[slot]
            writes += 1

    return writes
if __name__ == '__main__':
    # Demo: sort a sample list in place and show it before and after.
    arr = [1, 6, 4, 7, 2, 8]
    print("Unsorted Array: ", arr)
    cycleSort(arr)
    print("Sorted Array: ", arr)
| {
"content_hash": "2f60904d1436fc9eb31feff2a3baa12a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 65,
"avg_line_length": 25.869565217391305,
"alnum_prop": 0.4773109243697479,
"repo_name": "Deepak345/al-go-rithms",
"id": "9a7a2dfc215577211b4d42ad0abf4633f22eb9f9",
"size": "1190",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sort/cycle_sort/py/cycle_sort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "203468"
},
{
"name": "C#",
"bytes": "66836"
},
{
"name": "C++",
"bytes": "550920"
},
{
"name": "Clojure",
"bytes": "2606"
},
{
"name": "Common Lisp",
"bytes": "2731"
},
{
"name": "Crystal",
"bytes": "3169"
},
{
"name": "Erlang",
"bytes": "1403"
},
{
"name": "F#",
"bytes": "241"
},
{
"name": "Go",
"bytes": "56493"
},
{
"name": "Haskell",
"bytes": "3721"
},
{
"name": "Java",
"bytes": "294959"
},
{
"name": "JavaScript",
"bytes": "61004"
},
{
"name": "Julia",
"bytes": "2721"
},
{
"name": "Kotlin",
"bytes": "1393"
},
{
"name": "Lua",
"bytes": "1306"
},
{
"name": "Matlab",
"bytes": "2559"
},
{
"name": "PHP",
"bytes": "25660"
},
{
"name": "Perl",
"bytes": "8008"
},
{
"name": "Prolog",
"bytes": "3687"
},
{
"name": "Python",
"bytes": "216821"
},
{
"name": "QMake",
"bytes": "199"
},
{
"name": "Racket",
"bytes": "419"
},
{
"name": "Ruby",
"bytes": "16386"
},
{
"name": "Rust",
"bytes": "12886"
},
{
"name": "Scala",
"bytes": "4094"
},
{
"name": "Shell",
"bytes": "4580"
},
{
"name": "Swift",
"bytes": "10791"
}
],
"symlink_target": ""
} |
from utils import request_xml, request_json
import requests
import xmltodict
# Order-tracker endpoint templates. The {placeholders} are presumably
# substituted by the request helpers with matching keyword arguments
# (str.format style) — verify against utils.request_xml/request_json.
TRACK_BY_ID = ('https://trkweb.dominos.com/orderstorage/GetTrackerData?'
               'StoreID={storeId}&OrderKey={orderKey}')
TRACK_BY_PHONE = ('https://trkweb.dominos.com/orderstorage/GetTrackerData?'
                  'Phone={phone}')
def track_by_phone(phone):
    """Returns tracker statuses for orders placed from *phone*.

    The phone number is normalised to a stripped string before being
    substituted into the tracking URL; the SOAP envelope of the XML
    response is unwrapped down to the order-status payload.
    """
    normalized = str(phone).strip()
    data = request_xml(TRACK_BY_PHONE, phone=normalized)
    body = data['soap:Envelope']['soap:Body']
    response = body['GetTrackerDataResponse']
    return response['OrderStatuses']['OrderStatus']
def track_by_id(store_id, order_key):
    """Returns tracker data for a single order by store id and order key.

    BUG FIX: the keyword was previously ``storeID``, which does not match
    the ``{storeId}`` placeholder in TRACK_BY_ID, so the URL substitution
    could never fill in the store id (cf. ``track_by_phone``, whose
    ``phone=`` keyword matches its ``{phone}`` placeholder exactly).
    """
    return request_json(TRACK_BY_ID, storeId=store_id, orderKey=order_key)
| {
"content_hash": "53e2483bba38600319ddfe11e5a013ec",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 40.8125,
"alnum_prop": 0.7029096477794793,
"repo_name": "mpare002/HackTech_2017",
"id": "d09c92ce6ad2b0c7c9039f59e8c02934fc82dbfb",
"size": "653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dominos_bot/pizzapi/Track.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "685"
},
{
"name": "HTML",
"bytes": "208835"
},
{
"name": "JavaScript",
"bytes": "58641"
},
{
"name": "Python",
"bytes": "52435"
}
],
"symlink_target": ""
} |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import time
import pytest
import azure.mgmt.communication
import azure.mgmt.notificationhubs
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
from azure.mgmt.communication.models import CommunicationServiceResource
from azure.mgmt.communication.models import KeyType
from azure.mgmt.communication.models import TaggedResource
from azure.mgmt.communication.models import RegenerateKeyParameters
from azure.mgmt.notificationhubs.models import SharedAccessAuthorizationRuleCreateOrUpdateParameters
# Region for ordinary ARM resources created by these tests.
AZURE_LOCATION = "westus"
# Communication Service resources use a "global" location with a
# separate data-residency location.
COMMUNICATION_SERVICE_LOCATION = "global"
COMMUNICATION_SERVICE_DATA_LOCATION = "UnitedStates"
# Management tests are skipped wholesale via this flag; see DISABLE_REASON.
DISABLE_MGMT_TESTS = True
DISABLE_REASON = "Temporary issue causing the tests to fail"
class MgmtCommunicationTest(AzureMgmtTestCase):
    """Management-plane tests for Azure Communication Services."""

    def setUp(self):
        super(MgmtCommunicationTest, self).setUp()
        # Client under test, plus a Notification Hubs client used to
        # provision the hub that gets linked to the Communication resource.
        self.communication_client = self.create_mgmt_client(
            azure.mgmt.communication.CommunicationServiceManagementClient
        )
        self.notificationhubs_client = self.create_mgmt_client(
            azure.mgmt.notificationhubs.NotificationHubsManagementClient
        )

    @pytest.mark.skipif(DISABLE_MGMT_TESTS, reason=DISABLE_REASON)
    @ResourceGroupPreparer(location=AZURE_LOCATION)
    def test_communication_link_notif_hub(self, resource_group):
        """End-to-end: provision a Notification Hub, link it to a new
        Communication Service resource, and verify the returned link."""
        GROUP_NAME = resource_group.name
        namespace_name = self.get_resource_name("test-namespace-for-comm")
        notification_hub_name = self.get_resource_name("test-notification-hub-for-comm")
        resource_name = self.get_resource_name("test-resource-link-notif-hub")

        # Create the Notification Hubs resource that will be linked to the Communication Service resource
        self.notificationhubs_client.namespaces.create_or_update(
            GROUP_NAME,
            namespace_name,
            {
                "location": AZURE_LOCATION
            }
        )
        namespace = self.notificationhubs_client.namespaces.get(
            GROUP_NAME,
            namespace_name
        )
        # Poll until the namespace leaves the transient "Created" state;
        # sleep only in live mode so recorded playback stays fast.
        while namespace.status == "Created":
            if self.is_live == True:
                time.sleep(30)
            namespace = self.notificationhubs_client.namespaces.get(
                GROUP_NAME,
                namespace_name
            )
        notification_hubs = self.notificationhubs_client.notification_hubs.create_or_update(
            GROUP_NAME,
            namespace_name,
            notification_hub_name,
            {
                "location": AZURE_LOCATION
            }
        )

        # Create auth rule
        authorization_rule = { "properties": { "rights": [ "Listen" ] } }
        authorization_rule_name = "TestMgmtCommunicationLinkNotificationHub"
        self.notificationhubs_client.notification_hubs.create_or_update_authorization_rule(
            GROUP_NAME,
            namespace_name,
            notification_hub_name,
            authorization_rule_name,
            SharedAccessAuthorizationRuleCreateOrUpdateParameters(**authorization_rule)
        )

        # Obtain connection string
        keys = self.notificationhubs_client.notification_hubs.list_keys(
            GROUP_NAME,
            namespace_name,
            notification_hub_name,
            authorization_rule_name
        )
        notification_hub_connection_string = keys.primary_connection_string

        # Create Communication Service resource for test
        resource = CommunicationServiceResource(
            location=COMMUNICATION_SERVICE_LOCATION,
            data_location = COMMUNICATION_SERVICE_DATA_LOCATION
        )
        resource = self.communication_client.communication_service.begin_create_or_update(
            GROUP_NAME,
            resource_name,
            resource
        ).result()

        # Link Notification Hub's connection string to Communication Service
        linked_notification_hub = self.communication_client.communication_service.link_notification_hub(
            GROUP_NAME,
            resource_name,
            { 'resource_id': notification_hubs.id, 'connection_string': notification_hub_connection_string }
        )
        # The service should echo back the linked hub's ARM resource id.
        self.assertIsNotNone(linked_notification_hub.resource_id)
        self.assertEqual(linked_notification_hub.resource_id, notification_hubs.id)
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Allow running this test module directly, outside the pytest runner.
    unittest.main()
| {
"content_hash": "93f84040c3feb6d4c24ab039d087ee00",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 108,
"avg_line_length": 41.025862068965516,
"alnum_prop": 0.6434124816137844,
"repo_name": "Azure/azure-sdk-for-python",
"id": "97bf153ae701d85bf20fa91bf21c142a590c92be",
"size": "4761",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/communication/azure-mgmt-communication/tests/disable_test_mgmt_communication_notification_hub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import brain
import time
import math
import random
class Bug(object):
    """A grid-dwelling agent steered by a neural-network ``brain``.

    Each tick the bug feeds its offset from the goal into the brain,
    moves one cell in one of the eight neighbouring directions, wraps
    around the grid edges (torus), and is re-energised when it reaches
    the goal.
    """

    # Unit moves for the eight directions, indexed by the winning output
    # neuron; index 1 previously moved (+2, 0) due to a copy-paste bug,
    # breaking the clockwise pattern of the other diagonals.
    _MOVES = [(1, 0), (1, 1), (0, 1), (-1, 1),
              (-1, 0), (-1, -1), (0, -1), (1, -1)]

    def __init__(self, brain, energy, max_grid_edge, min_grid_edge):
        """Create a bug at a random cell within the inclusive grid bounds.

        brain         -- object providing run_brain/train_brain
        energy        -- initial energy budget (decremented each tick)
        max_grid_edge -- largest valid coordinate on either axis
        min_grid_edge -- smallest valid coordinate on either axis
        """
        # NOTE(review): reseeds the *global* RNG per instance; kept for
        # behavioural compatibility.
        random.seed(time.time())
        self.brain = brain
        self.energy = energy
        self.max_grid = max_grid_edge
        self.min_grid = min_grid_edge
        self.location = [random.randint(min_grid_edge, max_grid_edge),
                         random.randint(min_grid_edge, max_grid_edge)]
        self.count = 0   # number of times the goal has been reached
        self.age = 0
        self.goal = [0, 0]

    def time_tick(self, train):
        """Advance one simulation step; return True if the goal was
        reached this tick.

        When *train* is true the brain's training entry point is used,
        otherwise its plain forward pass.

        Fix: direction 1 now moves one cell diagonally (+x, +y) instead
        of two cells along x.
        """
        self.energy -= 1
        offset = [self.location[0] - self.goal[0],
                  self.location[1] - self.goal[1]]
        if train:
            outputs = self.brain.train_brain(offset)
        else:
            outputs = self.brain.run_brain(offset)
        # the highest-valued output neuron selects the direction
        dx, dy = self._MOVES[outputs.index(max(outputs))]
        self.location[0] += dx
        self.location[1] += dy
        # wrap around the grid edges (toroidal world)
        for axis in (0, 1):
            if self.location[axis] > self.max_grid:
                self.location[axis] = self.min_grid
            elif self.location[axis] < self.min_grid:
                self.location[axis] = self.max_grid
        if self.location == self.goal:
            self.energy = 50
            self.count += 1
            return True
        return False

    def attack(self, other_bug):
        """Placeholder combat hook; currently always reports success."""
        return True

    def distance_to_goal(self):
        """Euclidean distance from the current location to the goal."""
        return math.hypot(self.goal[0] - self.location[0],
                          self.goal[1] - self.location[1])

    def procreate(self, other_bug, mutation_chance):
        """Produce a child Bug by uniform crossover of both parents'
        weights; each weight mutates (uniform in [-1, 1]) with
        probability *mutation_chance*.  (``range`` replaces the
        Python-2-only ``xrange``.)"""
        random.seed()
        child = brain.Brain(self.brain.input_size,
                            self.brain.hidden_size,
                            self.brain.output_size)
        for i in range(self.brain.input_size):
            for h in range(self.brain.hidden_size):
                if random.random() <= mutation_chance:
                    child.ih_weights[i][h] = random.uniform(-1.0, 1.0)
                else:
                    child.ih_weights[i][h] = random.choice(
                        [self.brain.ih_weights[i][h],
                         other_bug.brain.ih_weights[i][h]])
        for h in range(self.brain.hidden_size):
            for o in range(self.brain.output_size):
                if random.random() <= mutation_chance:
                    child.ho_weights[h][o] = random.uniform(-1.0, 1.0)
                else:
                    child.ho_weights[h][o] = random.choice(
                        [self.brain.ho_weights[h][o],
                         other_bug.brain.ho_weights[h][o]])
        return Bug(child, 50, self.max_grid, self.min_grid)
"content_hash": "2277104655a3e8e4e8648ced22a817fa",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 126,
"avg_line_length": 34.3177570093458,
"alnum_prop": 0.528322440087146,
"repo_name": "Chippers255/bugs",
"id": "271d334b85acff27eb7fb2fd9109a5405f285db6",
"size": "3672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_source/modules/bug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29439"
}
],
"symlink_target": ""
} |
import os
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.colorchooser as colorchooser
Spinbox = ttk.Spinbox if hasattr(ttk, "Spinbox") else tk.Spinbox
if __name__ == "__main__": # For stand-alone testing with parallel TkUtil
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"..")))
import TkUtil
import TkUtil.Dock
from Globals import *
class Dock(TkUtil.Dock.Window):
    """Dockable "Display" options pane: word-wrap mode, block-cursor
    toggle, and line spacing.  Each change fires a virtual event
    (<<WordWrapChanged>>, <<BlockCursorChanged>>, <<LineSpacingChanged>>)
    so the owning editor can react."""

    def create_variables(self):
        """Create the traced Tk variables that back the widgets."""
        self.title = "Display"
        self.__wordWrap = tk.StringVar()
        self.__wordWrap.set("Word")
        self.__wordWrap.trace("w", self.__set_word_wrap)
        self.__blockCursor = tk.IntVar()
        self.__blockCursor.set(False)
        self.__blockCursor.trace("w", self.__set_block_cursor)
        self.__lineSpacing = tk.StringVar()
        self.__lineSpacing.set(0)
        self.__lineSpacing.trace("w", self.__set_line_spacing)

    def create_widgets(self):
        """Build the pane's widgets."""
        self.wordWrapLabel = ttk.Label(self, text="Wrap:")
        self.wordWrapCombobox = ttk.Combobox(self, state="readonly",
                values=["None", "Character", "Word"],
                textvariable=self.__wordWrap, width=10)
        self.blockCursorCheckbutton = ttk.Checkbutton(self,
                text="Block Cursor", variable=self.__blockCursor)
        self.lineSpacingLabel = ttk.Label(self, text="Line Spacing:")
        # Use the module-level Spinbox alias (was tk.Spinbox): the alias
        # exists precisely so the themed ttk.Spinbox is preferred when the
        # installed Tk provides it.
        self.lineSpacingSpinbox = Spinbox(self, from_=0, to=32,
                width=3, validate="all", justify=tk.RIGHT,
                textvariable=self.__lineSpacing)
        self.lineSpacingSpinbox.config(validatecommand=(
            self.lineSpacingSpinbox.register(self.__validate_int),
            "lineSpacingSpinbox", "%P"))

    def create_layout(self):
        """Grid the widgets into three rows."""
        pad = dict(padx=PAD, pady=PAD)
        padW = dict(sticky=tk.W, **pad)
        padWE = dict(sticky=(tk.W, tk.E), **pad)
        self.wordWrapLabel.grid(row=1, column=0, **padW)
        self.wordWrapCombobox.grid(row=1, column=1, columnspan=2, **padWE)
        self.blockCursorCheckbutton.grid(row=2, column=0, columnspan=3,
                **padWE)
        self.lineSpacingLabel.grid(row=3, column=0, columnspan=2, **padW)
        # fixed typo: option was misspelled "stick"
        self.lineSpacingSpinbox.grid(row=3, column=2, sticky=tk.E, **pad)

    def __set_word_wrap(self, *args):
        """Trace callback: broadcast the new word-wrap mode."""
        self.event_generate("<<WordWrapChanged>>")

    def __set_block_cursor(self, *args):
        """Trace callback: broadcast the new block-cursor state."""
        self.event_generate("<<BlockCursorChanged>>")

    def __set_line_spacing(self, *args):
        """Trace callback: broadcast the new line spacing."""
        self.event_generate("<<LineSpacingChanged>>")

    def __validate_int(self, spinbox, number):
        """Spinbox validatecommand: accept only valid integer input."""
        spinbox = getattr(self, spinbox)
        return TkUtil.validate_spinbox_int(spinbox, number)

    @property
    def word_wrap(self):
        """Wrap mode in the form the Text widget expects:
        "none", "char", or "word"."""
        wrap = self.__wordWrap.get().lower()
        if wrap == "character":
            wrap = "char"
        return wrap

    @word_wrap.setter
    def word_wrap(self, value):
        if value.lower() == "char":
            value = "character"
        self.__wordWrap.set(value.title())

    @property
    def block_cursor(self):
        """Whether the block cursor is enabled (bool)."""
        return bool(self.__blockCursor.get())

    @block_cursor.setter
    def block_cursor(self, value):
        self.__blockCursor.set(value)

    @property
    def line_spacing(self):
        """Extra spacing between lines (int)."""
        return int(self.__lineSpacing.get())

    @line_spacing.setter
    def line_spacing(self, value):
        self.__lineSpacing.set(value)
if __name__ == "__main__":
    # Stand-alone smoke test: when run from a terminal, show the dock in
    # its own toplevel window; otherwise (e.g. piped output) just report
    # that the module imported cleanly.
    if sys.stdout.isatty():
        application = tk.Tk()
        application.title("Display")
        dock = Dock(application, None)
        dock.pack(fill=tk.BOTH, expand=True)
        # Escape closes the window whether the dock or the app has focus.
        dock.bind("<Escape>", lambda *args: application.quit())
        application.bind("<Escape>", lambda *args: application.quit())
        application.mainloop()
    else:
        print("Loaded OK")
| {
"content_hash": "66807bc6e38f3a70129a7caa1d6c8d1f",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 75,
"avg_line_length": 31.883333333333333,
"alnum_prop": 0.6092524830109776,
"repo_name": "johnobrien/PyPractice",
"id": "cfa9d5484049379b6907e49ee05daf832f809d98",
"size": "4435",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pipeg/texteditor2/Display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "713211"
}
],
"symlink_target": ""
} |
class Intermediate(object):
    """Intermediate representation of a patched program.

    Maps each addressable statement ID (SID) of the original program to
    the list of statement SIDs now occupying that site; ``[0]`` marks a
    destroyed site."""

    @staticmethod
    def from_problem(problem):
        """Generate the null (identity) representation for *problem*:
        every SID maps to itself."""
        states = dict((sid, [sid]) for sid in problem.sids)
        return Intermediate(states)

    @staticmethod
    def from_patch(problem, patch):
        """Generate a representation from *patch* by applying its fixes,
        in order, to the null representation."""
        rep = Intermediate.from_problem(problem)
        for fix in patch.fixes:
            rep.apply_fix(problem, fix)
        return rep

    def __init__(self, states):
        """Construct from a mapping SID -> sequence of states for each
        addressable statement."""
        self.states = states

    def hash_state(self, problem, sid):
        """Hash the program state at *sid* by treating its statement
        sequence as digits of a base-(problem.max_sid) number; 0 for a
        destroyed site."""
        if self.destroyed(sid):
            return 0
        base = problem.max_sid
        # Equivalent to the original reduce(); rewritten as a sum because
        # the tuple-unpacking lambda it used is Python-2-only syntax.
        return sum(s * pow(base, i)
                   for i, s in enumerate(self.states[sid]))

    def apply_fix(self, problem, fix):
        """Apply an atomic fix (delete / insert / replace) to this
        representation."""
        if fix.typ == "delete":
            # deleting a statement destroys it and all of its children
            for loc in ([fix.location] + problem.children(fix.location)):
                self.states[loc] = [0]
        elif fix.typ == "insert":
            if self.states[fix.location] == [0]:
                # inserting into a destroyed site revives it
                self.states[fix.location] = [fix.surrogate]
            else:
                self.states[fix.location].append(fix.surrogate)
        elif fix.typ == "replace":
            self.states[fix.location] = [fix.surrogate]
            # the replaced statement's children are destroyed
            for loc in problem.children(fix.location):
                self.states[loc] = [0]

    def altered(self, sid):
        """True if the statement at *sid* has been altered in any way."""
        return self.states[sid] != [sid]

    def altered_root(self, sid):
        """True if the root statement at *sid* has been altered,
        ignoring trailing insertions."""
        return self.states[sid][0] != sid

    def replaced(self, sid):
        """True if a replacement has taken place at *sid*."""
        return (not self.destroyed(sid)) and self.states[sid][0] != sid

    def insertions(self, sid):
        """List of statements inserted after the root at *sid*."""
        return self.states[sid][1:]

    def destroyed(self, sid):
        """True if the statement at *sid* has been destroyed."""
        return self.states[sid] == [0]

    def destroyed_by_ancestor(self, problem, sid):
        """True if the statement at *sid* has been destroyed by the
        deletion or replacement of one of its ancestors."""
        for ancestor in problem.ancestors(sid):
            if self.replaced(ancestor) or self.destroyed(ancestor):
                return True
        return False

    def to_lines(self, problem):
        """Generate the list of lines comprising this program."""
        lines = []
        # Depth-first walk of the top-level statements (stack reversed so
        # statements are visited in their original order).
        q = problem.top_level_statements()[::-1]
        while q:
            nxt = q.pop()
            # Only process this SID if there are statements there.
            if self.states[nxt] != [0]:
                # Check if the root statement has been replaced.
                if self.altered_root(nxt):
                    lines.extend(problem.lines_within(self.states[nxt][0]))
                # If not, process the children of this statement.
                else:
                    lines.append(nxt)
                    q.extend(problem.immediate_children(nxt)[::-1])
                # Process each insertion performed at this SID.
                for ins_id in self.states[nxt][1:]:
                    lines.extend(problem.lines_within(ins_id))
        return lines

    def to_string(self):
        """Debugging helper: render the full SID -> states mapping.

        Fix: ``self.states`` is a dict, so the original ``[1:]`` slice
        raised TypeError; iterate its (sorted) items instead."""
        s = ", ".join("%d -> (%s)" % (sid, " ".join(map(str, states)))
                      for sid, states in sorted(self.states.items()))
        return "{%s}" % (s)
| {
"content_hash": "b010a4a467da847e735c6770d86ae730",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 98,
"avg_line_length": 37.557522123893804,
"alnum_prop": 0.6039114043355325,
"repo_name": "ChrisTimperley/EvoAnalyser.py",
"id": "fc4fdcea7e72c3e515fab7503f83262b227a36ed",
"size": "4309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/representation/_patch/intermediate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40252"
}
],
"symlink_target": ""
} |
from .base import BaseType
class SavedActionList(BaseType):
    """Tanium SOAP object type for the ``saved_actions`` element: a list
    of SavedAction children plus optional cache metadata.  (Follows the
    generated object-type pattern used throughout this package; the
    CacheInfo/SavedAction imports live at the bottom of the module.)"""
    _soap_tag = 'saved_actions'
    def __init__(self):
        BaseType.__init__(
            self,
            simple_properties={},
            # single nested CacheInfo element
            complex_properties={'cache_info': CacheInfo},
            # repeated SavedAction child elements
            list_properties={'saved_action': SavedAction},
        )
        self.cache_info = None   # populated from the 'cache_info' element, if present
        self.saved_action = []   # accumulates parsed SavedAction children
from saved_action import SavedAction
from cache_info import CacheInfo
| {
"content_hash": "746d6e91bc0b0207cc96c46761d1cc7e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 58,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.5863539445628998,
"repo_name": "tanium/pytan",
"id": "14930c22adc940e918aa1e3e8e4b893631a669a1",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/taniumpy/object_types/saved_action_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13251"
},
{
"name": "CSS",
"bytes": "32442"
},
{
"name": "HTML",
"bytes": "1232764"
},
{
"name": "JavaScript",
"bytes": "375167"
},
{
"name": "Makefile",
"bytes": "4287"
},
{
"name": "Python",
"bytes": "2541262"
},
{
"name": "Shell",
"bytes": "3194"
}
],
"symlink_target": ""
} |
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
# Python 2 / Python 3 compatibility shim: alias the functional helpers and
# the scalar types that differ between the two majors so the rest of the
# module refers to one set of names.  (String comparison of the major
# version works because only "2" vs "3" is ever distinguished.)
if python_version_tuple()[0] < "3":
    from itertools import izip_longest
    from functools import partial
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = unicode     # Python 2 text type
    _binary_type = str       # Python 2 byte-string type
else:
    from itertools import zip_longest as izip_longest
    from functools import reduce, partial
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str
    _binary_type = bytes
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.1"
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is suppposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebewteenrows ---
# ... (more datarows) ...
# --- linebewteenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
    """Assemble a full `pipe`-format separator line from per-column
    widths and alignments."""
    parts = [_pipe_segment_with_colons(align, width)
             for align, width in zip(colaligns, colwidths)]
    return u"|{0}|".format(u"|".join(parts))
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _latex_line_begin_tabular(colwidths, colaligns):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\\begin{tabular}{" + tabular_columns_fmt + "}\n\hline"
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=DataRow("", "&", "\\\\"),
datarow=DataRow("", "&", "\\\\"),
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile("\x1b\[\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m") # ANSI color codes
def simple_separated_format(separator):
    """Construct a minimal TableFormat whose columns are separated by
    *separator* and which draws no horizontal lines.

    >>> tsv = simple_separated_format("\\t") ; \
        tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == u'foo \\t 1\\nspam\\t23'
    True
    """
    row = DataRow('', separator, '')
    return TableFormat(None, None, None, None,
                       headerrow=row,
                       datarow=row,
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except ValueError:
return False
def _isnumber(string):
    """True when *string* (or a number) is convertible to ``float``.

    >>> _isnumber("123.45")
    True
    >>> _isnumber("123")
    True
    >>> _isnumber("spam")
    False
    """
    return _isconvertible(float, string)
def _isint(string):
    """True for int objects and for str/bytes values that parse as an int.

    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    # exact type check (not isinstance) preserves the original behaviour,
    # e.g. bool values are not treated as ints here
    if type(string) is int:
        return True
    stringlike = isinstance(string, (_binary_type, _text_type))
    return stringlike and _isconvertible(int, string)
def _type(string, has_invisible=True):
    """The least generic type of a value: one of type(None), int, float,
    binary string, or text string.

    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type(u'\x1b[31m42\x1b[0m') is type(42)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    if has_invisible and isinstance(string, (_text_type, _binary_type)):
        # ANSI color codes must not influence type detection
        string = _strip_invisible(string)
    if string is None:
        return _none_type
    if hasattr(string, "isoformat"):
        # datetime.datetime, date, and time render as text
        return _text_type
    if _isint(string):
        return int
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
def _afterpoint(string):
    """Number of symbols after the decimal point; -1 when the string has
    no decimal point or is not a number at all.

    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if not _isnumber(string) or _isint(string):
        return -1
    pos = string.rfind(".")
    if pos < 0:
        # scientific notation: count symbols after the exponent marker
        pos = string.lower().rfind("e")
    if pos < 0:
        return -1  # no point
    return len(string) - pos - 1
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, u'\u044f\u0439\u0446\u0430') == u' \u044f\u0439\u0446\u0430'
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = u"{0:>%ds}" % iwidth
return fmt.format(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, u'\u044f\u0439\u0446\u0430') == u'\u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = u"{0:<%ds}" % iwidth
return fmt.format(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, u'\u044f\u0439\u0446\u0430') == u' \u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = u"{0:^%ds}" % iwidth
return fmt.format(s)
def _strip_invisible(s):
    """Remove invisible ANSI color codes from *s* (str or bytes).

    Fix: the bytes branch used the text replacement "" with a bytes
    pattern, which raises TypeError on Python 3; the replacement must be
    b"" to match the input type."""
    if isinstance(s, _text_type):
        return re.sub(_invisible_codes, "", s)
    else:  # a bytestring
        return re.sub(_invisible_codes_bytes, b"", s)
def _visible_width(s):
    """Visible (printed) width of a string, ignoring ANSI color codes.

    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if isinstance(s, (_text_type, _binary_type)):
        return len(_strip_invisible(s))
    # non-string values are measured by their text rendition
    return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
    """Pad every string in *strings* to a common width according to
    *alignment* ("left", "right", "center", "decimal", or falsy to leave
    the values untouched).

    >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
    [' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
    >>> list(map(str,_align_column(['123.4', '56.7890'], None)))
    ['123.4', '56.7890']
    """
    if not alignment:
        return strings
    if alignment == "decimal":
        # right-align while lining up the decimal points: pad each value
        # on the right so all have the same number of decimals
        decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        strings = [s + " " * (maxdecimals - decs)
                   for s, decs in zip(strings, decimals)]
        padfn = _padleft
    elif alignment == "right":
        strings = [s.strip() for s in strings]
        padfn = _padleft
    elif alignment == "center":
        strings = [s.strip() for s in strings]
        padfn = _padboth
    else:
        # any other truthy alignment flushes left
        strings = [s.strip() for s in strings]
        padfn = _padright
    width_fn = _visible_width if has_invisible else len
    maxwidth = max(max(map(width_fn, strings)), minwidth)
    return [padfn(maxwidth, s, has_invisible) for s in strings]
def _more_generic(type1, type2):
    """Return the more generic of two types on the scale
    NoneType < int < float < binary < text (unknown types rank as text)."""
    rank = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    by_rank = {0: _none_type, 1: int, 2: float, 3: _binary_type, 4: _text_type}
    return by_rank[max(rank.get(type1, 4), rank.get(type2, 4))]
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", u'\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True
    """
    # fold the per-value types together, starting from the narrowest (int)
    return reduce(_more_generic,
                  (_type(s, has_invisible) for s in strings),
                  int)
def _format(val, valtype, floatfmt, missingval=u""):
    """Format a value according to its type.
    Unicode is supported:
    >>> hrow = [u'\u0431\u0443\u043a\u0432\u0430', u'\u0446\u0438\u0444\u0440\u0430'] ; \
        tbl = [[u'\u0430\u0437', 2], [u'\u0431\u0443\u043a\u0438', 4]] ; \
        good_result = u'\\u0431\\u0443\\u043a\\u0432\\u0430      \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------  -------\\n\\u0430\\u0437             2\\n\\u0431\\u0443\\u043a\\u0438           4' ; \
        tabulate(tbl, headers=hrow) == good_result
    True
    """
    if val is None:
        return missingval
    if valtype is _binary_type:
        # decode bytes for display
        return _text_type(val, "ascii")
    if valtype is float:
        return format(float(val), floatfmt)
    # ints, text, and anything else render via default str conversion
    return u"{0}".format(val)
def _align_header(header, alignment, width):
    """Pad a header cell to *width* according to its column alignment
    (left / center / falsy for none; anything else flushes right)."""
    if not alignment:
        return u"{0}".format(header)
    if alignment == "left":
        return _padright(width, header)
    if alignment == "center":
        return _padboth(width, header)
    # "right", "decimal", and any other alignment flush right
    return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.
    Supported tabular data types:
    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")
    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        # NOTE(review): on a plain dict .values is a bound method (callable),
        # while on a DataFrame .values is an ndarray property — this is the
        # discriminator used below.
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict
            keys = tabular_data.keys()
            rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = tabular_data.keys()
            vals = tabular_data.values # values matrix doesn't need to be transposed
            names = tabular_data.index
            # prepend the index label to each row
            rows = [[v]+list(row) for v,row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type,keys)) # headers should be strings
    else: # a usual iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if (headers == "keys" and
            hasattr(tabular_data, "dtype") and
            getattr(tabular_data.dtype, "names")):
            # numpy record array
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")): # namedtuple
            headers = list(map(_text_type, rows[0]._fields))
        elif headers == "keys" and len(rows) > 0: # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0])) # headers should be strings
        rows = rows[1:]
    headers = list(headers)
    rows = list(map(list,rows))
    # pad with empty headers for initial columns if necessary
    # (fewer headers than columns => headers name the LAST columns,
    # consistent with R / pandas plain-text output)
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [u""]*(ncols - nhs) + headers
    return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=u""):
    """Format a fixed width table for pretty printing.
    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    --- ---------
    1 2.34
    -56 8.999
    2 10001
    --- ---------
    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, a two-dimensional NumPy array,
    NumPy record array, or a Pandas' dataframe.
    Table headers
    -------------
    To print nice column headers, supply the second argument (`headers`):
    - `headers` can be an explicit list of column headers
    - if `headers="firstrow"`, then the first row of data is used
    - if `headers="keys"`, then dictionary keys or column indices are used
    Otherwise a headerless table is produced.
    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.
    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ... headers="firstrow"))
    sex age
    ----- ----- -----
    Alice F 24
    Bob M 19
    Column alignment
    ----------------
    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).
    Table formats
    -------------
    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.
    `None` values are replaced with a `missingval` string:
    >>> print(tabulate([["spam", 1, None],
    ... ["eggs", 42, 3.14],
    ... ["other", None, 2.7]], missingval="?"))
    ----- -- ----
    spam 1 ?
    eggs 42 3.14
    other ? 2.7
    ----- -- ----
    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
    and 'latex'. Variable `tabulate_formats` contains the list of
    currently supported formats.
    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "plain"))
    strings numbers
    spam 41.9999
    eggs 451
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam 41.9999
    eggs 451
    "simple" format is like Pandoc simple_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "simple"))
    strings numbers
    --------- ---------
    spam 41.9999
    eggs 451
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ---- --------
    spam 41.9999
    eggs 451
    ---- --------
    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings | numbers |
    +===========+===========+
    | spam | 41.9999 |
    +-----------+-----------+
    | eggs | 451 |
    +-----------+-----------+
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam | 41.9999 |
    +------+----------+
    | eggs | 451 |
    +------+----------+
    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "pipe"))
    | strings | numbers |
    |:----------|----------:|
    | spam | 41.9999 |
    | eggs | 451 |
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam | 41.9999 |
    | eggs | 451 |
    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "orgtbl"))
    | strings | numbers |
    |-----------+-----------|
    | spam | 41.9999 |
    | eggs | 451 |
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam | 41.9999 |
    | eggs | 451 |
    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ... ["strings", "numbers"], "rst"))
    ========= =========
    strings numbers
    ========= =========
    spam 41.9999
    eggs 451
    ========= =========
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ==== ========
    spam 41.9999
    eggs 451
    ==== ========
    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:
    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ... headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings !! align="right"| numbers
    |-
    | spam || align="right"| 41.9999
    |-
    | eggs || align="right"| 451
    |}
    "latex" produces a tabular environment of LaTeX document markup:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
    spam & 41.9999 \\\\
    eggs & 451 \\\\
    \\hline
    \\end{tabular}
    """
    # NOTE: the default for `headers` is now an (immutable) empty tuple rather
    # than a mutable list literal; _normalize_tabular_data accepts any
    # iterable, so callers are unaffected, and the mutable-default-argument
    # pitfall is avoided.
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = u'\n'.join(['\t'.join(map(_text_type, headers))] + \
                            [u'\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        # widths must ignore the invisible ANSI escape sequences
        width_fn = _visible_width
    else:
        width_fn = len

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
            for c, ct in zip(cols, coltypes)]

    # align columns: numeric columns use numalign, everything else stralign
    aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
    # headers get two extra spaces of minimum width so they never touch
    minwidths = [width_fn(h) + 2 for h in headers] if headers else [0] * len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    # allow passing either a TableFormat instance or a format name;
    # unknown names silently fall back to "simple"
    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = u" "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
    """Render `rows` (plus optional `headers`) according to `fmt` into `lines`."""
    lines = []
    # Some formats suppress certain rules when a header row is present.
    suppressed = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    margin = fmt.padding
    widths = [w + 2 * margin for w in colwidths]
    header_cells = _pad_row(headers, margin)
    body_rows = [_pad_row(r, margin) for r in rows]

    if fmt.lineabove and "lineabove" not in suppressed:
        lines.append(_build_line(widths, colaligns, fmt.lineabove))

    if header_cells:
        lines.append(_build_row(header_cells, widths, colaligns, fmt.headerrow))
    if fmt.linebelowheader and "linebelowheader" not in suppressed:
        lines.append(_build_line(widths, colaligns, fmt.linebelowheader))

    if body_rows and fmt.linebetweenrows and "linebetweenrows" not in suppressed:
        # every row but the last is followed by a separator rule
        for r in body_rows[:-1]:
            lines.append(_build_row(r, widths, colaligns, fmt.datarow))
            lines.append(_build_line(widths, colaligns, fmt.linebetweenrows))
        lines.append(_build_row(body_rows[-1], widths, colaligns, fmt.datarow))
    else:
        lines.extend(_build_row(r, widths, colaligns, fmt.datarow) for r in body_rows)

    if fmt.linebelow and "linebelow" not in suppressed:
        lines.append(_build_line(widths, colaligns, fmt.linebelow))
return "\n".join(lines) | {
"content_hash": "1d3d2b378b96dfac480d16652cf2ff72",
"timestamp": "",
"source": "github",
"line_count": 847,
"max_line_length": 198,
"avg_line_length": 34.2668240850059,
"alnum_prop": 0.5356601433296582,
"repo_name": "NLeSC/pointcloud-benchmark",
"id": "24da5d80ee392e42064171c1894d5d2003b81385",
"size": "29049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pointcloud/tabulate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "170293"
},
{
"name": "C++",
"bytes": "28083"
},
{
"name": "CMake",
"bytes": "39767"
},
{
"name": "Makefile",
"bytes": "12996"
},
{
"name": "Python",
"bytes": "345656"
},
{
"name": "Shell",
"bytes": "16610"
}
],
"symlink_target": ""
} |
def compute(x, y):
    """Return a summary line for a series of numbers.

    x -- the running total of the numbers entered
    y -- how many numbers were entered

    The original body ignored its parameters and read the module globals
    `sum` and `count` instead (and used truncating integer division on
    Python 2); this version actually uses the arguments and computes a
    true average, guarding against a zero count.
    """
    if y == 0:
        return "No numbers were entered."
    average = float(x) / y  # float() forces true division on Python 2
    return "The total is: %f , the count is: %d and the average is: %f" % (x, y, average)
count = 0 # Initialize count = 0
sum = 0 # Initialize and assign value to variable 'sum' = 0
while True: # loop
num = eval(raw_input("Enter a number: ")) # Get input number
if num >0:
sum = sum + num # Keep adding the number to total
count += 1 # Tallying how many times the loop execute.
if len(num) == 0:
break
print compute(sum, count)# Calling function "compute"
# Print "return from function" to screen.
#print "Oop! Please Enter number:" | {
"content_hash": "74e9f4d85db6bc0cead61113c91f7219",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 139,
"avg_line_length": 28.48148148148148,
"alnum_prop": 0.6540962288686606,
"repo_name": "ttruongdc/py-Learning",
"id": "f7218e83e666e75d1da32c3e9bc36cd435732561",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "page65.5.10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4394"
}
],
"symlink_target": ""
} |
import io
from kivy.core.image import Image as CoreImageKivy
from PIL import Image
class CoreImage(CoreImageKivy):
    """Kivy core image extended with a Pillow-based helper that writes a
    scaled copy of an image file."""

    def __init__(self, arg, **kwargs):
        super(CoreImage, self).__init__(arg, **kwargs)

    def resize(self, fname, fname_scaled, width, height):
        """Scale the image stored at `fname` to width x height pixels and
        save the result to `fname_scaled`.

        Read/write errors are printed and the method returns without
        raising, preserving the original best-effort behaviour.
        """
        try:
            img = Image.open(fname)
        except Exception as e:
            print('Exception: ', e)
            return
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
        # filter.  Fall back to ANTIALIAS only for very old PIL/Pillow.
        resample = getattr(Image, 'LANCZOS', None) or getattr(Image, 'ANTIALIAS', None)
        img = img.resize((width, height), resample)
        try:
            img.save(fname_scaled)
        except Exception as e:
            print('Exception: ', e)
            return
| {
"content_hash": "0586bb1789820e77eb2321fb2fe7e54f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 58,
"avg_line_length": 26.44,
"alnum_prop": 0.5537065052950075,
"repo_name": "alexismirandan/Edit-image-kivy-app",
"id": "50401bbd5ee721b3c8548e35c9f088f97888fd89",
"size": "684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/core_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9232"
}
],
"symlink_target": ""
} |
import json
import time
from collections import defaultdict
from os.path import join
import numpy as np
from sklearn.metrics import average_precision_score
from .utils import make_relevance_matrix
from .settings import RESULTS_DIR
class Evaluator(object):
    """The ``Evaluator`` evaluates a retrieval method, collects the performance
    measures, and keeps values of multiple runs (for example in k-fold
    cross-validation).
    """
    def __init__(self):
        self.mean_ap = []                 # one mean average precision per run
        self.prec_at = defaultdict(list)  # precision@k values, keyed by k
        self.rel_prec = defaultdict(list) # precision keyed by #relevant sounds

    def eval(self, queries, weights, Y_score, Y_test, n_relevant):
        """Accumulate precision measures for one evaluation run.

        Parameters
        ----------
        queries : array-like, shape = [n_queries, n_classes]
            The queries to evaluate
        weights : array-like, shape = [n_queries]
            Query weights. Multi-word queries can be weighted to reflect
            importance to users.
        Y_score : array-like, shape = [n_queries, n_classes]
            Scores of queries and sounds.
        Y_test : array-like, shape = [n_samples, n_classes]
            Test set tags associated with each test set song in
            binary indicator format.
        n_relevant : array-like, shape = [n_queries]
            The number of relevant sounds in X_train for each query.
        """
        # delete rows which have no relevant sound
        Y_true = make_relevance_matrix(queries, Y_test)
        at_least_one_relevant = Y_true.any(axis=1)
        Y_true = Y_true[at_least_one_relevant]
        Y_score = Y_score[at_least_one_relevant]
        n_relevant = n_relevant[at_least_one_relevant]

        ap = []
        prec = defaultdict(list)
        # range()/items() instead of the Python-2-only xrange()/iteritems()
        # keeps this working on both Python 2 and 3.
        for x in range(Y_true.shape[0]):
            self.rel_prec[n_relevant[x]].append(
                ranking_precision_score(Y_true[x], Y_score[x]))
            for k in range(1, 21):
                prec[k].append(ranking_precision_score(
                    Y_true[x], Y_score[x], k) * weights[x])
            ap.append(average_precision_score(Y_true[x],
                                              Y_score[x]) * weights[x])
        self.mean_ap.append(np.sum(ap))
        for k, v in prec.items():
            self.prec_at[k].append(np.sum(v))

    def to_json(self, dataset, method, codebook_size, params):
        """
        Write the retrieval performance results to a file.
        Parameters
        ----------
        dataset : str
            The name of the evaluated dataset.
        method : str
            The name of the evaluated retrieval method.
        codebook_size : int
            The codebook size the dataset is encoded with.
        params: dict
            The ``method``'s parameters used during the evaluation.
        """
        self._to_json(join(RESULTS_DIR, '{}_precision.json'.format(dataset)),
                      method, params, codebook_size, self.prec_at)
        self._to_json(join(RESULTS_DIR, '{}_mean_ap.json'.format(dataset)),
                      method, params, codebook_size, self.mean_ap)
        self._to_json(join(RESULTS_DIR, '{}_prec_at_rel.json'.format(dataset)),
                      method, params, codebook_size, self.rel_prec)

    def _check_exists(self, filename):
        # Probe the file; create it with an empty JSON object when missing.
        # The with-statements close the handles (the original leaked the
        # probe handle).
        try:
            with open(filename):
                pass
        except IOError:
            with open(filename, 'w+') as f:
                json.dump(dict(), f)

    def _stats(self, name, params, codebook_size, precision):
        # One result record, keyed by a timestamp so repeated runs never
        # overwrite each other.
        stats = dict(name=name,
                     params=params,
                     codebook_size=codebook_size,
                     precision=precision)
        timestr = time.strftime("%Y%m%d-%H%M%S")
        return {timestr: stats}

    def _to_json(self, filename, name, params, cb_size, precision):
        # Read-modify-write: merge the new record into whatever is on disk.
        self._check_exists(filename)
        with open(filename, 'r+') as f:
            dic = json.load(f)
            dic.update(self._stats(name, params, cb_size, precision))
        with open(filename, 'w+') as f:
            json.dump(dic, f, indent=4)
def ranking_precision_score(y_true, y_score, k=10):
    """Precision at rank k
    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.
    Returns
    -------
    precision@k : float
        Precision at rank k.
    """
    unique_y = np.unique(y_true)
    if len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")
    # Use the *last* (highest) label as the positive class.  The original
    # indexed unique_y[1], which raises IndexError whenever y_true contains
    # a single label (e.g. every document is relevant); [-1] handles both
    # the one- and two-level cases.
    pos_label = unique_y[-1]
    n_pos = np.sum(y_true == pos_label)

    order = y_score.argsort()[::-1]       # indices sorted by descending score
    y_true = y_true[order[:k]]            # true labels of the top-k items
    n_relevant = (y_true == pos_label).sum()

    # Divide by min(n_pos, k) so that the best achievable score is always 1.0
    return float(n_relevant) / min(n_pos, k)
| {
"content_hash": "dfe4c5dca6f5a0a5775f333e858eb062",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 32.16556291390729,
"alnum_prop": 0.5688696726374305,
"repo_name": "dschwertfeger/cbar",
"id": "44d94253c58e5a173487f9b2076aaecb660066ab",
"size": "4857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cbar/evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75905"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters User.image so it has a default
    # avatar, allows a 500-character stored filename, and is optional.

    dependencies = [
        ('users', '0006_auto_20150707_0631'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='image',
            # Files are uploaded under MEDIA_ROOT/avatars; blank=True makes
            # the field optional in forms, with the bundled default avatar
            # used when no image is supplied.
            field=models.ImageField(default='assets/default-user-avatar.png', upload_to='avatars', max_length=500, verbose_name='Profile Image', blank=True),
        ),
    ]
| {
"content_hash": "a924b0fdc43da1624c15c9650402f7d3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 157,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.625,
"repo_name": "juliancantillo/iwgk",
"id": "afd3b85773b6afea254fb0c34f81adc741bfc181",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iwgk/users/migrations/0007_auto_20150707_0710.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "943604"
},
{
"name": "HTML",
"bytes": "67934"
},
{
"name": "Handlebars",
"bytes": "22135"
},
{
"name": "JavaScript",
"bytes": "5446384"
},
{
"name": "Python",
"bytes": "54248"
},
{
"name": "Shell",
"bytes": "3620"
},
{
"name": "XSLT",
"bytes": "7177"
}
],
"symlink_target": ""
} |
import uuid
from datetime import datetime, timedelta
import time
import random
from networking import *
import re
import sys
import smtplib
from email.mime.text import MIMEText
import signal, os
def split_hostname(node):
    """Split a "host[:port]" string into a (host, port) tuple.

    Returns the port as an int, or None when `node` carries no port.
    (The original unconditionally called int() on the optional group,
    which is None for a bare hostname and raised TypeError.)
    """
    m = re.match(r'^(.+?)(?::(\d+))?$', node)
    return (m.group(1), int(m.group(2)) if m.group(2) else None)
class DARNode:
    """A friend node in the DARN network.

    Holds the node's connection, the last configuration/testament it pushed
    to us, and its liveness state.  NOTE(review): relies on the module-level
    `darn` instance for logging and event dispatch.
    """
    def __init__(self, name):
        self.name = name                 # "host[:port]" identifier of the node
        self.connection = None           # DARNHost once connected/adopted
        self.expecting_pong = False      # ping sent, pong not yet received
        self.failed = False              # node currently considered down
        self.config = None               # last config the node pushed to us
        self.config_version = 0          # 0 means "no config known yet"
        self.testament = None            # ordered list of notification fallbacks
        self.node_key = None
        self.maintenance_mode = False

    def connect(self):
        # Open an outbound connection; the node must not already be connected.
        assert self.connection is None
        (hostname, port) = split_hostname(self.name)
        self.connection = DARNHost(self._initialize_outbound_connection, self._receive_data, self._report_error)
        self.connection.setHost(hostname, port)
        self.connection.connect()

    def adopt_connection(self, host):
        # Bind an inbound connection to this node; if we already hold one,
        # merge the two instead of replacing it.
        (hostname, port) = split_hostname(self.name)
        host.setHost(hostname, port)
        if self.connection is None:
            host.change_callbacks(self._initialize_outbound_connection, self._receive_data, self._report_error)
            self.connection = host
        else:
            host.merge(self.connection)

    def set_config(self, config, version, testament, node_key):
        # Record the configuration this node pushed to us.
        self.config = config
        self.config_version = version
        self.testament = testament
        self.node_key = node_key

    def send_ping(self):
        # Liveness probe; the config_version lets the peer detect that it
        # holds a stale copy of our configuration.
        ping_packet = {
            'type': 'ping',
            'ttl': 15,
            'config_version': self.config_version,
        }
        self.expecting_pong = True
        darn.debug("Sending ping to friend node %s, config version %d" % (self.name, self.config_version))
        self.connection.send(ping_packet)

    """Push my configuration, testament and node key to this node."""
    def push_config(self, other):
        config_push = {
            'type': 'config',
            'ttl': '20',
            'config': other.config,
            'testament': other.testament,
            'node_key': other.node_key,
            'config_version': other.config_version,
        }
        darn.debug("Pushing my %s configuration to node %s" % (other.name, self.name))
        self.connection.send(config_push)

    def _initialize_outbound_connection(self, host):
        # First packet on an outbound connection identifies us to the peer.
        assert host == self.connection
        self.connection.send_priority({'hostname': darn.mynode.name})

    def _receive_data(self, host, data):
        # Dispatch one incoming packet from this (identified) node.
        assert host == self.connection
        darn.debug("DARN Host Data from identified host %s: %s" % (self.name, data))
        if 'type' not in data:
            # Malformed packet: drop the connection.
            host.destroy()
            return
        if data['type'] == "config":
            darn.info("Noted configuration for identified host: %s" % self.name)
            self.set_config(data['config'], data['config_version'], data['testament'], data['node_key'])
        elif data['type'] == "ping":
            darn.debug("Received ping from friend node %s" % self.name)
            config_version = data['config_version']
            if darn.maintenance_shutdown:
                # While shutting down for maintenance we answer pings with a
                # maintenance notice (once per peer that still has our config)
                # instead of a pong.
                if config_version != 0:
                    maintenance_packet = {
                        'hostname': darn.mynode.name,
                        'type': 'maintenance',
                    }
                    self.connection.send(maintenance_packet)
                return
            pong_packet = {
                'type': 'pong',
                'ttl': 15,
            }
            if config_version != darn.mynode.config_version:
                # Peer has a stale copy of our config: push the current one.
                darn.info("Friend node %s has older config of mine (version %s), pushing new config version %s"
                    % (self.name, config_version, darn.mynode.config_version))
                self.push_config(darn.mynode)
            self.connection.send(pong_packet)
        elif data['type'] == "pong":
            darn.debug("Received pong from friend node %s" % self.name)
            self.expecting_pong = False
            self.failed = False
        elif data['type'] == "error":
            darn.info("Received error from friend node %s" % self.name)
            darn.receive_error_event(self, data)
        elif data['type'] == "signoff":
            darn.info("Received signoff event from node %s, success=%s: %s" % (self.name, data['success'], data['message']))
            darn.process_error_event_signoff(self.name, data['id'], data['success'])
        elif data['type'] == "maintenance":
            # Peer is going down on purpose: forget its config so we do not
            # raise error events for it.
            self.config = None
            self.config_version = 0
        else:
            darn.info("Received unknown packet type %s from node %s" % (data['type'], self.name))

    def _report_error(self, host, exctype, error):
        # Connection-level error callback; logged only.
        assert host == self.connection
        darn.info("Error while connecting to node %s: %s" % (self.name, error))
class DARN:
VERSION = "0.1"
SEND_PINGS=1
LOG_DEBUG=0
def log(self, severity, message):
hostname = "unknown"
if hasattr(self, 'mynode'):
hostname = self.mynode.name
print "%s: DARN[%s][%s]: %s" % (datetime.now(), hostname, severity, message)
def info(self, message):
self.log("info", message)
def debug(self, message):
if DARN.LOG_DEBUG:
self.log("debug", message)
"""Create a DARN object. Read config from given file. """
def __init__(self, configfile):
self.info("Initialising DARN version " + DARN.VERSION)
self.configfile = configfile
self.net = DARNetworking()
self.running = False
self.nodes = {}
self.error_seq = 1
self.error_events = []
self.maintenance_shutdown = False
self.reload()
(host, port) = split_hostname(self.mynode.name)
host = ''
if 'bind_host' in self.mynode.config:
host = self.mynode.config['bind_host']
self.debug("Going to listen on host %s port %s" % (host if host != '' else '*', port))
self.net.create_server_socket(host, port, lambda *_: None, self.data_from_unidentified_host)
for node in self.mynode.config['nodes']:
name = node['hostname']
self.nodes[name] = DARNode(name)
self.nodes[name].connect()
def data_from_unidentified_host(self, host, data):
self.debug("DARN Host connected to me: %s and sent: %s" % (host, data))
if 'hostname' not in data:
host.destroy()
return
if data['hostname'] in self.nodes:
node = self.nodes[data['hostname']]
else:
node = DARNode(data['hostname'])
self.nodes[data['hostname']] = node
node.adopt_connection(host)
def stop(self):
self.info("Stopping")
self.running = False
"""Start the DARN daemon. This call blocks until stop() is called. """
def run(self):
if self.running:
return
self.info("Starting")
self.running = True
self.net.add_timer(0, self.check_nodes)
# This method blocks until there are no more timers to run
self.net.run()
"""
Start checking all nodes. This generates a list of 'ping' calls to the
networking layer. If no succesful pong comes back for a given node,
an error event is generated. This is checked asynchronously, so this
call does not block.
"""
def check_nodes(self):
self.debug("About to check friend nodes")
if not self.running:
return
if not DARN.SEND_PINGS:
return
for name in self.mynode.config['nodes']:
node = self.nodes[name['hostname']]
node.send_ping()
self.net.add_timer(10, self.check_timeouts)
self.net.add_timer(15, self.check_nodes)
def handle_error_event(self, event, callback):
victim_config = self.nodes[event['victim']].config
if not 'email' in victim_config:
callback(False, "Cannot send e-mail regarding failure of victim %s: no e-mail address known" % event['victim'])
email = victim_config['email']
if not 'smtp' in self.mynode.config or not 'sender' in self.mynode.config['smtp'] or not 'host' in self.mynode.config['smtp']:
callback(False, "Cannot send e-mail regarding failure of victim %s: no valid smtp configuration" % event['victim'])
body = "Error event report fired!\n"
body += "Event report ID: %s\n" % event['id']
body += "Time: %s\n" % datetime.now()
body += "Victim: %s\n" % event['victim']
body += "Message: %s\n" % event['message']
msg = MIMEText(body)
msg['Subject'] = "DARN! Error event report"
msg['From'] = self.mynode.config['smtp']['sender']
msg['To'] = email
email_succeeded = None
email_error = None
try:
s = smtplib.SMTP(self.mynode.config['smtp']['host'])
recipients_failed = s.sendmail(self.mynode.config['smtp']['sender'], [email], msg.as_string())
s.quit()
if len(recipients_failed) > 0:
email_succeeded = False
email_error = "Failed to send to some recipients: " + str(recipients_failed)
else:
email_succeeded = True
except Exception as e:
email_succeeded = False
email_error = str(e)
callback(email_succeeded, email_error)
"""
Received an error event. Process it by sending an e-mail, and send a
sign-off reply. 'node' is the sender of this error event; the victim
is in event['victim'].
"""
def receive_error_event(self, node, event):
self.debug("Received error event for node %s" % node.name)
if event['victim'] not in self.nodes or self.nodes[event['victim']].config is None:
self.info("Received error event about victim %s, but I don't have its node config, so can't inform it" % event['victim'])
signoff_packet = {
'type': 'signoff',
'id': event['id'],
'message': "Can't signoff, don't have a node config for this node",
'success': False,
}
node.connection.send(signoff_packet)
return
def handle_message(success, message):
signoff_packet = {
'type': 'signoff',
'id': event['id'],
'message': message,
'success': success,
}
node.connection.send(signoff_packet)
self.handle_error_event(event, handle_message)
"""
Check if any of the hosts we checked earlier didn't respond yet.
Generate error events for every host that seems to be down.
"""
def check_timeouts(self):
if not self.running:
return
for victim in self.mynode.config['nodes']:
victim = victim['hostname']
node = self.nodes[victim]
if not node.expecting_pong:
continue
if not node.config:
self.info("Expected pong from friend %s, but did not receive any; however, don't have node configuration, so silent ignore" % victim)
continue
if not node.testament:
self.info("Expected pong from friend %s, but did not receive any; however, we don't know its testament, so silent ignore" % victim)
continue
if node.failed:
self.info("Expected pong from friend %s, but did not receive any, host is probably still down" % victim)
continue
self.info("Expected pong from friend %s, but did not receive any, generating error event" % victim)
node.failed = True
self.error_seq = self.error_seq + 1
error_event = {
'type': 'error',
'id': str(uuid.uuid1(None, self.error_seq)),
'victim': victim,
'ttl': 20,
'message': "%s failed to receive response from %s within 30 seconds" % (self.mynode.name, victim),
}
error_event_status = {
'testament': node.testament,
'current_index': None,
'timeout': datetime.fromtimestamp(0),
'node_failed': False, # XXX reporter_failed?
}
self.error_events.append((error_event, error_event_status))
self.pump_error_events()
"""
For every error event that's still active, check if we need to send it
to the next node in the victim's testament list.
"""
def pump_error_events(self):
self.debug("Pumping %d error events" % len(self.error_events))
for (event, event_status) in self.error_events:
self.debug("Error event has status: %s" % event_status)
if event_status['timeout'] <= datetime.now() or event_status['node_failed']:
if event_status['current_index'] is None:
# this event was never sent anywhere (or all nodes failed and we're trying them all again)
event_status['current_index'] = 0
else:
event_status['current_index'] += 1
if len(event_status['testament']) <= event_status['current_index']:
self.info("All testament nodes for a victim failed. Starting over.")
event_status['current_index'] = None
event_status['timeout'] = datetime.now() + timedelta(minutes=5)
event_status['node_failed'] = False
continue
current_node = event_status['testament'][event_status['current_index']]
event_status['timeout'] = datetime.now() + timedelta(seconds=20)
event_status['node_failed'] = False
if current_node == self.mynode.name:
self.info("Trying to handle error event about victim %s myself" % event['victim'])
def handle_response(success, message):
successstr = "Failed"
if success:
successstr = "Succeeded"
self.info("%s to handle error event about victim %s myself: %s" % (successstr, event['victim'], message))
self.process_error_event_signoff(current_node, event['id'], success)
self.handle_error_event(event, handle_response)
else:
self.info("Sending error event about victim %s to node %s" % (event['victim'], current_node))
if current_node not in self.nodes:
self.nodes[current_node] = DARNode(current_node)
self.nodes[current_node].connect()
self.nodes[current_node].connection.send(event)
"""
Process an error-event sign-off packet from a node. If the sign-off is
succesful, forget about the error event. If it's unsuccesfull, immediately
mark the error event so that it is sent to the next testament node.
"""
def process_error_event_signoff(self, node, id, success):
self.debug("Received error event signoff packet from node %s, success %s" % (node, success))
new_error_events = []
for (event, event_status) in self.error_events:
if event['id'] == id:
victim = event['victim']
self.debug("Packet is about victim %s" % victim)
if success:
self.info("Node %s succesfully signed-off error event about victim %s" % (node, victim))
continue
else:
self.info("Node %s failed to handle error event about victim %s" % (node, victim))
event_status['node_failed'] = True
new_error_events.append((event, event_status))
# TODO: Niet steeds opnieuw schrijven maar gewoon een dict gebruiken
self.error_events = new_error_events
self.pump_error_events()
"""
"""
def reload(self):
config = self.load_config(self.configfile)
self.mynode = DARNode(config['hostname'])
self.mynode.set_config(config, int(time.time()), self.generate_testament(config), self.generate_node_key())
self.info("Loaded configuration version %s" % self.mynode.config_version)
self.push_config()
"""
Load configuration from the given file.
"""
def load_config(self, configfile):
fh = open(configfile)
cfg = json.load(fh)
fh.close()
return cfg
"""
Generate testament from configuration. See module
documentation for more information about the testament.
"""
def generate_testament(self, config):
nodes = []
for node in config['nodes']:
nodes.append(node['hostname'])
return nodes
"""
Generate a node key. See module documentation for more information
about the testament.
"""
def generate_node_key(self):
return "four"
def enable_maintenance(self):
self.maintenance_shutdown = True
maintenance_packet = {
'hostname': self.mynode.name,
'type': 'maintenance',
}
for name in self.mynode.config['nodes']:
node = self.nodes[name['hostname']]
node.connection.send(maintenance_packet)
self.net.add_timer(10, lambda: sys.exit(0))
"""Push configuration, testament and node key to all nodes."""
def push_config(self):
for hostname in self.nodes:
self.nodes[hostname].push_config(self.mynode)
if __name__ == "__main__":
    # Boot the daemon from the config file named on the command line and
    # wire up the control signals before entering the blocking main loop.
    darn = DARN(sys.argv[1])

    def _on_sighup(signum, frame):
        darn.reload()

    def _on_sigusr1(signum, frame):
        darn.enable_maintenance()

    signal.signal(signal.SIGHUP, _on_sighup)
    signal.signal(signal.SIGUSR1, _on_sigusr1)
    darn.run()
| {
"content_hash": "7aa0924012016cffad91ae0e4e25ed9a",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 137,
"avg_line_length": 34.183673469387756,
"alnum_prop": 0.6796019900497512,
"repo_name": "Jille/darn",
"id": "15ad027f5bc7e7b59e913cb897880b29931c4f1d",
"size": "15075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "20671"
}
],
"symlink_target": ""
} |
from ddt import ddt, data
from rest_framework import status, test
from waldur_core.core.tests.helpers import override_waldur_core_settings
from waldur_core.structure import models
from . import fixtures, factories
class ServiceSettingsListTest(test.APITransactionTestCase):
    """Visibility and credential-masking rules when listing service settings.

    Shared settings are visible to everyone, but their credentials are
    hidden; private settings (with credentials) are visible only to the
    owning customer's owner and to staff.
    """

    def setUp(self):
        self.users = {
            'staff': factories.UserFactory(is_staff=True),
            'owner': factories.UserFactory(),
            'not_owner': factories.UserFactory(),
        }
        self.customers = {
            'owned': factories.CustomerFactory(),
            'inaccessible': factories.CustomerFactory(),
        }
        self.customers['owned'].add_user(self.users['owner'], models.CustomerRole.OWNER)
        self.settings = {
            'shared': factories.ServiceSettingsFactory(shared=True),
            'inaccessible': factories.ServiceSettingsFactory(customer=self.customers['inaccessible']),
            'owned': factories.ServiceSettingsFactory(
                customer=self.customers['owned'], backend_url='bk.url', password='123'),
        }
        # Token is excluded, because it is not available for OpenStack
        self.credentials = ('backend_url', 'username', 'password')

    def test_user_can_see_shared_settings(self):
        self.client.force_authenticate(user=self.users['not_owner'])
        response = self.client.get(factories.ServiceSettingsFactory.get_list_url())
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertEqual(len(response.data), 1)
        self.assert_credentials_hidden(response.data[0])
        self.assertEqual(response.data[0]['uuid'], self.settings['shared'].uuid.hex, response.data)

    def test_user_can_see_shared_and_own_settings(self):
        self.client.force_authenticate(user=self.users['owner'])
        response = self.client.get(factories.ServiceSettingsFactory.get_list_url())
        # Fixed spelling: "recieved" -> "received".
        uuids_received = {d['uuid'] for d in response.data}
        uuids_expected = {self.settings[s].uuid.hex for s in ('shared', 'owned')}
        self.assertEqual(uuids_received, uuids_expected, response.data)

    def test_admin_can_see_all_settings(self):
        self.client.force_authenticate(user=self.users['staff'])
        response = self.client.get(factories.ServiceSettingsFactory.get_list_url())
        uuids_received = {d['uuid'] for d in response.data}
        uuids_expected = {s.uuid.hex for s in self.settings.values()}
        self.assertEqual(uuids_received, uuids_expected, uuids_received)

    def test_user_can_see_credentials_of_own_settings(self):
        self.client.force_authenticate(user=self.users['owner'])
        response = self.client.get(factories.ServiceSettingsFactory.get_url(self.settings['owned']))
        self.assert_credentials_visible(response.data)

    def test_user_cant_see_others_settings(self):
        self.client.force_authenticate(user=self.users['not_owner'])
        response = self.client.get(factories.ServiceSettingsFactory.get_url(self.settings['owned']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_admin_can_see_all_credentials(self):
        self.client.force_authenticate(user=self.users['staff'])
        response = self.client.get(factories.ServiceSettingsFactory.get_url(self.settings['owned']))
        self.assert_credentials_visible(response.data)

    def test_user_cant_see_shared_credentials(self):
        self.client.force_authenticate(user=self.users['owner'])
        response = self.client.get(factories.ServiceSettingsFactory.get_url(self.settings['shared']))
        self.assert_credentials_hidden(response.data)

    def test_admin_can_see_settings_only_with_resources(self):
        self.client.force_authenticate(user=self.users['staff'])
        instance = factories.TestNewInstanceFactory()
        response = self.client.get(factories.ServiceSettingsFactory.get_list_url(), {'has_resources': 'true'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected = instance.service_project_link.service.settings.name
        self.assertEqual(response.data[0]['name'], expected)

    def test_admin_can_see_settings_without_resources(self):
        self.client.force_authenticate(user=self.users['staff'])
        service_with_resource = factories.TestNewInstanceFactory()
        service_without_resource = factories.ServiceSettingsFactory()
        response = self.client.get(factories.ServiceSettingsFactory.get_list_url(), {'has_resources': 'false'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        uuid_expected = service_without_resource.uuid.hex
        uuid_unexpected = service_with_resource.service_project_link.service.settings.uuid.hex
        uuids_received = [d['uuid'] for d in response.data]
        self.assertIn(uuid_expected, uuids_received)
        self.assertNotIn(uuid_unexpected, uuids_received)

    def test_settings_without_resources_are_filtered_out(self):
        self.client.force_authenticate(user=self.users['staff'])
        response = self.client.get(factories.ServiceSettingsFactory.get_list_url(), {'has_resources': 'true'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, [])

    def assert_credentials_visible(self, data):
        """Assert every credential field is present in the serialized data."""
        for field in self.credentials:
            self.assertIn(field, data)

    def assert_credentials_hidden(self, data):
        """Assert no credential field leaks into the serialized data."""
        for field in self.credentials:
            self.assertNotIn(field, data)
@ddt
class ServiceSettingsUpdateTest(test.APITransactionTestCase):
    """Permission checks for updating (PATCH) private service settings."""

    def setUp(self):
        self.fixture = fixtures.ServiceFixture()
        self.service_settings = self.fixture.service.settings
        self.url = factories.ServiceSettingsFactory.get_url(self.service_settings)

    @data('staff', 'owner')
    def test_staff_and_owner_can_update_service_settings(self, user):
        self.assert_user_can_update_service_settings(user)

    @data('admin', 'manager')
    def test_admin_and_manager_can_not_update_service_settings(self, user):
        # Renamed from "...admin_and_owner..." -- the @data set is
        # admin/manager, so the old name was misleading.
        self.assert_user_can_not_get_service_settings(user)

    @override_waldur_core_settings(ONLY_STAFF_MANAGES_SERVICES=True)
    def test_if_only_staff_manages_services_he_can_update_it(self):
        self.assert_user_can_update_service_settings('staff')

    @data('owner', 'admin', 'manager')
    @override_waldur_core_settings(ONLY_STAFF_MANAGES_SERVICES=True)
    def test_if_only_staff_manages_services_other_users_can_not_update_it(self, user):
        self.assert_user_can_not_update_service_settings(user)

    def assert_user_can_update_service_settings(self, user):
        """PATCH succeeds (200) and the new name is persisted."""
        self.client.force_authenticate(getattr(self.fixture, user))
        response = self.update_service_settings()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.service_settings.refresh_from_db()
        self.assertEqual(self.service_settings.name, 'Valid new name')

    def assert_user_can_not_update_service_settings(self, user):
        """PATCH is rejected (403) and the name is unchanged."""
        self.client.force_authenticate(getattr(self.fixture, user))
        response = self.update_service_settings()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.service_settings.refresh_from_db()
        self.assertNotEqual(self.service_settings.name, 'Valid new name')

    def assert_user_can_not_get_service_settings(self, user):
        """The settings are not even visible to the user (404 on PATCH)."""
        self.client.force_authenticate(getattr(self.fixture, user))
        response = self.update_service_settings()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.service_settings.refresh_from_db()
        self.assertNotEqual(self.service_settings.name, 'Valid new name')

    def update_service_settings(self):
        return self.client.patch(self.url, {'name': 'Valid new name'})
@ddt
class SharedServiceSettingUpdateTest(test.APITransactionTestCase):
    """Permission checks for updating shared service settings.

    Covers staff/owner access, settings with no customer attached, and
    which fields (type, password) may be changed for shared vs. unshared
    settings.
    """

    def setUp(self):
        self.fixture = fixtures.ServiceFixture()
        self.service_settings = self.fixture.service_settings
        # Settings are shared by default in this suite; individual tests
        # flip the flag back when they exercise the unshared path.
        self.service_settings.shared = True
        self.service_settings.save()
        self.url = factories.ServiceSettingsFactory.get_url(self.service_settings)

    def get_valid_payload(self):
        return {'name': 'test'}

    @data('staff', 'owner')
    def test_only_staff_and_an_owner_of_an_unshared_service_settings_can_update_the_settings(self, user):
        self.service_settings.shared = False
        self.service_settings.save()
        self.client.force_authenticate(getattr(self.fixture, user))
        payload = self.get_valid_payload()
        response = self.client.patch(self.url, data=payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.service_settings.refresh_from_db()
        self.assertEqual(self.service_settings.name, payload['name'])

    @data('owner', 'manager', 'admin')
    def test_user_cannot_update_shared_service_settings_without_customer_if_he_has_no_permission(self, user):
        self.service_settings.customer = None
        self.service_settings.save()
        self.client.force_authenticate(getattr(self.fixture, user))
        payload = self.get_valid_payload()
        response = self.client.patch(self.url, data=payload)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.service_settings.refresh_from_db()
        self.assertNotEqual(self.service_settings.name, payload['name'])

    @data('staff')
    def test_user_can_update_shared_service_settings_without_customer_if_he_has_permission(self, user):
        self.service_settings.customer = None
        self.service_settings.save()
        self.client.force_authenticate(getattr(self.fixture, user))
        payload = self.get_valid_payload()
        response = self.client.patch(self.url, data=payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.service_settings.refresh_from_db()
        self.assertEqual(self.service_settings.name, payload['name'])

    @data('manager', 'admin')
    def test_user_cannot_update_shared_service_settings_with_customer_if_he_has_no_permission(self, user):
        self.client.force_authenticate(getattr(self.fixture, user))
        payload = self.get_valid_payload()
        response = self.client.patch(self.url, data=payload)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.service_settings.refresh_from_db()
        self.assertNotEqual(self.service_settings.name, payload['name'])

    def test_user_cannot_change_unshared_settings_type(self):
        # The request as a whole succeeds, but the read-only 'type' field
        # is silently ignored.
        self.service_settings.shared = False
        self.service_settings.save()
        self.client.force_authenticate(user=self.fixture.owner)
        payload = {'name': 'Test backend', 'type': 2}
        response = self.client.patch(self.url, payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.service_settings.refresh_from_db()
        self.assertNotEqual(self.service_settings.type, payload['type'], response.data)

    def test_user_can_change_unshared_settings_password(self):
        self.service_settings.shared = False
        self.service_settings.save()
        self.client.force_authenticate(user=self.fixture.owner)
        payload = {'password': 'secret'}
        response = self.client.patch(self.url, data=payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.service_settings.refresh_from_db()
        self.assertEqual(self.service_settings.password, payload['password'], response.data)

    def test_user_cannot_change_shared_settings_password(self):
        self.client.force_authenticate(user=self.fixture.owner)
        payload = {'password': 'secret'}
        response = self.client.patch(self.url, data=payload)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_options_are_partially_updated(self):
        # A PATCH that touches one option must not wipe out the others.
        required_field_name = 'availability_zone'
        self.service_settings.shared = False
        self.service_settings.options = {required_field_name: 'value'}
        self.service_settings.save()
        self.client.force_authenticate(user=self.fixture.owner)
        payload = {'tenant_name': 'secret'}
        response = self.client.patch(self.url, data=payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.service_settings.refresh_from_db()
        self.assertIn(required_field_name, self.service_settings.options)
@ddt
class ServiceSettingsUpdateCertifications(test.APITransactionTestCase):
    """Permission checks for the 'update_certifications' action.

    The action replaces the full set of certifications attached to the
    settings with the ones given in the payload.
    """

    def setUp(self):
        self.fixture = fixtures.ServiceFixture()
        self.settings = self.fixture.service_settings
        self.associated_certification = factories.ServiceCertificationFactory()
        self.settings.certifications.add(self.associated_certification)
        self.new_certification = factories.ServiceCertificationFactory()
        self.url = factories.ServiceSettingsFactory.get_url(self.settings, 'update_certifications')

    @data('staff', 'owner')
    def test_user_can_update_certifications_for_unshared_settings(self, user):
        self.client.force_authenticate(getattr(self.fixture, user))
        payload = self._get_payload(self.new_certification)
        response = self.client.post(self.url, payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.settings.refresh_from_db()
        # The payload replaces (not extends) the certification set.
        self.assertTrue(self.settings.certifications.filter(pk=self.new_certification.pk).exists())
        self.assertFalse(self.settings.certifications.filter(pk=self.associated_certification.pk).exists())

    @data('manager', 'admin', 'global_support', 'owner')
    def test_user_can_not_update_certifications_for_shared_settings_if_he_is_not_staff(self, user):
        self.settings.shared = True
        self.settings.save()
        self.client.force_authenticate(getattr(self.fixture, user))
        payload = self._get_payload(self.associated_certification)
        response = self.client.post(self.url, payload)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    @override_waldur_core_settings(ONLY_STAFF_MANAGES_SERVICES=True)
    @data('owner', 'manager', 'admin')
    def test_if_only_staff_manages_services_other_user_can_not_update_certifications(self, user):
        self.client.force_authenticate(getattr(self.fixture, user))
        payload = self._get_payload(self.associated_certification)
        response = self.client.post(self.url, payload)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_staff_can_update_certifications_for_shared_settings(self):
        self.settings.shared = True
        self.settings.save()
        self.client.force_authenticate(self.fixture.staff)
        payload = self._get_payload(self.new_certification)
        response = self.client.post(self.url, payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.settings.refresh_from_db()
        self.assertTrue(self.settings.certifications.filter(pk=self.new_certification.pk).exists())

    def _get_payload(self, *certifications):
        """Build the POST payload referencing the given certifications by URL."""
        certification_urls = [{"url": factories.ServiceCertificationFactory.get_url(c)} for c in certifications]
        return {
            'certifications': certification_urls
        }
| {
"content_hash": "03573c79b637b8a932033db8327ecc00",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 112,
"avg_line_length": 43.934285714285714,
"alnum_prop": 0.6962996683358262,
"repo_name": "opennode/nodeconductor",
"id": "211ef909fb30b4d615278f581b86c8760250be11",
"size": "15377",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "waldur_core/structure/tests/test_service_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1877"
},
{
"name": "HTML",
"bytes": "17528"
},
{
"name": "JavaScript",
"bytes": "248900"
},
{
"name": "Python",
"bytes": "1254720"
}
],
"symlink_target": ""
} |
"""files placed in this module are ignored in VCS, you can use this location as a playground""" | {
"content_hash": "bd07dddce4407a84dd71378d761bcad9",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 95,
"avg_line_length": 95,
"alnum_prop": 0.7578947368421053,
"repo_name": "alviproject/alvi",
"id": "139f87e4a0c297a10517c1996a7c19e14ae6c489",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alvi/client/local_scenes/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42568"
},
{
"name": "HTML",
"bytes": "35975"
},
{
"name": "JavaScript",
"bytes": "152425"
},
{
"name": "Python",
"bytes": "108114"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
} |
"""
Going Down!
This is the first level. Zort has been shot out of orbit and
crashlanded here. This level should be relatively straight forward and
take no more than 2 minutes to complete. There should be no monsters,
an obvious path, one door and one switch to go through.
"""
# import your game entities here
# implement any level specific enemies here
from zort.entity import *
from zort.enemies import *
from zort.hero import Hero
from zort.level import Task
from zort import hex_model
def setup_level(level_scene):
    """Populate the level with its entities.

    Places the hero, the escape saucer, one enemy, a button-driven door
    and the ship part the player must collect.

    Args:
        level_scene: the scene to populate; must expose the builder API
            used below (move_hero, build_entity, build_button,
            build_door, add_entity, load_level).
    """
    level_scene.move_hero((1, 1))
    level_scene.build_entity(Saucer, 'shipPink_manned.png', (4, 4))
    level_scene.build_entity(Enemy, "alienGreen.png", (4, 4))
    level_scene.build_button("testDoor", "tileMagic_tile.png", (2, 4))
    level_scene.build_door("testDoor", (0, 0))
    # Collecting the ship part reloads the level.
    ship_part = ShipPart('smallRockStone.png', level_scene.load_level)
    level_scene.add_entity(ship_part, (2, 2))
    # Removed dead commented-out code: it referenced `self`, which does
    # not exist in this module-level function, so it could never run.
def handle_internal_events(level_scene):
    """
    Handle non-entity specific events here
    (or entity specific events if that means getting the game done on time)

    Currently a no-op for this level: there are no level-specific events.
    """
    pass
| {
"content_hash": "7c4bae2749207346a1240600d3840dac",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 75,
"avg_line_length": 28.2,
"alnum_prop": 0.6966115051221434,
"repo_name": "bitcraft/pyweek19",
"id": "729c9e73b117cdac7ae9ff4ea7359b0fcc8eb269",
"size": "1269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zort/levels/test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "190284"
}
],
"symlink_target": ""
} |
import time
from requests import request, ConnectionError
from ..utils import SSLHttpAdapter, module_member, parse_qs, user_agent
from ..exceptions import AuthFailed
class BaseAuth(object):
    """An authentication backend that authenticates the user based on
    the provider response"""
    name = ''  # provider name, it's stored in database
    supports_inactive_user = False  # Django auth
    ID_KEY = None  # key in the provider response that holds the user id
    EXTRA_DATA = None  # (name, alias[, discard]) entries to persist
    GET_ALL_EXTRA_DATA = False  # persist every key of the provider response
    REQUIRES_EMAIL_VALIDATION = False
    SEND_USER_AGENT = False  # send a User-Agent header on backend requests
    SSL_PROTOCOL = None  # forced SSL protocol for requests, if any

    def __init__(self, strategy, redirect_uri=None):
        """Bind the backend to a strategy and resolve the redirect URI."""
        self.strategy = strategy
        self.redirect_uri = redirect_uri
        self.data = self.strategy.request_data()
        # Re-assign as an absolute URI; the relative value set above is
        # only used as input to absolute_uri().
        self.redirect_uri = self.strategy.absolute_uri(
            self.redirect_uri
        )

    def setting(self, name, default=None):
        """Return setting value from strategy"""
        return self.strategy.setting(name, default=default, backend=self)

    def start(self):
        """Begin the auth flow: redirect to the provider, or render its
        login HTML for backends that do not use redirects."""
        if self.uses_redirect():
            return self.strategy.redirect(self.auth_url())
        else:
            return self.strategy.html(self.auth_html())

    def complete(self, *args, **kwargs):
        return self.auth_complete(*args, **kwargs)

    def auth_url(self):
        """Must return redirect URL to auth provider"""
        raise NotImplementedError('Implement in subclass')

    def auth_html(self):
        """Must return login HTML content returned by provider"""
        raise NotImplementedError('Implement in subclass')

    def auth_complete(self, *args, **kwargs):
        """Completes the login process, must return user instance"""
        raise NotImplementedError('Implement in subclass')

    def process_error(self, data):
        """Process data for errors, raise exception if needed.
        Call this method on any override of auth_complete."""
        pass

    def authenticate(self, *args, **kwargs):
        """Authenticate user using social credentials
        Authentication is made if this is the correct backend, backend
        verification is made by kwargs inspection for current backend
        name presence.
        """
        # Validate backend and arguments. Require that the Social Auth
        # response be passed in as a keyword argument, to make sure we
        # don't match the username/password calling conventions of
        # authenticate.
        if 'backend' not in kwargs or kwargs['backend'].name != self.name or \
           'strategy' not in kwargs or 'response' not in kwargs:
            return None
        self.strategy = kwargs.get('strategy') or self.strategy
        self.redirect_uri = kwargs.get('redirect_uri') or self.redirect_uri
        self.data = self.strategy.request_data()
        kwargs.setdefault('is_new', False)
        pipeline = self.strategy.get_pipeline(self)
        args, kwargs = self.strategy.clean_authenticate_args(*args, **kwargs)
        return self.pipeline(pipeline, *args, **kwargs)

    def pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
        """Run the auth pipeline and return the resulting user (annotated
        with .social_user and .is_new), or the raw non-dict result."""
        out = self.run_pipeline(pipeline, pipeline_index, *args, **kwargs)
        if not isinstance(out, dict):
            # A pipeline step short-circuited (e.g. returned a redirect).
            return out
        user = out.get('user')
        if user:
            user.social_user = out.get('social')
            user.is_new = out.get('is_new')
        return user

    def disconnect(self, *args, **kwargs):
        """Run the disconnection pipeline to unlink a social account."""
        pipeline = self.strategy.get_disconnect_pipeline(self)
        kwargs['name'] = self.name
        kwargs['user_storage'] = self.strategy.storage.user
        return self.run_pipeline(pipeline, *args, **kwargs)

    def run_pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
        """Execute pipeline entries sequentially, accumulating each dict
        result into the shared kwargs; a non-dict result stops the run."""
        out = kwargs.copy()
        out.setdefault('strategy', self.strategy)
        out.setdefault('backend', out.pop(self.name, None) or self)
        out.setdefault('request', self.strategy.request_data())
        out.setdefault('details', {})
        for idx, name in enumerate(pipeline[pipeline_index:]):
            # pipeline_index lets a partial pipeline resume mid-way.
            out['pipeline_index'] = pipeline_index + idx
            func = module_member(name)
            result = func(*args, **out) or {}
            if not isinstance(result, dict):
                return result
            out.update(result)
        return out

    def extra_data(self, user, uid, response, details=None, *args, **kwargs):
        """Return default extra data to store in extra_data field"""
        data = {
            # store the last time authentication took place
            'auth_time': int(time.time())
        }
        extra_data_entries = []
        if self.GET_ALL_EXTRA_DATA or self.setting('GET_ALL_EXTRA_DATA', False):
            extra_data_entries = response.keys()
        else:
            extra_data_entries = (self.EXTRA_DATA or []) + self.setting('EXTRA_DATA', [])
        for entry in extra_data_entries:
            # Each entry is name | (name,) | (name, alias) | (name, alias, discard).
            if not isinstance(entry, (list, tuple)):
                entry = (entry,)
            size = len(entry)
            if size >= 1 and size <= 3:
                if size == 3:
                    name, alias, discard = entry
                elif size == 2:
                    (name, alias), discard = entry, False
                elif size == 1:
                    name = alias = entry[0]
                    discard = False
                # NOTE(review): `or` also falls through on falsy values
                # (0, '', False), not only on missing keys.
                value = response.get(name) or details.get(name)
                if discard and not value:
                    continue
                data[alias] = value
        return data

    def auth_allowed(self, response, details):
        """Return True if the user should be allowed to authenticate, by
        default check if email is whitelisted (if there's a whitelist)"""
        emails = self.setting('WHITELISTED_EMAILS', [])
        domains = self.setting('WHITELISTED_DOMAINS', [])
        email = details.get('email')
        allowed = True
        if email and (emails or domains):
            domain = email.split('@', 1)[1]
            allowed = email in emails or domain in domains
        return allowed

    def get_user_id(self, details, response):
        """Return a unique ID for the current user, by default from server
        response."""
        return response.get(self.ID_KEY)

    def get_user_details(self, response):
        """Must return user details in a known internal struct:
            {'username': <username if any>,
             'email': <user email if any>,
             'fullname': <user full name if any>,
             'first_name': <user first name if any>,
             'last_name': <user last name if any>}
        """
        raise NotImplementedError('Implement in subclass')

    def get_user_names(self, fullname='', first_name='', last_name=''):
        """Derive (fullname, first_name, last_name) from whichever parts
        are available, stripping surrounding whitespace."""
        # Avoid None values
        fullname = fullname or ''
        first_name = first_name or ''
        last_name = last_name or ''
        if fullname and not (first_name or last_name):
            try:
                first_name, last_name = fullname.split(' ', 1)
            except ValueError:
                first_name = first_name or fullname or ''
                last_name = last_name or ''
        fullname = fullname or ' '.join((first_name, last_name))
        return fullname.strip(), first_name.strip(), last_name.strip()

    def get_user(self, user_id):
        """
        Return user with given ID from the User model used by this backend.
        This is called by django.contrib.auth.middleware.
        """
        return self.strategy.get_user(user_id)

    def continue_pipeline(self, partial):
        """Continue previous halted pipeline"""
        return self.strategy.authenticate(self,
                                          pipeline_index=partial.next_step,
                                          *partial.args,
                                          **partial.kwargs)

    def auth_extra_arguments(self):
        """Return extra arguments needed on auth process. The defaults can be
        overridden by GET parameters."""
        extra_arguments = self.setting('AUTH_EXTRA_ARGUMENTS', {}).copy()
        extra_arguments.update((key, self.data[key]) for key in extra_arguments
                               if key in self.data)
        return extra_arguments

    def uses_redirect(self):
        """Return True if this provider uses redirect url method,
        otherwise return false."""
        return True

    def request(self, url, method='GET', *args, **kwargs):
        """Issue an HTTP request to the provider, applying the backend's
        SSL, timeout and User-Agent settings; raises AuthFailed on
        connection errors and HTTPError on non-2xx responses."""
        kwargs.setdefault('headers', {})
        if self.setting('VERIFY_SSL') is not None:
            kwargs.setdefault('verify', self.setting('VERIFY_SSL'))
        kwargs.setdefault('timeout', self.setting('REQUESTS_TIMEOUT') or
                                     self.setting('URLOPEN_TIMEOUT'))
        if self.SEND_USER_AGENT and 'User-Agent' not in kwargs['headers']:
            kwargs['headers']['User-Agent'] = self.setting('USER_AGENT') or \
                                              user_agent()
        try:
            if self.SSL_PROTOCOL:
                session = SSLHttpAdapter.ssl_adapter_session(self.SSL_PROTOCOL)
                response = session.request(method, url, *args, **kwargs)
            else:
                response = request(method, url, *args, **kwargs)
        except ConnectionError as err:
            raise AuthFailed(self, str(err))
        response.raise_for_status()
        return response

    def get_json(self, url, *args, **kwargs):
        """Request *url* and return the parsed JSON body."""
        return self.request(url, *args, **kwargs).json()

    def get_querystring(self, url, *args, **kwargs):
        """Request *url* and parse its body as a URL-encoded querystring."""
        return parse_qs(self.request(url, *args, **kwargs).text)

    def get_key_and_secret(self):
        """Return tuple with Consumer Key and Consumer Secret for current
        service provider. Must return (key, secret), order *must* be respected.
        """
        return self.setting('KEY'), self.setting('SECRET')
| {
"content_hash": "c64634bb483e804bbb7f1158a57bf606",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 89,
"avg_line_length": 40.752066115702476,
"alnum_prop": 0.5886229973636179,
"repo_name": "LennonChin/Django-Practices",
"id": "9b34f2a3b9981bdefb8caeeabbea8218ca359727",
"size": "9862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MxShop/extra_apps/social_core/backends/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "513444"
},
{
"name": "HTML",
"bytes": "501361"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "1810740"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "1739514"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# NOTE(review): patterns() and string view references are deprecated and
# removed in Django >= 1.10; presumably this project targets Django 1.x --
# confirm before upgrading.
urlpatterns = patterns('',
    # Landing page.
    url(r'^$', 'main.views.index', name='index'),
    # OAuth2 callback endpoint for the Box API.
    url(r'^oauth2/$', 'main.views.oauth2', name='oauth2'),
    url(r'^about/$', 'main.views.about', name='about'),
    url(r'^metadata/download/(?P<id>\d+)/$', 'main.views.file_download', name='metadata_download'),
    url(r'^metadata/template/select/$', 'main.views.metadata_select_template', name='metadata-select-template'),
    url(r'^metadata/templates/$', 'main.views.metadata_set_templates', name='metadata-set-templates'),
    # Box View API endpoints for previewing files.
    url(r'^view/file/$', 'main.views.box_view_file', name='box-view-file'),
    url(r'^view/session/$', 'main.views.box_view_session', name='box-view-session'),
    url(r'^folder/(?P<folder_id>\d+)/items/$', 'main.views.folder_items', name='folder-items'),
    url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "e7b512a9d63473715409a5c4b20a4ed3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 112,
"avg_line_length": 56.3125,
"alnum_prop": 0.6625971143174251,
"repo_name": "enikesha/box-demo",
"id": "e5d7dd953d5624fcbe069f3538860ec404c63ea8",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "box_demo/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "140716"
},
{
"name": "HTML",
"bytes": "14329"
},
{
"name": "JavaScript",
"bytes": "780"
},
{
"name": "Python",
"bytes": "11575"
}
],
"symlink_target": ""
} |
"""
WSGI config for rotations project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# Default the settings module; an explicit environment value wins.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rotations.settings')
application = get_wsgi_application()
# Wrap the WSGI app so WhiteNoise serves static files directly.
application = DjangoWhiteNoise(application)
| {
"content_hash": "769adb2e65f2d77709237b3679c2ed4f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 27,
"alnum_prop": 0.7921810699588477,
"repo_name": "rlucioni/rotations",
"id": "4ec84ae1c4e436cf0c3408ee8b3c399bd20acdd9",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rotations/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "527"
},
{
"name": "Python",
"bytes": "15686"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: adds the "NEW" badge fields to Item.
    # The verbose_name values are user-facing admin labels (Russian) and
    # must be kept as-is.

    dependencies = [
        ('shop', '0025_auto_20180716_1613'),
    ]

    operations = [
        migrations.AddField(
            model_name='item',
            name='new_flag_color',
            field=models.CharField(blank=True, default='', max_length=200, verbose_name='Цвет подложки надписи'),
        ),
        migrations.AddField(
            model_name='item',
            name='new_flag_text',
            field=models.CharField(blank=True, default='', max_length=200, verbose_name='Текст надписи НОВИНКА'),
        ),
    ]
| {
"content_hash": "3a11d5e2d52e6fb9c279a738faf27715",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 113,
"avg_line_length": 28.73913043478261,
"alnum_prop": 0.594553706505295,
"repo_name": "Zex0n/django-simple-cms",
"id": "c57f6d0abaa923e4308b84724cc67c6495798ffc",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/migrations/0026_auto_20180716_1626.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "615000"
},
{
"name": "CoffeeScript",
"bytes": "2818"
},
{
"name": "Dockerfile",
"bytes": "150"
},
{
"name": "HTML",
"bytes": "301125"
},
{
"name": "JavaScript",
"bytes": "1734416"
},
{
"name": "Python",
"bytes": "190924"
},
{
"name": "Shell",
"bytes": "1154"
}
],
"symlink_target": ""
} |
"""Utility functions for loading the automobile data set."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import pandas as pd
import tensorflow as tf
# Location of the UCI "imports-85" automobile data set (raw CSV, no header).
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"

# Order is important for the csv-readers, so we use an OrderedDict here.
# Maps each CSV column name to the dtype used when parsing.
COLUMN_TYPES = collections.OrderedDict([
    ("symboling", int),
    ("normalized-losses", float),
    ("make", str),
    ("fuel-type", str),
    ("aspiration", str),
    ("num-of-doors", str),
    ("body-style", str),
    ("drive-wheels", str),
    ("engine-location", str),
    ("wheel-base", float),
    ("length", float),
    ("width", float),
    ("height", float),
    ("curb-weight", float),
    ("engine-type", str),
    ("num-of-cylinders", str),
    ("engine-size", float),
    ("fuel-system", str),
    ("bore", float),
    ("stroke", float),
    ("compression-ratio", float),
    ("horsepower", float),
    ("peak-rpm", float),
    ("city-mpg", float),
    ("highway-mpg", float),
    ("price", float)
])
def raw_dataframe():
    """Load the automobile data set as a pd.DataFrame."""
    # Download once; Keras caches the file locally under its basename.
    cache_name = URL.split("/")[-1]
    path = tf.keras.utils.get_file(cache_name, URL)
    # Parse the CSV; "?" marks missing values in this data set.
    frame = pd.read_csv(path, names=COLUMN_TYPES.keys(),
                        dtype=COLUMN_TYPES, na_values="?")
    return frame
def load_data(y_name="price", train_fraction=0.7, seed=None):
    """Load the automobile data set and split it train/test and features/label.

    A description of the data is available at:
        https://archive.ics.uci.edu/ml/datasets/automobile

    The data itself can be found at:
        https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data

    Args:
        y_name: the column to return as the label.
        train_fraction: the fraction of the data set to use for training.
        seed: The random seed to use when shuffling the data. `None` generates a
            unique shuffle every run.
    Returns:
        a pair of pairs where the first pair is the training data, and the second
        is the test data:
        `(x_train, y_train), (x_test, y_test) = load_data(...)`
        `x` contains a pandas DataFrame of features, while `y` contains the label
        array.
    """
    # Load the raw data columns.
    data = raw_dataframe()

    # Delete rows with unknowns
    data = data.dropna()

    # Shuffle the data
    # NOTE(review): this seeds NumPy's *global* RNG, but the split below
    # uses DataFrame.sample(random_state=seed), which has its own RNG --
    # so this call looks vestigial. Confirm nothing downstream relies on
    # the global seed before removing it.
    np.random.seed(seed)

    # Split the data into train/test subsets.
    x_train = data.sample(frac=train_fraction, random_state=seed)
    x_test = data.drop(x_train.index)

    # Extract the label from the features DataFrame.
    y_train = x_train.pop(y_name)
    y_test = x_test.pop(y_name)

    return (x_train, y_train), (x_test, y_test)
def make_dataset(x, y=None):
    """Create a slice Dataset from a pandas DataFrame and labels"""
    # TODO(markdaooust): simplify this after the 1.4 cut.
    # Convert the DataFrame into a dict of np.arrays, one entry per column.
    features = {name: np.array(column) for name, column in dict(x).items()}
    tensors = [features]
    if y is not None:
        tensors.append(np.array(y, dtype=np.float32))
    # Create a Dataset of slices
    return tf.data.Dataset.from_tensor_slices(tuple(tensors))
| {
"content_hash": "f2186380e106ce812056589f54613f9f",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 87,
"avg_line_length": 28.97345132743363,
"alnum_prop": 0.6496640195479536,
"repo_name": "jiaphuan/models",
"id": "ceb331b49cf251b06b53445c6e1266ce0d3ab6d7",
"size": "3963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/cookbook/regression/automobile_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1353"
},
{
"name": "C++",
"bytes": "1224262"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "71060"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "Python",
"bytes": "5957505"
},
{
"name": "Shell",
"bytes": "76858"
}
],
"symlink_target": ""
} |
import envi.archs.i386 as e_i386
import vivisect.impemu.emulator as v_i_emulator
from envi.const import RMETA_NMASK
# Mnemonics whose register reads do NOT count as a taint "use": push only
# saves the register to the stack rather than consuming its value.
non_use_mnems = ('push', )
class i386WorkspaceEmulator(v_i_emulator.WorkspaceEmulator, e_i386.IntelEmulator):

    __archemu__ = e_i386.IntelEmulator

    # Registers whose reads may be recorded as taint "uses".
    taintregs = [
        e_i386.REG_EAX, e_i386.REG_ECX, e_i386.REG_EDX,
        e_i386.REG_EBX, e_i386.REG_EBP, e_i386.REG_ESI,
        e_i386.REG_EDI,
    ]

    def __init__(self, vw, **kwargs):
        '''
        Please see the base emulator class in vivisect/impemu/emulator.py for the parameters
        that can be passed through kwargs
        '''
        e_i386.IntelEmulator.__init__(self)
        v_i_emulator.WorkspaceEmulator.__init__(self, vw, **kwargs)
        self.setEmuOpt('i386:repmax', 1)

    def getRegister(self, index):
        '''
        Read a register value, recording a taint "use" for tracked registers.
        '''
        regval = e_i386.IntelEmulator.getRegister(self, index)
        if self.op is None:
            return regval

        realidx = index
        realval = regval
        if self.isMetaRegister(index):
            realidx = index & RMETA_NMASK
            # _useVirtAddr gets the full (real) register's value, even though
            # the caller is handed the meta register's value.
            realval = e_i386.IntelEmulator.getRegister(self, realidx)

        if realidx not in self.taintregs:
            return regval

        if self.isRegUse(self.op, realidx):
            self._useVirtAddr(realval)

        return regval

    def isRegUse(self, op, ridx):
        '''
        determines if the sequence of uses(get/setRegister) is a register 'use'.
        '''
        # A read counts as a "use" only when it genuinely consumes an argument
        # value.  Two patterns are excluded:
        #   * register initialization, e.g. "xor eax, eax" / "mov eax, 0"
        #   * save/restore style instructions, e.g. push/pop
        if op.mnem == 'xor' and op.opers[0] == op.opers[1]:
            # xor-with-self is register initialization, not a use.
            return False
        if op.mnem in non_use_mnems:
            # Blacklisted mnemonics never indicate a use.
            return False
        return True
| {
"content_hash": "80aa9fd8e7f6e0f7088b9c154e40747e",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 92,
"avg_line_length": 32.21917808219178,
"alnum_prop": 0.5973639455782312,
"repo_name": "vivisect/vivisect",
"id": "dd65584762aaa8a1f2d34bd9e01703dff7f9fd58",
"size": "2352",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vivisect/impemu/platarch/i386.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "17710506"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
} |
from os import path
from django.conf import settings
from openstack_dashboard.test import helpers as test
class ErrorPageTests(test.TestCase):
    """Tests for error pages."""
    urls = 'openstack_dashboard.test.error_pages_urls'

    def test_500_error(self):
        """The custom 500 page renders from the dashboard's template dir."""
        TEMPLATE_DIRS = (path.join(settings.ROOT_PATH, 'templates'),)
        with self.settings(TEMPLATE_DIRS=TEMPLATE_DIRS):
            response = self.client.get('/500/')
            # assertIn reports the actual content on failure, unlike the
            # previous assertTrue('...' in ...) form.
            self.assertIn('Server error', response.content)
| {
"content_hash": "6e838a885bd878f713a80bab1dbc0858",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 69,
"avg_line_length": 31.5625,
"alnum_prop": 0.689108910891089,
"repo_name": "rd37/horizon",
"id": "8a0732bfdcd80f3ac984f3c3d1ceba85c2f115e0",
"size": "1190",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/tests/error_pages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "334034"
},
{
"name": "JavaScript",
"bytes": "707335"
},
{
"name": "Python",
"bytes": "3254186"
},
{
"name": "Shell",
"bytes": "15924"
}
],
"symlink_target": ""
} |
import imp
import inspect
import os
def full_path(path_to_config):
    """Resolve *path_to_config* relative to the first caller outside this module.

    Absolute paths are returned unchanged.  Relative paths are joined onto the
    directory of the nearest stack frame whose file lives outside this module,
    i.e. the module that called into config.py.
    """
    # os.path.isabs also handles platform-specific roots (e.g. "C:\\" on
    # Windows), unlike the previous startswith(os.sep) check.
    if os.path.isabs(path_to_config):
        return path_to_config

    curframe = inspect.currentframe()
    try:
        outerframes = inspect.getouterframes(curframe)
        this_dir = os.path.dirname(__file__)
        call_dir = None
        # Walk outward until we leave this module's directory; that frame is
        # the caller whose location anchors the relative path.
        for (frame, filename, line_number,
                function_name, lines, index) in outerframes:
            call_dir = os.path.dirname(filename)
            if this_dir != call_dir:
                break
        return os.path.join(call_dir, path_to_config)
    finally:
        # Break the reference cycle created by holding frame objects
        # (recommended by the inspect module documentation).
        del curframe
def _import(filename):
(path, name) = os.path.split(filename)
(name, ext) = os.path.splitext(name)
(fp, filename, data) = imp.find_module(name, [path])
try:
return imp.load_module(name, fp, filename, data)
finally:
# Since we may exit via an exception, close fp explicitly.
if fp:
fp.close()
def create_if_not_exist(directory):
    """Ensure *directory* exists, creating intermediate directories as needed."""
    if os.path.exists(directory):
        return
    os.makedirs(directory)
class Config(object):
    """Mutable configuration namespace; attributes mirror the loaded settings."""

    def update(self, new_values):
        # Merge the raw mapping into this instance's attributes, then make
        # sure the configured screenshot directory exists on disk.
        vars(self).update(new_values)
        create_if_not_exist(self.SCREENSHOTS_DIR)
def setup_config(path_to_config):
    """Import the module at *path_to_config* and merge its ``Config`` class
    attributes into the module-level ``config`` singleton."""
    global config
    loaded = _import(full_path(path_to_config))
    config.update(loaded.Config.__dict__)
# Module-level singleton, populated later via setup_config().
config = Config()
| {
"content_hash": "17bda38948387a6c81540398d2c8aeeb",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 66,
"avg_line_length": 25.375,
"alnum_prop": 0.6108374384236454,
"repo_name": "2gis/vmmaster",
"id": "8ba36587e850037848285322f5693701892232fd",
"size": "1421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "403"
},
{
"name": "Mako",
"bytes": "413"
},
{
"name": "Python",
"bytes": "380896"
},
{
"name": "Shell",
"bytes": "1842"
}
],
"symlink_target": ""
} |
import test
import warnings
from test import env
from test.asyncio_tests import AsyncIOTestCase, asyncio_test
import bson
from motor.motor_asyncio import AsyncIOMotorClientSession, AsyncIOMotorGridFSBucket
class TestAsyncIOAwait(AsyncIOTestCase):
    """Exercise Motor's asyncio API via native ``async``/``await`` syntax:
    cursors, aggregation, GridFS, sessions, and transactions.
    """

    @asyncio_test
    async def test_to_list(self):
        collection = self.collection
        await collection.delete_many({})
        results = await collection.find().sort("_id").to_list(length=None)
        self.assertEqual([], results)
        docs = [{"_id": 1}, {"_id": 2}]
        await collection.insert_many(docs)
        cursor = collection.find().sort("_id")
        results = await cursor.to_list(length=None)
        self.assertEqual(docs, results)
        # The cursor is exhausted now; draining again yields nothing.
        results = await cursor.to_list(length=None)
        self.assertEqual([], results)

    @asyncio_test
    async def test_iter_cursor(self):
        collection = self.collection
        await collection.delete_many({})
        for n_docs in 0, 1, 2, 10:
            if n_docs:
                docs = [{"_id": i} for i in range(n_docs)]
                await collection.insert_many(docs)
            # Force extra batches to test iteration.
            j = 0
            async for doc in collection.find().sort("_id").batch_size(3):
                self.assertEqual(j, doc["_id"])
                j += 1
            self.assertEqual(j, n_docs)
            j = 0
            raw_cursor = collection.find_raw_batches().sort("_id").batch_size(3)
            async for batch in raw_cursor:
                j += len(bson.decode_all(batch))
            # NOTE(review): unlike test_iter_aggregate below, the raw-batch
            # document count is not asserted here — possibly an oversight.
            await collection.delete_many({})

    @asyncio_test
    async def test_iter_aggregate(self):
        collection = self.collection
        await collection.delete_many({})
        pipeline = [{"$sort": {"_id": 1}}]
        # Empty iterator.
        async for _ in collection.aggregate(pipeline):
            self.fail()
        for n_docs in 1, 2, 10:
            if n_docs:
                docs = [{"_id": i} for i in range(n_docs)]
                await collection.insert_many(docs)
            # Force extra batches to test iteration.
            j = 0
            cursor = collection.aggregate(pipeline).batch_size(3)
            async for doc in cursor:
                self.assertEqual(j, doc["_id"])
                j += 1
            self.assertEqual(j, n_docs)
            j = 0
            raw = collection.aggregate_raw_batches(pipeline).batch_size(3)
            async for batch in raw:
                j += len(bson.decode_all(batch))
            self.assertEqual(j, n_docs)
            await collection.delete_many({})

    @asyncio_test
    async def test_iter_gridfs(self):
        gfs = AsyncIOMotorGridFSBucket(self.db)

        async def cleanup():
            # Remove both the file metadata and the data chunks.
            await self.db.fs.files.delete_many({})
            await self.db.fs.chunks.delete_many({})

        await cleanup()
        # Empty iterator.
        async for _ in gfs.find({"_id": 1}):
            self.fail()
        data = b"data"
        for n_files in 1, 2, 10:
            for _ in range(n_files):
                async with gfs.open_upload_stream(filename="filename") as f:
                    await f.write(data)
            # Force extra batches to test iteration.
            j = 0
            async for _ in gfs.find({"filename": "filename"}).batch_size(3):
                j += 1
            self.assertEqual(j, n_files)
            await cleanup()
        # chunk_size_bytes=1 yields one chunk per byte, exercising chunked
        # iteration of a single stored file below.
        await gfs.upload_from_stream_with_id(1, "filename", source=data, chunk_size_bytes=1)
        cursor = gfs.find({"_id": 1})
        await cursor.fetch_next
        gout = cursor.next_object()
        chunks = []
        async for chunk in gout:
            chunks.append(chunk)
        self.assertEqual(len(chunks), len(data))
        self.assertEqual(b"".join(chunks), data)

    @asyncio_test
    async def test_stream_to_handler(self):
        # Sort of Tornado-specific, but it does work with asyncio.
        fs = AsyncIOMotorGridFSBucket(self.db)
        content_length = 1000
        await fs.delete(1)
        await fs.upload_from_stream_with_id(1, "filename", source=b"a" * content_length)
        gridout = await fs.open_download_stream(1)
        handler = test.MockRequestHandler()
        await gridout.stream_to_handler(handler)
        self.assertEqual(content_length, handler.n_written)
        await fs.delete(1)

    @asyncio_test
    async def test_cursor_iter(self):
        # Have we handled the async iterator change in Python 3.5.2?:
        # python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
        with warnings.catch_warnings(record=True) as w:
            async for _ in self.collection.find():
                pass
        if w:
            self.fail(w[0].message)

    @asyncio_test
    async def test_list_indexes(self):
        await self.collection.drop()
        await self.collection.create_index([("x", 1)])
        await self.collection.create_index([("y", -1)])
        keys = set()
        async for info in self.collection.list_indexes():
            keys.add(info["name"])
        # "_id_" is created automatically alongside the two explicit indexes.
        self.assertEqual(keys, {"_id_", "x_1", "y_-1"})

    @env.require_version_min(3, 6)
    @env.require_replica_set
    @asyncio_test
    async def test_session(self):
        s = await self.cx.start_session()
        self.assertIsInstance(s, AsyncIOMotorClientSession)
        self.assertIs(s.client, self.cx)
        self.assertFalse(s.has_ended)
        await s.end_session()
        self.assertTrue(s.has_ended)
        # Raises a helpful error if used in a regular with-statement.
        with self.assertRaises(AttributeError) as ctx:
            with await self.cx.start_session():
                pass
        self.assertIn("async with await", str(ctx.exception))
        async with await self.cx.start_session() as s:
            self.assertIsInstance(s, AsyncIOMotorClientSession)
            self.assertFalse(s.has_ended)
            await s.end_session()
            self.assertTrue(s.has_ended)
        self.assertTrue(s.has_ended)

    @env.require_version_min(3, 7)
    @env.require_replica_set
    @asyncio_test
    async def test_transaction(self):
        async with await self.cx.start_session() as s:
            s.start_transaction()
            self.assertTrue(s.in_transaction)
            self.assertFalse(s.has_ended)
            await s.end_session()
            self.assertFalse(s.in_transaction)
            self.assertTrue(s.has_ended)
        async with await self.cx.start_session() as s:
            # Use start_transaction in "async with", not "async with await".
            with self.assertRaises(TypeError):
                async with await s.start_transaction():
                    pass
            await s.abort_transaction()
            async with s.start_transaction():
                self.assertTrue(s.in_transaction)
                self.assertFalse(s.has_ended)
            self.assertFalse(s.in_transaction)
            self.assertFalse(s.has_ended)
        self.assertTrue(s.has_ended)
| {
"content_hash": "075822da5e677c7dc66e025b9b1c7243",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 92,
"avg_line_length": 33.10377358490566,
"alnum_prop": 0.5749501282416642,
"repo_name": "mongodb/motor",
"id": "fbe4d45a87fb9a87d03804012312a76d661e6393",
"size": "7592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/asyncio_tests/test_asyncio_await.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "607021"
},
{
"name": "Shell",
"bytes": "3406"
}
],
"symlink_target": ""
} |
from typing import Optional, Union
import pandas as pd
from flask_babel import gettext as _
from superset.exceptions import InvalidPostProcessingError
from superset.utils.pandas_postprocessing.utils import RESAMPLE_METHOD
def resample(
    df: pd.DataFrame,
    rule: str,
    method: str,
    fill_value: Optional[Union[float, int]] = None,
) -> pd.DataFrame:
    """
    Resample a DataFrame along its DatetimeIndex, with upsampling support.

    :param df: DataFrame to resample.
    :param rule: The offset string representing target conversion.
    :param method: How to fill the NaN value after resample.
    :param fill_value: What values do fill missing.
    :return: DataFrame after resample
    :raises InvalidPostProcessingError: If the request in incorrect
    """
    # Resampling only makes sense over a time index.
    if not isinstance(df.index, pd.DatetimeIndex):
        raise InvalidPostProcessingError(_("Resample operation requires DatetimeIndex"))
    if method not in RESAMPLE_METHOD:
        raise InvalidPostProcessingError(
            _("Resample method should in ") + ", ".join(RESAMPLE_METHOD) + "."
        )

    resampler = df.resample(rule)
    if method == "asfreq" and fill_value is not None:
        result = resampler.asfreq(fill_value=fill_value)
    elif method == "linear":
        result = resampler.interpolate()
    else:
        # Remaining methods (e.g. ffill/bfill/asfreq without a fill value)
        # are dispatched by name on the resampler.
        result = getattr(resampler, method)()
    return result
| {
"content_hash": "0427ec6eaaaa8ab339eca869feb4ab41",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 88,
"avg_line_length": 33.794871794871796,
"alnum_prop": 0.6881638846737481,
"repo_name": "airbnb/caravel",
"id": "a82d7031e9c12615751e9717c84aa35df53e03d7",
"size": "2103",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "superset/utils/pandas_postprocessing/resample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57416"
},
{
"name": "HTML",
"bytes": "112618"
},
{
"name": "JavaScript",
"bytes": "406496"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "588212"
},
{
"name": "Shell",
"bytes": "980"
}
],
"symlink_target": ""
} |
import lacuna, lacuna.binutils.libbin, lacuna.types
import lacuna.exceptions as err
import argparse, datetime, os, re, sys
import lacuna.exceptions as err
class MailCompile(lacuna.binutils.libbin.Script):
    """Collects all inbox messages from a single day whose subjects match a
    pattern, and concatenates them (bodies plus any table attachments) into
    one plain-text report.
    """

    def __init__(self):
        self.version = '0.1'
        # "Now" supplies the defaults for the --day/--month/--year arguments.
        self.now = datetime.datetime.now()
        self.utils = lacuna.utils.Utils()
        parser = argparse.ArgumentParser(
            description = 'Combines all mail messages within a date range matching a single subject, into a single report. Good for compiling attack messages.',
            epilog = "Full docs can be found at http://tmtowtdi.github.io/MontyLacuna/scripts/build_ships.html",
        )
        parser.add_argument( 'subject',
            metavar = '<subject>',
            action = 'store',
            help = "Messages whose subjects match this string, in whole or in part, will be compiled. To compile all attack summaries, this can just be 'summary'. Case INsensitive",
        )
        parser.add_argument( '--tag',
            metavar = '<tag>',
            action = 'store',
            type = str,
            default = None,
            choices = [
                'Alert', 'Attack', 'Complaint', 'Colonization', 'Correspondence', 'Excavator',
                'Intelligence', 'Medal', 'Mission', 'Parliament', 'Probe', 'Spies', 'Trade',
                'Tutorial'
            ],
            help = "Matches only messages marked with this tag. Tags are found in the dropdown box in the in-game mail system. Defaults to no tag (searches entire inbox). Case sensitive."
        )
        parser.add_argument( '--day',
            metavar = '<day>',
            action = 'store',
            type = int,
            default = self.now.day,
            help = "The integer day your messages are stamped as. Defaults to today."
        )
        parser.add_argument( '--month',
            metavar = '<month>',
            action = 'store',
            type = int,
            default = self.now.month,
            help = "The integer month your messages are stamped as. Defaults to this month."
        )
        parser.add_argument( '--year',
            metavar = '<year>',
            action = 'store',
            type = int,
            default = self.now.year,
            help = "The four-digit year your messages are stamped as. Defaults to this year."
        )
        super().__init__(parser)
        # Presumably enables client-side caching of the inbox responses read
        # below — TODO confirm against lacuna client docs.
        self.client.cache_on("my_inbox")
        self.inbox = self.client.get_inbox()

    def date_matches( self, dt:datetime.datetime ):
        """Return True if *dt* falls on the day/month/year requested on the
        command line."""
        if dt.day == self.args.day and dt.month == self.args.month and dt.year == self.args.year:
            return True
        return False

    def find_start_id( self, msgs:list ):
        """ Given a list of messages, returns the ID of the first message that matches
        our requested date. If none of the messages matches, returns None.
        """
        # Messages appear to arrive newest-first (find_end_id relies on newer
        # messages having higher IDs), so the first date match here is the
        # newest message of the requested day.
        for m in msgs:
            dt = self.utils.tle2time( m.date )
            if self.date_matches( dt ):
                return m.id
        return None

    def find_end_id( self, msgs:list, start_id:int ):
        """ Given a list of messages, returns the ID of the last message that matches
        our requested date. If none of the messages matches, returns None.
        """
        last_id = start_id
        for m in msgs:
            if m.id >= last_id: # newer messages will have higher IDs
                continue
            dt = self.utils.tle2time( m.date )
            if self.date_matches( dt ):
                last_id = m.id
            else:
                return last_id
        # NOTE(review): last_id starts as start_id, so the else branch below
        # can only fire when start_id itself is falsy.
        if last_id:
            ### We reached the end of our set of messages without changing to
            ### a different day. The last message we saw is our last one.
            return last_id
        else:
            return None

    def get_dated_summaries( self ):
        """ Returns the summaries of the messages that fall on our requested date.
        Returns:
            messages (list): :class:`lacuna.inbox.MessageSummary` objects
        """
        start_id = None
        end_id = None
        all_messages = []
        for i in range(1, 11):
            ### Only reading the first 10 pages is arbitrary, but seems
            ### reasonable.
            opts = {}
            opts['page_number'] = i
            if self.args.tag:
                opts['tags'] = [ self.args.tag ]
            page_messages, count = self.inbox.view_inbox( opts )
            if not start_id:
                start_id = self.find_start_id( page_messages )
            if start_id and not end_id:
                end_id = self.find_end_id( page_messages, start_id )
            all_messages += page_messages
            # Stop paging as soon as the day's ID range is bracketed.
            if start_id and end_id:
                break
        self.client.user_logger.debug( "{}, {}".format(start_id, end_id) )
        ### If the requested date goes past the first 10 pages, we're going to
        ### punt and still only return messages that appear on those first 10
        ### pages.
        if not start_id:
            raise KeyError("I could not find messages from the requested date within the first 10 pages of messages.")
        if not end_id:
            end_id = all_messages[-1].id
        ### I keep thinking linearly. In my head, if start_id is 1, then
        ### end_id should be 5.
        ###
        ### WRONG -- the end_id message is older than the start_id message, so
        ### end_id will always be SMALLER than start_id.
        matching_messages = []
        for m in all_messages:
            if m.id <= start_id:
                if m.id >= end_id:
                    matching_messages.append(m)
                else:
                    break
        return matching_messages

    def get_matching_summaries( self, messages:list ):
        """ Returns a list of messages whose subject line matches the requested subject.
        Arguments:
            messages (list): :class:`lacuna.inbox.MessageSummary` to comb through
        Returns:
            matches (list): :class:`lacuna.inbox.MessageSummary` whose subject matched
        """
        matches = []
        # Case-insensitive partial match, per the 'subject' CLI help text.
        pat = re.compile( self.args.subject, re.IGNORECASE )
        for m in messages:
            if pat.search( m.subject ):
                matches.append(m)
        return matches

    def compile_full_messages( self, summaries:list ):
        """Read each summarized message in full and join the formatted bodies
        (including any table attachments, rendered as CSV) into one report
        string."""
        msgs = []
        for s in summaries:
            message = self.inbox.read_message( s.id )
            mbody = "========== BEGIN TRANSMISSION ==========\n"
            mbody += "Subject: " + message.subject + "\n"
            mbody += "Date: " + message.date + "\n\n"
            mbody += message.body
            if 'table' in message.attachments:
                mbody += "\n\nTHIS MESSAGE CONTAINS A TABLE:\n"
                mbody += "------------------------------\n"
                for row in message.attachments['table']:
                    strrow = [str(i) for i in row]
                    mbody += ','.join(strrow) + "\n"
            mbody += "========== END TRANSMISSION ==========\n"
            msgs.append(mbody)
        report = "\n\n".join(msgs)
        return report
| {
"content_hash": "3dba5e37ab065428c5d41a5f32ced7c2",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 197,
"avg_line_length": 41.84090909090909,
"alnum_prop": 0.5264801738185768,
"repo_name": "tmtowtdi/MontyLacuna",
"id": "e0d134ece99776babd8a62a289f9a09e74a30ada",
"size": "7365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/lacuna/binutils/libmail_compile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36255146"
},
{
"name": "Shell",
"bytes": "2766"
}
],
"symlink_target": ""
} |
from setuptools import setup
import os
def read(*paths):
    """Build a file path from *paths* and return the contents."""
    file_path = os.path.join(*paths)
    with open(file_path, 'r') as handle:
        return handle.read()
# Distribution metadata for the sanji package.  long_description is read from
# README.rst at build time, so packaging commands must run from the project
# root where that file lives.
setup(
    name="sanji",
    version="1.0.2",
    description="Sanji Framework SDK",
    long_description=read('README.rst'),
    url="https://github.com/Sanji-IO/sanji",
    author="Sanji Team",
    author_email="sanji@moxa.com",
    license="MIT",
    packages=["sanji", "sanji.connection", "sanji.model"],
    install_requires=["voluptuous", "simplejson"],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| {
"content_hash": "2af2924fecd45a799f9299700132a643",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 31.647058823529413,
"alnum_prop": 0.6096654275092936,
"repo_name": "imZack/sanji",
"id": "9e7b2c86bcdd08fefb6fdb97a89ce57fc324abf6",
"size": "1076",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "149"
},
{
"name": "Python",
"bytes": "119228"
}
],
"symlink_target": ""
} |
import python as LibPKMN
if __name__ == "__main__":
    bag = LibPKMN.bag("Platinum")

    # Add items directly to the pocket rather than through the bag.
    pocket = bag.get_pocket("Medicine")
    pocket.add_item(LibPKMN.Items.POTION, 10)  # TODO: fix str <-> LibPKMN.pokemon_text typemap

    # The amount must be visible both through the pocket and the bag itself.
    assert bag.get_pocket("Medicine").get_item_amount(LibPKMN.Items.POTION) == 10
    assert bag.get_item_amount(LibPKMN.Items.POTION) == 10
"content_hash": "cf3fc127136f86ffb37fa4795fc60477",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 103,
"avg_line_length": 38.07692307692308,
"alnum_prop": 0.6828282828282828,
"repo_name": "codemonkey85/LibPKMN",
"id": "8d02cc7b698531098aa5340f08fd3c87a030e8cb",
"size": "948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python_bag_pocket_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4856931"
},
{
"name": "C#",
"bytes": "11100"
},
{
"name": "C++",
"bytes": "1193996"
},
{
"name": "Java",
"bytes": "2981"
},
{
"name": "Python",
"bytes": "6588"
}
],
"symlink_target": ""
} |
#!/usr/bin/python
#
# Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Generates CSSStyleDeclaration template file from css property definitions
defined in WebKit."""
import tempfile, os, re
COMMENT_LINE_PREFIX = ' * '
# TODO(efortuna): Pull from DEPS so that we have latest css *in sync* with our
# Dartium. Then remove the checked in CSSPropertyNames.in.
SOURCE_PATH = 'CSSPropertyNames.in'
#SOURCE_PATH = 'Source/WebCore/css/CSSPropertyNames.in'
TEMPLATE_FILE = '../templates/html/impl/impl_CSSStyleDeclaration.darttemplate'
# These are the properties that are supported on all Dart project supported
# browsers as camelCased names on the CssStyleDeclaration.
BROWSER_PATHS = [
'cssProperties.CSS21.txt', # Remove when we have samples from all browsers.
'cssProperties.ie9.txt',
'cssProperties.ie10.txt',
'cssProperties.ie11.txt',
'cssProperties.ff36.txt',
'cssProperties.chrome40.txt',
'cssProperties.safari-7.1.3.txt',
'cssProperties.mobileSafari-8.2.txt',
'cssProperties.iPad4Air.onGoogleSites.txt',
]
# Supported annotations for any specific CSS properties.
annotated = {
'transition':
'''@SupportedBrowser(SupportedBrowser.CHROME)
@SupportedBrowser(SupportedBrowser.FIREFOX)
@SupportedBrowser(SupportedBrowser.IE, '10')
@SupportedBrowser(SupportedBrowser.SAFARI)'''
}
class Error:
  """Simple error type whose repr is the raw message it was built with."""

  def __init__(self, message):
    self.message = message

  def __repr__(self):
    return self.message
def camelCaseName(name):
  """Convert a CSS property name to a lowerCamelCase name."""
  # Prefixed properties map onto the same camelCase name as the unprefixed one.
  parts = name.replace('-webkit-', '').split('-')
  # First fragment keeps its case; each following fragment is title-cased.
  return parts[0] + ''.join(part.title() for part in parts[1:])
def dashifyName(camelName):
  """Convert a lowerCamelCase name back to its hyphenated CSS form."""
  return re.sub(r'[A-Z]', lambda match: '-' + match.group(0).lower(), camelName)
def isCommentLine(line):
  """Return True for blank lines and lines starting with '#' or '//'."""
  return not line.strip() or line.startswith(('#', '//'))
def readCssProperties(filename):
  """Read a browser property-list file; return its unique non-comment lines,
  stripped and sorted."""
  # Use a context manager so the file handle is closed promptly (the original
  # left it open until garbage collection).
  with open(filename) as f:
    data = f.readlines()
  # Dedup raw lines first, then strip — preserving the original's semantics.
  return sorted([d.strip() for d in set(data) if not isCommentLine(d)])
def GenerateCssTemplateFile():
  """Generate TEMPLATE_FILE: the CssStyleDeclaration Dart template.

  Reads the WebKit property list (SOURCE_PATH) plus the per-browser sample
  lists (BROWSER_PATHS), computes the set of properties available on all
  supported browsers, and writes the Dart template with typed getters/setters
  for that universal set and string-based accessors for everything else.
  """
  # NOTE(review): this file handle is never closed explicitly.
  data = open(SOURCE_PATH).readlines()
  # filter CSSPropertyNames.in to only the properties
  # TODO(efortuna): do we also want CSSPropertyNames.in?
  data = [d.strip() for d in data if not isCommentLine(d) and not '=' in d]

  browser_props = [readCssProperties(file) for file in BROWSER_PATHS]
  # NOTE(review): `reduce` is used without an import, so this script targets
  # Python 2 (where it is a builtin).
  # Properties present in every browser's sample list...
  universal_properties = reduce(lambda a, b: set(a).intersection(b),
      browser_props)
  # ...excluding cssText, and restricted to names WebKit actually defines.
  universal_properties = universal_properties.difference(['cssText'])
  universal_properties = universal_properties.intersection(
      map(camelCaseName, data))

  class_file = open(TEMPLATE_FILE, 'w')

  # File header: license, do-not-edit warning, and provenance.
  class_file.write("""
// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

// WARNING: DO NOT EDIT THIS TEMPLATE FILE.
// The template file was generated by scripts/css_code_generator.py

// Source of CSS properties:
//   %s

part of $LIBRARYNAME;
""" % SOURCE_PATH)

  # Fixed portion of the CssStyleDeclaration class: constructors, property
  # lookup with vendor-prefix fallback, and the per-instance property cache.
  class_file.write("""
$(ANNOTATIONS)$(NATIVESPEC)$(CLASS_MODIFIERS)class $CLASSNAME $EXTENDS with
    $(CLASSNAME)Base $IMPLEMENTS {
  factory $CLASSNAME() => new CssStyleDeclaration.css('');

  factory $CLASSNAME.css(String css) {
    final style = new DivElement().style;
    style.cssText = css;
    return style;
  }

  /// Returns the value of the property if the provided *CSS* property
  /// name is supported on this element and if the value is set. Otherwise
  /// returns an empty string.
  ///
  /// Please note the property name uses camelCase, not-hyphens.
  String getPropertyValue(String propertyName) {
    var propValue = _getPropertyValueHelper(propertyName);
    return propValue ?? '';
  }

  String _getPropertyValueHelper(String propertyName) {
    return _getPropertyValue(_browserPropertyName(propertyName));
  }

  /**
   * Returns true if the provided *CSS* property name is supported on this
   * element.
   *
   * Please note the property name camelCase, not-hyphens. This
   * method returns true if the property is accessible via an unprefixed _or_
   * prefixed property.
   */
  bool supportsProperty(String propertyName) {
    return _supportsProperty(propertyName) ||
        _supportsProperty(_camelCase("${Device.cssPrefix}$propertyName"));
  }

  bool _supportsProperty(String propertyName) {
    return JS('bool', '# in #', propertyName, this);
  }

  void setProperty(String propertyName, String value, [String priority]) {
    return _setPropertyHelper(_browserPropertyName(propertyName),
      value, priority);
  }

  String _browserPropertyName(String propertyName) {
    String name = _readCache(propertyName);
    if (name is String) return name;
    name = _supportedBrowserPropertyName(propertyName);
    _writeCache(propertyName, name);
    return name;
  }

  String _supportedBrowserPropertyName(String propertyName) {
    if (_supportsProperty(_camelCase(propertyName))) {
      return propertyName;
    }
    var prefixed = "${Device.cssPrefix}$propertyName";
    if (_supportsProperty(prefixed)) {
      return prefixed;
    }
    // May be a CSS variable, just use it as provided.
    return propertyName;
  }

  static final _propertyCache = JS('', '{}');
  static String _readCache(String key) =>
      JS('String|Null', '#[#]', _propertyCache, key);
  static void _writeCache(String key, String value) {
    JS('void', '#[#] = #', _propertyCache, key, value);
  }

  static String _camelCase(String hyphenated) {
    var replacedMs = JS('String', r'#.replace(/^-ms-/, "ms-")', hyphenated);
    return JS(
        'String',
        r'#.replace(/-([\da-z])/ig,'
        r'function(_, letter) { return letter.toUpperCase();})',
        replacedMs);
  }

  void _setPropertyHelper(String propertyName, String value, [String priority]) {
    if (value == null) value = '';
    if (priority == null) priority = '';
    JS('void', '#.setProperty(#, #, #)', this, propertyName, value, priority);
  }

  /**
   * Checks to see if CSS Transitions are supported.
   */
  static bool get supportsTransitions {
    return document.body.style.supportsProperty('transition');
  }
$!MEMBERS
""")

  # Typed getter/setter pair, backed by a @JSName field, for each property
  # supported on every browser.
  for camelName in sorted(universal_properties):
    property = dashifyName(camelName)
    class_file.write("""
  /** Gets the value of "%s" */
  String get %s => this._%s;

  /** Sets the value of "%s" */
  set %s(String value) {
    _%s = value == null ? '' : value;
  }

  @Returns('String')
  @JSName('%s')
  String _%s;
""" % (property, camelName, camelName, property, camelName, camelName,
    camelName, camelName))

  # _CssStyleDeclarationSet broadcasts style operations over many elements.
  class_file.write("""
}

class _CssStyleDeclarationSet extends Object with CssStyleDeclarationBase {
  final Iterable<Element> _elementIterable;
  Iterable<CssStyleDeclaration> _elementCssStyleDeclarationSetIterable;

  _CssStyleDeclarationSet(this._elementIterable) {
    _elementCssStyleDeclarationSetIterable = new List.from(
        _elementIterable).map((e) => e.style);
  }

  String getPropertyValue(String propertyName) =>
      _elementCssStyleDeclarationSetIterable.first.getPropertyValue(
          propertyName);

  void setProperty(String propertyName, String value, [String priority]) {
    _elementCssStyleDeclarationSetIterable.forEach((e) =>
        e.setProperty(propertyName, value, priority));
  }
""")

  class_file.write("""
  void _setAll(String propertyName, String value) {
    value = value == null ? '' : value;
    for (Element element in _elementIterable) {
      JS('void', '#.style[#] = #', element, propertyName, value);
    }
  }
""")

  # Setter-only members for the element-set class (getters are ambiguous
  # over a collection).
  for camelName in sorted(universal_properties):
    property = dashifyName(camelName)
    class_file.write("""
  /** Sets the value of "%s" */
  set %s(String value) {
    _setAll('%s', value);
  }
""" % (property, camelName, camelName))

  class_file.write("""
  // Important note: CssStyleDeclarationSet does NOT implement every method
  // available in CssStyleDeclaration. Some of the methods don't make so much
  // sense in terms of having a resonable value to return when you're
  // considering a list of Elements. You will need to manually add any of the
  // items in the MEMBERS set if you want that functionality.
}

abstract class CssStyleDeclarationBase {
  String getPropertyValue(String propertyName);
  void setProperty(String propertyName, String value, [String priority]);
""")

  # String-based accessors on the shared base class for EVERY WebKit property
  # (deduped after stripping the -webkit- prefix; -internal ones skipped).
  class_lines = []

  seen = set()
  for prop in sorted(data, key=camelCaseName):
    camel_case_name = camelCaseName(prop)
    upper_camel_case_name = camel_case_name[0].upper() + camel_case_name[1:]
    css_name = prop.replace('-webkit-', '')
    base_css_name = prop.replace('-webkit-', '')

    if base_css_name in seen or base_css_name.startswith('-internal'):
      continue
    seen.add(base_css_name)

    comment = '  /** %s the value of "' + base_css_name + '" */'
    class_lines.append('\n')
    class_lines.append(comment % 'Gets')
    if base_css_name in annotated:
      class_lines.append(annotated[base_css_name])
    class_lines.append("""
  String get %s =>
    getPropertyValue('%s');

""" % (camel_case_name, css_name))

    class_lines.append(comment % 'Sets')
    if base_css_name in annotated:
      class_lines.append(annotated[base_css_name])
    class_lines.append("""
  set %s(String value) {
    setProperty('%s', value, '');
  }
""" % (camel_case_name, css_name))

  class_file.write(''.join(class_lines))
  class_file.write('}\n')
  class_file.close()
| {
"content_hash": "fc503fc03d741abb910a830bcfee04e0",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 81,
"avg_line_length": 31.658227848101266,
"alnum_prop": 0.6669332267093163,
"repo_name": "dart-archive/dart-sdk",
"id": "4bf4870f4646f9365b78a392717d7fcc9b0b15c8",
"size": "10004",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/dom/scripts/css_code_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "5209"
},
{
"name": "Batchfile",
"bytes": "49645"
},
{
"name": "C",
"bytes": "169243"
},
{
"name": "C++",
"bytes": "17828371"
},
{
"name": "CMake",
"bytes": "1598"
},
{
"name": "CSS",
"bytes": "96163"
},
{
"name": "Common Lisp",
"bytes": "234"
},
{
"name": "Dart",
"bytes": "83151790"
},
{
"name": "GAP",
"bytes": "37395"
},
{
"name": "HTML",
"bytes": "679631"
},
{
"name": "Java",
"bytes": "627371"
},
{
"name": "JavaScript",
"bytes": "157014"
},
{
"name": "Makefile",
"bytes": "8113"
},
{
"name": "Python",
"bytes": "1203692"
},
{
"name": "Shell",
"bytes": "140264"
},
{
"name": "TeX",
"bytes": "271705"
}
],
"symlink_target": ""
} |
# Bug fix: NoHostsConnectedToException was defined below but omitted from
# __all__, so ``from st2common.exceptions.ssh import *`` silently dropped it.
__all__ = [
    "InvalidCredentialsException",
    "NoHostsConnectedToException",
]


class InvalidCredentialsException(Exception):
    """Raised when an SSH connection fails because the supplied credentials are invalid."""
    pass


class NoHostsConnectedToException(Exception):
    """Raised when a connection could not be established to any of the target hosts."""
    pass
| {
"content_hash": "c9aef7d97c3416375c5c0dbcff6da8fc",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 45,
"avg_line_length": 17.333333333333332,
"alnum_prop": 0.7692307692307693,
"repo_name": "StackStorm/st2",
"id": "7a4e1ee51654dd8c510eca426e68479a3a469174",
"size": "784",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2common/st2common/exceptions/ssh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
} |
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import pytest
import tomli_w
from pip._internal.build_env import BuildEnvironment
from pip._internal.req import InstallRequirement
from tests.lib import (
PipTestEnvironment,
TestData,
create_basic_wheel_for_package,
make_test_finder,
)
def make_project(
    tmpdir: Path,
    requires: Optional[List[str]] = None,
    backend: Optional[str] = None,
    backend_path: Optional[List[str]] = None,
) -> Path:
    """Create a minimal PEP 518 project under *tmpdir*.

    Writes a ``pyproject.toml`` whose ``[build-system]`` table contains the
    given ``requires`` list and, when supplied, ``build-backend`` and
    ``backend-path`` entries. Returns the new project directory.
    """
    build_system: Dict[str, Any] = {"requires": requires or []}
    if backend:
        build_system["build-backend"] = backend
    if backend_path:
        build_system["backend-path"] = backend_path
    project = tmpdir / "project"
    project.mkdir()
    (project / "pyproject.toml").write_text(
        tomli_w.dumps({"build-system": build_system})
    )
    return project
def test_backend(tmpdir: Path, data: TestData) -> None:
    """Check we can call a requirement's backend successfully.

    Builds a project whose PEP 517 backend is the fixture package
    "dummy_backend", installs that backend into an isolated build
    environment, then verifies its ``build_wheel`` hook is callable.
    """
    project_dir = make_project(tmpdir, backend="dummy_backend")
    req = InstallRequirement(None, None)
    req.source_dir = os.fspath(project_dir)  # make req believe it has been unpacked
    req.load_pyproject_toml()
    env = BuildEnvironment()
    finder = make_test_finder(find_links=[data.backends])
    env.install_requirements(finder, ["dummy_backend"], "normal", kind="Installing")
    # The backend must be fully available in the build env: nothing missing,
    # nothing conflicting.
    conflicting, missing = env.check_requirements(["dummy_backend"])
    assert not conflicting and not missing
    assert hasattr(req.pep517_backend, "build_wheel")
    with env:
        assert req.pep517_backend is not None
        # The fixture backend's build_wheel hook returns this sentinel string.
        assert req.pep517_backend.build_wheel("dir") == "Backend called"
dummy_backend_code = """\
def build_wheel(
wheel_directory,
config_settings=None,
metadata_directory=None
):
return "Backend called"
"""
def test_backend_path(tmpdir: Path, data: TestData) -> None:
    """Check we can call a backend inside the project.

    ``backend-path = ["."]`` makes pip import the backend module from the
    project directory itself instead of from an installed distribution.
    """
    project_dir = make_project(tmpdir, backend="dummy_backend", backend_path=["."])
    (project_dir / "dummy_backend.py").write_text(dummy_backend_code)
    req = InstallRequirement(None, None)
    req.source_dir = os.fspath(project_dir)  # make req believe it has been unpacked
    req.load_pyproject_toml()
    env = BuildEnvironment()
    assert hasattr(req.pep517_backend, "build_wheel")
    with env:
        assert req.pep517_backend is not None
        assert req.pep517_backend.build_wheel("dir") == "Backend called"
def test_backend_path_and_dep(tmpdir: Path, data: TestData) -> None:
    """Check we can call a requirement's backend successfully.

    The in-tree backend (``backend-path = ["."]``) re-exports ``build_wheel``
    from "dummy_backend", so that package must be importable from the build
    environment after it is installed there.
    """
    project_dir = make_project(
        tmpdir, backend="dummy_internal_backend", backend_path=["."]
    )
    (project_dir / "dummy_internal_backend.py").write_text(
        "from dummy_backend import build_wheel"
    )
    req = InstallRequirement(None, None)
    req.source_dir = os.fspath(project_dir)  # make req believe it has been unpacked
    req.load_pyproject_toml()
    env = BuildEnvironment()
    finder = make_test_finder(find_links=[data.backends])
    env.install_requirements(finder, ["dummy_backend"], "normal", kind="Installing")
    assert hasattr(req.pep517_backend, "build_wheel")
    with env:
        assert req.pep517_backend is not None
        assert req.pep517_backend.build_wheel("dir") == "Backend called"
def test_pep517_install(
    script: PipTestEnvironment, tmpdir: Path, data: TestData
) -> None:
    """Check we can build with a custom backend."""
    project = make_project(tmpdir, requires=["test_backend"], backend="test_backend")
    install = script.pip("install", "--no-index", "-f", data.backends, project)
    install.assert_installed("project", editable=False)
def test_pep517_install_with_reqs(
    script: PipTestEnvironment, tmpdir: Path, data: TestData
) -> None:
    """Backend generated requirements are installed in the build env."""
    project = make_project(tmpdir, requires=["test_backend"], backend="test_backend")
    # The test_backend fixture reads extra build requirements from this file.
    (project / "backend_reqs.txt").write_text("simplewheel")
    find_links = ("-f", data.backends, "-f", data.packages)
    outcome = script.pip("install", "--no-index", *find_links, project)
    outcome.assert_installed("project", editable=False)
def test_no_use_pep517_without_setup_py(
    script: PipTestEnvironment, tmpdir: Path, data: TestData
) -> None:
    """Using --no-use-pep517 requires setup.py.

    The project declares a PEP 517 backend but has no setup.py, so forcing
    the legacy build path must fail with an explicit error.
    """
    project_dir = make_project(
        tmpdir, requires=["test_backend"], backend="test_backend"
    )
    result = script.pip(
        "install",
        "--no-index",
        "--no-use-pep517",
        "-f",
        data.backends,
        project_dir,
        expect_error=True,  # legacy build is impossible without setup.py
    )
    assert "project does not have a setup.py" in result.stderr
def test_conflicting_pep517_backend_requirements(
    script: PipTestEnvironment, tmpdir: Path, data: TestData
) -> None:
    # The project pins simplewheel==1.0 in build-system.requires while the
    # backend's dynamically generated requirements (backend_reqs.txt) demand
    # simplewheel==2.0 — pip must refuse the install and report the conflict.
    project_dir = make_project(
        tmpdir, requires=["test_backend", "simplewheel==1.0"], backend="test_backend"
    )
    project_dir.joinpath("backend_reqs.txt").write_text("simplewheel==2.0")
    result = script.pip(
        "install",
        "--no-index",
        "-f",
        data.backends,
        "-f",
        data.packages,
        project_dir,
        expect_error=True,
    )
    msg = (
        "Some build dependencies for {url} conflict with the backend "
        "dependencies: simplewheel==1.0 is incompatible with "
        "simplewheel==2.0.".format(url=project_dir.as_uri())
    )
    assert result.returncode != 0 and msg in result.stderr, str(result)
def test_no_check_build_deps(
    script: PipTestEnvironment, tmpdir: Path, data: TestData
) -> None:
    # With --no-build-isolation and WITHOUT --check-build-dependencies, pip
    # does not validate build-system.requires, so the pre-installed
    # simplewheel==1.0 (vs the declared ==2.0) is tolerated and the build
    # still succeeds.
    project_dir = make_project(
        tmpdir, requires=["simplewheel==2.0"], backend="test_backend"
    )
    script.pip(
        "install",
        "simplewheel==1.0",
        "test_backend",
        "--no-index",
        "-f",
        data.packages,
        "-f",
        data.backends,
    )
    result = script.pip("install", "--no-build-isolation", project_dir)
    result.assert_installed("project", editable=False)
def test_validate_missing_pep517_backend_requirements(
    script: PipTestEnvironment, tmpdir: Path, data: TestData
) -> None:
    # --check-build-dependencies together with --no-build-isolation must
    # fail when the declared build requirements are absent from the
    # (non-isolated) environment.
    project_dir = make_project(
        tmpdir, requires=["test_backend", "simplewheel==1.0"], backend="test_backend"
    )
    result = script.pip(
        "install",
        "--no-index",
        "-f",
        data.backends,
        "-f",
        data.packages,
        "--no-build-isolation",
        "--check-build-dependencies",
        project_dir,
        expect_error=True,
    )
    msg = (
        "Some build dependencies for {url} are missing: "
        "'simplewheel==1.0', 'test_backend'.".format(url=project_dir.as_uri())
    )
    assert result.returncode != 0 and msg in result.stderr, str(result)
def test_validate_conflicting_pep517_backend_requirements(
    script: PipTestEnvironment, tmpdir: Path, data: TestData
) -> None:
    # --check-build-dependencies must also fail when an installed version
    # (simplewheel==2.0) conflicts with the declared build requirement
    # (simplewheel==1.0) in a non-isolated build.
    project_dir = make_project(
        tmpdir, requires=["simplewheel==1.0"], backend="test_backend"
    )
    script.pip("install", "simplewheel==2.0", "--no-index", "-f", data.packages)
    result = script.pip(
        "install",
        "--no-index",
        "-f",
        data.backends,
        "-f",
        data.packages,
        "--no-build-isolation",
        "--check-build-dependencies",
        project_dir,
        expect_error=True,
    )
    msg = (
        "Some build dependencies for {url} conflict with the backend "
        "dependencies: simplewheel==2.0 is incompatible with "
        "simplewheel==1.0.".format(url=project_dir.as_uri())
    )
    assert result.returncode != 0 and msg in result.stderr, str(result)
def test_pep517_backend_requirements_satisfied_by_prerelease(
    script: PipTestEnvironment,
    data: TestData,
) -> None:
    # An installed pre-release (1.0a1) must count as satisfying the
    # backend's unpinned "myreq" requirement, so pip should not try to
    # (re)install backend dependencies.
    create_basic_wheel_for_package(script, "myreq", "1.0a1")
    script.pip("install", "myreq==1.0a1", "--no-index", "-f", script.scratch_path)
    script.pip("install", "test_backend", "--no-index", "-f", data.backends)

    project_dir = make_project(
        script.temp_path,
        requires=["test_backend", "myreq"],
        backend="test_backend",
    )
    project_dir.joinpath("backend_reqs.txt").write_text("myreq")

    result = script.pip("install", "--no-index", "--no-build-isolation", project_dir)
    assert "Installing backend dependencies:" not in result.stdout
def test_pep517_backend_requirements_already_satisfied(
    script: PipTestEnvironment, tmpdir: Path, data: TestData
) -> None:
    # simplewheel is both a static build requirement and a backend-generated
    # one; once pulled in for the static requirement it must not be
    # installed a second time for the backend.
    project_dir = make_project(
        tmpdir, requires=["test_backend", "simplewheel==1.0"], backend="test_backend"
    )
    project_dir.joinpath("backend_reqs.txt").write_text("simplewheel")
    result = script.pip(
        "install",
        "--no-index",
        "-f",
        data.backends,
        "-f",
        data.packages,
        project_dir,
    )
    assert "Installing backend dependencies:" not in result.stdout
def test_pep517_install_with_no_cache_dir(
    script: PipTestEnvironment, tmpdir: Path, data: TestData
) -> None:
    """Check builds with a custom backends work, even with no cache."""
    project = make_project(tmpdir, requires=["test_backend"], backend="test_backend")
    outcome = script.pip(
        "install", "--no-cache-dir", "--no-index", "-f", data.backends, project
    )
    outcome.assert_installed("project", editable=False)
def make_pyproject_with_setup(
    tmpdir: Path, build_system: bool = True, set_backend: bool = True
) -> Tuple[Path, str]:
    """Create a setuptools-based project with both setup.py and pyproject.toml.

    The generated setup.py asserts whether the source directory is on
    sys.path: with an explicit build-backend it must NOT be, otherwise it
    must be. Returns ``(project_dir, distribution_name)``.
    """
    project_dir = tmpdir / "project"
    project_dir.mkdir()
    script_dir_on_path = True

    if build_system:
        buildsys: Dict[str, Any] = {"requires": ["setuptools", "wheel"]}
        if set_backend:
            buildsys["build-backend"] = "setuptools.build_meta"
            script_dir_on_path = False
        pyproject_contents = tomli_w.dumps({"build-system": buildsys})
    else:
        pyproject_contents = ""

    setup_parts = ["from setuptools import setup\n"]
    if script_dir_on_path:
        setup_parts.append("from pep517_test import __version__\n")
    else:
        setup_parts.append(
            "try:\n"
            "    import pep517_test\n"
            "except ImportError:\n"
            "    pass\n"
            "else:\n"
            '    raise RuntimeError("Source dir incorrectly on sys.path")\n'
        )
    setup_parts.append(
        'setup(name="pep517_test", version="0.1", packages=["pep517_test"])'
    )

    (project_dir / "pyproject.toml").write_text(pyproject_contents)
    (project_dir / "setup.py").write_text("".join(setup_parts))
    package_dir = project_dir / "pep517_test"
    package_dir.mkdir()
    (package_dir / "__init__.py").write_text('__version__ = "0.1"')
    return project_dir, "pep517_test"
def test_no_build_system_section(
    script: PipTestEnvironment, tmpdir: Path, data: TestData, common_wheels: Path
) -> None:
    """Check builds with setup.py, pyproject.toml, but no build-system section."""
    project, pkg_name = make_pyproject_with_setup(tmpdir, build_system=False)
    outcome = script.pip(
        "install", "--no-cache-dir", "--no-index", "-f", common_wheels, project
    )
    outcome.assert_installed(pkg_name, editable=False)
def test_no_build_backend_entry(
    script: PipTestEnvironment, tmpdir: Path, data: TestData, common_wheels: Path
) -> None:
    """Check builds with setup.py, pyproject.toml, but no build-backend entry."""
    project, pkg_name = make_pyproject_with_setup(tmpdir, set_backend=False)
    outcome = script.pip(
        "install", "--no-cache-dir", "--no-index", "-f", common_wheels, project
    )
    outcome.assert_installed(pkg_name, editable=False)
def test_explicit_setuptools_backend(
    script: PipTestEnvironment, tmpdir: Path, data: TestData, common_wheels: Path
) -> None:
    """Check builds with setup.py, pyproject.toml, and a build-backend entry."""
    project_dir, name = make_pyproject_with_setup(tmpdir)
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "-f",
        common_wheels,
        project_dir,
    )
    result.assert_installed(name, editable=False)
@pytest.mark.network
def test_pep517_and_build_options(
    script: PipTestEnvironment, tmpdir: Path, data: TestData, common_wheels: Path
) -> None:
    """--build-option is ignored, with a warning, when building via PEP 517.

    --build-option is a legacy setup.py-era flag; a PEP 517 build has no
    way to forward it to the backend.
    """
    project_dir, name = make_pyproject_with_setup(tmpdir)
    result = script.pip(
        "wheel",
        "--wheel-dir",
        tmpdir,
        "--build-option",
        "foo",
        "-f",
        common_wheels,
        project_dir,
        allow_stderr_warning=True,
    )
    assert "Ignoring --build-option when building" in result.stderr
    assert "using PEP 517" in result.stderr
@pytest.mark.network
def test_pep517_and_global_options(
    script: PipTestEnvironment, tmpdir: Path, data: TestData, common_wheels: Path
) -> None:
    """--global-option is ignored, with a warning, when building via PEP 517.

    Like --build-option above, --global-option only makes sense for legacy
    setup.py invocations.
    """
    project_dir, name = make_pyproject_with_setup(tmpdir)
    result = script.pip(
        "wheel",
        "--wheel-dir",
        tmpdir,
        "--global-option",
        "foo",
        "-f",
        common_wheels,
        project_dir,
        allow_stderr_warning=True,
    )
    assert "Ignoring --global-option when building" in result.stderr
    assert "using PEP 517" in result.stderr
| {
"content_hash": "a3b8f62814f1eac1ea517d3a9afe704c",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 88,
"avg_line_length": 32.219339622641506,
"alnum_prop": 0.6280652953663715,
"repo_name": "pypa/pip",
"id": "a642a3f8bfb4f93b3156d399622a304daa6a84d8",
"size": "13661",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tests/functional/test_pep517.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3137"
},
{
"name": "PowerShell",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "7137503"
}
],
"symlink_target": ""
} |
from typing import Dict, Optional, Tuple, Union

from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class IOStat(RemoteStats):
    """Collects per-device I/O statistics via remote ``iostat`` invocations.

    Two metric tables exist because the sysstat column names differ between
    the CentOS and Ubuntu versions in use (e.g. "avgqu-sz" vs "aqu-sz", and
    Ubuntu's table has no plain "await" column).
    """

    # (sample-key suffix, iostat column name, multiplier to convert units)
    METRICS_Centos = (
        ("rps", "r/s", 1),
        ("wps", "w/s", 1),
        ("rbps", "rkB/s", 1024),  # kB -> B
        ("wbps", "wkB/s", 1024),  # kB -> B
        ("avgqusz", "avgqu-sz", 1),
        ("await", "await", 1),
        ("util", "%util", 1),
    )

    METRICS_Ubuntu = (
        ("rps", "r/s", 1),
        ("wps", "w/s", 1),
        ("rbps", "rkB/s", 1024),  # kB -> B
        ("wbps", "wkB/s", 1024),  # kB -> B
        ("avgqusz", "aqu-sz", 1),
        ("util", "%util", 1),
    )

    def get_device_name(self, path: str) -> Tuple[Optional[str], Optional[bool]]:
        """Resolve the block device backing *path* via ``df``.

        Returns ``(device, is_lvm_or_swraid)``; ``(None, None)`` when the
        device cannot be determined.

        Bug fix: the return annotation used to be the invalid
        ``[Union[None, str], bool]`` (a list literal is not a type); it is
        now a proper ``Tuple`` reflecting that both items may be ``None``.
        """
        stdout = self.run("df '{}' | head -2 | tail -1".format(path),
                          quiet=True)
        if not stdout.return_code:
            name = stdout.split()[0]
            if name.startswith('/dev/mapper/'):  # LVM
                return name.split('/dev/mapper/')[1], True
            elif name.startswith('/dev/md'):  # Software RAID
                return name, True
            else:
                return name, False
        return None, None

    def get_iostat(self, device: str) -> Dict[str, str]:
        """Take one 1-second extended iostat sample for *device*.

        Returns a mapping of column header -> value: the command output's
        first half of tokens is the header row, the second half the values.
        """
        stdout = self.run(
            "iostat -dkxyN 1 1 {} | grep -v '^$' | tail -n 2".format(device)
        )
        tokens = stdout.split()
        header = tokens[:len(tokens) // 2]
        data = dict()
        for i, value in enumerate(tokens[len(tokens) // 2:]):
            data[header[i]] = value
        return data

    def _collect_samples(self,
                         partitions: Dict[str, str],
                         metrics) -> Dict[str, float]:
        """Shared implementation behind the per-distro collectors.

        Previously duplicated verbatim in get_samples_centos and
        get_samples_ubuntu; only the metric table differed.
        """
        samples = {}
        for purpose, path in partitions.items():
            device, _ = self.get_device_name(path)
            if device is not None:
                stats = self.get_iostat(device)
                for metric, column, multiplier in metrics:
                    key = "{}_{}".format(purpose, metric)
                    samples[key] = float(stats[column]) * multiplier
        return samples

    @parallel_task(server_side=True)
    def get_server_samples(self, partitions: dict) -> dict:
        return self.get_samples_centos(partitions['server'])

    @parallel_task(server_side=False)
    def get_client_samples(self, partitions: dict) -> dict:
        return self.get_samples_ubuntu(partitions['client'])

    def get_samples_centos(self, partitions: Dict[str, str]) -> Dict[str, float]:
        return self._collect_samples(partitions, self.METRICS_Centos)

    def get_samples_ubuntu(self, partitions: Dict[str, str]) -> Dict[str, float]:
        return self._collect_samples(partitions, self.METRICS_Ubuntu)
class DiskStats(IOStat):
    """Reports cumulative bytes read/written per device from /proc/diskstats."""

    def get_disk_stats(self, device: str):
        """Return ``(bytes_read, bytes_written)`` for *device* since boot."""
        device_name = device.split('/')[-1]
        # Fields 5 and 9 of a /proc/diskstats row are sectors read/written:
        # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
        stdout = self.run("grep '{}' /proc/diskstats".format(device_name))
        stats = stdout.split()
        sectors_read, sectors_written = int(stats[5]), int(stats[9])
        # https://www.kernel.org/doc/Documentation/block/queue-sysfs.txt
        # Whole NVMe devices expose their own queue dir; for partitions
        # (and other device types) the parent disk's queue is consulted.
        if 'nvme' in device and 'p1' not in device and 'p2' not in device:
            stdout = self.run('cat /sys/block/{}/queue/hw_sector_size'.format(device_name))
        else:
            parent = self.run('lsblk -no pkname {}'.format(device)).strip()
            stdout = self.run('cat /sys/block/{}/queue/hw_sector_size'.format(parent))
        sector_size = int(stdout)
        # NOTE(review): /proc/diskstats counts are documented in 512-byte
        # sectors regardless of hw_sector_size; multiplying by
        # hw_sector_size may overstate on 4K-native devices — confirm.
        return sectors_read * sector_size, sectors_written * sector_size

    @parallel_task(server_side=True)
    def get_server_samples(self, partitions: dict) -> dict:
        return self.get_samples(partitions['server'])

    def get_samples(self, partitions: dict) -> dict:
        """Collect byte counters for every partition, skipping LVM/RAID
        devices (their names don't map directly into /proc/diskstats rows)."""
        samples = {}
        for purpose, partition in partitions.items():
            device, lvm_swraid = self.get_device_name(partition)
            if device is not None and not lvm_swraid:
                bytes_read, bytes_written = self.get_disk_stats(device)
                samples[purpose + '_bytes_read'] = bytes_read
                samples[purpose + '_bytes_written'] = bytes_written
        return samples
| {
"content_hash": "1ce8eb2b4f1bb5369ad607b8a902f074",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 91,
"avg_line_length": 37.567796610169495,
"alnum_prop": 0.5519963907060681,
"repo_name": "couchbase/perfrunner",
"id": "62dbe5ace3d35349ad29c2ce577cbea41f176732",
"size": "4433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cbagent/collectors/libstats/iostat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1853"
},
{
"name": "Dockerfile",
"bytes": "2761"
},
{
"name": "Go",
"bytes": "37531"
},
{
"name": "Groovy",
"bytes": "46365"
},
{
"name": "HCL",
"bytes": "40219"
},
{
"name": "Inno Setup",
"bytes": "25281"
},
{
"name": "JavaScript",
"bytes": "14317"
},
{
"name": "Makefile",
"bytes": "2405"
},
{
"name": "Python",
"bytes": "2416900"
},
{
"name": "Ruby",
"bytes": "154"
},
{
"name": "Shell",
"bytes": "5016"
}
],
"symlink_target": ""
} |
__author__ = 'anthony'
# NOTE(review): this first import is immediately shadowed by the
# ``websocket`` name re-imported from tornado two lines below — confirm
# which websocket module the tests are meant to use.
import websocket
import os, sys, time
from tornado import template
from tornado import testing, httpserver, gen, websocket, httpclient
import json

# Make both this test directory and the project root importable so that
# ``import webrtc_server`` below resolves regardless of the invocation cwd.
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
PROJECT_DIR = os.path.abspath(os.path.dirname(SCRIPT_DIR))
sys.path.append(SCRIPT_DIR)
sys.path.append(PROJECT_DIR)
import webrtc_server
# Python 2 style import — urlencode/quote moved to urllib.parse in Python 3.
from urllib import urlencode, quote
from collections import OrderedDict

# Fixed peer identifiers shared by the test cases.
MY_CALLER_ID = 'ME'
TEST_PEER_ID1 = 'PEER1'
TEST_PEER_ID2 = 'PEER2'
TEST_PEER_ID3 = 'PEER3'
class BasicTestCase(testing.AsyncHTTPTestCase, testing.LogTrapTestCase):
    """Shared base class for webrtc_server tests.

    Resets the server-side peer/client registries between tests and
    provides helpers for building (websocket) URLs against the test
    server's ephemeral port.
    """

    def setUp(self):
        super(BasicTestCase, self).setUp()
        # for peer in webrtc_server.peers.values():
        #     peer.close()
        # for client in webrtc_server.clients.values():
        #     client.close()
        # Registries are replaced wholesale rather than closed entry by
        # entry (see the commented-out close() calls above).
        webrtc_server.peers = {}
        webrtc_server.clients = {}

    def make_url(self, path, protocol='ws', **kwargs):
        """Build an absolute URL for *path*; kwargs become the query string."""
        url = '%s://127.0.0.1:%s%s' % (protocol, self.get_http_port(), path)
        query_string = urlencode(OrderedDict(kwargs))
        if kwargs:
            url = '%s?%s' % (url, query_string)
        return url

    def make_relative_url(self, path, **kwargs):
        """Build a relative URL (path + query string) from kwargs."""
        query_string = urlencode(OrderedDict(kwargs))
        return '%s?%s' % (path, query_string)

    def _mk_ws_connection(self, path, **kwargs):
        # Opens a websocket to the test server; returns tornado's connect future.
        return websocket.websocket_connect(self.make_url(path, protocol='ws', **kwargs))

    def get_app(self):
        # Hook used by AsyncHTTPTestCase to obtain the application under test.
        return webrtc_server.app
if __name__ == "__main__":
    # Run all test cases in this module via unittest's CLI entry point.
    testing.unittest.main(verbosity=1)
"content_hash": "39b19e87006e4cec248bb49ff2742bb1",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 88,
"avg_line_length": 30,
"alnum_prop": 0.650314465408805,
"repo_name": "antsmc2/webrtc_app",
"id": "8b6a009daf286aa3c2c1e8217d7cfd77f6f7c243",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18455"
},
{
"name": "HTML",
"bytes": "19959"
},
{
"name": "JavaScript",
"bytes": "120512"
},
{
"name": "Python",
"bytes": "33435"
}
],
"symlink_target": ""
} |
"""1269. Number of Ways to Stay in the Same Place After Some Steps
https://leetcode.com/problems/number-of-ways-to-stay-in-the-same-place-after-some-steps/
You have a pointer at index 0 in an array of size arrLen. At each step, you
can move 1 position to the left, 1 position to the right in the array or stay
in the same place (The pointer should not be placed outside the array at any
time).
Given two integers steps and arrLen, return the number of ways such that your
pointer still at index 0 after exactly steps steps.
Since the answer may be too large, return it modulo 10^9 + 7.
Example 1:
Input: steps = 3, arrLen = 2
Output: 4
Explanation: There are 4 different ways to stay at index 0 after 3 steps.
Right, Left, Stay
Stay, Right, Left
Right, Stay, Left
Stay, Stay, Stay
Example 2:
Input: steps = 2, arrLen = 4
Output: 2
Explanation: There are 2 different ways to stay at index 0 after 2 steps
Right, Left
Stay, Stay
Example 3:
Input: steps = 4, arrLen = 2
Output: 8
Constraints:
1 <= steps <= 500
1 <= arrLen <= 10^6
"""
class Solution:
    def num_ways(self, steps: int, arr_len: int) -> int:
        """Count paths of exactly ``steps`` moves (left/right/stay) that
        start and end at index 0 without leaving ``[0, arr_len)``.

        Rolling 1-D DP over positions; only ``min(step + 1, arr_len)``
        cells are reachable after ``step`` moves. Result is taken modulo
        10**9 + 7.
        """
        mod = 1000000007
        current = [1]  # ways to reach each position after 0 moves
        for step in range(1, steps + 1):
            reachable = min(step + 1, arr_len)
            nxt = []
            for pos in range(reachable):
                # Arrive by staying, moving right from pos-1, or left from pos+1.
                ways = current[pos] if pos <= step - 1 else 0
                if pos - 1 >= 0:
                    ways = (ways + current[pos - 1]) % mod
                if pos + 1 <= step - 1 and pos + 1 <= arr_len - 1:
                    ways = (ways + current[pos + 1]) % mod
                if step == steps:
                    # Only position 0 matters on the final move.
                    return ways
                nxt.append(ways)
            current = nxt
        return current[0]
| {
"content_hash": "eaf00393937034ac355c3fb9008a2736",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 88,
"avg_line_length": 27.580645161290324,
"alnum_prop": 0.5824561403508772,
"repo_name": "isudox/leetcode-solution",
"id": "b52ddb0a5d632101f45ae21f4ddc099ad4c5cb74",
"size": "1710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-algorithm/leetcode/problem_1269.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groovy",
"bytes": "16121"
},
{
"name": "Java",
"bytes": "118043"
},
{
"name": "Python",
"bytes": "151015"
}
],
"symlink_target": ""
} |
import os
import mock
from st2tests.base import BaseActionAliasTestCase
from st2tests.fixturesloader import get_fixtures_base_path
from st2common.exceptions.content import ParseException
from st2common.models.db.actionalias import ActionAliasDB
__all__ = [
    'PackActionAliasUnitTestUtils'
]

# Pack fixture whose directory name deliberately differs from its pack ref;
# used to verify alias loading does not depend on the directory name.
PACK_PATH_1 = os.path.join(get_fixtures_base_path(), 'packs/pack_dir_name_doesnt_match_ref')
class PackActionAliasUnitTestUtils(BaseActionAliasTestCase):
    """Exercises the assertion helpers provided by BaseActionAliasTestCase."""

    action_alias_name = 'mock'
    # When True, _get_action_alias_db_by_name below returns a canned alias
    # object instead of loading one from a pack on disk.
    mock_get_action_alias_db_by_name = True

    def test_assertExtractedParametersMatch_success(self):
        """Parameters are extracted correctly for commands matching the format."""
        format_string = self.action_alias_db.formats[0]
        command = 'show last 3 metrics for my.host'
        expected_parameters = {
            'count': '3',
            'server': 'my.host'
        }
        self.assertExtractedParametersMatch(format_string=format_string,
                                            command=command,
                                            parameters=expected_parameters)

        format_string = self.action_alias_db.formats[0]
        command = 'show last 10 metrics for my.host.example'
        expected_parameters = {
            'count': '10',
            'server': 'my.host.example'
        }
        self.assertExtractedParametersMatch(format_string=format_string,
                                            command=command,
                                            parameters=expected_parameters)

    def test_assertExtractedParametersMatch_command_doesnt_match_format_string(self):
        """A non-matching command raises ParseException with a helpful message."""
        format_string = self.action_alias_db.formats[0]
        command = 'show last foo'
        expected_parameters = {}
        expected_msg = ('Command "show last foo" doesn\'t match format string '
                        '"show last {{count}} metrics for {{server}}"')
        self.assertRaisesRegexp(ParseException, expected_msg,
                                self.assertExtractedParametersMatch,
                                format_string=format_string,
                                command=command,
                                parameters=expected_parameters)

    def test_assertCommandMatchesExactlyOneFormatString(self):
        """The helper passes for exactly one match and fails for zero or many."""
        # Matches single format string
        format_strings = [
            'foo bar {{bar}}',
            'foo bar {{baz}} baz'
        ]
        command = 'foo bar a test=1'
        self.assertCommandMatchesExactlyOneFormatString(format_strings=format_strings,
                                                        command=command)

        # Matches multiple format strings
        format_strings = [
            'foo bar {{bar}}',
            'foo bar {{baz}}'
        ]
        command = 'foo bar a test=1'
        expected_msg = ('Command "foo bar a test=1" matched multiple format '
                        'strings: foo bar {{bar}}, foo bar {{baz}}')
        self.assertRaisesRegexp(AssertionError, expected_msg,
                                self.assertCommandMatchesExactlyOneFormatString,
                                format_strings=format_strings,
                                command=command)

        # Doesn't matches any format strings
        format_strings = [
            'foo bar {{bar}}',
            'foo bar {{baz}}'
        ]
        command = 'does not match foo'
        expected_msg = ('Command "does not match foo" didn\'t match any of the provided format '
                        'strings')
        self.assertRaisesRegexp(AssertionError, expected_msg,
                                self.assertCommandMatchesExactlyOneFormatString,
                                format_strings=format_strings,
                                command=command)

    @mock.patch.object(BaseActionAliasTestCase, '_get_base_pack_path',
                       mock.Mock(return_value=PACK_PATH_1))
    def test_base_class_works_when_pack_directory_name_doesnt_match_pack_name(self):
        # Verify that the alias can still be succesfuly loaded from disk even if the pack directory
        # name doesn't match "pack" resource attribute (aka pack ref)
        self.mock_get_action_alias_db_by_name = False
        action_alias_db = self._get_action_alias_db_by_name(name='alias1')
        self.assertEqual(action_alias_db.name, 'alias1')
        self.assertEqual(action_alias_db.pack, 'pack_name_not_the_same_as_dir_name')

    # Note: We mock the original method to make testing of all the edge cases easier
    def _get_action_alias_db_by_name(self, name):
        # Fall through to the real on-disk loader when mocking is disabled.
        if not self.mock_get_action_alias_db_by_name:
            return super(PackActionAliasUnitTestUtils, self)._get_action_alias_db_by_name(name)

        values = {
            'name': self.action_alias_name,
            'pack': 'mock',
            'formats': [
                'show last {{count}} metrics for {{server}}',
            ]
        }
        action_alias_db = ActionAliasDB(**values)
        return action_alias_db
| {
"content_hash": "ad2cc51e986556869bd3ab50eea68e55",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 99,
"avg_line_length": 41.837606837606835,
"alnum_prop": 0.5775280898876405,
"repo_name": "tonybaloney/st2",
"id": "b5101fcca59c84fd4cdd3341c9de10968773109e",
"size": "5675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2common/tests/unit/test_pack_action_alias_unit_testing_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "46066"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4278891"
},
{
"name": "Shell",
"bytes": "47687"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from chaco.tools.better_zoom import *
| {
"content_hash": "563b4a504ba16dbb62416865551b3ef0",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 38,
"avg_line_length": 38.5,
"alnum_prop": 0.7792207792207793,
"repo_name": "enthought/etsproxy",
"id": "f05045dd2d19146ef67f11ca986da875f720544d",
"size": "92",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/chaco/tools/better_zoom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
# Optional callback invoked with (pipeline response, deserialized result,
# response headers) — the shape follows the azure-core ``cls`` convention.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Serializer shared by the generated request builders in this module;
# client-side validation is turned off for these builders.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    build_service_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists agent pools of a build service.

    Auto-generated style: the URL template is filled from the serialized
    path arguments, then api-version and Accept are attached.
    """
    api_version = kwargs.pop('api_version', "2022-04-01")  # type: str

    accept = "application/json"
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
        "buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    _header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_query_parameters,
        headers=_header_parameters,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    build_service_name: str,
    agent_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a single build service agent pool.

    All remaining ``kwargs`` are forwarded unchanged to
    :class:`~azure.core.rest.HttpRequest`.
    """
    api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
    accept = "application/json"

    # URL: fill the ARM template with the serialized path parameters.
    url_template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}")  # pylint: disable=line-too-long
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
        "buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, 'str'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    }
    full_url = _format_url_section(url_template, **path_args)

    # Query string carries only the api-version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # The service responds with JSON.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=full_url, params=query, headers=headers, **kwargs)
def build_update_put_request_initial(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    build_service_name: str,
    agent_pool_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial HTTP PUT request that starts an agent-pool update.

    ``json``/``content`` carry the request body; all remaining ``kwargs``
    are forwarded unchanged to :class:`~azure.core.rest.HttpRequest`.
    """
    api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # URL: fill the ARM template with the serialized path parameters.
    url_template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}")  # pylint: disable=line-too-long
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
        "buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, 'str'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    }
    full_url = _format_url_section(url_template, **path_args)

    # Query string carries only the api-version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when a body serialization format was chosen.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=full_url,
        params=query,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
class BuildServiceAgentPoolOperations(object):
    """BuildServiceAgentPoolOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.appplatform.v2022_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Convenience alias so callers can reach the versioned model classes
    # through the operation group.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send requests; serializers for bodies.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        **kwargs: Any
    ) -> Iterable["_models.BuildServiceAgentPoolResourceCollection"]:
        """List build service agent pool.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param build_service_name: The name of the build service resource.
        :type build_service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BuildServiceAgentPoolResourceCollection or the
         result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_04_01.models.BuildServiceAgentPoolResourceCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildServiceAgentPoolResourceCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Builds the request for the first page (operation URL template) or a
        # continuation page (the next_link URL returned by the service).
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    build_service_name=build_service_name,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    build_service_name=build_service_name,
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation links are always fetched with GET.
                request.method = "GET"
            return request
        # Deserializes one page into (next page link, iterator of items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("BuildServiceAgentPoolResourceCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Sends the request for one page; raises mapped/HTTP errors on non-200.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools"}  # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        agent_pool_name: str,
        **kwargs: Any
    ) -> "_models.BuildServiceAgentPoolResource":
        """Get build service agent pool.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param build_service_name: The name of the build service resource.
        :type build_service_name: str
        :param agent_pool_name: The name of the build service agent pool resource.
        :type agent_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BuildServiceAgentPoolResource, or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2022_04_01.models.BuildServiceAgentPoolResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildServiceAgentPoolResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            build_service_name=build_service_name,
            agent_pool_name=agent_pool_name,
            api_version=api_version,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BuildServiceAgentPoolResource', pipeline_response)
        if cls:
            # Hand the raw response, deserialized body and (empty) headers
            # dict to the caller-supplied hook.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}"}  # type: ignore
    def _update_put_initial(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        agent_pool_name: str,
        agent_pool_resource: "_models.BuildServiceAgentPoolResource",
        **kwargs: Any
    ) -> "_models.BuildServiceAgentPoolResource":
        # Sends the initial PUT of the long-running update operation;
        # ``begin_update_put`` wraps this with polling.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildServiceAgentPoolResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(agent_pool_resource, 'BuildServiceAgentPoolResource')
        request = build_update_put_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            build_service_name=build_service_name,
            agent_pool_name=agent_pool_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self._update_put_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry the resource body.
        if response.status_code == 200:
            deserialized = self._deserialize('BuildServiceAgentPoolResource', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('BuildServiceAgentPoolResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_put_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}"}  # type: ignore
    @distributed_trace
    def begin_update_put(
        self,
        resource_group_name: str,
        service_name: str,
        build_service_name: str,
        agent_pool_name: str,
        agent_pool_resource: "_models.BuildServiceAgentPoolResource",
        **kwargs: Any
    ) -> LROPoller["_models.BuildServiceAgentPoolResource"]:
        """Create or update build service agent pool.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param build_service_name: The name of the build service resource.
        :type build_service_name: str
        :param agent_pool_name: The name of the build service agent pool resource.
        :type agent_pool_name: str
        :param agent_pool_resource: Parameters for the update operation.
        :type agent_pool_resource:
         ~azure.mgmt.appplatform.v2022_04_01.models.BuildServiceAgentPoolResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either BuildServiceAgentPoolResource or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_04_01.models.BuildServiceAgentPoolResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BuildServiceAgentPoolResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial PUT when not resuming from a saved token;
        # ``cls=lambda x,y,z: x`` keeps the raw pipeline response for polling.
        if cont_token is None:
            raw_result = self._update_put_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                build_service_name=build_service_name,
                agent_pool_name=agent_pool_name,
                agent_pool_resource=agent_pool_resource,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        # Deserializes the final polling response into the resource model.
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('BuildServiceAgentPoolResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling method.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_put.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}"}  # type: ignore
| {
"content_hash": "bbfb3072853b7c4a81f7e91aa33f3760",
"timestamp": "",
"source": "github",
"line_count": 467,
"max_line_length": 253,
"avg_line_length": 44.226980728051394,
"alnum_prop": 0.6519802459571996,
"repo_name": "Azure/azure-sdk-for-python",
"id": "46ed30daead5a6afa76f13b5f949ead834baf412",
"size": "21154",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2022_04_01/operations/_build_service_agent_pool_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import subprocess
import os
import sys
from openerp import report
import tempfile
import time
import logging
from mako.template import Template
from mako.lookup import TemplateLookup
from mako import exceptions
from openerp import netsvc
from openerp import pooler
from report_helper import WebKitHelper
from openerp.report.report_sxw import *
from openerp import addons
from openerp import tools
from openerp.tools.translate import _
from openerp.osv.osv import except_osv
_logger = logging.getLogger(__name__)


def mako_template(text):
    """Return a Mako ``Template`` built from *text*.

    The template is configured for UTF-8 input and output, and a fresh
    ``TemplateLookup`` is attached so that ``<%include>`` and template
    inheritance keep working.
    """
    lookup = TemplateLookup()  # required so inclusion/inheritance resolve
    return Template(
        text,
        input_encoding='utf-8',
        output_encoding='utf-8',
        lookup=lookup,
    )
class WebKitParser(report_sxw):
    """Custom class that use webkit to render HTML reports
        Code partially taken from report openoffice. Thanks guys :)
    """
    def __init__(self, name, table, rml=False, parser=False,
                 header=True, store=False):
        # Set before the base-class __init__ so they exist even if rendering
        # is attempted before create_single_pdf() populated them.
        self.parser_instance = False
        self.localcontext = {}
        report_sxw.__init__(self, name, table, rml, parser,
                            header, store)
    def get_lib(self, cursor, uid):
        """Return the lib wkhtml path"""
        # Prefer the explicit ``webkit_path`` ir.config_parameter; otherwise
        # search the executable on the PATH (plus frozen/root-path locations).
        proxy = self.pool.get('ir.config_parameter')
        webkit_path = proxy.get_param(cursor, uid, 'webkit_path')
        if not webkit_path:
            try:
                defpath = os.environ.get('PATH', os.defpath).split(os.pathsep)
                if hasattr(sys, 'frozen'):
                    # Frozen deployments may ship the binary next to the cwd.
                    defpath.append(os.getcwd())
                if tools.config['root_path']:
                    defpath.append(os.path.dirname(tools.config['root_path']))
                webkit_path = tools.which('wkhtmltopdf', path=os.pathsep.join(defpath))
            except IOError:
                # tools.which raises IOError when the binary is not found.
                webkit_path = None
        if webkit_path:
            return webkit_path
        raise except_osv(
                         _('Wkhtmltopdf library path is not set'),
                         _('Please install executable on your system' \
                         ' (sudo apt-get install wkhtmltopdf) or download it from here:' \
                         ' http://code.google.com/p/wkhtmltopdf/downloads/list and set the' \
                         ' path in the ir.config_parameter with the webkit_path key.' \
                         'Minimal version is 0.9.9')
                        )
    def generate_pdf(self, comm_path, report_xml, header, footer, html_list, webkit_header=False):
        """Call webkit in order to generate pdf"""
        if not webkit_header:
            webkit_header = report_xml.webkit_header
        tmp_dir = tempfile.gettempdir()
        # NOTE(review): mktemp() only reserves a name, not the file — racy on
        # shared temp dirs; kept as-is to preserve existing behaviour.
        out_filename = tempfile.mktemp(suffix=".pdf", prefix="webkit.tmp.")
        file_to_del = [out_filename]
        if comm_path:
            command = [comm_path]
        else:
            command = ['wkhtmltopdf']
        command.append('--quiet')
        # default to UTF-8 encoding. Use <meta charset="latin-1"> to override.
        command.extend(['--encoding', 'utf-8'])
        # Header/footer HTML is written to temp files and handed to
        # wkhtmltopdf via its command-line options.
        if header :
            head_file = file( os.path.join(
                tmp_dir,
                str(time.time()) + '.head.html'
                ),
                'w'
            )
            head_file.write(self._sanitize_html(header))
            head_file.close()
            file_to_del.append(head_file.name)
            command.extend(['--header-html', head_file.name])
        if footer :
            foot_file = file( os.path.join(
                tmp_dir,
                str(time.time()) + '.foot.html'
                ),
                'w'
            )
            foot_file.write(self._sanitize_html(footer))
            foot_file.close()
            file_to_del.append(foot_file.name)
            command.extend(['--footer-html', foot_file.name])
        # Margins/page options come from the webkit_header record; commas are
        # normalised to dots for locales that use decimal commas.
        if webkit_header.margin_top :
            command.extend(['--margin-top', str(webkit_header.margin_top).replace(',', '.')])
        if webkit_header.margin_bottom :
            command.extend(['--margin-bottom', str(webkit_header.margin_bottom).replace(',', '.')])
        if webkit_header.margin_left :
            command.extend(['--margin-left', str(webkit_header.margin_left).replace(',', '.')])
        if webkit_header.margin_right :
            command.extend(['--margin-right', str(webkit_header.margin_right).replace(',', '.')])
        if webkit_header.orientation :
            command.extend(['--orientation', str(webkit_header.orientation).replace(',', '.')])
        if webkit_header.format :
            command.extend(['--page-size', str(webkit_header.format).replace(',', '.')])
        # One body file per rendered HTML chunk, all appended to the command.
        count = 0
        for html in html_list :
            html_file = file(os.path.join(tmp_dir, str(time.time()) + str(count) +'.body.html'), 'w')
            count += 1
            html_file.write(self._sanitize_html(html))
            html_file.close()
            file_to_del.append(html_file.name)
            command.append(html_file.name)
        command.append(out_filename)
        # Capture wkhtmltopdf's stderr into a temp file for diagnostics.
        stderr_fd, stderr_path = tempfile.mkstemp(text=True)
        file_to_del.append(stderr_path)
        try:
            status = subprocess.call(command, stderr=stderr_fd)
            os.close(stderr_fd)  # ensure flush before reading
            stderr_fd = None  # avoid closing again in finally block
            fobj = open(stderr_path, 'r')
            error_message = fobj.read()
            fobj.close()
            if not error_message:
                error_message = _('No diagnosis message was provided')
            else:
                error_message = _('The following diagnosis message was provided:\n') + error_message
            if status :
                # Non-zero exit status: surface wkhtmltopdf's diagnostics.
                raise except_osv(_('Webkit error' ),
                                 _("The command 'wkhtmltopdf' failed with error code = %s. Message: %s") % (status, error_message))
            pdf_file = open(out_filename, 'rb')
            pdf = pdf_file.read()
            pdf_file.close()
        finally:
            # Best-effort cleanup of every temp file created above.
            if stderr_fd is not None:
                os.close(stderr_fd)
            for f_to_del in file_to_del:
                try:
                    os.unlink(f_to_del)
                except (OSError, IOError), exc:
                    _logger.error('cannot remove file %s: %s', f_to_del, exc)
        return pdf
    def translate_call(self, src):
        """Translate String."""
        # Look up *src* in the report translations for the current language,
        # first scoped to this template, then unscoped for compatibility.
        ir_translation = self.pool.get('ir.translation')
        name = self.tmpl and 'addons/' + self.tmpl or None
        res = ir_translation._get_source(self.parser_instance.cr, self.parser_instance.uid,
                                         name, 'report', self.parser_instance.localcontext.get('lang', 'en_US'), src)
        if res == src:
            # no translation defined, fallback on None (backward compatibility)
            res = ir_translation._get_source(self.parser_instance.cr, self.parser_instance.uid,
                                             None, 'report', self.parser_instance.localcontext.get('lang', 'en_US'), src)
        if not res :
            return src
        return res
    # override needed to keep the attachments storing procedure
    def create_single_pdf(self, cursor, uid, ids, data, report_xml, context=None):
        """generate the PDF"""
        if context is None:
            context={}
        htmls = []
        # Non-webkit reports fall back to the standard rendering path.
        if report_xml.report_type != 'webkit':
            return super(WebKitParser,self).create_single_pdf(cursor, uid, ids, data, report_xml, context=context)
        self.parser_instance = self.parser(cursor,
                                           uid,
                                           self.name2,
                                           context=context)
        self.pool = pooler.get_pool(cursor.dbname)
        objs = self.getObjects(cursor, uid, ids, context)
        self.parser_instance.set_context(objs, data, ids, report_xml.report_type)
        # Template source: a file referenced by the report record, or inline
        # template data stored on the record itself.
        template = False
        if report_xml.report_file :
            # backward-compatible if path in Windows format
            report_path = report_xml.report_file.replace("\\", "/")
            path = addons.get_module_resource(*report_path.split('/'))
            if path and os.path.exists(path) :
                template = file(path).read()
        if not template and report_xml.report_webkit_data :
            template = report_xml.report_webkit_data
        if not template :
            raise except_osv(_('Error!'), _('Webkit report template not found!'))
        header = report_xml.webkit_header.html
        footer = report_xml.webkit_header.footer_html
        if not header and report_xml.header:
            raise except_osv(
                  _('No header defined for this Webkit report!'),
                  _('Please set a header in company settings.')
              )
        if not report_xml.header :
            # Header disabled on the report: use the module's minimal default.
            header = ''
            default_head = addons.get_module_resource('report_webkit', 'default_header.html')
            with open(default_head,'r') as f:
                header = f.read()
        css = report_xml.webkit_header.css
        if not css :
            css = ''
        #default_filters=['unicode', 'entity'] can be used to set global filter
        body_mako_tpl = mako_template(template)
        helper = WebKitHelper(cursor, uid, report_xml.id, context)
        # Precise mode renders one HTML document per record; otherwise all
        # records are rendered in a single pass.
        if report_xml.precise_mode:
            for obj in objs:
                self.parser_instance.localcontext['objects'] = [obj]
                try :
                    html = body_mako_tpl.render(helper=helper,
                                                css=css,
                                                _=self.translate_call,
                                                **self.parser_instance.localcontext)
                    htmls.append(html)
                except Exception:
                    msg = exceptions.text_error_template().render()
                    _logger.error(msg)
                    raise except_osv(_('Webkit render!'), msg)
        else:
            try :
                html = body_mako_tpl.render(helper=helper,
                                            css=css,
                                            _=self.translate_call,
                                            **self.parser_instance.localcontext)
                htmls.append(html)
            except Exception:
                msg = exceptions.text_error_template().render()
                _logger.error(msg)
                raise except_osv(_('Webkit render!'), msg)
        head_mako_tpl = mako_template(header)
        try :
            head = head_mako_tpl.render(helper=helper,
                                        css=css,
                                        _=self.translate_call,
                                        _debug=False,
                                        **self.parser_instance.localcontext)
        except Exception:
            raise except_osv(_('Webkit render!'),
                exceptions.text_error_template().render())
        foot = False
        if footer :
            foot_mako_tpl = mako_template(footer)
            try :
                foot = foot_mako_tpl.render(helper=helper,
                                            css=css,
                                            _=self.translate_call,
                                            **self.parser_instance.localcontext)
            except:
                msg = exceptions.text_error_template().render()
                _logger.error(msg)
                raise except_osv(_('Webkit render!'), msg)
        if report_xml.webkit_debug :
            # Debug mode: return the header template rendered with the raw
            # bodies embedded (via _debug) as HTML instead of a PDF.
            try :
                deb = head_mako_tpl.render(helper=helper,
                                           css=css,
                                           _debug=tools.ustr("\n".join(htmls)),
                                           _=self.translate_call,
                                           **self.parser_instance.localcontext)
            except Exception:
                msg = exceptions.text_error_template().render()
                _logger.error(msg)
                raise except_osv(_('Webkit render!'), msg)
            return (deb, 'html')
        # NOTE(review): ``bin`` shadows the builtin of the same name.
        bin = self.get_lib(cursor, uid)
        pdf = self.generate_pdf(bin, report_xml, head, foot, htmls)
        return (pdf, 'pdf')
    def create(self, cursor, uid, ids, data, context=None):
        """We override the create function in order to handle generator
        Code taken from report openoffice. Thanks guys :) """
        pool = pooler.get_pool(cursor.dbname)
        ir_obj = pool.get('ir.actions.report.xml')
        # self.name is prefixed with 'report.'; strip it for the lookup.
        report_xml_ids = ir_obj.search(cursor, uid,
                [('report_name', '=', self.name[7:])], context=context)
        if report_xml_ids:
            report_xml = ir_obj.browse(cursor,
                                       uid,
                                       report_xml_ids[0],
                                       context=context)
            # Clear the RML/SXW payloads on the in-memory record so the base
            # implementation does not try to use them.
            report_xml.report_rml = None
            report_xml.report_rml_content = None
            report_xml.report_sxw_content_data = None
            report_xml.report_sxw_content = None
            report_xml.report_sxw = None
        else:
            return super(WebKitParser, self).create(cursor, uid, ids, data, context)
        if report_xml.report_type != 'webkit':
            return super(WebKitParser, self).create(cursor, uid, ids, data, context)
        result = self.create_source_pdf(cursor, uid, ids, data, report_xml, context)
        if not result:
            return (False,False)
        return result
    def _sanitize_html(self, html):
        """wkhtmltopdf expects the html page to declare a doctype.
        """
        # Prepend a doctype only when the document does not already start
        # with one (case-insensitive check on the first 9 characters).
        if html and html[:9].upper() != "<!DOCTYPE":
            html = "<!DOCTYPE html>\n" + html
        return html
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "190c019cb95368bee2382f2e26dc2de3",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 131,
"avg_line_length": 44.228125,
"alnum_prop": 0.5203843708047764,
"repo_name": "ntiufalara/openerp7",
"id": "c6a30f5e5791b1c53071ec681e315c20402927d6",
"size": "15593",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "openerp/addons/report_webkit/webkit_report.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C#",
"bytes": "93691"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "583265"
},
{
"name": "Groff",
"bytes": "8138"
},
{
"name": "HTML",
"bytes": "125159"
},
{
"name": "JavaScript",
"bytes": "5109152"
},
{
"name": "Makefile",
"bytes": "14036"
},
{
"name": "NSIS",
"bytes": "14114"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9373763"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "6430"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
} |
"""Provides device automations for Climate."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
state as state_trigger,
)
from homeassistant.const import (
CONF_ABOVE,
CONF_BELOW,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_FOR,
CONF_PLATFORM,
CONF_TYPE,
PERCENTAGE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN, const
# Trigger types this integration exposes for climate entities.
TRIGGER_TYPES = {
    "current_temperature_changed",
    "current_humidity_changed",
    "hvac_mode_changed",
}
# Schema for "hvac_mode_changed": requires the target HVAC mode ("to").
HVAC_MODE_TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_TYPE): "hvac_mode_changed",
        vol.Required(state_trigger.CONF_TO): vol.In(const.HVAC_MODES),
    }
)
# Schema for the numeric current-temperature/humidity triggers; at least one
# of "above"/"below" must be provided.
CURRENT_TRIGGER_SCHEMA = vol.All(
    DEVICE_TRIGGER_BASE_SCHEMA.extend(
        {
            vol.Required(CONF_ENTITY_ID): cv.entity_id,
            vol.Required(CONF_TYPE): vol.In(
                ["current_temperature_changed", "current_humidity_changed"]
            ),
            vol.Optional(CONF_BELOW): vol.Any(vol.Coerce(float)),
            vol.Optional(CONF_ABOVE): vol.Any(vol.Coerce(float)),
            vol.Optional(CONF_FOR): cv.positive_time_period_dict,
        }
    ),
    cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
# A trigger config must match one of the two schemas above.
TRIGGER_SCHEMA = vol.Any(HVAC_MODE_TRIGGER_SCHEMA, CURRENT_TRIGGER_SCHEMA)
async def async_get_triggers(
    hass: HomeAssistant, device_id: str
) -> list[dict[str, Any]]:
    """List device triggers for Climate devices."""
    registry = await entity_registry.async_get_registry(hass)
    triggers: list[dict[str, Any]] = []
    for entry in entity_registry.async_entries_for_device(registry, device_id):
        # Only climate entities contribute triggers.
        if entry.domain != DOMAIN:
            continue
        state = hass.states.get(entry.entity_id)
        common = {
            CONF_PLATFORM: "device",
            CONF_DEVICE_ID: device_id,
            CONF_DOMAIN: DOMAIN,
            CONF_ENTITY_ID: entry.entity_id,
        }
        # Every climate entity gets the HVAC-mode trigger; the numeric
        # triggers are offered only when the entity reports the attribute.
        supported = ["hvac_mode_changed"]
        if state and const.ATTR_CURRENT_TEMPERATURE in state.attributes:
            supported.append("current_temperature_changed")
        if state and const.ATTR_CURRENT_HUMIDITY in state.attributes:
            supported.append("current_humidity_changed")
        triggers.extend({**common, CONF_TYPE: trigger_type} for trigger_type in supported)
    return triggers
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: dict,
) -> CALLBACK_TYPE:
    """Attach a trigger."""
    trigger_type = config[CONF_TYPE]

    if trigger_type == "hvac_mode_changed":
        # Delegate to a state trigger: fire when the entity moves from any
        # other HVAC mode into the configured one.
        to_mode = config[state_trigger.CONF_TO]
        state_config = {
            state_trigger.CONF_PLATFORM: "state",
            state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
            state_trigger.CONF_TO: to_mode,
            state_trigger.CONF_FROM: [
                mode for mode in const.HVAC_MODES if mode != to_mode
            ],
        }
        if CONF_FOR in config:
            state_config[CONF_FOR] = config[CONF_FOR]
        state_config = state_trigger.TRIGGER_SCHEMA(state_config)
        return await state_trigger.async_attach_trigger(
            hass, state_config, action, automation_info, platform_type="device"
        )

    # Numeric triggers watch an entity attribute via a value template.
    if trigger_type == "current_temperature_changed":
        value_template = "{{ state.attributes.current_temperature }}"
    else:
        value_template = "{{ state.attributes.current_humidity }}"
    numeric_state_config = {
        numeric_state_trigger.CONF_PLATFORM: "numeric_state",
        numeric_state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
        numeric_state_trigger.CONF_VALUE_TEMPLATE: value_template,
    }
    # Copy the optional threshold/duration settings straight through.
    for option in (CONF_ABOVE, CONF_BELOW, CONF_FOR):
        if option in config:
            numeric_state_config[option] = config[option]
    numeric_state_config = numeric_state_trigger.TRIGGER_SCHEMA(numeric_state_config)
    return await numeric_state_trigger.async_attach_trigger(
        hass, numeric_state_config, action, automation_info, platform_type="device"
    )
async def async_get_trigger_capabilities(
    hass: HomeAssistant, config: ConfigType
) -> dict[str, vol.Schema]:
    """List trigger capabilities."""
    trigger_type = config[CONF_TYPE]
    # hvac_action_changed exposes no extra configuration options.
    if trigger_type == "hvac_action_changed":
        return {}
    # hvac_mode_changed supports only an optional "for" duration.
    if trigger_type == "hvac_mode_changed":
        return {
            "extra_fields": vol.Schema(
                {vol.Optional(CONF_FOR): cv.positive_time_period_dict}
            )
        }
    # Numeric triggers: above/below thresholds labelled with the right unit.
    unit_of_measurement = (
        hass.config.units.temperature_unit
        if trigger_type == "current_temperature_changed"
        else PERCENTAGE
    )
    threshold_fields = {
        vol.Optional(
            CONF_ABOVE, description={"suffix": unit_of_measurement}
        ): vol.Coerce(float),
        vol.Optional(
            CONF_BELOW, description={"suffix": unit_of_measurement}
        ): vol.Coerce(float),
        vol.Optional(CONF_FOR): cv.positive_time_period_dict,
    }
    return {"extra_fields": vol.Schema(threshold_fields)}
| {
"content_hash": "1c222c13458784af67c757d76d0f7048",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 85,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6108452950558214,
"repo_name": "FreekingDean/home-assistant",
"id": "4ff2e8fe47755b7c55fc5fbc7fce08688b796597",
"size": "6270",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/climate/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""Tests for uitls.py"""
from amaranth import Signal, Module
from ..util import TestBase
from .utils import delay
import random
class DelayTest(TestBase):
    """Tests the delay function."""

    def create_dut(self):
        # Build a module whose outputs are the input delayed by 1..3 cycles.
        m = Module()
        self.in_ = Signal(8)
        self.outs_ = delay(m, self.in_, 3)
        return m

    def test_it(self):
        # Pad with 3 zeros at the end so every delayed tap can be checked.
        samples = [random.randrange(256) for _ in range(20)] + [0, 0, 0]

        def process():
            for index, value in enumerate(samples):
                yield self.in_.eq(value)
                yield
                # Each tap j should hold the sample seen j cycles ago.
                for tap in range(3):
                    self.assertEqual((yield self.outs_[tap]), samples[index - tap])

        self.run_sim(process, False)
| {
"content_hash": "36099821cdc5fac41b216181ce9ea02f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 27.642857142857142,
"alnum_prop": 0.5555555555555556,
"repo_name": "google/CFU-Playground",
"id": "c11625a30a3b38ef3ee7e0eb5de020aa9d11d201",
"size": "1368",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "proj/hps_accel/gateware/gen2/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3800"
},
{
"name": "C",
"bytes": "449862"
},
{
"name": "C++",
"bytes": "4931362"
},
{
"name": "CMake",
"bytes": "976"
},
{
"name": "Dockerfile",
"bytes": "1026"
},
{
"name": "Jupyter Notebook",
"bytes": "35820"
},
{
"name": "Makefile",
"bytes": "40046"
},
{
"name": "Python",
"bytes": "1764584"
},
{
"name": "RobotFramework",
"bytes": "6125"
},
{
"name": "Scala",
"bytes": "18649"
},
{
"name": "Shell",
"bytes": "25687"
},
{
"name": "SystemVerilog",
"bytes": "6923"
},
{
"name": "Verilog",
"bytes": "6884686"
}
],
"symlink_target": ""
} |
"""
Example of a Cauchy distribution
--------------------------------
Figure 3.11.
This shows an example of a Cauchy distribution with various parameters.
We'll generate the distribution using::
dist = scipy.stats.cauchy(...)
Where ... should be filled in with the desired distribution parameters
Once we have defined the distribution parameters in this way, these
distribution objects have many useful methods; for example:
* ``dist.pmf(x)`` computes the Probability Mass Function at values ``x``
in the case of discrete distributions
* ``dist.pdf(x)`` computes the Probability Density Function at values ``x``
in the case of continuous distributions
* ``dist.rvs(N)`` computes ``N`` random variables distributed according
to the given distribution
Many further options exist; refer to the documentation of ``scipy.stats``
for more details.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import cauchy
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX.  This may
# result in an error if LaTeX is not installed on your system.  In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Define the distribution parameters to be plotted.
# gamma is the Cauchy scale parameter (half-width at half-maximum);
# mu is the location parameter (the peak of the pdf).
gamma_values = [0.5, 1.0, 2.0]
linestyles = ['-', '--', ':']
mu = 0
# Evaluation grid; wider than the plotted x-range set below.
x = np.linspace(-10, 10, 1000)
#------------------------------------------------------------
# plot the distributions
# NOTE(review): `fig, ax` are created but the pyplot state API (plt.*) is
# used for the actual plotting calls below.
fig, ax = plt.subplots(figsize=(5, 3.75))
for gamma, ls in zip(gamma_values, linestyles):
    # scipy.stats.cauchy(loc, scale): one frozen distribution per gamma
    dist = cauchy(mu, gamma)
    plt.plot(x, dist.pdf(x), ls=ls, color='black',
             label=r'$\mu=%i,\ \gamma=%.1f$' % (mu, gamma))
plt.xlim(-4.5, 4.5)
plt.ylim(0, 0.65)
plt.xlabel('$x$')
plt.ylabel(r'$p(x|\mu,\gamma)$')
plt.title('Cauchy Distribution')
plt.legend()
plt.show()
| {
"content_hash": "f860cb98b2c1237d63fb65536fdf77c4",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 33.859154929577464,
"alnum_prop": 0.6518302828618968,
"repo_name": "eramirem/astroML",
"id": "80687df8b9cada1cb8c11328671a7cbfd4a0b4e6",
"size": "2404",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "book_figures/chapter3/fig_cauchy_distribution.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "696"
},
{
"name": "Python",
"bytes": "1083821"
}
],
"symlink_target": ""
} |
import numpy as np
def false_color(id_map):
    """Map each distinct id in ``id_map`` to a random RGB color.

    Parameters
    ----------
    id_map : 2D or 3D numpy array with id values

    Returns
    -------
    false_color : array with the same spatial shape as the input plus a
        trailing axis of 3 (RGB) values for each position. Colors are drawn
        with ``np.random``, so seed the global state for reproducible output.
    """
    ids = np.unique(id_map)
    nids = len(ids)
    # assign a random color to each id
    colors = np.random.randint(0, 256, (nids, 3))
    # promote a 2D map to a single-slice 3D volume so one code path handles both
    if id_map.ndim == 2:
        id_map = id_map[np.newaxis, ...]
    # output volume: original (possibly promoted) shape plus 3 color channels
    image_false_color = np.zeros(id_map.shape + (3,))
    # paint every voxel belonging to a given id with that id's color
    for i, label in enumerate(ids):
        px, py, pz = np.where(id_map == label)
        image_false_color[px, py, pz, :] = colors[i, :]
    # drop the axis added for 2D inputs (and any other singleton axes,
    # matching the original behavior of np.squeeze)
    return np.squeeze(image_false_color)
| {
"content_hash": "6d915d064530789a094fb955af8d2585",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 27.68421052631579,
"alnum_prop": 0.5437262357414449,
"repo_name": "guillempalou/scikit-cv",
"id": "e57234c5aef29653e9eac87d42fb8ddeb20f6f9c",
"size": "1052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skcv/util/false_color.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16403"
},
{
"name": "JavaScript",
"bytes": "54513"
},
{
"name": "Python",
"bytes": "107299"
},
{
"name": "Shell",
"bytes": "6705"
},
{
"name": "TeX",
"bytes": "63238"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.io as sio
np.random.seed(0)
VGG_MEAN = [103.939, 116.779, 123.68]
def read_mat(path):
    """Load a numpy array previously saved with :func:`write_mat`."""
    return np.load(path)

def write_mat(path, m):
    """Save array ``m`` to ``path`` in numpy ``.npy`` format."""
    np.save(path, m)

def read_ids(path):
    """Return the lines of ``path`` as a list, trailing newlines stripped.

    Uses a context manager so the file handle is closed deterministically;
    the original left the handle open until garbage collection.
    """
    with open(path) as f:
        return [line.rstrip('\n') for line in f]
class Batch_Feeder:
    """Feeds padded .mat-file batches (direction/depth/weight/semantic-seg maps)
    for training or evaluation on the kitti / cityscapes / pascal datasets.

    Training mode yields ground-truth batches with optional random
    left-right flipping; eval mode yields only direction and mask batches.
    """
    def __init__(self, dataset, indices, train, batchSize, padWidth, padHeight, flip=False, keepEmpty=False):
        # indices: channel indices selected out of the semantic-seg 'mask'
        # flip: enable random horizontal flip augmentation (train only)
        # keepEmpty: keep samples whose selected mask channels are all zero
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._dataset = dataset
        self._indices = indices
        self._train = train
        self._batchSize = batchSize
        self._padWidth = padWidth
        self._padHeight = padHeight
        self._flip = flip
        self._keepEmpty = keepEmpty
    def set_paths(self, idList=None, gtDir=None, ssDir=None):
        """Build the per-sample path table.

        Train entries are [id, gt_path, ss_path]; eval entries are
        [id, ss_path] (only two elements — see note in next_batch).
        """
        self._paths = []
        if self._train:
            for id in idList:
                if self._dataset == "kitti":
                    self._paths.append([id, gtDir+'/'+id+'.mat', ssDir+'/'+id+'.mat'])
                elif self._dataset == "cityscapes" or self._dataset == "pascal":
                    self._paths.append([id,
                                        gtDir + '/' + id + '_unified_GT.mat',
                                        ssDir + '/' + id + '_unified_ss.mat'])
        else:
            for id in idList:
                if self._dataset == "kitti":
                    self._paths.append([id, ssDir+'/'+id+'.mat'])
                elif self._dataset == "cityscapes" or self._dataset == "pascal":
                    self._paths.append([id,
                                        ssDir + '/' + id + '_unified_ss.mat'])
        self._numData = len(self._paths)
        # strict inequality: there must be more samples than one batch
        assert self._batchSize < self._numData
    def shuffle(self):
        """Shuffle the sample order in place (called at each epoch wrap)."""
        np.random.shuffle(self._paths)
    def next_batch(self):
        """Return the next batch.

        Train: (dirBatch, gtBatch, weightBatch, ssBatch, idBatch).
        Eval:  (dirBatch, ssBatch, idBatch).
        """
        idBatch = []
        dirBatch = []
        gtBatch = []
        ssBatch = []
        weightBatch = []
        if self._train:
            # keep scanning samples until the batch is full; empty-mask
            # samples are skipped unless keepEmpty was requested
            while (len(idBatch) < self._batchSize):
                ss = (sio.loadmat(self._paths[self._index_in_epoch][2])['mask']).astype(float)
                # collapse the selected channels into a single presence map
                ss = np.sum(ss[:,:,self._indices], 2)
                if ss.sum() > 0 or self._keepEmpty:
                    idBatch.append(self._paths[self._index_in_epoch][0])
                    # NOTE: `dir` shadows the builtin; left unchanged here
                    dir = (sio.loadmat(self._paths[self._index_in_epoch][1])['dir_map']).astype(float)
                    gt = (sio.loadmat(self._paths[self._index_in_epoch][1])['depth_map']).astype(float)
                    weight = (sio.loadmat(self._paths[self._index_in_epoch][1])['weight_map']).astype(float)
                    dirBatch.append(self.pad(dir))
                    gtBatch.append(self.pad(gt))
                    weightBatch.append(self.pad(weight))
                    # NOTE(review): ss is appended unpadded here, unlike the
                    # eval branch which pads it — confirm this is intended
                    ssBatch.append(ss)
                self._index_in_epoch += 1
                if self._index_in_epoch == self._numData:
                    # epoch wrap: restart and reshuffle
                    self._index_in_epoch = 0
                    self.shuffle()
            dirBatch = np.array(dirBatch)
            gtBatch = np.array(gtBatch)
            ssBatch = np.array(ssBatch)
            weightBatch = np.array(weightBatch)
            # whole-batch horizontal flip augmentation (50% chance)
            if self._flip and np.random.uniform() > 0.5:
                for i in range(len(dirBatch)):
                    for j in range(2):
                        dirBatch[i,:,:,j] = np.fliplr(dirBatch[i,:,:,j])
                    # x-component of the direction field changes sign on flip
                    dirBatch[i, :, :, 0] = -1 * dirBatch[i, :, :, 0]
                    ssBatch[i] = np.fliplr(ssBatch[i])
                    gtBatch[i] = np.fliplr(gtBatch[i])
                    weightBatch[i] = np.fliplr(weightBatch[i])
            return dirBatch, gtBatch, weightBatch, ssBatch, idBatch
        else:
            for example in self._paths[self._index_in_epoch:min(self._index_in_epoch + self._batchSize, self._numData)]:
                dirBatch.append(self.pad((sio.loadmat(example[1])['dir_map']).astype(float)))
                idBatch.append(example[0])
                # NOTE(review): eval entries built in set_paths have only two
                # elements, so example[2] would raise IndexError — confirm
                ss = (sio.loadmat(example[2])['mask']).astype(float)
                ss = np.sum(ss[:, :, self._indices], 2)
                ssBatch.append(self.pad(ss))
            # imageBatch = np.array(imageBatch)
            dirBatch = np.array(dirBatch)
            ssBatch = np.array(ssBatch)
            # return imageBatch, dirBatch, ssBatch, idBatch
            self._index_in_epoch += self._batchSize
            return dirBatch, ssBatch, idBatch
    def total_samples(self):
        """Total number of samples registered by set_paths."""
        return self._numData
    def image_scaling(self, rgb_scaled):
        """Normalize an RGB image in place by the VGG channel means / 128."""
        # if self._dataset == "cityscapes":
        #     rgb_scaled = skimage.transform.pyramid_reduce(rgb_scaled, sigma=0.001)
        #rgb_scaled = skimage.transform.rescale(rgb_scaled, 0.5)
        rgb_scaled[:,:,0] = (rgb_scaled[:,:,0] - VGG_MEAN[0])/128
        rgb_scaled[:,:,1] = (rgb_scaled[:,:,1] - VGG_MEAN[1])/128
        rgb_scaled[:,:,2] = (rgb_scaled[:,:,2] - VGG_MEAN[2])/128
        return rgb_scaled
        # NOTE(review): everything below is unreachable (after return) and
        # references `tf`, which is not imported in this module — dead code.
        # Convert RGB to BGR
        red, green, blue = tf.split(3, 3, rgb_scaled)
        # assert red.get_shape().as_list()[1:] == [224, 224, 1]
        # assert green.get_shape().as_list()[1:] == [224, 224, 1]
        # assert blue.get_shape().as_list()[1:] == [224, 224, 1]
        #bgr = tf.concat(3, [
        #    blue - VGG_MEAN[0],
        #    green - VGG_MEAN[1],
        #    red - VGG_MEAN[2],
        #])
        # assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
    def pad(self, data):
        """Zero-pad a 2D or 3D array at the bottom/right to the configured
        padHeight x padWidth; returns data unchanged when padding is disabled."""
        if self._padHeight and self._padWidth:
            if data.ndim == 3:
                npad = ((0,self._padHeight-data.shape[0]),(0,self._padWidth-data.shape[1]),(0,0))
            elif data.ndim == 2:
                npad = ((0, self._padHeight - data.shape[0]), (0, self._padWidth - data.shape[1]))
            padData = np.pad(data, npad, mode='constant', constant_values=0)
        else:
            padData = data
        return padData
| {
"content_hash": "b4333cfb0bd64dcd3136e68c8136b155",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 120,
"avg_line_length": 38.05161290322581,
"alnum_prop": 0.5066124109867752,
"repo_name": "min2209/dwt",
"id": "f9b35869d76ccfc31b6700a49bdd2d49b30f8076",
"size": "5898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WTN/ioUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "5938"
},
{
"name": "Python",
"bytes": "103353"
}
],
"symlink_target": ""
} |
"""A library for integrating pyOpenSSL with CherryPy.
The OpenSSL module must be importable for SSL functionality.
You can obtain it from http://pyopenssl.sourceforge.net/
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
SSLAdapter. There are two ways to use SSL:
Method One:
ssl_adapter.context: an instance of SSL.Context.
If this is not None, it is assumed to be an SSL.Context instance,
and will be passed to SSL.Connection on bind(). The developer is
responsible for forming a valid Context object. This approach is
to be preferred for more flexibility, e.g. if the cert and key are
streams instead of files, or need decryption, or SSL.SSLv3_METHOD
is desired instead of the default SSL.SSLv23_METHOD, etc. Consult
the pyOpenSSL documentation for complete options.
Method Two (shortcut):
ssl_adapter.certificate: the filename of the server SSL certificate.
ssl_adapter.private_key: the filename of the server's private key file.
Both are None by default. If ssl_adapter.context is None, but .private_key
and .certificate are both given and valid, they will be read, and the
context will be automatically created from them.
ssl_adapter.certificate_chain: (optional) the filename of CA's intermediate
certificate bundle. This is needed for cheaper "chained root" SSL
certificates, and should be left as None if not required.
"""
import socket
import threading
import time
#from cherrypy import wsgiserver
from django_wsgiserver import wsgiserver
try:
from OpenSSL import SSL
from OpenSSL import crypto
except ImportError:
SSL = None
class SSL_fileobject(wsgiserver.CP_fileobject):
    """SSL file object attached to a socket object.

    Wraps every read/write in _safe_call so transient pyOpenSSL
    WantRead/WantWrite conditions are retried and SSL errors are mapped
    onto the wsgiserver exception types. Python 2 syntax throughout.
    """
    # seconds before a retried SSL call gives up with socket.timeout
    ssl_timeout = 3
    # sleep between retries of WantRead/WantWrite conditions
    ssl_retry = .01
    def _safe_call(self, is_reader, call, *args, **kwargs):
        """Wrap the given call with SSL error-trapping.

        is_reader: if False EOF errors will be raised. If True, EOF errors
        will return "" (to emulate normal sockets).
        """
        start = time.time()
        while True:
            try:
                return call(*args, **kwargs)
            except SSL.WantReadError:
                # Sleep and try again. This is dangerous, because it means
                # the rest of the stack has no way of differentiating
                # between a "new handshake" error and "client dropped".
                # Note this isn't an endless loop: there's a timeout below.
                time.sleep(self.ssl_retry)
            except SSL.WantWriteError:
                time.sleep(self.ssl_retry)
            except SSL.SysCallError, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""
                # first element of SysCallError args is the errno
                errnum = e.args[0]
                if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
                    return ""
                raise socket.error(errnum)
            except SSL.Error, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""
                # dig the reason string out of pyOpenSSL's nested error tuple
                thirdarg = None
                try:
                    thirdarg = e.args[0][0][2]
                except IndexError:
                    pass
                if thirdarg == 'http request':
                    # The client is talking HTTP to an HTTPS server.
                    raise wsgiserver.NoSSLError()
                raise wsgiserver.FatalSSLAlert(*e.args)
            except:
                raise
            # only reached after a retried (WantRead/WantWrite) iteration
            if time.time() - start > self.ssl_timeout:
                raise socket.timeout("timed out")
    def recv(self, *args, **kwargs):
        # Drain the SSL record buffer: keep reading while bytes are pending
        # so a single recv returns everything already decrypted.
        buf = []
        r = super(SSL_fileobject, self).recv
        while True:
            data = self._safe_call(True, r, *args, **kwargs)
            buf.append(data)
            p = self._sock.pending()
            if not p:
                return "".join(buf)
    def sendall(self, *args, **kwargs):
        # Error-trapped pass-through to the buffered sendall.
        return self._safe_call(False, super(SSL_fileobject, self).sendall,
                               *args, **kwargs)
    def send(self, *args, **kwargs):
        # Error-trapped pass-through to the buffered send.
        return self._safe_call(False, super(SSL_fileobject, self).send,
                               *args, **kwargs)
class SSLConnection:
    """A thread-safe wrapper for an SSL.Connection.

    *args: the arguments to create the wrapped SSL.Connection(*args).

    Each proxied method acquires an RLock around the underlying
    pyOpenSSL call so the connection can be shared between threads.
    """
    def __init__(self, *args):
        self._ssl_conn = SSL.Connection(*args)
        self._lock = threading.RLock()
        # Generate one lock-guarded delegate per SSL.Connection method.
        # NOTE(review): this loop and exec run inside __init__, so the
        # generated defs appear to bind only in the local scope of each
        # __init__ call rather than as class attributes — confirm that
        # attribute access on instances actually reaches these delegates.
        for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
                  'renegotiate', 'bind', 'listen', 'connect', 'accept',
                  'setblocking', 'fileno', 'close', 'get_cipher_list',
                  'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
                  'makefile', 'get_app_data', 'set_app_data', 'state_string',
                  'sock_shutdown', 'get_peer_certificate', 'want_read',
                  'want_write', 'set_connect_state', 'set_accept_state',
                  'connect_ex', 'sendall', 'settimeout', 'gettimeout'):
            exec("""def %s(self, *args):
            self._lock.acquire()
            try:
                return self._ssl_conn.%s(*args)
            finally:
                self._lock.release()
""" % (f, f))
    def shutdown(self, *args):
        # Hand-written (not generated) because the arg list differs; see below.
        self._lock.acquire()
        try:
            # pyOpenSSL.socket.shutdown takes no args
            return self._ssl_conn.shutdown()
        finally:
            self._lock.release()
class pyOpenSSLAdapter(wsgiserver.SSLAdapter):
    """A wrapper for integrating pyOpenSSL with CherryPy."""
    def __init__(self, certificate, private_key, certificate_chain=None):
        # certificate / private_key: filenames; certificate_chain: optional
        # CA intermediate bundle filename (see module docstring, Method Two)
        if SSL is None:
            raise ImportError("You must install pyOpenSSL to use HTTPS.")
        self.context = None
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain
        self._environ = None
    def bind(self, sock):
        """Wrap and return the given socket."""
        if self.context is None:
            # lazily build the SSL.Context from the cert/key filenames
            self.context = self.get_context()
        conn = SSLConnection(self.context, sock)
        # cache the static WSGI SSL entries once; copied per request in wrap()
        self._environ = self.get_environ()
        return conn
    def wrap(self, sock):
        """Wrap and return the given socket, plus WSGI environ entries."""
        return sock, self._environ.copy()
    def get_context(self):
        """Return an SSL.Context from self attributes."""
        # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
        c = SSL.Context(SSL.SSLv23_METHOD)
        c.use_privatekey_file(self.private_key)
        if self.certificate_chain:
            c.load_verify_locations(self.certificate_chain)
        c.use_certificate_file(self.certificate)
        return c
    def get_environ(self):
        """Return WSGI environ entries to be merged into each request."""
        ssl_environ = {
            "HTTPS": "on",
            # pyOpenSSL doesn't provide access to any of these AFAICT
            ## 'SSL_PROTOCOL': 'SSLv2',
            ## SSL_CIPHER 	string 	The cipher specification name
            ## SSL_VERSION_INTERFACE 	string 	The mod_ssl program version
            ## SSL_VERSION_LIBRARY 	string 	The OpenSSL program version
            }
        if self.certificate:
            # Server certificate attributes
            # NOTE(review): the file handle opened here is never explicitly
            # closed; it is left to garbage collection.
            cert = open(self.certificate, 'rb').read()
            cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
            ssl_environ.update({
                'SSL_SERVER_M_VERSION': cert.get_version(),
                'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
                ## 'SSL_SERVER_V_START': Validity of server's certificate (start time),
                ## 'SSL_SERVER_V_END': Validity of server's certificate (end time),
                })
            for prefix, dn in [("I", cert.get_issuer()),
                               ("S", cert.get_subject())]:
                # X509Name objects don't seem to have a way to get the
                # complete DN string. Use str() and slice it instead,
                # because str(dn) == "<X509Name object '/C=US/ST=...'>"
                dnstr = str(dn)[18:-2]
                wsgikey = 'SSL_SERVER_%s_DN' % prefix
                ssl_environ[wsgikey] = dnstr
                # The DN should be of the form: /k1=v1/k2=v2, but we must allow
                # for any value to contain slashes itself (in a URL).
                # Parse right-to-left so '=' inside values is handled.
                while dnstr:
                    pos = dnstr.rfind("=")
                    dnstr, value = dnstr[:pos], dnstr[pos + 1:]
                    pos = dnstr.rfind("/")
                    dnstr, key = dnstr[:pos], dnstr[pos + 1:]
                    if key and value:
                        wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
                        ssl_environ[wsgikey] = value
        return ssl_environ
    def makefile(self, sock, mode='r', bufsize=-1):
        # SSL connections get the retry/error-mapping file object; plain
        # sockets fall back to the standard CherryPy file object.
        if SSL and isinstance(sock, SSL.ConnectionType):
            timeout = sock.gettimeout()
            f = SSL_fileobject(sock, mode, bufsize)
            f.ssl_timeout = timeout
            return f
        else:
            return wsgiserver.CP_fileobject(sock, mode, bufsize)
| {
"content_hash": "ae366753c527988246b79c0e7e222358",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 86,
"avg_line_length": 38.925619834710744,
"alnum_prop": 0.560615711252654,
"repo_name": "splunk/splunk-webframework",
"id": "11c63c217a95fef2c81f3a81a53cae310f96f395",
"size": "9420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/splunkdj/management/commands/wsgiserver/server/ssl_pyopenssl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1808"
},
{
"name": "CSS",
"bytes": "122646"
},
{
"name": "HTML",
"bytes": "113362"
},
{
"name": "JavaScript",
"bytes": "5135595"
},
{
"name": "Python",
"bytes": "6298367"
},
{
"name": "Shell",
"bytes": "1368"
}
],
"symlink_target": ""
} |
"""This example adds a user to a team by creating an association between them.
To determine which teams exists, run get_all_teams.py. To determine which
users exist, run get_all_users.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
TEAM_ID = 'INSERT_TEAM_ID_HERE'
USER_IDS = ['INSERT_USER_IDS_TO_ASSOCIATE_TO_TEAM_HERE']
def main(client, team_id, user_ids):
  """Create a user-team association for each user id on the given team.

  Args:
    client: an initialized DfpClient.
    team_id: the id of the team to associate users with.
    user_ids: list of user ids to add to the team.
  """
  # Initialize appropriate service.
  user_team_association_service = client.GetService(
      'UserTeamAssociationService', version='v201508')
  # Build one association object per user id.
  user_team_associations = []
  for user_id in user_ids:
    user_team_associations.append(
        {
            'teamId': team_id,
            'userId': user_id
        })
  # Create the user team association on the server.
  user_team_associations = (
      user_team_association_service.createUserTeamAssociations(
          user_team_associations))
  # Display results.
  if user_team_associations:
    for user_team_association in user_team_associations:
      print ('A user team association between user with ID \'%s\' and team with'
             ' ID \'%s\'was created.' % (user_team_association['userId'],
                                         user_team_association['teamId']))
  else:
    print 'No user team associations created.'
if __name__ == '__main__':
  # Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, TEAM_ID, USER_IDS)
| {
"content_hash": "f301549405609dd09f2092472c7f0840",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 33.05882352941177,
"alnum_prop": 0.6868327402135231,
"repo_name": "wubr2000/googleads-python-lib",
"id": "b19ee3e9e80b4eea5facaaa3591e34708566062a",
"size": "2304",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201508/user_team_association_service/create_user_team_associations.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SelectedValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``selected`` property of ``splom`` traces."""

    def __init__(self, plotly_name="selected", parent_name="splom", **kwargs):
        # Callers may override the data class name and docs via **kwargs;
        # pop them here so they are not forwarded twice.
        data_class_str = kwargs.pop("data_class_str", "Selected")
        data_docs = kwargs.pop(
            "data_docs",
            """
            marker
                :class:`plotly.graph_objects.splom.selected.Mar
                ker` instance or dict with compatible
                properties
""",
        )
        super(SelectedValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
| {
"content_hash": "2c13cc1b540dddb13e30b1a508099e84",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 34,
"alnum_prop": 0.5485294117647059,
"repo_name": "plotly/python-api",
"id": "d518a7f9a8f1e877ea878e59f098271bb2327aad",
"size": "680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/splom/_selected.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import unittest
class CalcTest(unittest.TestCase):
def test_calc_subtest(self):
for i in [2, 3, 4, 5]:
import ipdb;ipdb.set_trace()
with self.subTest(i=i):
self.assertEqual(i % 2, 0)
| {
"content_hash": "4d7a10c6679271bc629470dcfff5900e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 42,
"avg_line_length": 23.8,
"alnum_prop": 0.5546218487394958,
"repo_name": "altnight/individual-sandbox",
"id": "848ede0f1371b917346c959046b493cb5e705189",
"size": "238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diary/20180123/tests_ipdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "62"
},
{
"name": "Dockerfile",
"bytes": "359"
},
{
"name": "HTML",
"bytes": "28098"
},
{
"name": "JavaScript",
"bytes": "33696"
},
{
"name": "Makefile",
"bytes": "4501"
},
{
"name": "Python",
"bytes": "30715"
},
{
"name": "Ruby",
"bytes": "1803"
},
{
"name": "Shell",
"bytes": "764"
}
],
"symlink_target": ""
} |
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
This script initializes RackHD stack after install.
- loads SKU packs
- loads default SKU
- sets auth user
- restarts nodes for discovery
- discovers switches and/or PDUs if available
- checks node discovery
- assigns node OBM settings
- checks pollers for data
'''
import os
import sys
import subprocess
import json
import time
import unittest
# set path to common libraries
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/common")
import fit_common
import pdu_lib
# Locals
MAX_CYCLES = 60
class rackhd_stack_init(unittest.TestCase):
    def test01_preload_sku_packs(self):
        """Clone, build, and upload all SKU packs, then verify installation
        by cross-checking the server SKU list against local config.json files.
        """
        print "**** Processing SKU Packs"
        # Load SKU packs from GitHub
        # NOTE(review): shell=True with config-supplied URLs — commands are
        # built by string concatenation from GLOBAL_CONFIG values.
        subprocess.call("rm -rf temp.sku; rm -rf on-skupack", shell=True)
        os.mkdir("on-skupack")
        # download all SKU repos and merge into on-skupack
        for url in fit_common.GLOBAL_CONFIG['repos']['skupack']:
            print "**** Cloning SKU Packs from " + url
            subprocess.call("git clone " + url + " temp.sku", shell=True)
            subprocess.call('cp -R temp.sku/* on-skupack; rm -rf temp.sku', shell=True)
        # build SKU packs (top-level directories only; note the `break`)
        for subdir, dirs, files in os.walk('on-skupack'):
            for skus in dirs:
                if skus not in ["debianstatic", ".git"] and os.path.isfile('on-skupack/' + skus + '/config.json'):
                    subprocess.call("cd on-skupack;mkdir -p " + skus + "/tasks " + skus + "/static "
                                    + skus + "/workflows " + skus + "/templates", shell=True)
                    subprocess.call("cd on-skupack; ./build-package.bash "
                                    + skus + " " + skus + " >/dev/null 2>&1", shell=True)
            break
        # upload SKU packs to ORA
        print "**** Loading SKU Packs to server"
        for subdir, dirs, files in os.walk('on-skupack/tarballs'):
            for skupacks in files:
                print "\n**** Loading SKU Pack for " + skupacks
                # `file()` is the Python 2 builtin open
                fit_common.rackhdapi("/api/2.0/skus/pack", action="binary-post",
                                     payload=file(fit_common.TEST_PATH + "on-skupack/tarballs/" + skupacks).read())
            break
        print "\n"
        # check SKU directory against source files
        errorcount = ""
        skulist = json.dumps(fit_common.rackhdapi("/api/2.0/skus")['json'])
        for subdir, dirs, files in os.walk('on-skupack'):
            for skus in dirs:
                if skus not in ["debianstatic", ".git", "packagebuild", "tarballs"] and \
                        os.path.isfile('on-skupack/' + skus + '/config.json'):
                    try:
                        configfile = json.loads(open("on-skupack/" + skus + "/config.json").read())
                        # check if sku pack got installed
                        if configfile['name'] not in skulist:
                            print "FAILURE - Missing SKU: " + configfile['name']
                            errorcount += " Missing SKU: " + configfile['name']
                    except:
                        # An unparseable config.json means the pack could not have installed
                        print "FAILURE - Corrupt config.json in SKU Pack: " + str(skus) + " - not loaded"
                        errorcount += " Corrupt config.json in SKU Pack: " + str(skus)
            break
        self.assertEqual(errorcount, "", errorcount)
    def test02_preload_default_sku(self):
        """Install a catch-all SKU so unsupported compute nodes still match."""
        # Load default SKU for unsupported compute nodes
        print '**** Installing default SKU'
        # The single rule matches any node that reports a BMC IP address.
        payload = {
            "name": "Unsupported-Compute",
            "rules": [
                {
                    "path": "bmc.IP Address"
                }
            ]
        }
        api_data = fit_common.rackhdapi("/api/2.0/skus", action='post', payload=payload)
        self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got '
                         + str(api_data['status']))
    def test03_set_auth_user(self):
        """Create the default Administrator API user on the appliance.

        Writes a local auth.json, copies it to the ORA host, and POSTs it
        to the users endpoint over HTTPS. Failure is non-fatal (warn only).
        """
        print '**** Installing default admin user'
        fit_common.remote_shell('rm auth.json')
        auth_json = open('auth.json', 'w')
        auth_json.write('{"username":"' + fit_common.GLOBAL_CONFIG["api"]["admin_user"] + '", "password":"' \
                        + fit_common.GLOBAL_CONFIG["api"]["admin_pass"] + '", "role":"Administrator"}')
        auth_json.close()
        fit_common.scp_file_to_ora('auth.json')
        rc = fit_common.remote_shell("curl -ks -X POST -H 'Content-Type:application/json' https://localhost:" \
                                     + str(fit_common.GLOBAL_CONFIG['ports']['https']) + "/api/2.0/users -d @auth.json")
        if rc['exitcode'] != 0:
            print "ALERT: Auth admin user not set! Please manually set the admin user account if https access is desired."
    def test04_power_on_nodes(self):
        """Power on (or power cycle) all nodes to kick off discovery."""
        # This powers on nodes via PDU or, if no PDU, power cycles nodes via IPMI to start discovery
        # ServerTech PDU case
        if pdu_lib.check_pdu_type() != "Unknown":
            print '**** PDU found, powering on PDU outlets'
            self.assertTrue(pdu_lib.pdu_control_compute_nodes("on"), 'Failed to power on all outlets')
            # Wait about 30 seconds for the outlets to all come on and nodes to DHCP
            fit_common.countdown(30)
        # no PDU case
        else:
            print '**** No supported PDU found, restarting nodes using IMPI.'
            # Power off all nodes
            self.assertNotEqual(fit_common.power_control_all_nodes("off"), 0, 'No BMC IP addresses found')
            # Power on all nodes
            self.assertNotEqual(fit_common.power_control_all_nodes("on"), 0, 'No BMC IP addresses found')
    # Optionally install control switch node if present
    @unittest.skipUnless("control" in fit_common.STACK_CONFIG[fit_common.ARGS_LIST['stack']], "")
    def test05_discover_control_switch_node(self):
        """Register the control switch as an auto-discovered SNMP node."""
        print "**** Creating control switch node."
        payload = {
            "type": "switch",
            "name": "Control",
            "autoDiscover": "true",
            "snmpSettings":{
                "host": fit_common.STACK_CONFIG[fit_common.ARGS_LIST['stack']]['control'],
                "community": fit_common.GLOBAL_CONFIG['snmp']['community'],
            }
        }
        api_data = fit_common.rackhdapi("/api/2.0/nodes", action='post', payload=payload)
        self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got '
                         + str(api_data['status']))
    # Optionally install data switch node if present
    @unittest.skipUnless("data" in fit_common.STACK_CONFIG[fit_common.ARGS_LIST['stack']], "")
    def test06_discover_data_switch_node(self):
        """Register the data switch as an auto-discovered SNMP node."""
        print "**** Creating data switch node."
        payload = {
            "type": "switch",
            "name": "Data",
            "autoDiscover": "true",
            "snmpSettings":{
                "host": fit_common.STACK_CONFIG[fit_common.ARGS_LIST['stack']]['data'],
                "community": fit_common.GLOBAL_CONFIG['snmp']['community'],
            }
        }
        api_data = fit_common.rackhdapi("/api/2.0/nodes", action='post', payload=payload)
        self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got '
                         + str(api_data['status']))
    # Optionally install PDU node if present
    @unittest.skipUnless("pdu" in fit_common.STACK_CONFIG[fit_common.ARGS_LIST['stack']], "")
    def test07_discover_pdu_node(self):
        """Register the PDU as an auto-discovered SNMP node."""
        print "**** Creating PDU node."
        payload = {
            "type": "pdu",
            "name": "PDU",
            "autoDiscover": "true",
            "snmpSettings":{
                "host": fit_common.STACK_CONFIG[fit_common.ARGS_LIST['stack']]['pdu'],
                "community": fit_common.GLOBAL_CONFIG['snmp']['community'],
            }
        }
        api_data = fit_common.rackhdapi("/api/2.0/nodes/", action='post', payload=payload)
        self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got '
                         + str(api_data['status']))
    def test08_check_compute_nodes(self):
        """Poll the nodes endpoint until a compute node appears.

        Polls every 30 seconds for up to MAX_CYCLES iterations.
        """
        print "**** Waiting for compute nodes."
        c_index = 0
        for c_index in range(0, MAX_CYCLES):
            if "compute" in fit_common.rackhdapi("/api/2.0/nodes")['text']:
                break
            else:
                time.sleep(30)
        # if the loop ran to exhaustion, c_index == MAX_CYCLES-1 and this fails
        self.assertLess(c_index, MAX_CYCLES-1, "No compute nodes found.")
    def test09_check_discovery(self):
        """Wait for node discovery (all active workflows) to finish."""
        print "**** Waiting for node Discovery to complete.\n",
        # Determine if there are any active workflows. If returned value is true, obmSettings, SKUs
        # and active workflows are all either present or complete. If the returned is false,
        # there was a timeout and all nodes have not obtained obmSetting, SKUs, or all active
        # workflows have not completed.
        # Wait 10 minutes ( MAX_CYCLES * 10 seconds) for this to occur.
        self.assertTrue(self.check_for_active_workflows(MAX_CYCLES), "Node discovery not completed")
def check_for_active_workflows(self, max_time):
'''
Determine if are any active workflows.
:param Time to wait (in 10 second intervals)
:return: True - No active workflows
False - Workflows are active
'''
for _ in range(0, max_time):
nodes_data = fit_common.rackhdapi("/api/2.0/nodes")
if nodes_data['status'] == 200 and len(nodes_data['json']) > 0:
# if there are nodes present, determine if discovery has completed on them
discovery_complete = True
for node in nodes_data['json']:
if node['type'] == 'compute':
self.assertIn('id', node, 'node does not contain id')
node_id = node['id']
# determine if there are any active worlflows. If so, discovery not completed
if fit_common.check_active_workflows(node_id):
discovery_complete = False
break
if discovery_complete:
return True
time.sleep(10)
return False
    def test10_apply_obm_settings(self):
        # Apply out-of-band management (OBM) settings to the compute nodes.
        print "**** Apply OBM setting to compute nodes."
        self.assertTrue(fit_common.apply_obm_settings(), "OBM settings failed.")
    @unittest.skipUnless("bmc" in fit_common.STACK_CONFIG[fit_common.ARGS_LIST['stack']], "")
    @unittest.skip("Skipping 'test10_add_management_server' code incomplete")
    def test11_add_management_server(self):
        # Create a management-server node from the stack's BMC and kick off
        # its discovery workflow. (Currently skipped: code incomplete.)
        print "**** Creating management server."
        usr = ""
        pwd = ""
        # find correct BMC passwords from global config by probing each
        # credential pair with an 'ipmitool fru' call
        for creds in fit_common.GLOBAL_CONFIG['credentials']['bmc']:
            if fit_common.remote_shell('ipmitool -I lanplus -H ' + fit_common.ARGS_LIST['bmc']
                                       + ' -U ' + creds['username'] + ' -P '
                                       + creds['password'] + ' fru')['exitcode'] == 0:
                usr = creds['username']
                pwd = creds['password']
        # create management node using these creds
        payload = {
            "name": "Management Server",
            "type": "mgmt",
            "autoDiscover": "true",
            "ipmi-obm-service": {
                "host": fit_common.ARGS_LIST['bmc'],
                "user": usr,
                "password": pwd
            }
        }
        api_data = fit_common.rackhdapi("/api/2.0/nodes", action='post', payload=payload)
        self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got '
                         + str(api_data['status']))
        # run discovery workflow on the node just created
        payload = {
            "name": "Graph.MgmtSKU.Discovery",
            "options":{"defaults": {"nodeId": api_data['json']['id']}}
        }
        api_data = fit_common.rackhdapi("/api/2.0/nodes/" + api_data['json']['id'] + "/workflows",
                                        action='post', payload=payload)
        self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got '
                         + str(api_data['status']))
    def test12_check_pollers(self):
        print "**** Waiting for pollers."
        # Determine if there are any pollers present. If the return value is true, there are pollers
        # active. If the return value is false, pollers are not active.
        # Wait 10 minutes ( MAX_CYCLES * 10 seconds) for this to occur.
        self.assertTrue(self.check_for_active_pollers(MAX_CYCLES), 'No pollers')
        print "**** Waiting for pollers data."
        # Determine if all the pollers have data. If the return value is true, all pollers have
        # data. If the return value is false, pollers are working but not collecting data.
        # Wait 10 minutes ( MAX_CYCLES * 10 seconds) for this to occur.
        self.assertTrue(self.check_for_active_poller_data(MAX_CYCLES), 'All pollers are not active')
def check_for_active_pollers(self, max_time):
'''
Determine if all poller are active.
:param Time to wait (in 10 second intervals)
:return: True - Poller active
False - Pollers not active
'''
for _ in range(0, max_time):
api_data = fit_common.rackhdapi('/api/2.0/pollers')
if len(api_data['json']) > 0:
return True
time.sleep(30)
return False
def check_for_active_poller_data(self, max_time):
'''
Determine if all poller have data.
:param Time to wait (in 10 second intervals)
:return: True - Poller have data
False - Not all poller have data
'''
poller_list = []
api_data = fit_common.rackhdapi('/api/2.0/pollers')
if api_data:
# set up a list of poller ids
for index in api_data['json']:
poller_list.append(index['id'])
if poller_list != []:
for _ in range(0, max_time):
# move backwards through the list allowing completed poller ids to be popped
# off the list
for i in reversed(range(len(poller_list))):
id = poller_list[i]
poll_data = fit_common.rackhdapi("/api/2.0/pollers/" + id + "/data/current")
# Check if data current returned 200 and data in the poll, if so, remove from list
if poll_data['status'] == 200 and len(poll_data['json']) != 0:
poller_list.pop(i)
if poller_list == []:
# return when all pollers look good
return True
time.sleep(10)
if poller_list != []:
print "Poller IDs with error or no data: {}".format(json.dumps(poller_list, indent=4))
return False
# Run the full stack-initialization suite when invoked as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "5352d90efd696093aabf2f004431e1a0",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 122,
"avg_line_length": 49.109034267912776,
"alnum_prop": 0.5397107333164172,
"repo_name": "tldavies/RackHD",
"id": "6de0ac4a861f5881ad0eb4b3c636bc00a852f181",
"size": "15764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/deploy/rackhd_stack_init.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "696"
},
{
"name": "Python",
"bytes": "706855"
},
{
"name": "Ruby",
"bytes": "6949"
},
{
"name": "Shell",
"bytes": "38109"
}
],
"symlink_target": ""
} |
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class StartedPage(page_module.Page):
  """A page measured from browser startup (startup_url) rather than a
  navigate step."""

  def __init__(self, url, startup_url, page_set):
    super(StartedPage, self).__init__(
        url=url, page_set=page_set, startup_url=startup_url)
    self.archive_data_file = 'data/startup_pages.json'

  def RunNavigateSteps(self, action_runner):
    # No real navigation happens; just let the browser settle after startup.
    action_runner.Wait(10)

  def RunPageInteractions(self, action_runner):
    # Bug fix: this method previously called itself, recursing infinitely.
    # The only "interaction" for startup pages is the post-startup wait.
    self.RunNavigateSteps(action_runner)
class StartupPagesPageSet(page_set_module.PageSet):

  """ Pages for testing starting Chrome with a URL.
  Note that this file can't be used with record_wpr, since record_wpr requires
  a true navigate step, which we do not want for startup testing. Instead use
  record_wpr startup_pages_record to record data for this test.
  """

  def __init__(self):
    super(StartupPagesPageSet, self).__init__(
        archive_data_file='data/startup_pages.json',
        bucket=page_set_module.PARTNER_BUCKET)
    # Trivial blank page - baseline startup cost.
    self.AddUserStory(StartedPage('about:blank', 'about:blank', self))
    # Typical page.
    self.AddUserStory(StartedPage('http://bbc.co.uk', 'http://bbc.co.uk', self))
    # Horribly complex page - stress test!
    self.AddUserStory(StartedPage(
        'http://kapook.com', 'http://kapook.com', self))
| {
"content_hash": "0e5e98d3c8fa79258350c8eb4ea76e26",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 80,
"avg_line_length": 35.39473684210526,
"alnum_prop": 0.7048327137546468,
"repo_name": "Jonekee/chromium.src",
"id": "d5b4633ba91ee24a4eb55c7a75b0a85384d73ea6",
"size": "1507",
"binary": false,
"copies": "9",
"ref": "refs/heads/nw12",
"path": "tools/perf/page_sets/startup_pages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "34522"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9249764"
},
{
"name": "C++",
"bytes": "222763973"
},
{
"name": "CSS",
"bytes": "875874"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Go",
"bytes": "18155"
},
{
"name": "HTML",
"bytes": "27190037"
},
{
"name": "Java",
"bytes": "7645280"
},
{
"name": "JavaScript",
"bytes": "18828195"
},
{
"name": "Makefile",
"bytes": "96270"
},
{
"name": "Objective-C",
"bytes": "1397246"
},
{
"name": "Objective-C++",
"bytes": "7575073"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "248854"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "418340"
},
{
"name": "Python",
"bytes": "8032766"
},
{
"name": "Shell",
"bytes": "464218"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
} |
import socket
from datetime import timedelta, datetime
import psutil
import netifaces as nif
from pyspectator.monitoring import AbcMonitor
from pyspectator.collection import LimitedTimeTable
class NetworkInterface(AbcMonitor):
    """Monitor for a single network interface.

    Resolves the interface's addressing details once at construction time and
    periodically samples its sent/received byte counters, keeping a
    time-bounded history of both.
    """

    def __init__(self, monitoring_latency, stats_interval=None,
                 ip_address=None):
        """
        :param monitoring_latency: refresh period for the I/O counters
        :param stats_interval: how long sent/received samples are retained;
            defaults to one hour
        :param ip_address: IPv4 address of the interface to monitor; when
            None, the address used for outbound traffic is auto-detected
        """
        super().__init__(monitoring_latency)
        self.__name = None
        self.__hardware_address = None
        if ip_address is None:
            ip_address = NetworkInterface.__get_active_ip_address()
        self.__ip_address = ip_address
        self.__broadcast_address = None
        self.__subnet_mask = None
        self.__default_route = None
        self.__bytes_sent = 0
        self.__bytes_recv = 0
        # Get interface name, network mask and broadcast address
        if self.__ip_address is not None:
            for interface in nif.interfaces():
                addresses = nif.ifaddresses(interface)
                try:
                    af_inet = addresses[nif.AF_INET][0]
                    if af_inet['addr'] != self.__ip_address:
                        continue
                    af_link = addresses[nif.AF_LINK][0]
                    self.__name = NetworkInterface.__check_interface_name(
                        interface
                    )
                    self.__hardware_address = af_link['addr']
                    self.__broadcast_address = af_inet['broadcast']
                    self.__subnet_mask = af_inet['netmask']
                    break
                except (IndexError, KeyError):
                    # ignore interfaces, which don't have MAC or IP
                    continue
        # Get gateway address
        if self.name is not None:
            for gateway_info in nif.gateways()[nif.AF_INET]:
                if self.name in gateway_info:
                    self.__default_route = gateway_info[0]
                    break
        # Prepare to collect statistics
        if stats_interval is None:
            stats_interval = timedelta(hours=1)
        self.__bytes_sent_stats = LimitedTimeTable(stats_interval)
        self.__bytes_recv_stats = LimitedTimeTable(stats_interval)
        # Read updating values at first time
        self._monitoring_action()

    @property
    def name(self):
        # OS-level interface name (e.g. "eth0"); None if not resolved.
        return self.__name

    @property
    def hardware_address(self):
        # MAC address of the interface.
        return self.__hardware_address

    @property
    def ip_address(self):
        return self.__ip_address

    @property
    def broadcast_address(self):
        return self.__broadcast_address

    @property
    def subnet_mask(self):
        return self.__subnet_mask

    @property
    def default_route(self):
        return self.__default_route

    @property
    def bytes_sent(self):
        # Total bytes sent, as of the most recent sample.
        return self.__bytes_sent

    @property
    def bytes_recv(self):
        # Total bytes received, as of the most recent sample.
        return self.__bytes_recv

    @property
    def bytes_sent_stats(self):
        # Time-bounded history of bytes_sent samples.
        return self.__bytes_sent_stats

    @property
    def bytes_recv_stats(self):
        # Time-bounded history of bytes_recv samples.
        return self.__bytes_recv_stats

    @classmethod
    def __check_interface_name(cls, name):
        # psutil may use a decorated variant of the netifaces name; prefer
        # the exact match, otherwise the first psutil name containing it.
        net_io = psutil.net_io_counters(pernic=True)
        if name in net_io:
            return name
        for curr_nif_name in net_io:
            if name in curr_nif_name:
                name = curr_nif_name
                break
        return name

    @classmethod
    def __get_active_ip_address(cls):
        # Determine the local address used for outbound traffic by
        # "connecting" a UDP socket to a public address (no packet is sent).
        ip_address = None
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(('8.8.8.8', 80))
            ip_address = s.getsockname()[0]
        except OSError:
            # No network/route available: fall back to None.
            pass
        finally:
            # Bug fix: the socket was previously closed only on failure and
            # leaked on the success path; always close it.
            s.close()
        return ip_address

    def _monitoring_action(self):
        # Sample per-NIC I/O counters and record them in the histories.
        net_io = psutil.net_io_counters(pernic=True)
        if self.name in net_io:
            net_io = net_io[self.name]
            now = datetime.now()
            self.__bytes_sent = net_io.bytes_sent
            # Bug fix: sent-byte samples were previously written into the
            # *recv* history, so bytes_sent_stats stayed empty forever.
            self.__bytes_sent_stats[now] = self.bytes_sent
            self.__bytes_recv = net_io.bytes_recv
            self.__bytes_recv_stats[now] = self.bytes_recv
__all__ = ['NetworkInterface']
| {
"content_hash": "166ba9adb13ccce1a9a30f40cd2856eb",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 74,
"avg_line_length": 31.676923076923078,
"alnum_prop": 0.5599805730937348,
"repo_name": "uzumaxy/pyspectator",
"id": "7f95466486a432f0a6a469f0ec37dcb2f07af475",
"size": "4118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspectator/network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "37384"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals, division, absolute_import
import os
import sys
import subprocess
import tempfile
import shutil
import json
from .compat import open
from .exceptions import err_exit
from .utils import json_load_raise_on_duplicates
from .utils.resolver import is_container_id, resolve_path
from .cli import try_call
import dxpy
ASSET_BUILDER_PRECISE = "app-create_asset_precise"
ASSET_BUILDER_TRUSTY = "app-create_asset_trusty"
class AssetBuilderException(Exception):
    """Raised by the asset-building helpers in this module when an asset
    cannot be parsed, validated, or built."""
def parse_asset_spec(src_dir):
    """Load and return the parsed dxasset.json from *src_dir*.

    :param src_dir: path to the asset source directory
    :raises AssetBuilderException: if src_dir lacks a dxasset.json file or
        the file is not valid JSON (duplicate keys included)
    """
    if not os.path.isdir(src_dir):
        err_exit(src_dir + " is not a valid directory.")
    if not os.path.exists(os.path.join(src_dir, "dxasset.json")):
        raise AssetBuilderException("'" + src_dir + "' is not a valid DNAnexus asset source directory." +
                                    " It does not contain a 'dxasset.json' file.")
    with open(os.path.join(src_dir, "dxasset.json")) as asset_desc:
        try:
            return json_load_raise_on_duplicates(asset_desc)
        except Exception as e:
            # Bug fix: use str(e) instead of e.message; BaseException.message
            # was deprecated in Python 2.6 and removed in Python 3.
            raise AssetBuilderException("Could not parse dxasset.json file as JSON: " + str(e))
def validate_conf(asset_conf):
    """
    Validate *asset_conf* (the parsed dxasset.json) in place, filling in
    defaults for the optional fields.

    Required fields: "name", "version", "title", "description".
    Optional fields: "release" (defaults to "12.04"; must be "12.04" or
    "14.04") and "distribution" (defaults to "Ubuntu"; only "Ubuntu" is
    accepted).

    Example configuration:
    {
        "name": "asset_library_name",
        "title": "A human readable name",
        "description": "A detailed description about the asset",
        "version": "0.0.1",
        "distribution": "Ubuntu",# (Optional)
        "release": "12.04",
        "execDepends":
            [
                {"name": "samtools", "package_manager": "apt"},
                {"name": "bamtools"},
                {"name": "bio", "package_manager": "gem", "version": "1.4.3"},
                {"name": "pysam","package_manager": "pip", "version": "0.7.4"},
                {"name": "Bio::SeqIO", "package_manager": "cpan", "version": "1.006924"}
            ]
    }

    :raises AssetBuilderException: if a required field is missing or an
        optional field has an unsupported value
    """
    if 'name' not in asset_conf:
        raise AssetBuilderException('The asset configuration does not contain the required field "name".')
    if 'release' not in asset_conf:
        asset_conf['release'] = "12.04"
    elif asset_conf['release'] != '12.04' and asset_conf['release'] != '14.04':
        raise AssetBuilderException('The "release" field value should be either "12.04" or "14.04".')
    if 'version' not in asset_conf:
        raise AssetBuilderException('The asset configuration does not contain the required field "version". ')
    if 'title' not in asset_conf:
        raise AssetBuilderException('The asset configuration does not contain the required field "title". ')
    if 'description' not in asset_conf:
        raise AssetBuilderException('The asset configuration does not contain the required field "description".')
    if 'distribution' in asset_conf:
        if asset_conf['distribution'] != 'Ubuntu':
            raise AssetBuilderException('The distribution may only take the value "Ubuntu".')
    else:
        asset_conf['distribution'] = "Ubuntu"
def dx_upload(file_name, dest_project, target_folder, json_out):
    """Upload *file_name* to the platform and return the remote file handle.

    :param json_out: when True, suppress the interactive progress display
    :raises: re-raises whatever dxpy.upload_local_file raised, after logging
        the file name to stderr
    """
    try:
        maybe_progress_kwargs = {} if json_out else dict(show_progress=True)
        remote_file = dxpy.upload_local_file(file_name,
                                             project=dest_project,
                                             folder=target_folder,
                                             wait_on_close=True,
                                             **maybe_progress_kwargs)
        return remote_file
    except Exception:
        # 'except Exception' (not a bare except) so KeyboardInterrupt and
        # SystemExit propagate untouched; the original error is re-raised.
        print("Failed to upload the file " + file_name, file=sys.stderr)
        raise
def get_asset_make(src_dir, dest_folder, target_folder, json_out):
    """Upload the asset's Makefile (or makefile) if present; else return None."""
    for candidate in ("Makefile", "makefile"):
        makefile_path = os.path.join(src_dir, candidate)
        if os.path.exists(makefile_path):
            return dx_upload(makefile_path, dest_folder, target_folder, json_out)
    return None
def parse_destination(dest_str):
    """
    Parses dest_str, which is (roughly) of the form
    PROJECT:/FOLDER/NAME, and returns a tuple (project, folder, name)

    Any of the three components may come back as None when dest_str
    does not specify it.
    """
    # Interpret strings of form "project-XXXX" (no colon) as project. If
    # we pass these through to resolve_path they would get interpreted
    # as folder names...
    if is_container_id(dest_str):
        return (dest_str, None, None)
    # ...otherwise, defer to resolver.resolve_path. This handles the
    # following forms:
    #
    # /FOLDER/
    # /ENTITYNAME
    # /FOLDER/ENTITYNAME
    # [PROJECT]:
    # [PROJECT]:/FOLDER/
    # [PROJECT]:/ENTITYNAME
    # [PROJECT]:/FOLDER/ENTITYNAME
    return try_call(resolve_path, dest_str)
def get_asset_tarball(asset_name, src_dir, dest_project, dest_folder, json_out):
    """
    Archive the "resources" directory under *src_dir* (if any) and upload the
    archive to the platform.

    Returns the uploaded remote file handle, or None when there is no
    "resources" directory.
    """
    resources_dir = os.path.join(src_dir, "resources")
    if not os.path.isdir(resources_dir):
        return None
    staging_dir = tempfile.mkdtemp()
    try:
        archive_path = os.path.join(staging_dir, asset_name + "_resources.tar.gz")
        # Archive the *contents* of resources/ ("." relative to -C).
        subprocess.check_call(
            ["tar", "-czf", archive_path, "-C", resources_dir, "."])
        return dx_upload(archive_path, dest_project, dest_folder, json_out)
    finally:
        # Always remove the staging directory, even if tar or upload failed.
        shutil.rmtree(staging_dir)
def build_asset(args):
    """Drive an asset build end to end: upload the inputs, run the asset
    builder app, and report the resulting asset-bundle id.

    Exits the process with status 1 on any failure, and with status 3 when a
    watched builder job itself fails.
    """
    if args.src_dir is None:
        args.src_dir = os.getcwd()
    dest_project_name = None
    dest_folder_name = None
    dest_asset_name = None
    make_file = None
    asset_file = None
    conf_file = None
    try:
        asset_conf = parse_asset_spec(args.src_dir)
        validate_conf(asset_conf)
        asset_conf_file = os.path.join(args.src_dir, "dxasset.json")
        # Fails early if the user is not logged in.
        dxpy.api.system_whoami()
        dest_project_name, dest_folder_name, dest_asset_name = parse_destination(args.destination)
        if dest_project_name is None:
            raise AssetBuilderException("Can't build an asset without specifying a destination project; \
                                        please use the -d/--destination flag to explicitly specify a project")
        if dest_asset_name is None:
            dest_asset_name = asset_conf['name']
        # If dx build_asset is launched form a job, set json flag to True to avoid watching the job log
        if dxpy.JOB_ID:
            args.json = True
        if not args.json:
            print("Uploading input files for the AssetBuilder", file=sys.stderr)
        conf_file = dx_upload(asset_conf_file, dest_project_name, dest_folder_name, args.json)
        make_file = get_asset_make(args.src_dir, dest_project_name, dest_folder_name, args.json)
        asset_file = get_asset_tarball(asset_conf['name'], args.src_dir, dest_project_name,
                                       dest_folder_name, args.json)
        # Makefile and resources tarball are optional inputs.
        input_hash = {"conf_json": dxpy.dxlink(conf_file)}
        if asset_file:
            input_hash["custom_asset"] = dxpy.dxlink(asset_file)
        if make_file:
            input_hash["asset_makefile"] = dxpy.dxlink(make_file)
        builder_run_options = {
            "name": dest_asset_name,
            "input": input_hash
        }
        # Add the default destination project to app run options, if it is not run from a job
        if not dxpy.JOB_ID:
            builder_run_options["project"] = dest_project_name
        if 'instanceType' in asset_conf:
            builder_run_options["systemRequirements"] = {"*": {"instanceType": asset_conf["instanceType"]}}
        if dest_folder_name:
            builder_run_options["folder"] = dest_folder_name
        # Pick the builder app matching the requested Ubuntu release
        # (validate_conf guarantees one of these two values).
        if asset_conf['release'] == "12.04":
            app_run_result = dxpy.api.app_run(ASSET_BUILDER_PRECISE, input_params=builder_run_options)
        elif asset_conf['release'] == "14.04":
            app_run_result = dxpy.api.app_run(ASSET_BUILDER_TRUSTY, input_params=builder_run_options)
        job_id = app_run_result["id"]
        if not args.json:
            print("\nStarted job '" + str(job_id) + "' to build the asset bundle.\n", file=sys.stderr)
        if args.watch:
            try:
                subprocess.check_call(["dx", "watch", job_id])
            except subprocess.CalledProcessError as e:
                if e.returncode == 3:
                    # Some kind of failure to build the asset. The reason
                    # for the failure is probably self-evident from the
                    # job log (and if it's not, the CalledProcessError
                    # is not informative anyway), so just propagate the
                    # return code without additional remarks.
                    sys.exit(3)
                else:
                    raise e
        dxpy.DXJob(job_id).wait_on_done(interval=1)
        asset_id, _ = dxpy.get_dxlink_ids(dxpy.api.job_describe(job_id)['output']['asset_bundle'])
        if args.json:
            print(json.dumps({"id": asset_id}))
        else:
            print("\nAsset bundle '" + asset_id +
                  "' is built and can now be used in your app/applet's dxapp.json\n", file=sys.stderr)
    except Exception as de:
        print(de.__class__.__name__ + ": " + str(de), file=sys.stderr)
        sys.exit(1)
    finally:
        # Best-effort cleanup of the uploaded inputs; failures are ignored.
        if conf_file:
            try:
                conf_file.remove()
            except:
                pass
        if make_file:
            try:
                make_file.remove()
            except:
                pass
        if asset_file:
            try:
                asset_file.remove()
            except:
                pass
| {
"content_hash": "8561c27af9b84c7b6b5d523478ef3539",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 113,
"avg_line_length": 40.11244979919679,
"alnum_prop": 0.5831998398077693,
"repo_name": "jhuttner/dx-toolkit",
"id": "ac003be72050638100aa830a3a059fde013b9619",
"size": "10663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/dxpy/asset_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3198"
},
{
"name": "C",
"bytes": "6957"
},
{
"name": "C++",
"bytes": "1880260"
},
{
"name": "CMake",
"bytes": "26162"
},
{
"name": "Groovy",
"bytes": "8855"
},
{
"name": "Java",
"bytes": "2177401"
},
{
"name": "Makefile",
"bytes": "50221"
},
{
"name": "NSIS",
"bytes": "17861"
},
{
"name": "Perl",
"bytes": "46855"
},
{
"name": "PowerShell",
"bytes": "1442"
},
{
"name": "Python",
"bytes": "2261586"
},
{
"name": "R",
"bytes": "550095"
},
{
"name": "Ruby",
"bytes": "78045"
},
{
"name": "Shell",
"bytes": "58977"
}
],
"symlink_target": ""
} |
"""
Note: we can NOT assume that a Gmail account and a Drive account for a user share the same Google
profile, because a person can connect his personal Gmail account and his company's Drive.
"""
from requests_oauthlib import OAuth2Session
from tokens.models import BearerToken, Provider
from ..models import GmailProfile
class GmailProfiler:
    """
    Query Google on behalf of a user to fetch Gmail profile details and store
    them in a GmailProfile.
    """

    def __init__(self, user, token_set=None):
        self.user = user
        self.provider = Provider.objects.get(name=Provider.NAME_GMAIL)
        if not token_set:
            # Fall back to the stored bearer token for this user/provider.
            token_set = BearerToken.objects.get(
                user=user, provider=self.provider).token_set
        self.token_set = token_set

    def fetch_profile_details(self):
        """
        Query the user profile and store it in Moogle.
        """
        self._store_profile_details(self._query_profile_details())

    def _query_profile_details(self):
        # Returns the decoded JSON response as a dictionary.
        session = OAuth2Session(client_id=self.provider.client_id,
                                token=self.token_set)
        return session.get('https://www.googleapis.com/userinfo/v2/me').json()

    def _store_profile_details(self, data):
        """
        Store profile details into `GmailProfile`.

        Parameters:
        data -- a dictionary like:
            {
                "locale": "en",
                "family_name": "Doe",
                "email": "johndoe@gmail.com",
                "link": "https://profiles.google.com/353452857839983457489",
                "verified_email": true,
                "id": "353452857839983457489",
                "gender": "male",
                "given_name": "John",
                "name": "John Doe"
            }
        """
        profile, _ = GmailProfile.objects.get_or_create(user=self.user)
        profile.verified_email = data.get('verified_email', None)
        # Map response keys onto model attributes; missing keys become ''.
        for attr_name, key in (('family_name', 'family_name'),
                               ('given_name', 'given_name'),
                               ('name', 'name'),
                               ('gender', 'gender'),
                               ('email', 'email'),
                               ('locale', 'locale'),
                               ('google_id', 'id'),
                               ('link', 'link')):
            setattr(profile, attr_name, data.get(key, ''))
        profile.save()
"content_hash": "df4babff2e8e46cc6580d013166fc4e7",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 98,
"avg_line_length": 37.15625,
"alnum_prop": 0.6026072329688814,
"repo_name": "nimiq/moogle-project",
"id": "dcca40de46be5b72276d1684f91ad67ce7526df8",
"size": "2378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moogle/profiles/profiler/gmail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7810"
},
{
"name": "Python",
"bytes": "214937"
}
],
"symlink_target": ""
} |
"""
ZFS Storage Appliance Cinder Volume Driver
"""
import ast
import base64
from oslo.utils import units
from oslo_config import cfg
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume.drivers.zfssa import zfssarest
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = log.getLogger(__name__)
ZFSSA_OPTS = [
cfg.StrOpt('zfssa_pool',
help='Storage pool name.'),
cfg.StrOpt('zfssa_project',
help='Project name.'),
cfg.StrOpt('zfssa_lun_volblocksize', default='8k',
help='Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k.'),
cfg.BoolOpt('zfssa_lun_sparse', default=False,
help='Flag to enable sparse (thin-provisioned): True, False.'),
cfg.StrOpt('zfssa_lun_compression', default='',
help='Data compression-off, lzjb, gzip-2, gzip, gzip-9.'),
cfg.StrOpt('zfssa_lun_logbias', default='',
help='Synchronous write bias-latency, throughput.'),
cfg.StrOpt('zfssa_initiator_group', default='',
help='iSCSI initiator group.'),
cfg.StrOpt('zfssa_initiator', default='',
help='iSCSI initiator IQNs. (comma separated)'),
cfg.StrOpt('zfssa_initiator_user', default='',
help='iSCSI initiator CHAP user.'),
cfg.StrOpt('zfssa_initiator_password', default='',
help='iSCSI initiator CHAP password.'),
cfg.StrOpt('zfssa_initiator_config', default='',
help='iSCSI initiators configuration.'),
cfg.StrOpt('zfssa_target_group', default='tgt-grp',
help='iSCSI target group name.'),
cfg.StrOpt('zfssa_target_user', default='',
help='iSCSI target CHAP user.'),
cfg.StrOpt('zfssa_target_password', default='',
help='iSCSI target CHAP password.'),
cfg.StrOpt('zfssa_target_portal',
help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260).'),
cfg.StrOpt('zfssa_target_interfaces',
help='Network interfaces of iSCSI targets. (comma separated)'),
cfg.IntOpt('zfssa_rest_timeout',
help='REST connection timeout. (seconds)')
]
CONF.register_opts(ZFSSA_OPTS)
ZFSSA_LUN_SPECS = {'zfssa:volblocksize',
'zfssa:sparse',
'zfssa:compression',
'zfssa:logbias'}
def factory_zfssa():
    # Single construction point for the ZFSSA REST client.
    return zfssarest.ZFSSAApi()
class ZFSSAISCSIDriver(driver.ISCSIDriver):
"""ZFSSA Cinder volume driver"""
VERSION = '1.0.0'
protocol = 'iSCSI'
    def __init__(self, *args, **kwargs):
        super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs)
        # Register both the ZFSSA-specific and the generic SAN options so
        # they can be read from self.configuration.
        self.configuration.append_config_values(ZFSSA_OPTS)
        self.configuration.append_config_values(san.san_opts)
        self.zfssa = None    # REST client; created in do_setup()
        self._stats = None   # cached stats; see _update_volume_status()
def _get_target_alias(self):
"""return target alias"""
return self.configuration.zfssa_target_group
    def do_setup(self, context):
        """Setup - create multiple elements.
        Project, initiators, initiatorgroup, target and targetgroup.
        """
        lcfg = self.configuration
        msg = (_('Connecting to host: %s.') % lcfg.san_ip)
        LOG.info(msg)
        self.zfssa = factory_zfssa()
        self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout)
        # HTTP basic-auth header value from the SAN credentials.
        # NOTE(review): base64.encodestring is deprecated in Python 3
        # (removed in 3.9); encodebytes is the modern equivalent.
        auth_str = base64.encodestring('%s:%s' %
                                       (lcfg.san_login,
                                        lcfg.san_password))[:-1]
        self.zfssa.login(auth_str)
        self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project,
                                  compression=lcfg.zfssa_lun_compression,
                                  logbias=lcfg.zfssa_lun_logbias)
        if (lcfg.zfssa_initiator_config != ''):
            # New-style configuration: a dict literal mapping initiator-group
            # names to lists of {'iqn', 'user', 'password'} entries.
            initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
            for initiator_group in initiator_config:
                zfssa_initiator_group = initiator_group
                for zfssa_initiator in initiator_config[zfssa_initiator_group]:
                    self.zfssa.create_initiator(zfssa_initiator['iqn'],
                                                zfssa_initiator_group + '-' +
                                                zfssa_initiator['iqn'],
                                                chapuser=
                                                zfssa_initiator['user'],
                                                chapsecret=
                                                zfssa_initiator['password'])
                    if (zfssa_initiator_group != 'default'):
                        self.zfssa.add_to_initiatorgroup(
                            zfssa_initiator['iqn'],
                            zfssa_initiator_group)
        else:
            # Fall back to the older flat zfssa_initiator* options.
            LOG.warning(_LW('zfssa_initiator_config not found. '
                            'Using deprecated configuration options.'))
            if (lcfg.zfssa_initiator != '' and
                (lcfg.zfssa_initiator_group == '' or
                 lcfg.zfssa_initiator_group == 'default')):
                LOG.warning(_LW('zfssa_initiator: %(ini)s'
                                ' wont be used on '
                                'zfssa_initiator_group= %(inigrp)s.')
                            % {'ini': lcfg.zfssa_initiator,
                               'inigrp': lcfg.zfssa_initiator_group})
            # Setup initiator and initiator group
            if (lcfg.zfssa_initiator != '' and
                    lcfg.zfssa_initiator_group != '' and
                    lcfg.zfssa_initiator_group != 'default'):
                for initiator in lcfg.zfssa_initiator.split(','):
                    self.zfssa.create_initiator(
                        initiator, lcfg.zfssa_initiator_group + '-' +
                        initiator, chapuser=lcfg.zfssa_initiator_user,
                        chapsecret=lcfg.zfssa_initiator_password)
                    self.zfssa.add_to_initiatorgroup(
                        initiator, lcfg.zfssa_initiator_group)
        # Parse interfaces (comma-separated; empty entries are skipped)
        interfaces = []
        for interface in lcfg.zfssa_target_interfaces.split(','):
            if interface == '':
                continue
            interfaces.append(interface)
        # Setup target and target group
        iqn = self.zfssa.create_target(
            self._get_target_alias(),
            interfaces,
            tchapuser=lcfg.zfssa_target_user,
            tchapsecret=lcfg.zfssa_target_password)
        self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
    def check_for_setup_error(self):
        """Check that driver can login.
        Check also pool, project, initiators, initiatorgroup, target and
        targetgroup.
        """
        lcfg = self.configuration
        self.zfssa.verify_pool(lcfg.zfssa_pool)
        self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project)
        # Mirror the two configuration styles handled in do_setup().
        if (lcfg.zfssa_initiator_config != ''):
            initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
            for initiator_group in initiator_config:
                zfssa_initiator_group = initiator_group
                for zfssa_initiator in initiator_config[zfssa_initiator_group]:
                    self.zfssa.verify_initiator(zfssa_initiator['iqn'])
        else:
            if (lcfg.zfssa_initiator != '' and
                lcfg.zfssa_initiator_group != '' and
                lcfg.zfssa_initiator_group != 'default'):
                for initiator in lcfg.zfssa_initiator.split(','):
                    self.zfssa.verify_initiator(initiator)
        self.zfssa.verify_target(self._get_target_alias())
def _get_provider_info(self, volume):
"""return provider information"""
lcfg = self.configuration
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project, volume['name'])
iqn = self.zfssa.get_target(self._get_target_alias())
loc = "%s %s %s" % (lcfg.zfssa_target_portal, iqn, lun['number'])
LOG.debug('_get_provider_info: provider_location: %s' % loc)
provider = {'provider_location': loc}
if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '':
provider['provider_auth'] = ('CHAP %s %s' %
lcfg.zfssa_target_user,
lcfg.zfssa_target_password)
return provider
    def create_volume(self, volume):
        """Create a volume on ZFSSA"""
        LOG.debug('zfssa.create_volume: volume=' + volume['name'])
        lcfg = self.configuration
        # Appliance expects a size string such as "10g".
        volsize = str(volume['size']) + 'g'
        specs = self._get_voltype_specs(volume)
        self.zfssa.create_lun(lcfg.zfssa_pool,
                              lcfg.zfssa_project,
                              volume['name'],
                              volsize,
                              lcfg.zfssa_target_group,
                              specs)
    def delete_volume(self, volume):
        """Deletes a volume with the given volume['name']."""
        LOG.debug('zfssa.delete_volume: name=' + volume['name'])
        lcfg = self.configuration
        lun2del = self.zfssa.get_lun(lcfg.zfssa_pool,
                                     lcfg.zfssa_project,
                                     volume['name'])
        # Delete clone temp snapshot. see create_cloned_volume()
        if 'origin' in lun2del and 'id' in volume:
            if lun2del['nodestroy']:
                # Clear the destroy-protection flag first.
                self.zfssa.set_lun_props(lcfg.zfssa_pool,
                                         lcfg.zfssa_project,
                                         volume['name'],
                                         nodestroy=False)
            tmpsnap = 'tmp-snapshot-%s' % volume['id']
            if lun2del['origin']['snapshot'] == tmpsnap:
                # NOTE(review): no explicit delete_lun follows here —
                # presumably removing the temp snapshot also removes its
                # dependent clone; confirm against zfssarest behavior.
                self.zfssa.delete_snapshot(lcfg.zfssa_pool,
                                           lcfg.zfssa_project,
                                           lun2del['origin']['share'],
                                           lun2del['origin']['snapshot'])
                return
        self.zfssa.delete_lun(pool=lcfg.zfssa_pool,
                              project=lcfg.zfssa_project,
                              lun=volume['name'])
def create_snapshot(self, snapshot):
"""Creates a snapshot with the given snapshot['name'] of the
snapshot['volume_name']
"""
LOG.debug('zfssa.create_snapshot: snapshot=' + snapshot['name'])
lcfg = self.configuration
self.zfssa.create_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        LOG.debug('zfssa.delete_snapshot: snapshot=' + snapshot['name'])
        lcfg = self.configuration
        # A snapshot with dependent clones cannot be removed; report busy so
        # the caller can retry later.
        has_clones = self.zfssa.has_clones(lcfg.zfssa_pool,
                                           lcfg.zfssa_project,
                                           snapshot['volume_name'],
                                           snapshot['name'])
        if has_clones:
            LOG.error(_LE('Snapshot %s: has clones') % snapshot['name'])
            raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
        self.zfssa.delete_snapshot(lcfg.zfssa_pool,
                                   lcfg.zfssa_project,
                                   snapshot['volume_name'],
                                   snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot - clone a snapshot"""
LOG.debug('zfssa.create_volume_from_snapshot: volume=' +
volume['name'])
LOG.debug('zfssa.create_volume_from_snapshot: snapshot=' +
snapshot['name'])
if not self._verify_clone_size(snapshot, volume['size'] * units.Gi):
exception_msg = (_('Error verifying clone size on '
'Volume clone: %(clone)s '
'Size: %(size)d on'
'Snapshot: %(snapshot)s')
% {'clone': volume['name'],
'size': volume['size'],
'snapshot': snapshot['name']})
LOG.error(exception_msg)
raise exception.InvalidInput(reason=exception_msg)
lcfg = self.configuration
self.zfssa.clone_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'],
volume['name'])
def _update_volume_status(self):
"""Retrieve status info from volume group."""
LOG.debug("Updating volume status")
self._stats = None
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'Oracle'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
lcfg = self.configuration
(avail, total) = self.zfssa.get_pool_stats(lcfg.zfssa_pool)
if avail is None or total is None:
return
data['total_capacity_gb'] = int(total) / units.Gi
data['free_capacity_gb'] = int(avail) / units.Gi
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_status()
return self._stats
    def create_export(self, context, volume):
        """Driver entry point; intentionally a no-op for this driver."""
        pass
    def remove_export(self, context, volume):
        """Driver entry point; intentionally a no-op for this driver."""
        pass
    def ensure_export(self, context, volume):
        """Driver entry point; intentionally a no-op for this driver."""
        pass
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch an image via the base driver and write it onto the volume.

        ensure_export() is a no-op here; the call is kept for interface
        symmetry with other drivers before delegating to the base class.
        """
        self.ensure_export(context, volume)
        super(ZFSSAISCSIDriver, self).copy_image_to_volume(
            context, volume, image_service, image_id)
def extend_volume(self, volume, new_size):
"""Driver entry point to extent volume size."""
LOG.debug('extend_volume: volume name: %s' % volume['name'])
lcfg = self.configuration
self.zfssa.set_lun_props(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
volsize=new_size * units.Gi)
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the specified volume."""
zfssa_snapshot = {'volume_name': src_vref['name'],
'name': 'tmp-snapshot-%s' % volume['id']}
self.create_snapshot(zfssa_snapshot)
try:
self.create_volume_from_snapshot(volume, zfssa_snapshot)
except exception.VolumeBackendAPIException:
LOG.error(_LE('Clone Volume:'
'%(volume)s failed from source volume:'
'%(src_vref)s')
% {'volume': volume['name'],
'src_vref': src_vref['name']})
# Cleanup snapshot
self.delete_snapshot(zfssa_snapshot)
    def local_path(self, volume):
        """Not implemented for this driver."""
        pass
    def backup_volume(self, context, backup, backup_service):
        """Not implemented for this driver."""
        pass
    def restore_backup(self, context, backup, volume, backup_service):
        """Not implemented for this driver."""
        pass
def _verify_clone_size(self, snapshot, size):
"""Check whether the clone size is the same as the parent volume"""
lcfg = self.configuration
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'])
return lun['size'] == size
def initialize_connection(self, volume, connector):
lcfg = self.configuration
init_groups = self.zfssa.get_initiator_initiatorgroup(
connector['initiator'])
for initiator_group in init_groups:
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
initiator_group)
iscsi_properties = {}
provider = self._get_provider_info(volume)
(target_portal, iqn, lun) = provider['provider_location'].split()
iscsi_properties['target_discovered'] = False
iscsi_properties['target_portal'] = target_portal
iscsi_properties['target_iqn'] = iqn
iscsi_properties['target_lun'] = lun
iscsi_properties['volume_id'] = volume['id']
if 'provider_auth' in provider:
(auth_method, auth_username, auth_password) = provider[
'provider_auth'].split()
iscsi_properties['auth_method'] = auth_method
iscsi_properties['auth_username'] = auth_username
iscsi_properties['auth_password'] = auth_password
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
}
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to terminate a connection for a volume."""
LOG.debug('terminate_connection: volume name: %s.' % volume['name'])
lcfg = self.configuration
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
'')
def _get_voltype_specs(self, volume):
"""Get specs suitable for volume creation."""
vtype = volume.get('volume_type_id', None)
extra_specs = None
if vtype:
extra_specs = volume_types.get_volume_type_extra_specs(vtype)
return self._get_specs(extra_specs)
def _get_specs(self, xspecs):
"""Return a dict with extra specs and/or config values."""
result = {}
for spc in ZFSSA_LUN_SPECS:
val = None
prop = spc.split(':')[1]
cfg = 'zfssa_lun_' + prop
if xspecs:
val = xspecs.pop(spc, None)
if val is None:
val = self.configuration.safe_get(cfg)
if val is not None and val != '':
result.update({prop: val})
return result
| {
"content_hash": "eb18984b0ff688e6a8f24ca88cca401f",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 79,
"avg_line_length": 42.11358574610245,
"alnum_prop": 0.5274208049077159,
"repo_name": "hguemar/cinder",
"id": "48e6dbb99c4d618f339d7e11227e56510d347bfc",
"size": "19556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/zfssa/zfssaiscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10010542"
},
{
"name": "Shell",
"bytes": "9917"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the carpet.aaxis.tickfont 'size' property."""

    def __init__(
        self, plotly_name="size", parent_name="carpet.aaxis.tickfont", **kwargs
    ):
        # Pop the defaults out of kwargs so explicit overrides win.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 1)
        role = kwargs.pop("role", "style")
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
| {
"content_hash": "9e796084786cc1d33a427a2afd0cf284",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 33.266666666666666,
"alnum_prop": 0.5771543086172345,
"repo_name": "plotly/python-api",
"id": "91ec8f102d725bb9617a26d4e3c1616601675e71",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/carpet/aaxis/tickfont/_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import codecs
import yaml
from yaml.composer import Composer
from ansiblereview import Result, Error
def hunt_repeated_yaml_keys(data):
    """Parses yaml and returns a list of repeated variables and
    the line on which they occur
    """
    loader = yaml.Loader(data)

    def compose_node(parent, index):
        # the line number where the previous token has ended (plus empty lines)
        line = loader.line
        node = Composer.compose_node(loader, parent, index)
        # Record a 1-based line number on the node for later reporting.
        node.__line__ = line + 1
        return node

    def construct_mapping(node, deep=False):
        # Track the last line each key was seen on; a key seen again is a
        # duplicate and gets collected into `errors`.
        mapping = dict()
        errors = dict()
        for key_node, value_node in node.value:
            key = key_node.value
            if key in mapping:
                if key in errors:
                    errors[key].append(key_node.__line__)
                else:
                    errors[key] = [mapping[key], key_node.__line__]
            mapping[key] = key_node.__line__
        # NOTE: deliberately returns the duplicate-key map instead of the
        # mapping -- get_single_data() below therefore yields the errors.
        return errors

    # Patch this loader instance so the instrumented hooks above are used.
    loader.compose_node = compose_node
    loader.construct_mapping = construct_mapping
    data = loader.get_single_data()
    return data
def repeated_vars(candidate, settings):
    """Report every variable that occurs more than once in a file."""
    with codecs.open(candidate.path, 'r') as f:
        duplicates = hunt_repeated_yaml_keys(f) or dict()
    findings = []
    for var_name in duplicates:
        for line_no in duplicates[var_name]:
            findings.append(
                Error(line_no, "Variable %s occurs more than once" % var_name))
    return Result(candidate, findings)
| {
"content_hash": "e6b8937b6a0e51fa8674c90e2d3d218c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 92,
"avg_line_length": 32.37777777777778,
"alnum_prop": 0.6046671242278655,
"repo_name": "willthames/ansible-review",
"id": "29946fbcfbf618ac8e6b03d926247c002615849a",
"size": "1457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/ansiblereview/vars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56285"
}
],
"symlink_target": ""
} |
"""Shared utilities for container manipulation."""
import collections
import dataclasses
from typing import Any
import attr
from tensorflow_federated.python.common_libs import py_typecheck
def dataclass_to_odict(dataclass_obj: Any) -> collections.OrderedDict[str, Any]:
  """Shallow-copies a dataclass instance to an ordered dict."""
  py_typecheck.check_dataclass(dataclass_obj)
  # dataclasses guarantee field ordering, so the OrderedDict preserves it.
  return collections.OrderedDict(
      (field.name, getattr(dataclass_obj, field.name))
      for field in dataclasses.fields(dataclass_obj))
def attrs_class_to_odict(
    attr_class_obj: Any) -> collections.OrderedDict[Any, Any]:
  """Shallow-copies an attr-class object to an ordered dict."""
  py_typecheck.check_attrs(attr_class_obj)
  # recurse=False keeps the copy shallow, mirroring dataclass_to_odict.
  return attr.asdict(  # pytype:disable=bad-return-type
      attr_class_obj, dict_factory=collections.OrderedDict, recurse=False)
| {
"content_hash": "d63acc2f7775093d821a0abeac3823b5",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 32.758620689655174,
"alnum_prop": 0.7547368421052632,
"repo_name": "tensorflow/federated",
"id": "732b3962585f89328c71d5f7555e2d9964a1558b",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_federated/python/common_libs/named_containers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "729470"
},
{
"name": "Dockerfile",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6700736"
},
{
"name": "Shell",
"bytes": "7123"
},
{
"name": "Starlark",
"bytes": "387382"
}
],
"symlink_target": ""
} |
import functools
from django.core import mail
from django.http import JsonResponse, HttpResponseBadRequest
class ajax(object):
    """Decorator restricting a view to AJAX requests and returning JSON.

    Non-AJAX requests get a 400 response, except under the test runner
    (detected via ``mail.outbox``). With ``login_required=True``,
    unauthenticated users also get a 400 rather than a login redirect,
    so Javascript callers hit an error codepath instead of parsing HTML.
    """

    def __init__(self, login_required=False):
        self.login_required = login_required

    def __call__(self, fn):
        @functools.wraps(fn)
        def wrapped(request, *args, **kwargs):
            running_tests = hasattr(mail, 'outbox')
            if not (request.is_ajax() or running_tests):
                return HttpResponseBadRequest()
            if self.login_required and not request.user.is_authenticated():
                return HttpResponseBadRequest()
            payload = fn(request, *args, **kwargs)
            return JsonResponse(payload or {})
        return wrapped
| {
"content_hash": "26d50fa27e76de96343bc76e775cb774",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 38.43478260869565,
"alnum_prop": 0.6391402714932126,
"repo_name": "takeyourmeds/takeyourmeds-web",
"id": "40dc030d408b7525e412e766e4cfdf35a3aae72a",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "takeyourmeds/utils/ajax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "266001"
},
{
"name": "HTML",
"bytes": "80882"
},
{
"name": "JavaScript",
"bytes": "248719"
},
{
"name": "Nginx",
"bytes": "1013"
},
{
"name": "Python",
"bytes": "107863"
},
{
"name": "Shell",
"bytes": "918"
}
],
"symlink_target": ""
} |
"""Tests for head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import head as ts_head_lib
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import training as train
class HeadTest(test.TestCase):
  """Basic argument validation for the time-series model_fn."""

  def test_labels_provided_error(self):
    model_fn = _stub_model_fn()
    all_modes = (estimator_lib.ModeKeys.TRAIN,
                 estimator_lib.ModeKeys.EVAL,
                 estimator_lib.ModeKeys.PREDICT)
    for mode in all_modes:
      with self.assertRaisesRegexp(ValueError, "labels"):
        model_fn(features={}, labels={"a": "b"}, mode=mode)

  def test_unknown_mode(self):
    model_fn = _stub_model_fn()
    with self.assertRaisesRegexp(ValueError, "Unknown mode 'Not a mode'"):
      model_fn(features={}, labels={}, mode="Not a mode")
class _TickerModel(object):
  """Stub model whose loss, state and predictions echo the 'ticker' feature."""
  num_features = 1
  dtype = dtypes.float32

  def initialize_graph(self, input_statistics):
    # Nothing to build for this stub.
    pass

  def define_loss(self, features, mode):
    del mode  # unused
    # Pass the "ticker" feature straight through so tests can compare
    # reported metrics against the raw input.
    return model.ModelOutputs(
        loss=features["ticker"],
        end_state=(features["ticker"], features["ticker"]),
        prediction_times=array_ops.zeros(()),
        predictions={"ticker": features["ticker"]})
class EvaluationMetricsTests(test.TestCase):
  """Checks the eval metrics emitted by the time-series head."""

  def test_metrics_consistent(self):
    # Tests that the identity metrics used to report in-sample predictions match
    # the behavior of standard metrics.
    g = ops.Graph()
    with g.as_default():
      features = {
          feature_keys.TrainEvalFeatures.TIMES:
              array_ops.zeros((1, 1)),
          feature_keys.TrainEvalFeatures.VALUES:
              array_ops.zeros((1, 1, 1)),
          # Local counter variable (counts up to 10) so each evaluation
          # step observes a fresh "ticker" value.
          "ticker":
              array_ops.reshape(
                  math_ops.cast(
                      variables.Variable(
                          name="ticker",
                          initial_value=0,
                          dtype=dtypes.int64,
                          collections=[ops.GraphKeys.LOCAL_VARIABLES])
                      .count_up_to(10),
                      dtype=dtypes.float32), (1, 1, 1))
      }
      model_fn = ts_head_lib.time_series_regression_head(
          model=_TickerModel(),
          state_manager=state_management.PassthroughStateManager(),
          optimizer=train.GradientDescentOptimizer(0.001)).create_estimator_spec
      outputs = model_fn(
          features=features, labels=None, mode=estimator_lib.ModeKeys.EVAL)
      # Collect the update op of every eval metric, plus a mean over loss.
      metric_update_ops = [
          metric[1] for metric in outputs.eval_metric_ops.values()]
      loss_mean, loss_update = metrics.mean(outputs.loss)
      metric_update_ops.append(loss_update)
      with self.test_session() as sess:
        coordinator = coordinator_lib.Coordinator()
        queue_runner_impl.start_queue_runners(sess, coord=coordinator)
        variables.local_variables_initializer().run()
        sess.run(metric_update_ops)
        loss_evaled, metric_evaled, nested_metric_evaled = sess.run(
            (loss_mean, outputs.eval_metric_ops["ticker"][0],
             outputs.eval_metric_ops[feature_keys.FilteringResults.STATE_TUPLE][
                 0][0]))
        # The custom model_utils metrics for in-sample predictions should be in
        # sync with the Estimator's mean metric for model loss.
        self.assertAllClose(0., loss_evaled)
        self.assertAllClose((((0.,),),), metric_evaled)
        self.assertAllClose((((0.,),),), nested_metric_evaled)
        coordinator.request_stop()
        coordinator.join()
class _StubModel(object):
  """Bare-bones model stand-in used by the feature-validation tests."""
  num_features = 3
  dtype = dtypes.float64

  def initialize_graph(self, input_statistics):
    del input_statistics  # unused
def _stub_model_fn():
  """Build a model_fn around _StubModel for input-validation tests."""
  head = ts_head_lib.time_series_regression_head(
      model=_StubModel(),
      state_manager=state_management.PassthroughStateManager(),
      optimizer=train.AdamOptimizer(0.001))
  return head.create_estimator_spec
class TrainEvalFeatureCheckingTests(test.TestCase):
  """Feature validation shared by TRAIN and EVAL modes."""

  def _assert_train_eval_failure(self, regexp, features):
    # Each malformed feature dict must fail identically in both modes.
    model_fn = _stub_model_fn()
    for mode in (estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL):
      with self.assertRaisesRegexp(ValueError, regexp):
        model_fn(features=features, labels=None, mode=mode)

  def test_no_time_feature(self):
    self._assert_train_eval_failure(
        "Expected a '{}' feature".format(feature_keys.TrainEvalFeatures.TIMES),
        {feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]})

  def test_no_value_feature(self):
    self._assert_train_eval_failure(
        "Expected a '{}' feature".format(feature_keys.TrainEvalFeatures.VALUES),
        {feature_keys.TrainEvalFeatures.TIMES: [[1]]})

  def test_bad_time_rank(self):
    self._assert_train_eval_failure(
        "Expected shape.*for feature '{}'".format(
            feature_keys.TrainEvalFeatures.TIMES),
        {feature_keys.TrainEvalFeatures.TIMES: [[[1]]],
         feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]})

  def test_bad_value_rank(self):
    self._assert_train_eval_failure(
        "Expected shape.*for feature '{}'".format(
            feature_keys.TrainEvalFeatures.VALUES),
        {feature_keys.TrainEvalFeatures.TIMES: [[1]],
         feature_keys.TrainEvalFeatures.VALUES: [[1.]]})

  def test_bad_value_num_features(self):
    self._assert_train_eval_failure(
        "Expected shape.*, 3.*for feature '{}'".format(
            feature_keys.TrainEvalFeatures.VALUES),
        {feature_keys.TrainEvalFeatures.TIMES: [[1]],
         feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]})

  def test_bad_exogenous_shape(self):
    self._assert_train_eval_failure(
        "Features must have shape.*for feature 'exogenous'",
        {feature_keys.TrainEvalFeatures.TIMES: [[1]],
         feature_keys.TrainEvalFeatures.VALUES: [[[1., 2., 3.]]],
         "exogenous": [[1], [2]]})
class PredictFeatureCheckingTests(test.TestCase):
  """Feature validation for PREDICT mode."""

  def _assert_predict_failure(self, regexp, features):
    model_fn = _stub_model_fn()
    with self.assertRaisesRegexp(ValueError, regexp):
      model_fn(features=features, labels=None,
               mode=estimator_lib.ModeKeys.PREDICT)

  def test_no_time_feature(self):
    self._assert_predict_failure(
        "Expected a '{}' feature".format(
            feature_keys.PredictionFeatures.TIMES),
        {feature_keys.PredictionFeatures.STATE_TUPLE: ([[[1.]]], 1.)})

  def test_no_start_state_feature(self):
    self._assert_predict_failure(
        "Expected a '{}' feature".format(
            feature_keys.PredictionFeatures.STATE_TUPLE),
        {feature_keys.PredictionFeatures.TIMES: [[1]]})

  def test_bad_time_rank(self):
    self._assert_predict_failure(
        "Expected shape.*for feature '{}'".format(
            feature_keys.PredictionFeatures.TIMES),
        {feature_keys.PredictionFeatures.TIMES: 1,
         feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.))})

  def test_bad_exogenous_shape(self):
    self._assert_predict_failure(
        "Features must have shape.*for feature 'exogenous'",
        {feature_keys.PredictionFeatures.TIMES: [[1]],
         feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.)),
         "exogenous": 1.})
# Run the test suite when the module is executed directly.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "895da3f89ebc33a9d6348316bc2618ff",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 80,
"avg_line_length": 37.762845849802375,
"alnum_prop": 0.619635754657735,
"repo_name": "Mazecreator/tensorflow",
"id": "7ebcebfe1b156a6c0cc86fa1ded55e4a645d291f",
"size": "10243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/timeseries/python/timeseries/head_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "175403"
},
{
"name": "C++",
"bytes": "21737608"
},
{
"name": "CMake",
"bytes": "130644"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "786880"
},
{
"name": "HTML",
"bytes": "558790"
},
{
"name": "Java",
"bytes": "279355"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36991"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64656"
},
{
"name": "Protocol Buffer",
"bytes": "199996"
},
{
"name": "Python",
"bytes": "17935555"
},
{
"name": "Shell",
"bytes": "320192"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
} |
from .iq_picture import PictureIqProtocolEntity
from yowsup.structs import ProtocolTreeNode
import time
class SetPictureIqProtocolEntity(PictureIqProtocolEntity):
    '''
    <iq type="set" id="{{id}}" xmlns="w:profile:picture", to={{jid}}">
        <picture type="image" id="{{another_id}}">
        {{Binary bytes of the picture when type is set.}}
        </picture>
    </iq>
    '''
    def __init__(self, jid, previewData, pictureData, pictureId = None, _id = None):
        super(SetPictureIqProtocolEntity, self).__init__(jid, _id, "set")
        self.setSetPictureProps(previewData, pictureData, pictureId)

    def setSetPictureProps(self, previewData, pictureData, pictureId = None):
        self.setPreviewData(previewData)
        self.setPictureData(pictureData)
        # Default the picture id to the current unix timestamp.
        self.setPictureId(pictureId if pictureId else str(int(time.time())))

    def setPictureData(self, pictureData):
        self.pictureData = pictureData

    def getPictureData(self):
        return self.pictureData

    def setPreviewData(self, previewData):
        self.previewData = previewData

    def getPreviewData(self):
        return self.previewData

    def setPictureId(self, pictureId):
        self.pictureId = pictureId

    def getPictureId(self):
        return self.pictureId

    def toProtocolTreeNode(self):
        # NOTE(review): super() is called with PictureIqProtocolEntity as the
        # class, which skips that class's own toProtocolTreeNode -- this
        # mirrors the original behavior; confirm it is intentional.
        node = super(PictureIqProtocolEntity, self).toProtocolTreeNode()
        imageNode = ProtocolTreeNode(
            "picture",
            {"type": "image", "id": self.getPictureId()},
            None,
            self.getPictureData())
        previewNode = ProtocolTreeNode(
            "picture", {"type": "preview"}, None, self.getPreviewData())
        node.addChild(imageNode)
        node.addChild(previewNode)
        return node

    @staticmethod
    def fromProtocolTreeNode(node):
        entity = PictureIqProtocolEntity.fromProtocolTreeNode(node)
        entity.__class__ = SetPictureIqProtocolEntity
        # Index the picture children by their "type" attribute
        # (last one wins, as in the original loop).
        childrenByType = {}
        for child in node.getAllChildren("picture"):
            childrenByType[child.getAttributeValue("type")] = child
        imageNode = childrenByType.get("image")
        previewNode = childrenByType.get("preview")
        entity.setSetPictureProps(previewNode.getData(), imageNode.getData(),
                                  imageNode.getAttributeValue("id"))
        return entity
"content_hash": "57a29459e8a04f8e3c35bca2ded034e5",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 116,
"avg_line_length": 34.865671641791046,
"alnum_prop": 0.6630993150684932,
"repo_name": "biji/yowsup",
"id": "3ab1698843cb532c8038d3814d1a04605deacf9a",
"size": "2336",
"binary": false,
"copies": "65",
"ref": "refs/heads/master",
"path": "yowsup/layers/protocol_profiles/protocolentities/iq_picture_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "222487"
}
],
"symlink_target": ""
} |
from django.urls import include, path, re_path
from .views import api_handler_404, api_root
app_name = "naovoce"
urlpatterns = [
path("", api_root, name="root"),
path("fruit/", include("naovoce.api.v1.fruit.urls")),
path("herbarium/", include("naovoce.api.v1.herbarium.urls")),
path("users/", include("naovoce.api.v1.users.urls")),
path("images/", include("naovoce.api.v1.images.urls")),
path("signup/", include("naovoce.api.v1.signup.urls")),
path("token/", include("naovoce.api.v1.token.urls")),
re_path(r"^.*", api_handler_404),
]
| {
"content_hash": "ae43bd2c654d258a4ec119e1f65e2f61",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 35.5625,
"alnum_prop": 0.6502636203866432,
"repo_name": "jsmesami/naovoce",
"id": "e32e5254e29ddac98edce77a303240960e027388",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/naovoce/api/v1/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "API Blueprint",
"bytes": "18118"
},
{
"name": "Makefile",
"bytes": "658"
},
{
"name": "Python",
"bytes": "170165"
}
],
"symlink_target": ""
} |
def noop():
    """
    No Operation - zc.buildout requires an endpoint method
    """
    # Intentionally empty: exists only so buildout has a callable to reference.
    pass
def _logging():
# Logging Configuration
import sys
import logging
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def test_login():
"""
Attempt authentication
"""
_logging()
from getpass import getpass
from policy import check_password
while True:
try:
username = raw_input("User: ")
password = getpass()
except KeyboardInterrupt:
break
result = check_password(None, username, password)
if result is True:
print "Password correct!"
elif result is False:
print "Password incorrect"
else:
print "User not found"
again = raw_input("Again (yes/No)? ").lower()
if not again == "yes":
break
return 0
| {
"content_hash": "03be22941384c7b8f233d692aa8996c7",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 64,
"avg_line_length": 25.522727272727273,
"alnum_prop": 0.5939447907390917,
"repo_name": "ekarulf/wsgi_googleauth",
"id": "081afa90d9f00d9329abd803cfa1b6f66b59eeef",
"size": "1187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/wsgi_googleauth/commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16525"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the shared audience-seating deed tangible template."""
    deed = Tangible()
    deed.template = "object/tangible/deed/event_perk/shared_audience_seating_deed.iff"
    deed.attribute_template_id = 2
    deed.stfName("event_perk","audience_seating_name")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return deed
"content_hash": "71020e7bee5bd8b2afa91ce81e9d504e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 25.153846153846153,
"alnum_prop": 0.709480122324159,
"repo_name": "obi-two/Rebelion",
"id": "40394741dfe69cfec3902f5903b01841418d84b1",
"size": "472",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/deed/event_perk/shared_audience_seating_deed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from typing import Union
from andreas.models.post import Post
from andreas.models.server import Server
def get_post(server: Union[Server,str,int], path: str) -> Post:
    """
    Returns a post with given `path` on given `server`.

    :param server: The :class:`Server<andreas.models.core.Server>` object, or its id, or its name.
    :param path: The path to the required :class:`Post<andreas.models.core.Post>` on the server.
    """
    if not isinstance(server, str):
        # Server object or numeric id: peewee resolves both in the equality.
        return Post.get(Post.server == server, Post.path == path)
    query = (Post
             .select(Post, Server)
             .join(Server)
             .where(Server.name == server)
             .where(Post.path == path))
    return query.get()
def get_post_by_identifier(identifier: str) -> Post:
    """
    Returns a post by its ``server/path`` identifier, e.g. ``example.org/posts/1``.

    The identifier is split on the *first* slash only, so paths that
    themselves contain slashes are handled correctly (the original
    ``split('/')`` raised ValueError for such identifiers).
    """
    server, path = identifier.split('/', 1)
    return get_post(server, '/'+path)
"content_hash": "43ae2482ce122625c3b01e6fca10c86c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 113,
"avg_line_length": 36.27272727272727,
"alnum_prop": 0.6754385964912281,
"repo_name": "maaaks/andreas",
"id": "604d1488bdf71d748e1afa5f14e0581c72b1ea58",
"size": "798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "andreas/functions/querying.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48636"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals, division
from collections import namedtuple
from copy import deepcopy
from multiprocessing.pool import ThreadPool
import yaml
import json
import os
from operator import attrgetter
import random
from string import ascii_letters
import time
import logging
from datetime import timedelta
import datetime as dt
import copy
from atomic_reactor.build import BuildResult
from atomic_reactor.plugin import BuildStepPlugin
from atomic_reactor.plugins.pre_reactor_config import get_config
from atomic_reactor.plugins.pre_check_and_set_rebuild import is_rebuild
from atomic_reactor.util import get_preferred_label, df_parser, get_build_json
from atomic_reactor.constants import PLUGIN_ADD_FILESYSTEM_KEY, PLUGIN_BUILD_ORCHESTRATE_KEY
from osbs.api import OSBS
from osbs.exceptions import OsbsException
from osbs.conf import Configuration
from osbs.constants import BUILD_FINISHED_STATES
# One candidate cluster: its config entry, platform name, OSBS client
# and current load figure.
ClusterInfo = namedtuple('ClusterInfo', ('cluster', 'platform', 'osbs', 'load'))

# Keys used inside this plugin's workspace dict.
WORKSPACE_KEY_BUILD_INFO = 'build_info'
WORKSPACE_KEY_UPLOAD_DIR = 'koji_upload_dir'
WORKSPACE_KEY_OVERRIDE_KWARGS = 'override_kwargs'

# Seconds to wait before looking for a usable cluster again.
FIND_CLUSTER_RETRY_DELAY = 15.0
# Seconds a failing cluster is put into retry-wait.
FAILURE_RETRY_DELAY = 10.0
# Failure count after which a cluster is considered dead.
MAX_CLUSTER_FAILS = 20
def get_worker_build_info(workflow, platform):
    """
    Obtain worker build information for a given platform
    """
    plugin_workspace = workflow.plugin_workspace[OrchestrateBuildPlugin.key]
    return plugin_workspace[WORKSPACE_KEY_BUILD_INFO][platform]
def get_koji_upload_dir(workflow):
    """
    Obtain koji_upload_dir value used for worker builds
    """
    plugin_workspace = workflow.plugin_workspace[OrchestrateBuildPlugin.key]
    return plugin_workspace[WORKSPACE_KEY_UPLOAD_DIR]
def override_build_kwarg(workflow, k, v):
    """
    Override a build-kwarg for all worker builds
    """
    plugin_workspace = workflow.plugin_workspace.setdefault(
        OrchestrateBuildPlugin.key, {})
    plugin_workspace.setdefault(WORKSPACE_KEY_OVERRIDE_KWARGS, {})[k] = v
class UnknownPlatformException(Exception):
    """Raised when no clusters could be found for a requested platform."""
class AllClustersFailedException(Exception):
    """Raised when each cluster has reached max_cluster_fails."""
class ClusterRetryContext(object):
    """Tracks consecutive failures and retry-wait windows for one cluster."""

    def __init__(self, max_cluster_fails):
        # consecutive failure count for this cluster
        self.fails = 0
        # attempts may resume once this moment has passed (epoch = ready now)
        self.retry_at = dt.datetime.utcfromtimestamp(0)
        # failures allowed before the cluster is written off
        self.max_cluster_fails = max_cluster_fails

    @property
    def failed(self):
        """True once the cluster has used up its failure allowance."""
        return self.fails >= self.max_cluster_fails

    @property
    def in_retry_wait(self):
        """True while still inside the retry-wait window."""
        return dt.datetime.now() < self.retry_at

    def try_again_later(self, seconds):
        """Record one more failure and schedule the next allowed attempt."""
        if self.failed:
            # Dead clusters are left untouched (no further scheduling).
            return
        self.fails += 1
        self.retry_at = dt.datetime.now() + timedelta(seconds=seconds)
def wait_for_any_cluster(contexts):
    """
    Wait until any of the clusters are out of retry-wait

    :param contexts: List[ClusterRetryContext]
    :raises: AllClustersFailedException if no more retry attempts allowed
    """
    live_retry_times = [ctx.retry_at for ctx in contexts.values()
                        if not ctx.failed]
    if not live_retry_times:
        raise AllClustersFailedException(
            "Could not find appropriate cluster for worker build."
        )
    wait = min(live_retry_times) - dt.datetime.now()
    # A non-positive wait means some cluster is already available.
    # NOTE: .seconds (not total_seconds()) matches the original behavior;
    # fractional seconds are dropped.
    time.sleep(max(timedelta(seconds=0), wait).seconds)
class WorkerBuildInfo(object):
    """Tracks a single worker build: its OSBS handle, platform and outcome."""

    def __init__(self, build, cluster_info, logger):
        self.build = build
        self.cluster = cluster_info.cluster
        self.osbs = cluster_info.osbs
        self.platform = cluster_info.platform
        # Prefix every log line with the worker's architecture.
        self.log = logging.LoggerAdapter(logger, {'arch': self.platform})

        self.monitor_exception = None

    @property
    def name(self):
        """Build name, or 'N/A' before the build has been started."""
        return self.build.get_build_name() if self.build else 'N/A'

    def wait_to_finish(self):
        """Block until the worker build reaches a finished state."""
        self.build = self.osbs.wait_for_build_to_finish(self.name)
        return self.build

    def watch_logs(self):
        """Stream the worker build's logs through our per-arch logger."""
        for line in self.osbs.get_build_logs(self.name, follow=True):
            self.log.info(line)

    def get_annotations(self):
        """Assemble the annotations reported for this worker build."""
        build_annotations = self.build.get_annotations() or {}
        annotations = {
            'build': {
                'cluster-url': self.osbs.os_conf.get_openshift_base_uri(),
                'namespace': self.osbs.os_conf.get_namespace(),
                'build-name': self.name,
            },
            'digests': json.loads(
                build_annotations.get('digests', '[]')),
            'plugins-metadata': json.loads(
                build_annotations.get('plugins-metadata', '{}')),
        }

        if 'metadata_fragment' in build_annotations and \
                'metadata_fragment_key' in build_annotations:
            annotations['metadata_fragment'] = build_annotations['metadata_fragment']
            annotations['metadata_fragment_key'] = build_annotations['metadata_fragment_key']

        return annotations

    def get_fail_reason(self):
        """Build a dict describing why this worker build failed.

        Combines the monitor exception (if any), plugin-reported errors
        from the build annotations, and, as a fallback, the failure
        reason of the build's pod.
        """
        fail_reason = {}
        if self.monitor_exception:
            fail_reason['general'] = repr(self.monitor_exception)
        elif not self.build:
            fail_reason['general'] = 'build not started'

        if not self.build:
            return fail_reason

        build_annotations = self.build.get_annotations() or {}
        metadata = json.loads(build_annotations.get('plugins-metadata', '{}'))
        # (A duplicated re-assignment of fail_reason['general'] from
        # monitor_exception was removed here; the value was identical.)
        try:
            fail_reason.update(metadata['errors'])
        except KeyError:
            try:
                build_name = self.build.get_build_name()
                pod = self.osbs.get_pod_for_build(build_name)
                fail_reason['pod'] = pod.get_failure_reason()
            except (OsbsException, AttributeError):
                # Catch AttributeError here because osbs-client < 0.41
                # doesn't have this method
                pass

        return fail_reason

    def cancel_build(self):
        """Cancel the worker build if it has started and not yet finished."""
        if self.build and not self.build.is_finished():
            self.osbs.cancel_build(self.name)
class OrchestrateBuildPlugin(BuildStepPlugin):
    """
    Start and monitor worker builds for each platform

    This plugin will find the best suited worker cluster to
    be used for each platform. It does so by calculating the
    current load of active builds on each cluster and choosing
    the one with smallest load.

    The list of available worker clusters is retrieved by fetching
    the result provided by reactor_config plugin.

    If any of the worker builds fail, this plugin will return a
    failed BuildResult. Although, it does wait for all worker builds
    to complete in any case.

    If all worker builds succeed, then this plugin returns a
    successful BuildResult, but with a remote image result. The
    image is built in the worker builds which is likely a different
    host than the one running this build. This means that the local
    docker daemon has no knowledge of the built image.

    If build_image is defined it is passed to the worker build,
    but there is still possibility to have build_imagestream inside
    osbs.conf in the secret, and that would take precendence over
    build_image from kwargs
    """

    # Optional repository-level config file consulted by get_platforms()
    # for platform only/not rules.
    CONTAINER_FILENAME = 'container.yaml'
    # Sentinel marking a cluster whose load could not be determined.
    UNREACHABLE_CLUSTER_LOAD = object()

    key = PLUGIN_BUILD_ORCHESTRATE_KEY

    def __init__(self, tasker, workflow, platforms, build_kwargs,
                 osbs_client_config=None, worker_build_image=None,
                 config_kwargs=None,
                 find_cluster_retry_delay=FIND_CLUSTER_RETRY_DELAY,
                 failure_retry_delay=FAILURE_RETRY_DELAY,
                 max_cluster_fails=MAX_CLUSTER_FAILS):
        """
        constructor

        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param platforms: list<str>, platforms to build
        :param build_kwargs: dict, keyword arguments for starting worker builds
        :param osbs_client_config: str, path to directory containing osbs.conf
        :param worker_build_image: str, the builder image to use for worker builds
            (not used, image is inherited from the orchestrator)
        :param config_kwargs: dict, keyword arguments to override worker configuration
        :param find_cluster_retry_delay: the delay in seconds to try again reaching a cluster
        :param failure_retry_delay: the delay in seconds to try again starting a build
        :param max_cluster_fails: the maximum number of times a cluster can fail before
            being ignored
        """
        super(OrchestrateBuildPlugin, self).__init__(tasker, workflow)
        self.platforms = set(platforms)
        self.build_kwargs = build_kwargs
        self.osbs_client_config = osbs_client_config
        self.config_kwargs = config_kwargs or {}
        self.find_cluster_retry_delay = find_cluster_retry_delay
        self.failure_retry_delay = failure_retry_delay
        self.max_cluster_fails = max_cluster_fails
        # Values shared by every worker build started during this run.
        self.koji_upload_dir = self.get_koji_upload_dir()
        self.fs_task_id = self.get_fs_task_id()
        self.release = self.get_release()
        if worker_build_image:
            self.log.warning('worker_build_image is deprecated')
        # One WorkerBuildInfo per attempted worker build (appended by
        # do_worker_build / select_and_start_cluster, possibly from
        # multiple threads).
        self.worker_builds = []

    def make_list(self, value):
        """Wrap ``value`` in a list unless it already is one."""
        if not isinstance(value, list):
            value = [value]
        return value

    def get_platforms(self):
        """Determine the set of platforms to build for.

        Starts from the platforms given to the plugin and applies the
        optional 'platforms' only/not rules from container.yaml in the
        source repository, when that file exists.
        """
        build_file_dir = self.workflow.source.get_build_file_path()[1]
        excluded_platforms = set()
        container_path = os.path.join(build_file_dir, self.CONTAINER_FILENAME)
        if os.path.exists(container_path):
            with open(container_path) as f:
                # NOTE(review): yaml.load() without an explicit Loader can
                # construct arbitrary Python objects from repository-supplied
                # content; consider yaml.safe_load — confirm before changing.
                data = yaml.load(f)
                if data is None or 'platforms' not in data or data['platforms'] is None:
                    return self.platforms
                excluded_platforms = set(self.make_list(data['platforms'].get('not', [])))
                only_platforms = set(self.make_list(data['platforms'].get('only', [])))
                if only_platforms:
                    self.platforms = self.platforms & only_platforms
        return self.platforms - excluded_platforms

    def get_current_builds(self, osbs):
        """Count builds on the cluster that are not yet finished."""
        field_selector = ','.join(['status!={status}'.format(status=status.capitalize())
                                   for status in BUILD_FINISHED_STATES])
        # Disable client retries so an unreachable cluster fails fast and
        # is handled by the caller's retry bookkeeping.
        with osbs.retries_disabled():
            return len(osbs.list_builds(field_selector=field_selector))

    def get_cluster_info(self, cluster, platform):
        """Build a ClusterInfo (including current load) for one cluster.

        :raises OsbsException: when the cluster cannot be queried
        """
        kwargs = deepcopy(self.config_kwargs)
        kwargs['conf_section'] = cluster.name
        if self.osbs_client_config:
            kwargs['conf_file'] = os.path.join(self.osbs_client_config, 'osbs.conf')

        conf = Configuration(**kwargs)
        osbs = OSBS(conf, conf)

        current_builds = self.get_current_builds(osbs)

        # Load is the ratio of active builds to allowed concurrent builds.
        load = current_builds / cluster.max_concurrent_builds
        self.log.debug('enabled cluster %s for platform %s has load %s and active builds %s/%s',
                       cluster.name, platform, load, current_builds, cluster.max_concurrent_builds)
        return ClusterInfo(cluster, platform, osbs, load)

    def get_clusters(self, platform, retry_contexts, all_clusters):
        ''' return clusters sorted by load. '''

        possible_cluster_info = {}
        candidates = set(copy.copy(all_clusters))
        # Keep polling until at least one cluster responds or every
        # candidate has permanently failed (wait_for_any_cluster raises).
        while candidates and not possible_cluster_info:
            wait_for_any_cluster(retry_contexts)

            for cluster in sorted(candidates, key=attrgetter('priority')):
                ctx = retry_contexts[cluster.name]
                if ctx.in_retry_wait:
                    continue
                if ctx.failed:
                    continue
                try:
                    cluster_info = self.get_cluster_info(cluster, platform)
                    possible_cluster_info[cluster] = cluster_info
                except OsbsException:
                    ctx.try_again_later(self.find_cluster_retry_delay)
            # Drop clusters that have exceeded their failure budget.
            candidates -= set([c for c in candidates if retry_contexts[c.name].failed])

        # Sort by priority first so the subsequent stable sort by load
        # breaks load ties in priority order.
        ret = sorted(possible_cluster_info.values(), key=lambda c: c.cluster.priority)
        ret = sorted(ret, key=lambda c: c.load)
        return ret

    def get_release(self):
        """Read the preferred 'release' label from the Dockerfile."""
        labels = df_parser(self.workflow.builder.df_path, workflow=self.workflow).labels
        return get_preferred_label(labels, 'release')

    @staticmethod
    def get_koji_upload_dir():
        """
        Create a path name for uploading files to

        :return: str, path name expected to be unique
        """
        # Uniqueness comes from the timestamp plus 8 random letters.
        dir_prefix = 'koji-upload'
        random_chars = ''.join([random.choice(ascii_letters)
                                for _ in range(8)])
        unique_fragment = '%r.%s' % (time.time(), random_chars)
        return os.path.join(dir_prefix, unique_fragment)

    def get_worker_build_kwargs(self, release, platform, koji_upload_dir,
                                task_id):
        """Prepare the keyword arguments for creating one worker build."""
        build_kwargs = deepcopy(self.build_kwargs)

        # Worker builds are per-platform; any caller-supplied
        # 'architecture' value is superseded by 'platform'.
        build_kwargs.pop('architecture', None)

        build_kwargs['release'] = release
        build_kwargs['platform'] = platform
        build_kwargs['koji_upload_dir'] = koji_upload_dir
        build_kwargs['is_auto'] = is_rebuild(self.workflow)
        if task_id:
            build_kwargs['filesystem_koji_task_id'] = task_id

        return build_kwargs

    def _apply_repositories(self, annotations):
        """Merge unique/primary repositories from all worker builds into
        the given annotations dict (in place)."""
        unique = set()
        primary = set()

        for build_info in self.worker_builds:
            if not build_info.build:
                continue
            repositories = build_info.build.get_repositories() or {}
            unique.update(repositories.get('unique', []))
            primary.update(repositories.get('primary', []))

        if unique or primary:
            annotations['repositories'] = {
                'unique': sorted(list(unique)),
                'primary': sorted(list(primary)),
            }

    def _make_labels(self):
        """Derive result labels (currently just 'koji-build-id').

        NOTE(review): if worker builds report differing koji build ids,
        an arbitrary one is kept (set.pop()) — confirm this is intended.
        """
        labels = {}
        koji_build_id = None
        ids = set([build_info.build.get_koji_build_id()
                   for build_info in self.worker_builds
                   if build_info.build])
        self.log.debug('all koji-build-ids: %s', ids)
        if ids:
            koji_build_id = ids.pop()
        if koji_build_id:
            labels['koji-build-id'] = koji_build_id
        return labels

    def get_fs_task_id(self):
        """Fetch the koji task id recorded by the add_filesystem plugin.

        :return: int task id, or None when that plugin produced no result
        :raises KeyError: when the plugin result lacks the task id key
        :raises ValueError, TypeError: when the task id is not an integer
        """
        task_id = None
        fs_result = self.workflow.prebuild_results.get(PLUGIN_ADD_FILESYSTEM_KEY)
        if fs_result is None:
            return None

        try:
            task_id = int(fs_result['filesystem-koji-task-id'])
        except KeyError:
            self.log.error("%s: expected filesystem-koji-task-id in result",
                           PLUGIN_ADD_FILESYSTEM_KEY)
            raise
        except (ValueError, TypeError):
            self.log.exception("%s: returned an invalid task ID: %r",
                               PLUGIN_ADD_FILESYSTEM_KEY, task_id)
            raise

        self.log.debug("%s: got filesystem_koji_task_id of %d",
                       PLUGIN_ADD_FILESYSTEM_KEY, task_id)
        return task_id

    def do_worker_build(self, cluster_info):
        """Create one worker build and monitor it to completion.

        A WorkerBuildInfo is always appended to self.worker_builds, even
        when creation fails, so the final result can report the failure.

        :raises OsbsException: when build creation fails in a way the
            caller should retry on another cluster
        """
        workspace = self.workflow.plugin_workspace.get(self.key, {})
        override_kwargs = workspace.get(WORKSPACE_KEY_OVERRIDE_KWARGS, {})

        build = None

        try:
            kwargs = self.get_worker_build_kwargs(self.release, cluster_info.platform,
                                                  self.koji_upload_dir, self.fs_task_id)
            kwargs.update(override_kwargs)
            with cluster_info.osbs.retries_disabled():
                build = cluster_info.osbs.create_worker_build(**kwargs)
        except OsbsException:
            # Propagate so select_and_start_cluster can try another cluster.
            self.log.exception('%s - failed to create worker build.',
                               cluster_info.platform)
            raise
        except Exception:
            # Unexpected errors are recorded (build stays None) but not
            # re-raised; the failure surfaces in the final BuildResult.
            self.log.exception('%s - failed to create worker build',
                               cluster_info.platform)

        build_info = WorkerBuildInfo(build=build, cluster_info=cluster_info, logger=self.log)
        self.worker_builds.append(build_info)

        if build_info.build:
            try:
                self.log.info('%s - created build %s on cluster %s.', cluster_info.platform,
                              build_info.name, cluster_info.cluster.name)
                build_info.watch_logs()
                build_info.wait_to_finish()
            except Exception as e:
                build_info.monitor_exception = e
                self.log.exception('%s - failed to monitor worker build',
                                   cluster_info.platform)

                # Attempt to cancel it rather than leave it running
                # unmonitored.
                try:
                    build_info.cancel_build()
                except OsbsException:
                    pass

    def select_and_start_cluster(self, platform):
        ''' Choose a cluster and start a build on it '''

        config = get_config(self.workflow)
        clusters = config.get_enabled_clusters_for_platform(platform)

        if not clusters:
            raise UnknownPlatformException('No clusters found for platform {}!'
                                           .format(platform))

        retry_contexts = {
            cluster.name: ClusterRetryContext(self.max_cluster_fails)
            for cluster in clusters
        }

        while True:
            try:
                possible_cluster_info = self.get_clusters(platform,
                                                          retry_contexts,
                                                          clusters)
            except AllClustersFailedException as ex:
                # Record a placeholder WorkerBuildInfo so the failure is
                # reported in the final result instead of being lost.
                cluster = ClusterInfo(None, platform, None, None)
                build_info = WorkerBuildInfo(build=None,
                                             cluster_info=cluster,
                                             logger=self.log)
                build_info.monitor_exception = repr(ex)
                self.worker_builds.append(build_info)
                return

            for cluster_info in possible_cluster_info:
                ctx = retry_contexts[cluster_info.cluster.name]
                try:
                    self.log.info('Attempting to start build for platform %s on cluster %s',
                                  platform, cluster_info.cluster.name)
                    self.do_worker_build(cluster_info)
                    return
                except OsbsException:
                    ctx.try_again_later(self.failure_retry_delay)
                    # this will put the cluster in retry-wait when get_clusters runs

    def set_build_image(self):
        """
        Overrides build_image for worker, to be same as in orchestrator build
        """
        spec = get_build_json().get("spec")
        try:
            build_name = spec['strategy']['customStrategy']['from']['name']
            build_kind = spec['strategy']['customStrategy']['from']['kind']
        except KeyError:
            raise RuntimeError("Build object is malformed, failed to fetch buildroot image")

        if build_kind == 'DockerImage':
            self.config_kwargs['build_image'] = build_name
        else:
            raise RuntimeError("Build kind isn't 'DockerImage' but %s" % build_kind)

    def run(self):
        """Start one worker build per platform, wait for all of them, and
        summarize the outcome as a BuildResult."""
        self.set_build_image()

        platforms = self.get_platforms()

        # One thread per platform; each thread selects a cluster and runs
        # its worker build to completion.
        thread_pool = ThreadPool(len(platforms))
        result = thread_pool.map_async(self.select_and_start_cluster, platforms)

        try:
            result.get()
        # Always clean up worker builds on any error to avoid
        # runaway worker builds (includes orchestrator build cancellation)
        except Exception:
            thread_pool.terminate()
            self.log.info('build cancelled, cancelling worker builds')
            if self.worker_builds:
                ThreadPool(len(self.worker_builds)).map(
                    lambda bi: bi.cancel_build(), self.worker_builds)
            while not result.ready():
                result.wait(1)
            raise
        else:
            thread_pool.close()
            thread_pool.join()

        annotations = {'worker-builds': {
            build_info.platform: build_info.get_annotations()
            for build_info in self.worker_builds if build_info.build
        }}

        self._apply_repositories(annotations)

        labels = self._make_labels()

        # A platform with no build object or an unsuccessful build counts
        # as failed and makes the overall result a failure.
        fail_reasons = {
            build_info.platform: build_info.get_fail_reason()
            for build_info in self.worker_builds
            if not build_info.build or not build_info.build.is_succeeded()
        }

        workspace = self.workflow.plugin_workspace.setdefault(self.key, {})
        workspace[WORKSPACE_KEY_UPLOAD_DIR] = self.koji_upload_dir
        workspace[WORKSPACE_KEY_BUILD_INFO] = {build_info.platform: build_info
                                               for build_info in self.worker_builds}

        if fail_reasons:
            return BuildResult(fail_reason=json.dumps(fail_reasons),
                               annotations=annotations, labels=labels)

        return BuildResult.make_remote_image_result(annotations, labels=labels)
| {
"content_hash": "5baee423d8280cd5d1e9c820c6f66bf7",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 99,
"avg_line_length": 38.30276816608997,
"alnum_prop": 0.6068024752698857,
"repo_name": "vrutkovs/atomic-reactor",
"id": "03764dfe7f66677f282f8432421ffa7dd0ee3dff",
"size": "22139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atomic_reactor/plugins/build_orchestrate_build.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1413753"
},
{
"name": "Shell",
"bytes": "6571"
}
],
"symlink_target": ""
} |
"""Kubeflow Covertype Pipeline."""
import os
from kfp import dsl
from training_lightweight_component import train_and_deploy
from tuning_lightweight_component import tune_hyperparameters
# Pipeline configuration is supplied through environment variables so the
# same definition can be compiled for any project/region without edits.
PIPELINE_ROOT = os.getenv("PIPELINE_ROOT")
PROJECT_ID = os.getenv("PROJECT_ID")
REGION = os.getenv("REGION")

# Container images used for the training job and for serving the model.
TRAINING_CONTAINER_IMAGE_URI = os.getenv("TRAINING_CONTAINER_IMAGE_URI")
SERVING_CONTAINER_IMAGE_URI = os.getenv("SERVING_CONTAINER_IMAGE_URI")

# Locations of the training and validation data files.
TRAINING_FILE_PATH = os.getenv("TRAINING_FILE_PATH")
VALIDATION_FILE_PATH = os.getenv("VALIDATION_FILE_PATH")

# Hyperparameter tuning budget and the accuracy gate for deployment.
MAX_TRIAL_COUNT = int(os.getenv("MAX_TRIAL_COUNT", "5"))
PARALLEL_TRIAL_COUNT = int(os.getenv("PARALLEL_TRIAL_COUNT", "5"))
THRESHOLD = float(os.getenv("THRESHOLD", "0.6"))
@dsl.pipeline(
    name="covertype-kfp-pipeline",
    description="The pipeline training and deploying the Covertype classifier",
    pipeline_root=PIPELINE_ROOT,
)
def covertype_train(
    training_container_uri: str = TRAINING_CONTAINER_IMAGE_URI,
    serving_container_uri: str = SERVING_CONTAINER_IMAGE_URI,
    training_file_path: str = TRAINING_FILE_PATH,
    validation_file_path: str = VALIDATION_FILE_PATH,
    accuracy_deployment_threshold: float = THRESHOLD,
    max_trial_count: int = MAX_TRIAL_COUNT,
    parallel_trial_count: int = PARALLEL_TRIAL_COUNT,
    pipeline_root: str = PIPELINE_ROOT,
):
    """Tune, train and conditionally deploy the Covertype classifier.

    Hyperparameter tuning runs first; the best trial's accuracy gates a
    train-and-deploy step inside a ``dsl.Condition`` block.
    """
    staging_bucket = f"{pipeline_root}/staging"

    # Hyperparameter tuning step; exposes best_accuracy / best_alpha /
    # best_max_iter as outputs.
    tuning_op = tune_hyperparameters(
        project=PROJECT_ID,
        location=REGION,
        container_uri=training_container_uri,
        training_file_path=training_file_path,
        validation_file_path=validation_file_path,
        staging_bucket=staging_bucket,
        max_trial_count=max_trial_count,
        parallel_trial_count=parallel_trial_count,
    )

    accuracy = tuning_op.outputs["best_accuracy"]

    # Only train-and-deploy when tuned accuracy clears the threshold.
    with dsl.Condition(
        accuracy >= accuracy_deployment_threshold, name="deploy_decision"
    ):
        train_and_deploy_op = (  # pylint: disable=unused-variable
            train_and_deploy(
                project=PROJECT_ID,
                location=REGION,
                container_uri=training_container_uri,
                serving_container_uri=serving_container_uri,
                training_file_path=training_file_path,
                validation_file_path=validation_file_path,
                staging_bucket=staging_bucket,
                alpha=tuning_op.outputs["best_alpha"],
                max_iter=tuning_op.outputs["best_max_iter"],
            )
        )
| {
"content_hash": "b058a1ebe8bb7b6334f015a4ffc755dc",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 36.779411764705884,
"alnum_prop": 0.6757297081167533,
"repo_name": "GoogleCloudPlatform/asl-ml-immersion",
"id": "3395689942ac7ec2fa641a33c2ec0602fd3eedcd",
"size": "3069",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "notebooks/kubeflow_pipelines/cicd/labs/pipeline_vertex/pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2934"
},
{
"name": "JavaScript",
"bytes": "1347"
},
{
"name": "Jupyter Notebook",
"bytes": "31135165"
},
{
"name": "Makefile",
"bytes": "3768"
},
{
"name": "Python",
"bytes": "219062"
},
{
"name": "Shell",
"bytes": "11616"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import os
import platform
import pokitdok
from requests_oauthlib import OAuth2Session, TokenUpdated
from oauthlib.oauth2 import BackendApplicationClient, TokenExpiredError
from warnings import warn
class PokitDokClient(object):
"""
PokitDok Platform API Client
This class provides a wrapper around requests and requests-oauth
to handle common API operations
"""
def __init__(self, client_id, client_secret, base="https://platform.pokitdok.com", version="v4",
             redirect_uri=None, scope=None, auto_refresh=False, token_refresh_callback=None, code=None,
             token=None):
    """
    Initialize a new PokitDok API Client

    :param client_id: The client id for your PokitDok Platform Application
    :param client_secret: The client secret for your PokitDok Platform Application
    :param base: The base URL to use for API requests. Defaults to https://platform.pokitdok.com
    :param version: The API version that should be used for requests. Defaults to the latest version.
    :param redirect_uri: The Redirect URI set for the PokitDok Platform Application.
        This value is managed at https://platform.pokitdok.com in the App Settings
    :param scope: a list of scope names that should be used when requesting authorization
    :param auto_refresh: Boolean to indicate whether or not access tokens should be automatically
        refreshed when they expire.
    :param token_refresh_callback: a function that should be called when token information is refreshed.
    :param code: code value received from an authorization code grant
    :param token: The current API access token for your PokitDok Platform Application. If not provided a new
        token is generated. Allows API clients to reuse an access token across requests. Defaults to None.
    """
    # User-Agent identifies library version, Python version and OS on
    # every request.
    self.base_headers = {
        'User-Agent': 'pokitdok-python#{0}#{1}#{2}#{3}'.format(pokitdok.__version__,
                                                               platform.python_version(),
                                                               platform.system(),
                                                               platform.release())
    }
    # JSON requests add a content type on top of the base headers.
    self.json_headers = {
        'Content-type': 'application/json',
    }
    self.json_headers.update(self.base_headers)
    self.client_id = client_id
    self.client_secret = client_secret
    self.redirect_uri = redirect_uri
    self.scope = scope
    self.code = code
    self.auto_refresh = auto_refresh
    self.token_refresh_callback = token_refresh_callback
    self.token = token
    # API and OAuth2 endpoints derived from the configured base/version.
    self.url_base = "{0}/api/{1}".format(base, version)
    self.token_url = "{0}/oauth2/token".format(base)
    self.authorize_url = "{0}/oauth2/authorize".format(base)
    self.api_client = None
    # HTTP status code of the most recent request (0 until a call is made).
    self.status_code = 0

    # Route templates for each API resource; '{0}' placeholders are filled
    # with resource identifiers by the convenience methods below.
    self.activities_url = "/activities/{0}"
    self.ccd_url = "/ccd/"
    self.claims_url = "/claims/"
    self.claims_convert_url = "/claims/convert"
    self.claims_status_url = "/claims/status"
    self.eligibility_url = "/eligibility/"
    self.enrollment_url = "/enrollment/"
    self.enrollment_snapshot_url = "/enrollment/snapshot"
    self.enrollment_snapshot_data_url = "/enrollment/snapshot/{0}/data"
    self.icd_url = "/icd/convert/{0}"
    self.identity_post_url = "/identity/"
    self.identity_put_url = "/identity/{0}"
    self.identity_get_url = "/identity"
    self.identity_match_url = "/identity/match"
    self.identity_history_url = "{0}/identity/{1}/history"
    self.identity_proof_generate_url = "/identity/proof/questions/generate/"
    self.identity_proof_score_url = "/identity/proof/questions/score/"
    self.identity_proof_valid_url = "/identity/proof/valid/"
    self.mpc_url = "/mpc/{0}"
    self.oop_insurance_estimate_url = "/oop/insurance-estimate"
    self.oop_insurance_price_url = "/oop/insurance-load-price"
    self.pharmacy_formulary_url = "/pharmacy/formulary"
    self.pharmacy_network_url = "/pharmacy/network"
    self.pharmacy_plans_url = "/pharmacy/plans"
    self.plans_url = "/plans/"
    self.prices_cash_url = "/prices/cash"
    self.prices_insurance_url = "/prices/insurance"
    self.providers_url = "/providers/{0}"
    self.appointments_url = "/schedule/appointments/{0}"
    self.appointment_types_url = "/schedule/appointmenttypes/{0}"
    self.schedulers_url = "/schedule/schedulers/{0}"
    self.schedule_slots_url = "/schedule/slots/"
    self.trading_partners_url = "/tradingpartners/{0}"

    self.initialize_api_client()
    # Without a supplied token, authenticate immediately (network call).
    if self.token is None:
        self.fetch_access_token(code=self.code)
def initialize_api_client(self):
    """Create the OAuth2Session used for all API calls.

    When no authorization code is present a client-credentials session
    is built; otherwise an authorization-grant session is created,
    optionally configured for automatic token refresh.
    """
    if self.code is not None:
        # Authorization grant flow; refresh support is opt-in.
        refresh_url = self.token_url if self.auto_refresh else None
        self.api_client = OAuth2Session(
            self.client_id,
            redirect_uri=self.redirect_uri,
            scope=self.scope,
            auto_refresh_url=refresh_url,
            token_updater=self.token_refresh_callback,
            auto_refresh_kwargs={
                'client_id': self.client_id,
                'client_secret': self.client_secret})
        return

    # Client credentials flow.
    self.api_client = OAuth2Session(
        self.client_id,
        client=BackendApplicationClient(self.client_id),
        token=self.token)
def authorization_url(self):
    """
    Construct OAuth2 Authorization Grant URL

    :return: (authorization url, state value) tuple
    """
    # Rebuild the session so the URL reflects the current
    # redirect_uri/scope settings.
    self.initialize_api_client()
    return self.api_client.authorization_url(self.authorize_url)

def fetch_access_token(self, code=None):
    """
    Retrieves an OAuth2 access token.

    :param code: optional code value obtained via an authorization grant
    :return: the client application's token information as a dictionary
    """
    # The token is stored on the instance for reuse by later requests.
    self.token = self.api_client.fetch_token(token_url=self.token_url, code=code, client_id=self.client_id,
                                             client_secret=self.client_secret, scope=self.scope)
    return self.token
def request(self, path, method='get', data=None, files=None, **kwargs):
    """
    General method for submitting an API request

    :param path: the API request path
    :param method: the http request method that should be used
    :param data: dictionary of request data that should be used for post/put requests
    :param files: dictionary of file information when the API accepts file uploads as input
    :param kwargs: optional keyword arguments to be relayed along as request parameters
    :return: the decoded JSON response body
    :raises TokenExpiredError: when the access token has expired and
        auto_refresh is disabled
    """
    # JSON-encode the payload unless this is a multipart (file) upload,
    # in which case requests must set the content type itself.
    if data and not files:
        headers = self.json_headers
        request_data = json.dumps(data)
    else:
        headers = self.base_headers
        request_data = data

    request_url = "{0}{1}".format(self.url_base, path)
    # Resolve the session method (get/put/post/delete) by name.
    request_method = getattr(self.api_client, method)
    try:
        response = request_method(request_url, data=request_data, files=files, params=kwargs, headers=headers)
        self.status_code = response.status_code
        if self.status_code == 401:
            # if TokenExpiredError is not raised but it should have been, we'll raise it explicitly here
            # https://github.com/oauthlib/oauthlib/pull/506 could cause this code path to be followed.
            # this special handling can likely be removed once https://github.com/oauthlib/oauthlib/pull/506
            # rolls into a new oauthlib release
            raise TokenExpiredError('Access Token has expired. Please, re-authenticate. '
                                    'Use auto_refresh=True to have your client auto refresh')
        return response.json()
    except (TokenUpdated, TokenExpiredError):
        if self.auto_refresh:
            # Re-fetch token and try request again
            self.fetch_access_token(self.code)
            return request_method(request_url, data=request_data, files=files, params=kwargs, headers=headers).json()
        else:
            self.status_code = 401  # UNAUTHORIZED
            raise TokenExpiredError('Access Token has expired. Please, re-authenticate. '
                                    'Use auto_refresh=True to have your client auto refresh')
def get(self, path, **kwargs):
    """Relay a GET request for ``path`` through :meth:`request`."""
    return self.request(path, method='get', **kwargs)

def put(self, path, **kwargs):
    """Relay a PUT request for ``path`` through :meth:`request`."""
    return self.request(path, method='put', **kwargs)

def post(self, path, **kwargs):
    """Relay a POST request for ``path`` through :meth:`request`."""
    return self.request(path, method='post', **kwargs)

def delete(self, path, **kwargs):
    """Relay a DELETE request for ``path`` through :meth:`request`."""
    return self.request(path, method='delete', **kwargs)
def activities(self, activity_id=None, **kwargs):
    """Retrieve platform activity data.

    :param activity_id: id of a single activity to fetch. When omitted an
        index listing is returned and keyword arguments refine the search
        (e.g. ``parent_id`` to select child activities of a batch
        operation); when present, other keyword arguments are ignored by
        the platform.
    """
    resource = self.activities_url.format(activity_id if activity_id else '')
    return self.get(resource, **kwargs)
def cash_prices(self, **kwargs):
    """
    Fetch cash price information.

    Keyword arguments are relayed as query parameters to refine the search.
    """
    return self.get(self.prices_cash_url, **kwargs)

def ccd(self, ccd_request):
    """
    Submit a continuity of care document (CCD) request.

    :param ccd_request: dictionary representing a CCD request
    """
    return self.post(self.ccd_url, data=ccd_request)

def claims(self, claims_request):
    """
    Submit a claims request.

    :param claims_request: dictionary representing a claims request
    """
    return self.post(self.claims_url, data=claims_request)

def claims_convert(self, x12_claims_file):
    """
    Submit a raw X12 837 file to convert to a claims API request and map any
    ICD-9 codes to ICD-10.

    :param x12_claims_file: the path to a X12 claims file to be submitted to
        the platform for processing
    """
    # Fix: open the upload via a context manager so the file handle is
    # always closed; the original leaked the handle after the request.
    with open(x12_claims_file, 'rb') as x12_data:
        return self.post(self.claims_convert_url, files={
            'file': (os.path.split(x12_claims_file)[-1], x12_data, 'application/EDI-X12')
        })

def claims_status(self, claims_status_request):
    """
    Submit a claims status request.

    :param claims_status_request: dictionary representing a claims status request
    """
    return self.post(self.claims_status_url, data=claims_status_request)
def mpc(self, code=None, **kwargs):
    """Query clinical and consumer friendly medical procedure information.

    :param code: specific procedure code to look up; when omitted, keyword
        arguments such as ``name`` (consumer friendly name) or
        ``description`` (partial or full) drive the search.
    """
    resource = self.mpc_url.format(code if code else '')
    return self.get(resource, **kwargs)

def icd_convert(self, code):
    """Locate the diagnosis mapping for an ICD-9 code.

    :param code: the ICD-9 diagnosis code to convert
    """
    return self.get(self.icd_url.format(code))
def eligibility(self, eligibility_request):
    """
    Submit an eligibility request.

    :param eligibility_request: dictionary representing an eligibility request
    """
    return self.post(self.eligibility_url, data=eligibility_request)

def enrollment(self, enrollment_request):
    """
    Submit a benefits enrollment/maintenance request.

    :param enrollment_request: dictionary representing an enrollment request
    """
    return self.post(self.enrollment_url, data=enrollment_request)

def enrollment_snapshot(self, trading_partner_id, x12_file):
    """
    Submit a X12 834 file to the platform to establish the enrollment
    information within it as the current membership enrollment snapshot
    for a trading partner.

    :param trading_partner_id: the trading partner associated with the
        enrollment snapshot
    :param x12_file: the path to a X12 834 file that contains the current
        membership enrollment information
    """
    # Fix: open the upload via a context manager so the file handle is
    # always closed; the original leaked the handle after the request.
    with open(x12_file, 'rb') as x12_data:
        return self.post(self.enrollment_snapshot_url,
                         data={'trading_partner_id': trading_partner_id},
                         files={
                             'file': (os.path.split(x12_file)[-1], x12_data, 'application/EDI-X12')
                         })
def enrollment_snapshots(self, snapshot_id=None, **kwargs):
    """List enrollment snapshots stored for the application, or fetch one.

    :param snapshot_id: optional snapshot id to fetch a single snapshot
    """
    resource = self.enrollment_snapshot_url
    if snapshot_id:
        resource = "{0}/{1}".format(resource, snapshot_id)
    return self.get(resource, **kwargs)

def enrollment_snapshot_data(self, snapshot_id, **kwargs):
    """List the enrollment request objects that make up a snapshot.

    :param snapshot_id: the enrollment snapshot id for the enrollment data
    """
    return self.get(self.enrollment_snapshot_data_url.format(snapshot_id), **kwargs)
def insurance_prices(self, **kwargs):
    """Fetch insurance price information."""
    return self.get(self.prices_insurance_url, **kwargs)

def oop_insurance_prices(self, request_data):
    """Load procedure prices for a specific trading partner."""
    return self.post(self.oop_insurance_price_url, data=request_data)

def oop_insurance_delete_price(self, load_price_uuid, request_data=None):
    """Delete a previously loaded procedure price for a trading partner.

    :param load_price_uuid: identifier of the loaded price to remove
    :param request_data: optional request payload forwarded as-is
    """
    resource = "{0}/{1}".format(self.oop_insurance_price_url, str(load_price_uuid))
    return self.delete(resource, data=request_data)

def oop_insurance_estimate(self, request_data):
    """Estimate out-of-pocket cost and eligibility for a procedure."""
    return self.post(self.oop_insurance_estimate_url, data=request_data)
# BACKWARDS COMPATIBILITY AND FEATURE DEPRECATION NOTICE:
def payers(self, **kwargs):
    """Fetch payer information for supported trading partners (deprecated:
    prefer :meth:`trading_partners`)."""
    warn(DeprecationWarning('This convenience function will be deprecated '
                            'in an upcoming release. Use trading_partners instead.'), stacklevel=2)
    return self.get('/payers/', **kwargs)

def plans(self, **kwargs):
    """Fetch insurance plan information."""
    return self.get(self.plans_url, **kwargs)

def providers(self, npi=None, **kwargs):
    """Search health care providers in the PokitDok directory.

    :param npi: NPI of an individual provider or organization; when given,
        all other search parameters are ignored by the platform.
        Otherwise, keyword arguments refine the search: address_lines,
        zipcode, city, state, radius (e.g. '10mi', requires zipcode or
        city+state), first_name, last_name, organization_name (do not
        combine with first/last name), limit, and sort ('distance'
        default, or 'rank').
    """
    suffix = npi if npi else ''
    return self.get(self.providers_url.format(suffix), **kwargs)
def trading_partners(self, trading_partner_id=None):
    """Look up trading partners on the PokitDok Platform.

    :param trading_partner_id: the id PokitDok uses to uniquely identify a
        trading partner
    :returns: the specified trading partner, or a listing of all available
        trading partners when called without arguments
    """
    suffix = trading_partner_id if trading_partner_id else ''
    return self.get(self.trading_partners_url.format(suffix))

def schedulers(self, scheduler_uuid=None):
    """Fetch supported scheduling systems, or one by uuid.

    :param scheduler_uuid: uuid of a specific scheduling system
    """
    suffix = scheduler_uuid if scheduler_uuid else ''
    return self.get(self.schedulers_url.format(suffix))

def appointment_types(self, appointment_type_uuid=None):
    """Fetch appointment types, or one by uuid.

    :param appointment_type_uuid: uuid of a specific appointment type
    """
    suffix = appointment_type_uuid if appointment_type_uuid else ''
    return self.get(self.appointment_types_url.format(suffix))
def schedule_slots(self, slots_request):
"""
Submit an open slot for a provider's schedule
:param slots_request: dictionary representing a slots request
"""
return self.post(self.schedule_slots_url, data=slots_request)
def get_appointments(self, appointment_uuid=None, **kwargs):
    """
    Query for open appointment slots, or retrieve information for a specific appointment.

    :param appointment_uuid: The uuid of a specific appointment.
    :param kwargs: additional query parameters forwarded with the request
    """
    resource_id = appointment_uuid or ''
    return self.get(self.appointments_url.format(resource_id), **kwargs)
# BACKWARDS COMPATIBILITY AND FEATURE DEPRECATION NOTICE:
def appointments(self, appointment_uuid=None, **kwargs):
    """Deprecated alias kept for backwards compatibility; use :meth:`get_appointments`."""
    deprecation_message = ('This convenience function will be deprecated '
                           'in an upcoming release. Use get_appointments instead.')
    warn(DeprecationWarning(deprecation_message), stacklevel=2)
    return self.get_appointments(appointment_uuid, **kwargs)
def book_appointment(self, appointment_uuid, appointment_request):
    """
    Book an appointment.

    :param appointment_uuid: The uuid of a specific appointment to be booked.
    :param appointment_request: the appointment request data
    """
    endpoint = self.appointments_url.format(appointment_uuid)
    return self.put(endpoint, data=appointment_request)

# Updating an appointment uses the same endpoint and payload shape as booking one.
update_appointment = book_appointment
def cancel_appointment(self, appointment_uuid):
    """
    Cancel an appointment.

    :param appointment_uuid: The uuid of a specific appointment.
    """
    return self.delete(self.appointments_url.format(appointment_uuid))
def create_identity(self, identity_request):
    """
    Create an identity resource.

    :param identity_request: The dictionary containing the identity request data.
    :returns: The new identity resource.
    """
    payload = identity_request
    return self.post(self.identity_post_url, data=payload)
def update_identity(self, identity_uuid, identity_request):
    """
    Update an existing identity resource.

    :param identity_uuid: The identity resource's uuid.
    :param identity_request: The updated identity resource.
    :returns: The updated identity resource.
    """
    endpoint = self.identity_put_url.format(identity_uuid)
    return self.put(endpoint, data=identity_request)
def get_identity(self, identity_uuid=None, **kwargs):
    """
    Query for an existing identity resource by uuid, or for multiple resources by parameters.

    :param identity_uuid: The identity resource uuid. Used to execute an exact match query by uuid.
    :param kwargs: Additional query parameters using resource fields such as first_name,
        last_name, email, etc.
    :returns: list containing the search results. A search by uuid returns an empty list or a
        list containing a single identity record.
    """
    suffix = '/{0}'.format(identity_uuid) if identity_uuid else ''
    return self.get(self.identity_get_url + suffix, **kwargs)
# BACKWARDS COMPATIBILITY AND FEATURE DEPRECATION NOTICE:
def identity(self, identity_uuid=None, **kwargs):
    """Deprecated alias kept for backwards compatibility; use :meth:`get_identity`."""
    deprecation_message = ('This convenience function will be deprecated '
                           'in an upcoming release. Use get_identity instead.')
    warn(DeprecationWarning(deprecation_message), stacklevel=2)
    return self.get_identity(identity_uuid, **kwargs)
def validate_identity(self, identity_payload):
    """
    Test the validity of an identity through the Identity Proof api
    (our knowledge based authentication solution).

    :param identity_payload: the identity data to be validated
    :return: validation_response
    """
    return self.post(self.identity_proof_valid_url, data=identity_payload)
def create_proof_questionnaire(self, identity_payload):
    """
    Validate an identity proof request and generate a Knowledge Based Authentication
    questionnaire if possible.

    :param identity_payload: the identity data used to build the questionnaire
    :return: questionnaire_response
    """
    return self.post(self.identity_proof_generate_url, data=identity_payload)
def answer_proof_question(self, answer_request):
    """
    Submit a user's response to a knowledge based authentication question.

    :param answer_request: the user's answer data
    :return: the answer response
    """
    return self.post(self.identity_proof_score_url, data=answer_request)
def identity_history(self, identity_uuid, historical_version=None):
    """
    Query for an identity record's history.

    Returns a history summary including the insert date and version number, or one
    specific record version if the historical_version argument is provided.

    :param identity_uuid: The identity resource's uuid.
    :param historical_version: The historical version id. Used to return a historical
        identity record.
    :return: history result (list)
    """
    path = self.identity_history_url.format(self.url_base, str(identity_uuid))
    if historical_version is not None:
        path += "/{0}".format(historical_version)
    # NOTE: goes through api_client directly rather than self.get()
    response = self.api_client.get(path, headers=self.base_headers)
    return response.json()
def identity_match(self, identity_match_data):
    """
    Create an identity match job.

    :param identity_match_data: The dictionary containing the identity match data.
    :returns: An activity id of the identity match job
    """
    payload = identity_match_data
    return self.post(self.identity_match_url, data=payload)
def pharmacy_plans(self, **kwargs):
    """
    Search drug plan information by trading partner and various plan identifiers.

    :param kwargs: pharmacy plans API request parameters
    :return: drug plan information if a match is found
    """
    return self.get(self.pharmacy_plans_url, **kwargs)
def pharmacy_formulary(self, **kwargs):
    """
    Search drug plan formulary information to determine whether a drug is covered by
    the specified drug plan.

    :param kwargs: pharmacy formulary API request parameters
    :return: formulary information if a match is found
    """
    return self.get(self.pharmacy_formulary_url, **kwargs)
def pharmacy_network(self, npi=None, **kwargs):
    """
    Search for in-network pharmacies.

    :param npi: The National Provider Identifier for a pharmacy
    :param kwargs: pharmacy network API request parameters
    :return: If an NPI is included in the request, details about the pharmacy are returned.
        Otherwise, a list of in-network pharmacies is returned.
    """
    suffix = '/{0}'.format(npi) if npi else ''
    return self.get(self.pharmacy_network_url + suffix, **kwargs)
| {
"content_hash": "40acf89b5c42235905948d8a5ca0f953",
"timestamp": "",
"source": "github",
"line_count": 579,
"max_line_length": 121,
"avg_line_length": 45.86355785837651,
"alnum_prop": 0.6234983995481077,
"repo_name": "pokitdok/pokitdok-python",
"id": "e3f96b398a791f0f8338bc5d5bfe17e20819a862",
"size": "26758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pokitdok/api/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9858"
},
{
"name": "Python",
"bytes": "50102"
},
{
"name": "Shell",
"bytes": "554"
}
],
"symlink_target": ""
} |
import socket
from flask import render_template, request, json
from flask.ext import login
from .app import app
# Project columns rendered for each row on the dashboard index page.
index_fields = ['name', 'group', 'status', 'comments', 'rate', 'burst', 'updatetime']
@app.route('/')
def index():
    """Render the dashboard page listing every project with its summary fields."""
    projectdb = app.config['projectdb']
    all_projects = projectdb.get_all(fields=index_fields)
    return render_template("index.html", projects=all_projects)
@app.route('/update', methods=['POST', ])
def project_update():
    """
    Update a single editable field ('group', 'status' or 'rate') of a project.

    Expects form fields: 'pk' (project name), 'name' (field to change) and 'value'.
    A 'rate' value is given as 'rate/burst'; both parts must be numeric. After a
    successful database update the scheduler is notified over RPC so the change
    takes effect without a restart.
    """
    projectdb = app.config['projectdb']
    project = request.form['pk']
    name = request.form['name']
    value = request.form['value']

    project_info = projectdb.get(project, fields=('name', 'group'))
    if not project_info:
        return "no such project.", 404
    # projects in the 'lock' group may only be modified by an authenticated user
    if 'lock' in projectdb.split_group(project_info.get('group')) \
            and not login.current_user.is_active():
        return app.login_response

    if name not in ('group', 'status', 'rate'):
        return 'unknown field: %s' % name, 400
    if name == 'rate':
        value = value.split('/')
        if len(value) != 2:
            return 'format error: rate/burst', 400
        try:
            rate = float(value[0])
            burst = float(value[1])
        except ValueError:
            # BUGFIX: non-numeric input previously raised an uncaught
            # ValueError (HTTP 500); report it as a client error instead.
            return 'format error: rate/burst', 400
        # clamp to the configured server-side maximums, if any
        update = {
            'rate': min(rate, app.config.get('max_rate', rate)),
            'burst': min(burst, app.config.get('max_burst', burst)),
        }
    else:
        update = {
            name: value
        }

    ret = projectdb.update(project, update)
    if ret:
        rpc = app.config['scheduler_rpc']
        if rpc is not None:
            try:
                rpc.update_project()
            except socket.error as e:
                app.logger.warning('connect to scheduler rpc error: %r', e)
                return 'rpc error', 200
        return 'ok', 200
    else:
        return 'update error', 500
@app.route('/counter')
def counter():
    """Return scheduler counter statistics as JSON; an empty object when the RPC is down."""
    json_headers = {'Content-Type': 'application/json'}
    rpc = app.config['scheduler_rpc']
    if rpc is None:
        return json.dumps({})
    time = request.args['time']
    counter_type = request.args.get('type', 'sum')
    try:
        return json.dumps(rpc.counter(time, counter_type)), 200, json_headers
    except socket.error as e:
        app.logger.warning('connect to scheduler rpc error: %r', e)
        return json.dumps({}), 200, json_headers
@app.route('/run', methods=['POST', ])
def runtask():
    """Manually trigger a project's ``on_start`` callback by submitting a seed task."""
    rpc = app.config['scheduler_rpc']
    if rpc is None:
        return json.dumps({})

    projectdb = app.config['projectdb']
    project = request.form['project']
    project_info = projectdb.get(project, fields=('name', 'group'))
    if not project_info:
        return "no such project.", 404
    # projects in the 'lock' group may only be run by an authenticated user
    if 'lock' in projectdb.split_group(project_info.get('group')) \
            and not login.current_user.is_active():
        return app.login_response

    # synthetic seed task that invokes the project's on_start callback
    newtask = {
        "project": project,
        "taskid": "on_start",
        "url": "data:,on_start",
        "process": {
            "callback": "on_start",
        },
        "schedule": {
            "age": 0,
            "priority": 9,
            "force_update": True,
        },
    }

    json_headers = {'Content-Type': 'application/json'}
    try:
        ret = rpc.newtask(newtask)
    except socket.error as e:
        app.logger.warning('connect to scheduler rpc error: %r', e)
        return json.dumps({"result": False}), 200, json_headers
    return json.dumps({"result": ret}), 200, json_headers
@app.route('/robots.txt')
def robots():
    """Serve robots.txt: allow crawling of the index and debug pages, block everything else."""
    content = ("User-agent: *\n"
               "Disallow: /\n"
               "Allow: /$\n"
               "Allow: /debug\n"
               "Disallow: /debug/*?taskid=*\n")
    return content, 200, {'Content-Type': 'text/plain'}
| {
"content_hash": "d5a858e51ba36ac81b89cc7e628b4fa2",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 93,
"avg_line_length": 29.355371900826448,
"alnum_prop": 0.5723536036036037,
"repo_name": "willworks/pyspider",
"id": "e53c952d00a632d3efba6d72ed2c60986a2e30c2",
"size": "3737",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pyspider/webui/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "679"
},
{
"name": "CSS",
"bytes": "21362"
},
{
"name": "HTML",
"bytes": "20154"
},
{
"name": "JavaScript",
"bytes": "36774"
},
{
"name": "Python",
"bytes": "384603"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth import models as authmodels
from django.conf import settings
from signbank.video.fields import VideoUploadToFLVField
from signbank.dictionary.models import *
# models to represent the feedback from users in the site
import string
def t(message):
    """Replace $country and $language placeholders in message with data from settings."""
    substitutions = {
        'country': settings.COUNTRY_NAME,
        'language': settings.LANGUAGE_NAME,
    }
    return string.Template(message).substitute(**substitutions)
from django import forms
# Moderation workflow states shared by every feedback model in this module;
# 'deleted' items are flagged rather than removed from the database.
STATUS_CHOICES = ( ('unread', 'unread'),
                   ('read', 'read'),
                   ('deleted', 'deleted'),
                   )
class InterpreterFeedback(models.Model):
    """Feedback on a sign from an interpreter"""

    class Meta:
        ordering = ['-date']  # newest notes first
        # custom permission gating who may read interpreter notes
        permissions = (('view_interpreterfeedback', "Can View Interpreter Feedback"),)

    gloss = models.ForeignKey(Gloss)  # the sign this note refers to
    comment = models.TextField('Note')
    user = models.ForeignKey(authmodels.User)  # author of the note
    date = models.DateTimeField(auto_now_add=True)  # set once, on creation
    status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='unread')
class InterpreterFeedbackForm(forms.ModelForm):
    """ModelForm exposing only the comment field of InterpreterFeedback."""

    class Meta:
        model = InterpreterFeedback
        fields = ['comment']
        # compact textarea suitable for inline display next to a sign
        widgets={'comment': forms.Textarea(attrs={'cols': 30, 'rows': 2})}
class GeneralFeedback(models.Model):
    """Free-form site feedback: an optional comment and/or an optional video clip."""

    comment = models.TextField(blank=True)
    video = models.FileField(upload_to=settings.COMMENT_VIDEO_LOCATION, blank=True)
    user = models.ForeignKey(authmodels.User)  # who submitted the feedback
    date = models.DateTimeField(auto_now_add=True)  # set once, on creation
    status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='unread')

    class Meta:
        ordering = ['-date']  # newest feedback first
class GeneralFeedbackForm(forms.Form):
    """Form for general feedback"""
    # both fields optional: a submission may be text only, video only, or both
    comment = forms.CharField(widget=forms.Textarea(attrs={'cols':'64'}), required=False)
    video = VideoUploadToFLVField(required=False, widget=forms.FileInput(attrs={'size':'60'}))
# Answer scale for "is this sign part of the language?"; 0 means not answered.
isAuslanChoices = ( (1, "yes"),
                    (2, "Perhaps"),
                    (3, "Don't know"),
                    (4, "Don't think so"),
                    (5, "No"),
                    (0, "N/A")
                    )
# Regional usage options depend on the deployment: BSL sites offer UK cities,
# every other (Auslan) site offers Australian states/territories.
if settings.LANGUAGE_NAME == "BSL":
    whereusedChoices = (('Belfast', 'Belfast'),
                        ('Birmingham', 'Birmingham'),
                        ('Bristol', 'Bristol'),
                        ('Cardiff', 'Cardiff'),
                        ('Glasgow', 'Glasgow'),
                        ('London', 'London'),
                        ('Manchester', 'Manchester'),
                        ('Newcastle', 'Newcastle'),
                        ('Other', 'Other (note in comments)'),
                        ("Don't Know", "Don't Know"),
                        ('N/A', 'N/A'),
                        )
else:
    whereusedChoices = (('auswide', 'Australia Wide'),
                        ('dialectN', 'Dialect Sign (North)'),
                        ('dialectS', 'Dialect Sign (South)'),
                        ('nsw', "New South Wales"),
                        ('vic', "Victoria"),
                        ('qld', "Queensland"),
                        ('wa', "Western Australia"),
                        ('sa', "South Australia"),
                        ('tas', "Tasmania"),
                        ('nt', "Northern Territory"),
                        ('act', "Australian Capital Territory"),
                        ('dk', "Don't Know"),
                        ('n/a', "N/A")
                        )
# Answer scales for the SignFeedback survey questions; in each, 0 means not answered.
likedChoices = ( (1, "yes"),
                 (2, "A little"),
                 (3, "Don't care"),
                 (4, "Not much"),
                 (5, "No"),
                 (0, "N/A")
                 )

useChoices = ( (1, "yes"),
               (2, "Sometimes"),
               (3, "Not Often"),
               (4, "No"),
               (0, "N/A")
               )

suggestedChoices =( (1, "yes"),
                    (2, "Sometimes"),
                    (3, "Don't Know"),
                    (4, "Perhaps"),
                    (5, "No"),
                    (0, "N/A")
                    )

correctChoices = ( (1, "yes"),
                   (2, "Mostly Correct"),
                   (3, "Don't Know"),
                   (4, "Mostly Wrong"),
                   (5, "No"),
                   (0, "N/A")
                   )
class SignFeedback(models.Model):
    """Store feedback on a particular sign"""

    user = models.ForeignKey(authmodels.User, editable=False)  # submitter; set by the view
    date = models.DateTimeField(auto_now_add=True)  # set once, on creation
    translation = models.ForeignKey(Translation, editable=False)  # the keyword/sign pair commented on
    comment = models.TextField("Please give us your comments about this sign. For example: do you think there are other keywords that belong with this sign? Please write your comments or new keyword/s below.", blank=True)
    kwnotbelong = models.TextField("Is there a keyword or keyword/s that DO NOT belong with this sign? Please provide the list of keywords below", blank=True)
    # survey answers; integer codes map to the *Choices tuples defined above
    isAuslan = models.IntegerField(t("Is this sign an $language Sign?"), choices=isAuslanChoices)
    whereused = models.CharField("Where is this sign used?", max_length=10, choices=whereusedChoices)
    like = models.IntegerField("Do you like this sign?", choices=likedChoices)
    use = models.IntegerField("Do you use this sign?", choices=useChoices)
    suggested = models.IntegerField("If this sign is a suggested new sign, would you use it?", default=3, choices=suggestedChoices)
    correct = models.IntegerField("Is the information about the sign correct?", choices=correctChoices)
    status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='unread')

    def __str__(self):
        # e.g. "hello by alice on 2020-01-01 ..."
        return str(self.translation.translation) + " by " + str(self.user) + " on " + str(self.date)

    class Meta:
        ordering = ['-date']  # newest feedback first
class SignFeedbackForm(forms.Form):
    """Form for input of sign feedback"""
    # Radio-button survey questions; the commented-out alternatives are the
    # hidden-input variants previously used when a question was disabled.
    isAuslan = forms.ChoiceField(choices=isAuslanChoices, initial=0, widget=forms.RadioSelect)
    #isAuslan = forms.IntegerField(initial=0, widget=forms.HiddenInput)
    whereused = forms.ChoiceField(choices=whereusedChoices, initial="n/a")
    #whereused = forms.CharField(initial='n/a', widget=forms.HiddenInput)
    like = forms.ChoiceField(choices=likedChoices, initial=0, widget=forms.RadioSelect)
    #like = forms.IntegerField(initial=0, widget=forms.HiddenInput)
    use = forms.ChoiceField(choices=useChoices, initial=0, widget=forms.RadioSelect)
    #use = forms.IntegerField(initial=0, widget=forms.HiddenInput)
    suggested = forms.ChoiceField(choices=suggestedChoices, initial=3, required=False, widget=forms.RadioSelect)
    #suggested = forms.IntegerField(initial=0, widget=forms.HiddenInput)
    correct = forms.ChoiceField(choices=correctChoices, initial=0, widget=forms.RadioSelect)
    #correct = forms.IntegerField(initial=0, widget=forms.HiddenInput)
    # free-text follow-ups
    kwnotbelong = forms.CharField(label="List keywords", required=False, widget=forms.Textarea)
    comment = forms.CharField(required=False, widget=forms.Textarea)
# How many hands a sign uses; the two-handed options distinguish whether
# both hands share the same handshape.
handformChoices = (
    (1, 'One handed'),
    (2, 'Two handed (same shape for each hand)'),
    (3, 'Two handed (different shapes for each hand)')  # BUGFIX: label typo 'diffent'
)
# Handshape vocabulary for the missing-sign form. The integer codes are
# database ids from the phonology coding scheme; 0 means "not specified".
handshapeChoices = ((0, 'None'),
                    (291, 'Animal'),
                    (292, 'Animal-flick'),
                    (293, 'Bad'),
                    (294, 'Ball'),
                    (295, 'Cup'),
                    (296, 'Cup-flush'),
                    (297, 'Cup-thumb'),
                    (298, 'Eight'),
                    (299, 'Eight-hook'),
                    (300, 'Fist-A'),
                    (301, 'Fist-S'),
                    (302, 'Flat'),
                    (303, 'Flat-bent'),
                    (304, 'Flat-B'),
                    (305, 'Flat-flush'),
                    (306, 'Flick'),
                    (307, 'Flick-gay'),
                    (308, 'Four'),
                    (309, 'Five'),
                    (310, 'Good'),
                    (311, 'Good-6'),
                    (312, 'Gun'),
                    (313, 'Gun-hook'),
                    (314, 'Hook'),
                    (315, 'Kneel'),
                    (316, 'Letter-C'),
                    (317, 'Letter-M'),
                    (318, 'Letter-N'),
                    (319, 'Love'),
                    (320, 'Middle'),
                    (321, 'Mother'),
                    (322, 'Nine'),
                    (323, 'Point-1'),
                    (324, 'Point-D'),
                    (325, 'Point-flush'),
                    (326, 'Okay-flat'),
                    (327, 'Okay-F'),
                    (328, 'Okay-O'),
                    (329, 'Old-seven'),
                    (330, 'Plane'),
                    (331, 'Perth'),
                    (332, 'Round-O'),
                    (333, 'Round-flat'),
                    (334, 'Round-E'),
                    (335, 'Rude'),
                    (336, 'Salt'),
                    (337, 'Salt-flick'),
                    (338, 'Small'),
                    (339, 'Soon'),
                    (340, 'Spoon'),
                    (341, 'Spoon-hook'),
                    (342, 'Spoon-thumb'),
                    (343, 'Thick'),
                    (344, 'Three'),
                    (345, 'Three-hook'),
                    (346, 'Two'),
                    (347, 'Wish'),
                    (348, 'Write'),
                    (349, 'Write-flat')
                    )
# Remaining phonology vocabularies for the missing-sign form. As with
# handshapeChoices, the integer codes are ids from the phonology coding
# scheme and 0 means "not specified".

# where on (or near) the body the sign is made
locationChoices = ((0, 'None'),
                   (257, 'Top of head'),
                   (258, 'Forehead'),
                   (259, 'Temple'),
                   (260, 'Eyes'),
                   (261, 'Nose'),
                   (262, 'Whole of face'),
                   (263, 'Cheekbone'),
                   (264, 'Ear'),
                   (265, 'Cheek'),
                   (266, 'Mouth and lips'),
                   (267, 'Chin'),
                   (268, 'Neck'),
                   (269, 'Shoulder'),
                   (270, 'Chest'),
                   (271, 'Stomach'),
                   (272, 'Waist'),
                   (273, 'Lower waist'),
                   (274, 'Upper arm'),
                   (275, 'Elbow')
                   )

# whether/when the hands touch the body during the sign
handbodycontactChoices = ((0, 'None'),
                          (240, 'Contact at start of movement'),
                          (241, 'Contact at end of movement'),
                          (242, 'Two contacts (tap)'),
                          (243, 'Contact during (rub/stroke)')
                          )

# direction the hand(s) move
directionChoices = ((0, 'None'),
                    (472, 'Up'),
                    (473, 'Down'),
                    (474, 'Up and down'),
                    (475, 'Left'),
                    (476, 'Right'),
                    (477, 'Side to side'),
                    (478, 'Away'),
                    (479, 'Towards'),
                    (480, 'To and fro')
                    )

# shape of the movement path
movementtypeChoices = ((0, 'None'),
                       (481, 'Straight'),
                       (482, 'Curved'),
                       (483, 'Circle'),
                       (484, 'Zig-zag')
                       )

# small finger/wrist movements
smallmovementChoices = ((0, 'None'),
                        (485, 'Straighten from bent'),
                        (486, 'Bend fingers'),
                        (487, 'Nod at wrist'),
                        (488, 'Straighten fingers'),
                        (489, 'Open handshape'),
                        (490, 'Close handshape'),
                        (491, 'Wriggle fingers'),
                        (492, 'Crumble fingers')
                        )

# how many times the movement is performed
repetitionChoices = ((0, 'None'),
                     (493, 'Do the movement once'),
                     (494, 'Do the movement twice'),
                     (495, 'Repeat the movement several times')
                     )

# where the dominant hand sits relative to the other hand
relativelocationChoices = ((0, 'None'),
                           (283, 'Forearm'),
                           (284, 'Wrist'),
                           (285, 'Pulse'),
                           (286, 'Back of hand'),
                           (287, 'Palm'),
                           (288, 'Sides of hand'),
                           (289, 'Fingertips')
                           )

# how the two hands interact with each other
handinteractionChoices = ((0, 'None'),
                          (468, 'Alternate hands (one moves, then the other moves)'),
                          (469, 'Move the hands towards each other'),
                          (470, 'Move the hands away from each other'),
                          (471, 'The hands cross over each other')
                          )
class MissingSignFeedbackForm(forms.Form):
    """Form for reporting a sign missing from the dictionary, described
    phonologically (handshape, location, movement) plus its meaning, an
    optional video and free-text comments. All phonology fields are optional."""
    handform = forms.ChoiceField(choices=handformChoices, required=False,
                                 label='How many hands are used to make this sign?')
    handshape = forms.ChoiceField(choices=handshapeChoices, required=False,
                                  label='What is the handshape?')
    althandshape = forms.ChoiceField(choices=handshapeChoices, required=False,
                                     label='What is the handshape of the left hand?')
    location = forms.ChoiceField(choices=locationChoices, required=False,
                                 label='Choose the location of the sign on, or near the body')
    relativelocation = forms.ChoiceField(choices=relativelocationChoices,
                                         label='Choose the location of the right hand on, or near the left hand', required=False)
    handbodycontact = forms.ChoiceField(choices=handbodycontactChoices,
                                        label='Contact between hands and body', required=False)
    handinteraction = forms.ChoiceField(choices=handinteractionChoices,
                                        label='Interaction between hands', required=False)
    direction = forms.ChoiceField(choices=directionChoices,
                                  label='Movement direction of the hand(s)', required=False)
    movementtype = forms.ChoiceField(choices=movementtypeChoices,
                                     label='Type of movement', required=False)
    smallmovement = forms.ChoiceField(choices=smallmovementChoices,
                                      label='Small movements of the hand(s) and fingers', required=False)
    repetition = forms.ChoiceField(choices=repetitionChoices,
                                   label='Number of movements', required=False)
    # meaning is the only required field
    meaning = forms.CharField(label='Sign Meaning',
                              widget=forms.Textarea(attrs={'cols':'55', 'rows':'8'}))
    video = forms.FileField(required=False,
                            widget=forms.FileInput(attrs={'size':'60'}))
    comments = forms.CharField(label='Further Details',
                               widget=forms.Textarea(attrs={'cols':'55', 'rows':'8'}), required=False)
class MissingSignFeedback(models.Model):
    """A missing-sign report submitted via MissingSignFeedbackForm; the integer
    phonology fields use the same choice codes as that form (0 = unspecified)."""

    user = models.ForeignKey(authmodels.User)  # who submitted the report
    date = models.DateTimeField(auto_now_add=True)  # set once, on creation
    # phonological description of the reported sign
    handform = models.IntegerField(choices=handformChoices, blank=True, default=0)
    handshape = models.IntegerField(choices=handshapeChoices, blank=True, default=0)
    althandshape = models.IntegerField(choices=handshapeChoices, blank=True, default=0)
    location = models.IntegerField(choices=locationChoices, blank=True, default=0)
    relativelocation = models.IntegerField(choices=relativelocationChoices, blank=True, default=0)
    handbodycontact = models.IntegerField(choices=handbodycontactChoices, blank=True, default=0)
    handinteraction = models.IntegerField(choices=handinteractionChoices, blank=True, default=0)
    direction = models.IntegerField(choices=directionChoices, blank=True, default=0)
    movementtype = models.IntegerField(choices=movementtypeChoices, blank=True, default=0)
    smallmovement = models.IntegerField(choices=smallmovementChoices, blank=True, default=0)
    repetition = models.IntegerField(choices=repetitionChoices, blank=True, default=0)
    meaning = models.TextField()  # required: what the sign means
    comments = models.TextField(blank=True)
    video = models.FileField(upload_to=settings.COMMENT_VIDEO_LOCATION, blank=True)
    status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='unread')

    class Meta:
        ordering = ['-date']  # newest reports first
| {
"content_hash": "66eddc137b44cc9286caf4740c97de7d",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 221,
"avg_line_length": 42.91968911917098,
"alnum_prop": 0.49664996680147283,
"repo_name": "Signbank/BSL-signbank",
"id": "0ce25722fab3a5ee218c7edf7f4bbc5df6501ce3",
"size": "16567",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "signbank/feedback/models.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "1C Enterprise",
"bytes": "1846"
},
{
"name": "CSS",
"bytes": "480831"
},
{
"name": "HTML",
"bytes": "244006"
},
{
"name": "JavaScript",
"bytes": "1011248"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "986206"
}
],
"symlink_target": ""
} |
"""
******
Tables
******
Synapse Tables enable storage of tabular data in Synapse in a form that can be queried using a SQL-like query language.
A table has a :py:class:`Schema` and holds a set of rows conforming to that schema.
A :py:class:`Schema` defines a series of :py:class:`Column` of the following types: STRING, DOUBLE, INTEGER, BOOLEAN,
DATE, ENTITYID, FILEHANDLEID, LINK, LARGETEXT, USERID
~~~~~~~
Example
~~~~~~~
Preliminaries::
import synapseclient
from synapseclient import Project, File, Folder
from synapseclient import Schema, Column, Table, Row, RowSet, as_table_columns
syn = synapseclient.Synapse()
syn.login()
project = syn.get('syn123')
First, let's load some data. Let's say we had a file, genes.csv::
Name,Chromosome,Start,End,Strand,TranscriptionFactor
foo,1,12345,12600,+,False
arg,2,20001,20200,+,False
zap,2,30033,30999,-,False
bah,1,40444,41444,-,False
bnk,1,51234,54567,+,True
xyz,1,61234,68686,+,False
To create a Table::
table = build_table('My Favorite Genes', project, "/path/to/genes.csv")
syn.store(table)
:py:func:`build_table` will set the Table :py:class:`Schema` which defines the columns of the table.
To create a table with a custom :py:class:`Schema`, first create the :py:class:`Schema`::
cols = [
Column(name='Name', columnType='STRING', maximumSize=20),
Column(name='Chromosome', columnType='STRING', maximumSize=20),
Column(name='Start', columnType='INTEGER'),
Column(name='End', columnType='INTEGER'),
Column(name='Strand', columnType='STRING', enumValues=['+', '-'], maximumSize=1),
Column(name='TranscriptionFactor', columnType='BOOLEAN')]
schema = Schema(name='My Favorite Genes', columns=cols, parent=project)
Let's store that in Synapse::
table = Table(schema, "/path/to/genes.csv")
table = syn.store(table)
The :py:func:`Table` function takes two arguments, a schema object and data in some form, which can be:
* a path to a CSV file
* a `Pandas <http://pandas.pydata.org/>`_ \
`DataFrame <http://pandas.pydata.org/pandas-docs/stable/api.html#dataframe>`_
* a :py:class:`RowSet` object
* a list of lists where each of the inner lists is a row
With a bit of luck, we now have a table populated with data. Let's try to query::
results = syn.tableQuery("select * from %s where Chromosome='1' and Start < 41000 and End > 20000"
% table.schema.id)
for row in results:
print(row)
------
Pandas
------
`Pandas <http://pandas.pydata.org/>`_ is a popular library for working with tabular data. If you have Pandas installed,
the goal is that Synapse Tables will play nice with it.
Create a Synapse Table from a `DataFrame <http://pandas.pydata.org/pandas-docs/stable/api.html#dataframe>`_::
import pandas as pd
df = pd.read_csv("/path/to/genes.csv", index_col=False)
table = build_table('My Favorite Genes', project, df)
table = syn.store(table)
:py:func:`build_table` uses pandas DataFrame dtype to set the Table :py:class:`Schema`.
To create a table with a custom :py:class:`Schema`, first create the :py:class:`Schema`::
schema = Schema(name='My Favorite Genes', columns=as_table_columns(df), parent=project)
table = syn.store(Table(schema, df))
Get query results as a `DataFrame <http://pandas.pydata.org/pandas-docs/stable/api.html#dataframe>`_::
results = syn.tableQuery("select * from %s where Chromosome='2'" % table.schema.id)
df = results.asDataFrame()
--------------
Changing Data
--------------
Once the schema is settled, changes come in two flavors: appending new rows and updating existing ones.
**Appending** new rows is fairly straightforward. To continue the previous example, we might add some new genes from
another file::
table = syn.store(Table(table.schema.id, "/path/to/more_genes.csv"))
To quickly add a few rows, use a list of row data::
new_rows = [["Qux1", "4", 201001, 202001, "+", False],
["Qux2", "4", 203001, 204001, "+", False]]
table = syn.store(Table(schema, new_rows))
**Updating** rows requires an etag, which identifies the most recent change set plus row IDs and version numbers for
each row to be modified. We get those by querying before updating. Minimizing changesets to contain only rows that
actually change will make processing faster.
For example, let's update the names of some of our favorite genes::
results = syn.tableQuery("select * from %s where Chromosome='1'" % table.schema.id)
df = results.asDataFrame()
df['Name'] = ['rzing', 'zing1', 'zing2', 'zing3']
Note that we're propagating the etag from the query results. Without it, we'd get an error saying something about an
"Invalid etag"::
table = syn.store(Table(schema, df, etag=results.etag))
The etag is used by the server to prevent concurrent users from making conflicting changes, a technique called
optimistic concurrency. In case of a conflict, your update may be rejected. You then have to do another query and
try your update again.
------------------------
Changing Table Structure
------------------------
Adding columns can be done using the methods :py:meth:`Schema.addColumn` or :py:meth:`addColumns` on the
:py:class:`Schema` object::
schema = syn.get("syn000000")
bday_column = syn.store(Column(name='birthday', columnType='DATE'))
schema.addColumn(bday_column)
schema = syn.store(schema)
Renaming or otherwise modifying a column involves removing the column and adding a new column::
cols = syn.getTableColumns(schema)
for col in cols:
if col.name == "birthday":
schema.removeColumn(col)
bday_column2 = syn.store(Column(name='birthday2', columnType='DATE'))
schema.addColumn(bday_column2)
schema = syn.store(schema)
--------------------
Table attached files
--------------------
Synapse tables support a special column type called 'File' which contain a file handle, an identifier of a file stored
in Synapse. Here's an example of how to upload files into Synapse, associate them with a table and read them back
later::
# your synapse project
project = syn.get(...)
covers_dir = '/path/to/album/covers/'
# store the table's schema
cols = [
Column(name='artist', columnType='STRING', maximumSize=50),
Column(name='album', columnType='STRING', maximumSize=50),
Column(name='year', columnType='INTEGER'),
Column(name='catalog', columnType='STRING', maximumSize=50),
Column(name='cover', columnType='FILEHANDLEID')]
schema = syn.store(Schema(name='Jazz Albums', columns=cols, parent=project))
# the actual data
data = [["John Coltrane", "Blue Train", 1957, "BLP 1577", "coltraneBlueTrain.jpg"],
["Sonny Rollins", "Vol. 2", 1957, "BLP 1558", "rollinsBN1558.jpg"],
["Sonny Rollins", "Newk's Time", 1958, "BLP 4001", "rollinsBN4001.jpg"],
["Kenny Burrel", "Kenny Burrel", 1956, "BLP 1543", "burrellWarholBN1543.jpg"]]
# upload album covers
for row in data:
file_handle = syn.uploadSynapseManagedFileHandle(os.path.join(covers_dir, row[4]))
row[4] = file_handle['id']
# store the table data
row_reference_set = syn.store(RowSet(columns=cols, schema=schema, rows=[Row(r) for r in data]))
# Later, we'll want to query the table and download our album covers
results = syn.tableQuery("select artist, album, 'year', catalog, cover from %s where artist = 'Sonny Rollins'" \
% schema.id)
cover_files = syn.downloadTableColumns(results, ['cover'])
-------------
Deleting rows
-------------
Query for the rows you want to delete and call syn.delete on the results::
results = syn.tableQuery("select * from %s where Chromosome='2'" % table.schema.id)
a = syn.delete(results)
------------------------
Deleting the whole table
------------------------
Deleting the schema deletes the whole table and all rows::
syn.delete(schema)
~~~~~~~
Queries
~~~~~~~
The query language is quite similar to SQL select statements, except that joins are not supported. The documentation
for the Synapse API has lots of `query examples \
<http://docs.synapse.org/rest/org/sagebionetworks/repo/web/controller/TableExamples.html>`_.
~~~~~~
Schema
~~~~~~
.. autoclass:: synapseclient.table.Schema
:members:
:noindex:
.. autoclass:: synapseclient.table.EntityViewSchema
:members:
:noindex:
~~~~~~
Column
~~~~~~
.. autoclass:: synapseclient.table.Column
:members: __init__
~~~~~~
Row
~~~~~~
.. autoclass:: synapseclient.table.Row
:members: __init__
~~~~~~
RowSet
~~~~~~
.. autoclass:: synapseclient.table.RowSet
:members: __init__
~~~~~~
Table
~~~~~~
.. autoclass:: synapseclient.table.TableAbstractBaseClass
:members:
.. autoclass:: synapseclient.table.RowSetTable
:members:
.. autoclass:: synapseclient.table.TableQueryResult
:members:
.. autoclass:: synapseclient.table.CsvFileTable
:members:
~~~~~~~~~~~~~~~~~~~~
Module level methods
~~~~~~~~~~~~~~~~~~~~
.. autofunction:: as_table_columns
.. autofunction:: build_table
.. autofunction:: Table
See also:
- :py:meth:`synapseclient.Synapse.getColumns`
- :py:meth:`synapseclient.Synapse.getTableColumns`
- :py:meth:`synapseclient.Synapse.tableQuery`
- :py:meth:`synapseclient.Synapse.get`
- :py:meth:`synapseclient.Synapse.store`
- :py:meth:`synapseclient.Synapse.delete`
"""
import collections.abc
import csv
import io
import os
import re
import tempfile
import copy
import itertools
import collections
import abc
import enum
import json
from builtins import zip
from typing import List, Dict
from synapseclient.core.utils import id_of, itersubclasses, from_unix_epoch_time
from synapseclient.core.exceptions import SynapseError
from synapseclient.core.models.dict_object import DictObject
from .entity import Entity, entity_type_to_class
from synapseclient.core.constants import concrete_types
# Matches SQL aggregate expressions such as "count(foo)" in query select clauses.
aggregate_pattern = re.compile(r'(count|max|min|avg|sum)\((.+)\)')

# default is STRING, only need to put the non-STRING keys in here
PANDAS_TABLE_TYPE = {
    'floating': 'DOUBLE',
    'decimal': 'DOUBLE',
    'integer': 'INTEGER',
    'boolean': 'BOOLEAN',
    'datetime64': 'DATE',
    'datetime': 'DATE',
    'date': 'DATE',
}
# These are all the synapse columns that are lists
# Be sure to edit the values in the `cast_values` function as well
# when lists column types are added
LIST_COLUMN_TYPES = {
    'STRING_LIST',
    'INTEGER_LIST',
    'BOOLEAN_LIST',
    'DATE_LIST',
    'ENTITYID_LIST',
    'USERID_LIST'
}
# Maximum number of columns allowed in a Synapse table schema.
MAX_NUM_TABLE_COLUMNS = 152

# CSV dialect defaults used when reading/writing table data.
DEFAULT_QUOTE_CHARACTER = '"'
DEFAULT_SEPARATOR = ","
# NOTE(review): name misspells "ESCAPE" but is public; kept for backward compatibility.
DEFAULT_ESCAPSE_CHAR = "\\"
# This Enum is used to help users determine which Entity types they want in their view
# Each item will be used to construct the viewTypeMask
class EntityViewType(enum.Enum):
    """Entity types that may be included in a View.

    Each member's value is a single bit; members are OR'ed together to build
    a view's ``viewTypeMask``.
    """
    FILE = 1 << 0
    PROJECT = 1 << 1
    TABLE = 1 << 2
    FOLDER = 1 << 3
    VIEW = 1 << 4
    DOCKER = 1 << 5
    SUBMISSION_VIEW = 1 << 6
    DATASET = 1 << 7
    DATASET_COLLECTION = 1 << 8
    MATERIALIZED_VIEW = 1 << 9
def _get_view_type_mask(types_to_include):
if not types_to_include:
raise ValueError("Please include at least one of the entity types specified in EntityViewType.")
mask = 0x00
for input in types_to_include:
if not isinstance(input, EntityViewType):
raise ValueError("Please use EntityViewType to specify the type you want to include in a View.")
mask = mask | input.value
return mask
def _get_view_type_mask_for_deprecated_type(type):
if not type:
raise ValueError("Please specify the deprecated type to convert to viewTypeMask")
if type == 'file':
return EntityViewType.FILE.value
if type == 'project':
return EntityViewType.PROJECT.value
if type == 'file_and_table':
return EntityViewType.FILE.value | EntityViewType.TABLE.value
raise ValueError("The provided value is not a valid type: %s", type)
def test_import_pandas():
    """Verify that pandas is importable, raising a helpful error if it is not.

    :raises ModuleNotFoundError: with installation instructions when pandas is absent.
        Any other import-time error (see SYNPY-177) propagates unchanged.
    """
    try:
        import pandas as pd  # noqa F401
    # used to catch when pandas isn't installed
    except ModuleNotFoundError as cause:
        # chain the original exception so the real import failure stays visible
        raise ModuleNotFoundError("""\n\nThe pandas package is required for this function!\n
    Most functions in the synapseclient package don't require the
    installation of pandas, but some do. Please refer to the installation
    instructions at: http://pandas.pydata.org/.
    \n\n\n""") from cause
    # NOTE: the previous bare `except: raise` clause was a no-op (it only
    # re-raised) and has been removed; other errors still propagate (SYNPY-177).
def as_table_columns(values):
    """
    Return a list of Synapse table :py:class:`Column` objects that correspond to the columns in the given values.

    :param values: an object that holds the content of the tables
      - a string holding the path to a CSV file, a filehandle, or StringIO containing valid csv content
      - a Pandas `DataFrame <http://pandas.pydata.org/pandas-docs/stable/api.html#dataframe>`_

    :returns: A list of Synapse table :py:class:`Column` objects

    :raises ValueError: if *values* is neither a DataFrame nor something readable as CSV

    Example::
        import pandas as pd
        df = pd.DataFrame(dict(a=[1, 2, 3], b=["c", "d", "e"]))
        cols = as_table_columns(df)
    """
    test_import_pandas()
    import pandas as pd
    from pandas.api.types import infer_dtype
    df = None
    # pandas DataFrame
    if isinstance(values, pd.DataFrame):
        df = values
    # filename of a csv file
    # in Python 3, we can check that the values is instanceof io.IOBase
    # for now, check if values has attr `read`
    elif isinstance(values, str) or hasattr(values, "read"):
        df = _csv_to_pandas_df(values)
    if df is None:
        raise ValueError("Values of type %s is not yet supported." % type(values))
    cols = list()
    for col in df:
        # map the pandas-inferred dtype to a Synapse column type; unknown dtypes become STRING
        inferred_type = infer_dtype(df[col], skipna=True)
        columnType = PANDAS_TABLE_TYPE.get(inferred_type, 'STRING')
        if columnType == 'STRING':
            # NOTE(review): maxStrLen is NaN for an all-null column, which would make
            # int(round(...)) below raise — assumes at least one non-null value; confirm.
            maxStrLen = df[col].str.len().max()
            if maxStrLen > 1000:
                # too long for a sized STRING column; use LARGETEXT instead
                cols.append(Column(name=col, columnType='LARGETEXT', defaultValue=''))
            else:
                # size the column to 1.5x the longest string, clamped to [30, 1000]
                size = int(round(min(1000, max(30, maxStrLen*1.5))))  # Determine the length of the longest string
                cols.append(Column(name=col, columnType=columnType, maximumSize=size, defaultValue=''))
        else:
            cols.append(Column(name=col, columnType=columnType))
    return cols
def df2Table(df, syn, tableName, parentProject):
    """Creates a new table from data in pandas data frame.

    :param df: the pandas DataFrame holding the table data
    :param syn: a logged-in :py:class:`synapseclient.client.Synapse` instance
    :param tableName: name for the new table
    :param parentProject: the project in which to create the table
    :returns: the stored :py:class:`Schema`
    """
    CHUNK_SIZE = 1200
    # Create columns:
    cols = as_table_columns(df)
    cols = [syn.store(col) for col in cols]
    # Create Table Schema
    schema1 = Schema(name=tableName, columns=cols, parent=parentProject)
    schema1 = syn.store(schema1)
    # Add data to Table in chunks of CHUNK_SIZE rows.
    # BUG FIX: the old loop used `range(0, df.shape[0]/1200+1)`, which passes a
    # float to range() (TypeError on Python 3), and `df.ix`, which was removed
    # from pandas; use stepped range() and `df.iloc` instead. Empty trailing
    # chunks are no longer stored.
    for start in range(0, df.shape[0], CHUNK_SIZE):
        end = min(start + CHUNK_SIZE, df.shape[0])
        rowset1 = RowSet(columns=cols, schema=schema1,
                         rows=[Row(list(df.iloc[j, :])) for j in range(start, end)])
        syn.store(rowset1)
    return schema1
def to_boolean(value):
    """
    Convert a string to boolean, case insensitively,
    where true values are: true, t, and 1 and false values are: false, f, 0.
    Raise a ValueError for all other values.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        normalized = value.lower()
        if normalized in {'true', 't', '1'}:
            return True
        if normalized in {'false', 'f', '0'}:
            return False
    raise ValueError("Can't convert %s to boolean." % value)
def column_ids(columns):
    """Return the ids of the given columns, skipping any column that has no 'id'.

    :param columns: a sequence of column objects, or None
    :returns: a list of column id values (empty if columns is None)
    """
    if columns is None:
        return []
    ids = []
    for col in columns:
        if 'id' in col:
            ids.append(col.id)
    return ids
def row_labels_from_id_and_version(rows):
    """Build row labels by joining each tuple's parts with underscores (e.g. (1, 2) -> "1_2")."""
    labels = []
    for row in rows:
        labels.append("_".join(str(part) for part in row))
    return labels
def row_labels_from_rows(rows):
    """Build "<rowId>_<versionNumber>[_<etag>]" labels from row dicts."""
    id_version_tuples = []
    for row in rows:
        if 'etag' in row:
            id_version_tuples.append((row['rowId'], row['versionNumber'], row['etag']))
        else:
            id_version_tuples.append((row['rowId'], row['versionNumber']))
    return row_labels_from_id_and_version(id_version_tuples)
def cast_values(values, headers):
    """
    Convert a row of table query results from strings to the correct column type.
    See: http://docs.synapse.org/rest/org/sagebionetworks/repo/model/table/ColumnType.html

    :param values: a list of raw string field values
    :param headers: a list of header dicts, each with an optional 'columnType' (defaults to STRING)
    :returns: a list of values cast to their Python equivalents; empty/None fields become None
    :raises ValueError: when the number of values does not match the number of headers
    """
    if len(values) != len(headers):
        raise ValueError('The number of columns in the csv file does not match the given headers. %d fields, %d headers'
                         % (len(values), len(headers)))
    # column types that pass through as plain strings
    string_like = {'STRING', 'ENTITYID', 'FILEHANDLEID', 'LARGETEXT', 'USERID', 'LINK'}
    # list column types stored as JSON arrays (DATE_LIST handled separately below)
    json_list_types = {'STRING_LIST', 'INTEGER_LIST', 'BOOLEAN_LIST', 'ENTITYID_LIST', 'USERID_LIST'}
    cast = []
    for header, raw in zip(headers, values):
        col_type = header.get('columnType', 'STRING')
        if raw is None or raw == '':
            cast.append(None)
        elif col_type in string_like:
            cast.append(raw)
        elif col_type == 'DOUBLE':
            cast.append(float(raw))
        elif col_type == 'INTEGER':
            cast.append(int(raw))
        elif col_type == 'BOOLEAN':
            cast.append(to_boolean(raw))
        elif col_type == 'DATE':
            cast.append(from_unix_epoch_time(raw))
        elif col_type in json_list_types:
            cast.append(json.loads(raw))
        elif col_type == 'DATE_LIST':
            # each integer in the JSON array is a unix-epoch-millisecond timestamp
            cast.append(json.loads(raw, parse_int=from_unix_epoch_time))
        else:
            # default to string for unknown column type
            cast.append(raw)
    return cast
def cast_row(row, headers):
    """Cast a single row dict's 'values' in place (see :py:func:`cast_values`) and return the row."""
    converted = cast_values(row['values'], headers)
    row['values'] = converted
    return row
def cast_row_set(rowset):
    """Cast the 'values' of every row in a rowset dict in place and return the rowset.

    :param rowset: a dict with 'rows' (list of row dicts) and 'headers' keys
    :returns: the same rowset, with each row's values cast to Python types
    """
    for i, row in enumerate(rowset['rows']):
        # BUG FIX: the result of cast_row (the whole row dict) was previously
        # assigned into rowset['rows'][i]['values'], nesting the row inside its
        # own 'values' key; assign it back to the row slot instead.
        rowset['rows'][i] = cast_row(row, rowset['headers'])
    return rowset
def escape_column_name(column):
    """Escape the name of the given column for use in a Synapse table query statement

    :param column: a string or column dictionary object with a 'name' key
    :returns: the name wrapped in double quotes, with embedded double quotes doubled
    """
    if isinstance(column, collections.abc.Mapping):
        name = column['name']
    else:
        name = str(column)
    return '"%s"' % name.replace('"', '""')
def join_column_names(columns):
    """Join the names of the given columns into a comma delimited list suitable for use in a Synapse table query

    :param columns: a sequence of column string names or dictionary objects with column 'name' keys
    """
    escaped = [escape_column_name(column) for column in columns]
    return ",".join(escaped)
def _csv_to_pandas_df(filepath,
                      separator=DEFAULT_SEPARATOR,
                      quote_char=DEFAULT_QUOTE_CHARACTER,
                      escape_char=DEFAULT_ESCAPSE_CHAR,
                      contain_headers=True,
                      lines_to_skip=0,
                      date_columns=None,
                      list_columns=None,
                      rowIdAndVersionInIndex=True,
                      dtype=None):
    """Read a Synapse table CSV into a pandas DataFrame.

    :param filepath: path, filehandle, or StringIO of CSV content
    :param separator: field separator character
    :param quote_char: CSV quote character
    :param escape_char: CSV escape character
    :param contain_headers: if True the first row is treated as column headers
    :param lines_to_skip: number of leading lines to skip
    :param date_columns: column names parsed as unix-epoch-millisecond timestamps
    :param list_columns: column names whose JSON-array strings are parsed into lists
    :param rowIdAndVersionInIndex: if True and ROW_ID/ROW_VERSION columns are present,
        fold them (plus ROW_ETAG, if present) into the DataFrame index as
        "<id>_<version>[_<etag>]" labels and drop the columns
    :param dtype: optional dtype mapping passed straight to pandas.read_csv
    :returns: the resulting pandas DataFrame
    """
    test_import_pandas()
    import pandas as pd
    # DATEs are stored in csv as unix timestamp in milliseconds
    def datetime_millisecond_parser(milliseconds): return pd.to_datetime(milliseconds, unit='ms', utc=True)
    if not date_columns:
        date_columns = []
    line_terminator = str(os.linesep)
    # assign line terminator only if for single character
    # line terminators (e.g. not '\r\n') 'cause pandas doesn't
    # longer line terminators. See:
    # https://github.com/pydata/pandas/issues/3501
    # "ValueError: Only length-1 line terminators supported"
    # NOTE(review): `date_parser` is deprecated in pandas 2.x and removed later — confirm
    # the supported pandas version before upgrading.
    df = pd.read_csv(filepath,
                     dtype=dtype,
                     sep=separator,
                     lineterminator=line_terminator if len(line_terminator) == 1 else None,
                     quotechar=quote_char,
                     escapechar=escape_char,
                     header=0 if contain_headers else None,
                     skiprows=lines_to_skip,
                     parse_dates=date_columns,
                     date_parser=datetime_millisecond_parser)
    # Turn list columns into lists
    if list_columns:
        for col in list_columns:
            # Fill NA values with empty lists, it must be a string for json.loads to work
            df[col].fillna('[]', inplace=True)
            df[col] = df[col].apply(json.loads)
    if rowIdAndVersionInIndex and "ROW_ID" in df.columns and "ROW_VERSION" in df.columns:
        # combine row-ids (in index) and row-versions (in column 0) to
        # make new row labels consisting of the row id and version
        # separated by a dash.
        zip_args = [df["ROW_ID"], df["ROW_VERSION"]]
        if "ROW_ETAG" in df.columns:
            zip_args.append(df['ROW_ETAG'])
        df.index = row_labels_from_id_and_version(zip(*zip_args))
        del df["ROW_ID"]
        del df["ROW_VERSION"]
        if "ROW_ETAG" in df.columns:
            del df['ROW_ETAG']
    return df
def _create_row_delete_csv(row_id_vers_iterable):
"""
creates a temporary csv used for deleting rows
:param row_id_vers_iterable: an iterable containing tuples with format: (row_id, row_version)
:return: filepath of created csv file
"""
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as temp_csv:
csv_writer = csv.writer(temp_csv)
csv_writer.writerow(("ROW_ID", "ROW_VERSION"))
csv_writer.writerows(row_id_vers_iterable)
return temp_csv.name
def _delete_rows(syn, schema, row_id_vers_list):
    """
    Deletes rows from a synapse table

    :param syn: an instance of py:class:`synapseclient.client.Synapse`
    :param schema: the table (schema) from which to delete rows
    :param row_id_vers_list: an iterable containing tuples with format: (row_id, row_version)
    """
    csv_path = _create_row_delete_csv(row_id_vers_list)
    try:
        syn._uploadCsv(csv_path, schema)
    finally:
        # always clean up the temp file, even if the upload fails
        os.remove(csv_path)
class SchemaBase(Entity, metaclass=abc.ABCMeta):
    """
    This is the an Abstract Class for EntityViewSchema and Schema containing the common methods for both.
    You can not create an object of this type.
    """
    _property_keys = Entity._property_keys + ['columnIds']
    _local_keys = Entity._local_keys + ['columns_to_store']

    @property
    @abc.abstractmethod  # forces subclasses to define _synapse_entity_type
    def _synapse_entity_type(self):
        pass

    @abc.abstractmethod
    def __init__(self, name, columns, properties, annotations, local_state, parent, **kwargs):
        self.properties.setdefault('columnIds', [])
        self.__dict__.setdefault('columns_to_store', [])
        if name:
            kwargs['name'] = name
        super(SchemaBase, self).__init__(properties=properties, annotations=annotations, local_state=local_state,
                                         parent=parent, **kwargs)
        if columns:
            self.addColumns(columns)

    def addColumn(self, column):
        """
        Add a column (by ID) or a not-yet-stored :py:class:`Column` to this schema.

        :param column: a column object or its ID
        :raises ValueError: if the argument is neither a Column nor something with an id
        """
        if isinstance(column, (str, int)) or hasattr(column, 'id'):
            self.properties.columnIds.append(id_of(column))
        elif isinstance(column, Column):
            # a Column without an id has not been created in Synapse yet;
            # queue it for creation at store time (_before_synapse_store)
            if not self.__dict__.get('columns_to_store', None):
                self.__dict__['columns_to_store'] = []
            self.__dict__['columns_to_store'].append(column)
        else:
            raise ValueError("Not a column? %s" % str(column))

    def addColumns(self, columns):
        """
        Add each of the given columns to this schema.

        :param columns: a list of column objects or their ID
        """
        for column in columns:
            self.addColumn(column)

    def removeColumn(self, column):
        """
        Remove a column (by ID) or a pending :py:class:`Column` from this schema.

        :param column: a column object or its ID
        :raises ValueError: if the column is neither an ID nor a pending Column
        """
        if isinstance(column, (str, int)) or hasattr(column, 'id'):
            self.properties.columnIds.remove(id_of(column))
        elif isinstance(column, Column) and self.columns_to_store:
            self.columns_to_store.remove(column)
        else:
            # BUG FIX: the ValueError was previously constructed but never raised
            # (missing `raise`), and used '+' where '%' formatting was intended,
            # leaving the literal "%s" in the message.
            raise ValueError("Can't remove column %s" % str(column))

    def has_columns(self):
        """Does this schema have columns specified?"""
        return bool(self.properties.get('columnIds', None) or self.__dict__.get('columns_to_store', None))

    def _before_synapse_store(self, syn):
        # enforce the Synapse per-table column limit before making remote calls
        if len(self.columns_to_store) + len(self.columnIds) > MAX_NUM_TABLE_COLUMNS:
            raise ValueError("Too many columns. The limit is %s columns per table" % MAX_NUM_TABLE_COLUMNS)
        # store any columns before storing table
        if self.columns_to_store:
            self.properties.columnIds.extend(column.id for column in syn.createColumns(self.columns_to_store))
            self.columns_to_store = []
class Schema(SchemaBase):
    """
    A Schema is an :py:class:`synapseclient.entity.Entity` that defines the set of
    columns making up a Synapse table.

    :param name:        the name for the Table Schema object
    :param description: User readable description of the schema
    :param columns:     a list of :py:class:`Column` objects or their IDs
    :param parent:      the project in Synapse to which this table belongs
    :param properties:  A map of Synapse properties
    :param annotations: A map of user defined annotations
    :param local_state: Internal use only

    Example::

        cols = [Column(name='Isotope', columnType='STRING'),
                Column(name='Atomic Mass', columnType='INTEGER'),
                Column(name='Halflife', columnType='DOUBLE'),
                Column(name='Discovered', columnType='DATE')]
        schema = syn.store(Schema(name='MyTable', columns=cols, parent=project))
    """
    _synapse_entity_type = 'org.sagebionetworks.repo.model.table.TableEntity'

    def __init__(self, name=None, columns=None, parent=None, properties=None, annotations=None, local_state=None,
                 **kwargs):
        # all construction logic lives in SchemaBase; this subclass only fixes the entity type
        super().__init__(name=name, columns=columns, properties=properties, annotations=annotations,
                         local_state=local_state, parent=parent, **kwargs)
class MaterializedViewSchema(SchemaBase):
    """
    A MaterializedViewSchema is an :py:class:`synapseclient.entity.Entity` that defines the
    columns of a materialized view together with the Synapse SQL statement that produces
    its rows.

    :param name:        the name for the Materialized View Schema object
    :param description: User readable description of the schema
    :param definingSQL: The synapse SQL statement that defines the data in the materialized view. The SQL may
                        contain JOIN clauses on multiple tables.
    :param columns:     a list of :py:class:`Column` objects or their IDs
    :param parent:      the project in Synapse to which this Materialized View belongs
    :param properties:  A map of Synapse properties
    :param annotations: A map of user defined annotations
    :param local_state: Internal use only

    Example::

        defining_sql = "SELECT * FROM syn111 F JOIN syn2222 P on (F.patient_id = P.patient_id)"

        schema = syn.store(MaterializedViewSchema(name='MyTable', parent=project, definingSQL=defining_sql))
    """
    _synapse_entity_type = 'org.sagebionetworks.repo.model.table.MaterializedView'
    _property_keys = SchemaBase._property_keys + ['definingSQL']

    def __init__(self, name=None, columns=None, parent=None, definingSQL=None, properties=None, annotations=None,
                 local_state=None, **kwargs):
        # a provided definingSQL travels through kwargs so the base class
        # records it as an entity property
        if definingSQL is not None:
            kwargs['definingSQL'] = definingSQL
        super().__init__(name=name, columns=columns, properties=properties, annotations=annotations,
                         local_state=local_state, parent=parent, **kwargs)
class ViewBase(SchemaBase):
    """
    This is a helper class for EntityViewSchema and SubmissionViewSchema
    containing the common methods for both.
    """
    # Subclasses must override this with their concrete Synapse entity type;
    # the empty default also gets registered (and removed) in the module-level
    # entity_type_to_class bookkeeping below.
    _synapse_entity_type = ""
    _property_keys = SchemaBase._property_keys + ['viewTypeMask', 'scopeIds']
    _local_keys = SchemaBase._local_keys + ['addDefaultViewColumns', 'addAnnotationColumns',
                                            'ignoredAnnotationColumnNames']
    def add_scope(self, entities):
        """
        Append one entity (or a list of entities) to this view's scope.

        :param entities: a Project, Folder, Evaluation object or its ID, can also be a list of them
        """
        if isinstance(entities, list):
            # add ids to a temp list so that we don't partially modify scopeIds on an exception in id_of()
            temp_list = [id_of(entity) for entity in entities]
            self.scopeIds.extend(temp_list)
        else:
            self.scopeIds.append(id_of(entities))
    def _filter_duplicate_columns(self, syn, columns_to_add):
        """
        If a column to be added has the same name and same type as an existing column, it will be considered a duplicate
        and not added.

        :param syn: a :py:class:`synapseclient.client.Synapse` object that is logged in
        :param columns_to_add: iterable collection of type :py:class:`synapseclient.table.Column` objects
        :return: a filtered list of columns to add
        """
        # no point in making HTTP calls to retrieve existing Columns if we not adding any new columns
        if not columns_to_add:
            return columns_to_add
        # set up Column name/type tracking
        # map of str -> set(str), where str is the column type as a string and set is a set of column name strings
        column_type_to_annotation_names = {}
        # add to existing columns the columns that user has added but not yet created in synapse
        column_generator = itertools.chain(syn.getColumns(self.columnIds),
                                           self.columns_to_store) if self.columns_to_store \
            else syn.getColumns(self.columnIds)
        for column in column_generator:
            column_name = column['name']
            column_type = column['columnType']
            column_type_to_annotation_names.setdefault(column_type, set()).add(column_name)
        valid_columns = []
        for column in columns_to_add:
            new_col_name = column['name']
            new_col_type = column['columnType']
            typed_col_name_set = column_type_to_annotation_names.setdefault(new_col_type, set())
            if new_col_name not in typed_col_name_set:
                # also track the new name so duplicates within columns_to_add itself are filtered
                typed_col_name_set.add(new_col_name)
                valid_columns.append(column)
        return valid_columns
    def _before_synapse_store(self, syn):
        """Add default and annotation-derived columns (deduplicated) before the view is stored."""
        # get the default EntityView columns from Synapse and add them to the columns list
        additional_columns = []
        # e.g. 'org.sagebionetworks.repo.model.table.EntityView' -> 'entityview'
        view_type = self._synapse_entity_type.split(".")[-1].lower()
        mask = self.get("viewTypeMask")
        if self.addDefaultViewColumns:
            additional_columns.extend(
                syn._get_default_view_columns(view_type, view_type_mask=mask)
            )
        # get default annotations
        if self.addAnnotationColumns:
            anno_columns = [x for x in syn._get_annotation_view_columns(self.scopeIds, view_type,
                                                                        view_type_mask=mask)
                            if x['name'] not in self.ignoredAnnotationColumnNames]
            additional_columns.extend(anno_columns)
        self.addColumns(self._filter_duplicate_columns(syn, additional_columns))
        # set these boolean flags to false so they are not repeated.
        self.addDefaultViewColumns = False
        self.addAnnotationColumns = False
        super(ViewBase, self)._before_synapse_store(syn)
class Dataset(ViewBase):
    """
    A Dataset is an :py:class:`synapseclient.entity.Entity` that defines a
    flat list of entities as a tableview (a.k.a. a "dataset").

    :param name:            The name for the Dataset object
    :param description:     User readable description of the schema
    :param columns:         A list of :py:class:`Column` objects or their IDs
    :param parent:          The Synapse Project to which this Dataset belongs
    :param properties:      A map of Synapse properties
    :param annotations:     A map of user defined annotations
    :param dataset_items:   A list of items characterized by entityId and versionNumber
    :param folders:         A list of Folder IDs
    :param local_state:     Internal use only

    Example::

        from synapseclient import Dataset

        # Create a Dataset with pre-defined DatasetItems. Default Dataset columns
        # are used if no schema is provided.
        dataset_items = [
            {'entityId': "syn000", 'versionNumber': 1},
            {...},
        ]
        dataset = syn.store(Dataset(
            name="My Dataset",
            parent=project,
            dataset_items=dataset_items))

        # Add/remove specific Synapse IDs to/from the Dataset
        dataset.add_item({'entityId': "syn111", 'versionNumber': 1})
        dataset.remove_item("syn000")
        dataset = syn.store(dataset)

        # Add a list of Synapse IDs to the Dataset
        new_items = [
            {'entityId': "syn222", 'versionNumber': 2},
            {'entityId': "syn333", 'versionNumber': 1}
        ]
        dataset.add_items(new_items)
        dataset = syn.store(dataset)

    Folders can easily be added recursively to a dataset, that is, all files
    within the folder (including sub-folders) will be added.  Note that using
    the following methods will add files with the latest version number ONLY.
    If another version number is desired, use :py:classmethod:`synapseclient.table.add_item`
    or :py:classmethod:`synapseclient.table.add_items`.

    Example::

        # Add a single Folder to the Dataset
        dataset.add_folder("syn123")

        # Add a list of Folders, overwriting any existing files in the dataset
        dataset.add_folders(["syn456", "syn789"], force=True)
        dataset = syn.store(dataset)

    empty() can be used to truncate a dataset, that is, remove all current
    items from the set.

    Example::

        dataset.empty()
        dataset = syn.store(dataset)

    To get the number of entities in the dataset, use len().

    Example::

        print(f"{dataset.name} has {len(dataset)} items.")

    To create a snapshot version of the Dataset, use
    :py:classmethod:`synapseclient.client.create_snapshot_version`.

    Example::

        syn = synapseclient.login()
        syn.create_snapshot_version(
            dataset.id,
            label="v1.0",
            comment="This is version 1")
    """
    _synapse_entity_type: str = "org.sagebionetworks.repo.model.table.Dataset"
    _property_keys: List[str] = ViewBase._property_keys + ['datasetItems']
    _local_keys: List[str] = ViewBase._local_keys + ['folders_to_add', 'force']
    def __init__(self, name=None, columns=None, parent=None, properties=None,
                 addDefaultViewColumns=True, addAnnotationColumns=True, ignoredAnnotationColumnNames=[],
                 annotations=None, local_state=None, dataset_items=None,
                 folders=None, force=False, **kwargs):
        # NOTE(review): the mutable default [] above is only ever copied into a set, never mutated
        self.properties.setdefault('datasetItems', [])
        self.__dict__.setdefault('folders_to_add', set())
        self.ignoredAnnotationColumnNames = set(ignoredAnnotationColumnNames)
        # a Dataset's viewTypeMask is always the DATASET bit
        self.viewTypeMask = EntityViewType.DATASET.value
        super(Dataset, self).__init__(
            name=name, columns=columns, properties=properties,
            annotations=annotations, local_state=local_state, parent=parent,
            **kwargs
        )
        self.force = force
        if dataset_items:
            self.add_items(dataset_items, force)
        if folders:
            self.add_folders(folders, force)
        # HACK: make sure we don't try to add columns to schemas that we retrieve from synapse
        is_from_normal_constructor = not (properties or local_state)
        # allowing annotations because user might want to update annotations all at once
        self.addDefaultViewColumns = addDefaultViewColumns and is_from_normal_constructor
        self.addAnnotationColumns = addAnnotationColumns and is_from_normal_constructor
    def __len__(self):
        # len(dataset) == number of dataset items currently held
        return len(self.properties.datasetItems)
    @staticmethod
    def _check_needed_keys(keys: List[str]):
        """Validate that a dataset item has both 'entityId' and 'versionNumber'; raise LookupError otherwise."""
        required_keys = {'entityId', 'versionNumber'}
        if required_keys - keys:
            raise LookupError("DatasetItem missing a required property: %s" %
                              str(required_keys - keys))
        return True
    def add_item(self, dataset_item: Dict[str, str], force: bool = True):
        """
        Add a single dataset item, optionally replacing an existing item with the same entityId.

        :param dataset_item: a single dataset item (dict with 'entityId' and 'versionNumber')
        :param force: if True, an existing item with the same entityId is replaced;
                      if False, a duplicate raises ValueError
        """
        if isinstance(dataset_item, dict) and self._check_needed_keys(dataset_item.keys()):
            if not self.has_item(dataset_item.get('entityId')):
                self.properties.datasetItems.append(dataset_item)
            else:
                if force:
                    # replace the existing entry for this entityId
                    self.remove_item(dataset_item.get('entityId'))
                    self.properties.datasetItems.append(dataset_item)
                else:
                    raise ValueError(
                        f"Duplicate item found: {dataset_item.get('entityId')}. "
                        "Set force=True to overwrite the existing item.")
        else:
            raise ValueError("Not a DatasetItem? %s" % str(dataset_item))
    def add_items(self, dataset_items: List[Dict[str, str]], force: bool = True):
        """
        Add each of the given dataset items (see :py:meth:`add_item`).

        :param dataset_items: a list of dataset items
        :param force: force add items
        """
        for dataset_item in dataset_items:
            self.add_item(dataset_item, force)
    def remove_item(self, item_id: str):
        """
        Remove the (first) item whose entityId matches; no-op if absent.

        :param item_id: a single dataset item Synapse ID
        :raises ValueError: if item_id is not a Synapse ID
        """
        item_id = id_of(item_id)
        if item_id.startswith("syn"):
            for i, curr_item in enumerate(self.properties.datasetItems):
                if curr_item.get('entityId') == item_id:
                    del self.properties.datasetItems[i]
                    break
        else:
            raise ValueError("Not a Synapse ID: %s" % str(item_id))
    def empty(self):
        """Remove all items from the dataset (truncate)."""
        self.properties.datasetItems = []
    def has_item(self, item_id):
        """
        Return True if an item with the given entityId is already present.

        :param item_id: a single dataset item Synapse ID
        """
        return any(item['entityId'] == item_id for item in self.properties.datasetItems)
    def add_folder(self, folder: str, force: bool = True):
        """
        Queue a folder whose files will be added (recursively) at store time.

        :param folder: a single Synapse Folder ID
        :param force: force add items from folder
        """
        if not self.__dict__.get('folders_to_add', None):
            self.__dict__['folders_to_add'] = set()
        self.__dict__['folders_to_add'].add(folder)
        # the last add_folder/add_folders call wins for the force flag
        self.force = force
    def add_folders(self, folders: List[str], force: bool = True):
        """
        Queue a collection of folders (see :py:meth:`add_folder`).

        :param folders: a list of Synapse Folder IDs
        :param force: force add items from folders
        """
        if isinstance(folders, list) or isinstance(folders, set) or \
                isinstance(folders, tuple):
            self.force = force
            for folder in folders:
                self.add_folder(folder, force)
        else:
            raise ValueError(f"Not a list of Folder IDs: {folders}")
    def _add_folder_files(self, syn, folder):
        """Recursively collect {'entityId', 'versionNumber'} dicts for every file under *folder*.

        :raises ValueError: when a child is neither a Folder nor a FileEntity
            (e.g. a table inside the folder).
        """
        files = []
        children = syn.getChildren(folder)
        for child in children:
            if child.get("type") == "org.sagebionetworks.repo.model.Folder":
                files.extend(self._add_folder_files(syn, child.get("id")))
            elif child.get("type") == "org.sagebionetworks.repo.model.FileEntity":
                files.append({
                    'entityId': child.get("id"),
                    'versionNumber': child.get('versionNumber')
                })
            else:
                raise ValueError(f"Not a Folder?: {folder}")
        return files
    def _before_synapse_store(self, syn):
        """Resolve queued folders into items and prepare API-facing properties before storing."""
        # Add files from folders (if any) before storing dataset.
        if self.folders_to_add:
            for folder in self.folders_to_add:
                items_to_add = self._add_folder_files(syn, folder)
                self.add_items(items_to_add, self.force)
            self.folders_to_add = set()
        # Must set this scopeIds is used to get all annotations from the
        # entities
        self.scopeIds = [item['entityId'] for item in self.properties.datasetItems]
        super()._before_synapse_store(syn)
        # Reset attribute to force-add items from folders.
        self.force = True
        # Remap `datasetItems` back to `items` before storing (since `items`
        # is the accepted field name in the API, not `datasetItems`).
        self.properties.items = self.properties.datasetItems
class EntityViewSchema(ViewBase):
    """
    A EntityViewSchema is a :py:class:`synapseclient.entity.Entity` that displays all files/projects
    (depending on user choice) within a given set of scopes

    :param name:                         the name of the Entity View Table object
    :param columns:                      a list of :py:class:`Column` objects or their IDs. These are optional.
    :param parent:                       the project in Synapse to which this table belongs
    :param scopes:                       a list of Projects/Folders or their ids
    :param type:                         This field is deprecated. Please use `includeEntityTypes`
    :param includeEntityTypes:           a list of entity types to include in the view. Supported entity types are:
                                         EntityViewType.FILE,
                                         EntityViewType.PROJECT,
                                         EntityViewType.TABLE,
                                         EntityViewType.FOLDER,
                                         EntityViewType.VIEW,
                                         EntityViewType.DOCKER
                                         If none is provided, the view will default to include EntityViewType.FILE.
    :param addDefaultViewColumns:        If true, adds all default columns (e.g. name, createdOn, modifiedBy etc.)
                                         Defaults to True.
                                         The default columns will be added after a call to
                                         :py:meth:`synapseclient.Synapse.store`.
    :param addAnnotationColumns:         If true, adds columns for all annotation keys defined across all Entities in
                                         the EntityViewSchema's scope. Defaults to True.
                                         The annotation columns will be added after a call to
                                         :py:meth:`synapseclient.Synapse.store`.
    :param ignoredAnnotationColumnNames: A list of strings representing annotation names.
                                         When addAnnotationColumns is True, the names in this list will not be
                                         automatically added as columns to the EntityViewSchema if they exist in any
                                         of the defined scopes.
    :param properties:                   A map of Synapse properties
    :param annotations:                  A map of user defined annotations
    :param local_state:                  Internal use only

    Example::

        from synapseclient import EntityViewType

        project_or_folder = syn.get("syn123")
        schema = syn.store(EntityViewSchema(name='MyTable', parent=project, scopes=[project_or_folder_id, 'syn123'],
                                            includeEntityTypes=[EntityViewType.FILE]))
    """
    _synapse_entity_type = 'org.sagebionetworks.repo.model.table.EntityView'
    def __init__(self, name=None, columns=None, parent=None, scopes=None, type=None, includeEntityTypes=None,
                 addDefaultViewColumns=True, addAnnotationColumns=True, ignoredAnnotationColumnNames=[],
                 properties=None, annotations=None, local_state=None, **kwargs):
        # NOTE(review): the mutable default [] above is only ever copied into a set, never mutated
        # resolve the viewTypeMask from (in precedence order) includeEntityTypes,
        # the deprecated `type` arg, or a deprecated 'type' entry in properties
        if includeEntityTypes:
            kwargs['viewTypeMask'] = _get_view_type_mask(includeEntityTypes)
        elif type:
            kwargs['viewTypeMask'] = _get_view_type_mask_for_deprecated_type(type)
        elif properties and 'type' in properties:
            kwargs['viewTypeMask'] = _get_view_type_mask_for_deprecated_type(properties['type'])
            # NOTE(review): this mutates the caller's properties dict in place — confirm intended
            properties['type'] = None
        self.ignoredAnnotationColumnNames = set(ignoredAnnotationColumnNames)
        super(EntityViewSchema, self).__init__(name=name, columns=columns, properties=properties,
                                               annotations=annotations, local_state=local_state, parent=parent,
                                               **kwargs)
        # This is a hacky solution to make sure we don't try to add columns to schemas that we retrieve from synapse
        is_from_normal_constructor = not (properties or local_state)
        # allowing annotations because user might want to update annotations all at once
        self.addDefaultViewColumns = addDefaultViewColumns and is_from_normal_constructor
        self.addAnnotationColumns = addAnnotationColumns and is_from_normal_constructor
        # set default values after constructor so we don't overwrite the values defined in properties using .get()
        # because properties, unlike local_state, do not have nonexistent keys assigned with a value of None
        if self.get('viewTypeMask') is None:
            self.viewTypeMask = EntityViewType.FILE.value
        if self.get('scopeIds') is None:
            self.scopeIds = []
        # add the scopes last so that we can append the passed in scopes to those defined in properties
        if scopes is not None:
            self.add_scope(scopes)
    def set_entity_types(self, includeEntityTypes):
        """
        :param includeEntityTypes: a list of entity types to include in the view. This list will replace the previous
                                   settings. Supported entity types are:
                                   EntityViewType.FILE,
                                   EntityViewType.PROJECT,
                                   EntityViewType.TABLE,
                                   EntityViewType.FOLDER,
                                   EntityViewType.VIEW,
                                   EntityViewType.DOCKER
        """
        self.viewTypeMask = _get_view_type_mask(includeEntityTypes)
class SubmissionViewSchema(ViewBase):
    """
    A SubmissionViewSchema is a :py:class:`synapseclient.entity.Entity` that displays all files/projects
    (depending on user choice) within a given set of scopes

    :param name:                         the name of the Entity View Table object
    :param columns:                      a list of :py:class:`Column` objects or their IDs. These are optional.
    :param parent:                       the project in Synapse to which this table belongs
    :param scopes:                       a list of Evaluation Queues or their ids
    :param addDefaultViewColumns:        If true, adds all default columns (e.g. name, createdOn, modifiedBy etc.)
                                         Defaults to True.
                                         The default columns will be added after a call to
                                         :py:meth:`synapseclient.Synapse.store`.
    :param addAnnotationColumns:         If true, adds columns for all annotation keys defined across all Entities in
                                         the SubmissionViewSchema's scope. Defaults to True.
                                         The annotation columns will be added after a call to
                                         :py:meth:`synapseclient.Synapse.store`.
    :param ignoredAnnotationColumnNames: A list of strings representing annotation names.
                                         When addAnnotationColumns is True, the names in this list will not be
                                         automatically added as columns to the SubmissionViewSchema if they exist in
                                         any of the defined scopes.
    :param properties:                   A map of Synapse properties
    :param annotations:                  A map of user defined annotations
    :param local_state:                  Internal use only

    Example::
        from synapseclient import SubmissionViewSchema

        project = syn.get("syn123")
        schema = syn.store(SubmissionViewSchema(name='My Submission View', parent=project, scopes=['9614543']))
    """
    _synapse_entity_type = 'org.sagebionetworks.repo.model.table.SubmissionView'
    def __init__(self, name=None, columns=None, parent=None, scopes=None,
                 addDefaultViewColumns=True, addAnnotationColumns=True,
                 ignoredAnnotationColumnNames=[],
                 properties=None, annotations=None, local_state=None, **kwargs):
        # NOTE(review): the mutable default [] above is only ever copied into a set, never mutated
        self.ignoredAnnotationColumnNames = set(ignoredAnnotationColumnNames)
        super(SubmissionViewSchema, self).__init__(
            name=name, columns=columns, properties=properties,
            annotations=annotations, local_state=local_state, parent=parent,
            **kwargs
        )
        # This is a hacky solution to make sure we don't try to add columns to schemas that we retrieve from synapse
        is_from_normal_constructor = not (properties or local_state)
        # allowing annotations because user might want to update annotations all at once
        self.addDefaultViewColumns = addDefaultViewColumns and is_from_normal_constructor
        self.addAnnotationColumns = addAnnotationColumns and is_from_normal_constructor
        if self.get('scopeIds') is None:
            self.scopeIds = []
        # add the scopes last so that we can append the passed in scopes to those defined in properties
        if scopes is not None:
            self.add_scope(scopes)
# add Schema to the map of synapse entity types to their Python representations
for cls in itersubclasses(SchemaBase):
    entity_type_to_class[cls._synapse_entity_type] = cls
# HACK: viewbase extends schema base, so need to remove ViewBase
# (ViewBase._synapse_entity_type is "" so the loop above registered it under
# the empty-string key; pop it back out)
entity_type_to_class.pop('')
class SelectColumn(DictObject):
    """
    Describes a column as used in a table :py:class:`synapseclient.table.Schema`.

    :var id: An immutable ID issued by the platform
    :param columnType: One of "STRING", "DOUBLE", "INTEGER", "BOOLEAN", "DATE", "FILEHANDLEID", "ENTITYID"
    :param name: The display name of the column

    :type id: string
    :type columnType: string
    :type name: string
    """
    def __init__(self, id=None, columnType=None, name=None, **kwargs):
        super(SelectColumn, self).__init__()
        # Record only the attributes that were actually provided (truthy).
        for attribute, value in (('id', id), ('name', name), ('columnType', columnType)):
            if value:
                setattr(self, attribute, value)

        # Unrecognized keyword arguments are retained only for forward compatibility.
        self.update(kwargs)

    @classmethod
    def from_column(cls, column):
        """Build a SelectColumn from a :py:class:`Column`-like mapping."""
        return cls(column.get('id'), column.get('columnType'), column.get('name'))
class Column(DictObject):
    """
    Defines a column to be used in a table :py:class:`synapseclient.table.Schema`
    :py:class:`synapseclient.table.EntityViewSchema`.

    :var id: An immutable ID issued by the platform
    :param columnType: The type of data the column holds; one of "STRING", "DOUBLE", "INTEGER",
                       "BOOLEAN", "DATE", "FILEHANDLEID", "ENTITYID", "LINK", "LARGETEXT",
                       "USERID". See
                       https://docs.synapse.org/rest/org/sagebionetworks/repo/model/table/ColumnType.html
    :param maximumSize: Maximum size for columnTypes that have one (e.g. STRING defaults to 50
                        characters and accepts 1 to 1000).
    :param maximumListLength: Required for "_LIST"-suffixed columnTypes; the maximum number of
                              values per list, 1-100 inclusive (default 100).
    :param name: The display name of the column
    :param enumValues: For STRING columns, an optional list of allowed values.
    :param defaultValue: The default value for this column (not allowed for FILEHANDLEID and
                         ENTITYID columns).

    :type id: string
    :type maximumSize: integer
    :type maximumListLength: integer
    :type columnType: string
    :type name: string
    :type enumValues: array of strings
    :type defaultValue: string
    """
    def __init__(self, **kwargs):
        super(Column, self).__init__(kwargs)
        self['concreteType'] = concrete_types.COLUMN_MODEL

    @classmethod
    def getURI(cls, id):
        """Return the REST URI of the column with the given id."""
        return '/column/' + str(id)

    def postURI(self):
        """Return the REST URI used to create a new column."""
        return '/column'
class AppendableRowset(DictObject, metaclass=abc.ABCMeta):
    """Abstract Base Class for :py:class:`Rowset` and :py:class:`PartialRowset`"""

    @abc.abstractmethod
    def __init__(self, schema, **kwargs):
        # Accept either an explicit 'tableId' in kwargs or derive one from the schema.
        if ('tableId' not in kwargs) and schema:
            kwargs['tableId'] = id_of(schema)
        if not kwargs.get('tableId', None):
            raise ValueError("Table schema ID must be defined to create a %s" % type(self).__name__)
        super(AppendableRowset, self).__init__(kwargs)

    def _synapse_store(self, syn):
        """
        Creates and POSTs an AppendableRowSetRequest_

        .. AppendableRowSetRequest:
           http://docs.synapse.org/rest/org/sagebionetworks/repo/model/table/AppendableRowSetRequest.html
        """
        append_rowset_request = {'concreteType': concrete_types.APPENDABLE_ROWSET_REQUEST,
                                 'toAppend': self,
                                 'entityId': self.tableId}

        # wait=True blocks until the asynchronous table-update job completes;
        # _check_table_transaction_response raises if the transaction reports failures.
        response = syn._async_table_update(self.tableId, [append_rowset_request], wait=True)
        syn._check_table_transaction_response(response)
        return response['results'][0]
class PartialRowset(AppendableRowset):
    """A set of Partial Rows used for updating cells of a table.
    PartialRowsets allow you to push only the individual cells you wish to change instead of pushing entire rows with
    many unchanged cells.

    Example::
        #### the following code will change cells in a hypothetical table, syn123:
        #### these same steps will also work for using EntityView tables to change Entity annotations
        #
        # fooCol | barCol               fooCol    | barCol
        # -----------------  =======>  ----------------------
        # foo1   | bar1                foo foo1  | bar1
        # foo2   | bar2                foo2      | bar bar 2

        query_results = syn.tableQuery("SELECT * FROM syn123")

        # The easiest way to know the rowId of the row you wish to change
        # is by converting the table to a pandas DataFrame with rowIdAndVersionInIndex=False
        df = query_results.asDataFrame(rowIdAndVersionInIndex=False)

        partial_changes = {df['ROW_ID'][0]: {'fooCol': 'foo foo 1'},
                           df['ROW_ID'][1]: {'barCol': 'bar bar 2'}}

        # you will need to pass in your original query result as an argument
        # so that we can perform column id translation and etag retrieval on your behalf:
        partial_rowset = PartialRowset.from_mapping(partial_changes, query_results)
        syn.store(partial_rowset)

    :param schema: The :py:class:`Schema` of the table to update or its tableId as a string
    :param rows: A list of PartialRows
    """

    @classmethod
    def from_mapping(cls, mapping, originalQueryResult):
        """Creates a PartialRowset

        :param mapping: A mapping of mappings in the structure: {ROW_ID : {COLUMN_NAME: NEW_COL_VALUE}}
        :param originalQueryResult: the result of the syn.tableQuery() the changes are based on
        :return: a PartialRowSet that can be syn.store()-ed to apply the changes
        """
        if not isinstance(mapping, collections.abc.Mapping):
            raise ValueError("mapping must be a supported Mapping type such as 'dict'")

        try:
            name_to_column_id = {col.name: col.id for col in originalQueryResult.headers if 'id' in col}
        except AttributeError:
            raise ValueError('originalQueryResult must be the result of a syn.tableQuery()')

        row_ids = set(int(id) for id in mapping.keys())

        # row_ids in the originalQueryResult are not guaranteed to be in ascending order
        # iterate over all etags but only map the row_ids used for this partial update to their etags
        row_etags = {row_id: etag for row_id, row_version, etag in originalQueryResult.iter_row_metadata()
                     if row_id in row_ids and etag is not None}

        partial_rows = [PartialRow(row_changes, row_id, etag=row_etags.get(int(row_id)),
                                   nameToColumnId=name_to_column_id)
                        for row_id, row_changes in mapping.items()]

        return cls(originalQueryResult.tableId, partial_rows)

    def __init__(self, schema, rows):
        super(PartialRowset, self).__init__(schema)
        self.concreteType = concrete_types.PARTIAL_ROW_SET

        if isinstance(rows, PartialRow):
            self.rows = [rows]
        else:
            # BUG FIX: materialize the iterable *before* validating. The previous code
            # ran all(isinstance(...) for row in rows) and then list(rows); for a
            # generator the all() call exhausted it, silently producing an empty rowset.
            try:
                rows = list(rows)
            except TypeError:
                raise ValueError("rows must be iterable")
            if all(isinstance(row, PartialRow) for row in rows):
                self.rows = rows
            else:
                raise ValueError("rows must contain only values of type PartialRow")
class RowSet(AppendableRowset):
    """
    A Synapse object of type `org.sagebionetworks.repo.model.table.RowSet \
    <http://docs.synapse.org/rest/org/sagebionetworks/repo/model/table/RowSet.html>`_.

    :param schema: A :py:class:`synapseclient.table.Schema` object that will be used to set the tableId
    :param headers: The list of SelectColumn objects that describe the fields in each row.
    :param columns: An alternative to 'headers', a list of column objects that describe the fields in each row.
    :param tableId: The ID of the TableEntity that owns these rows
    :param rows: The :py:class:`synapseclient.table.Row` s of this set. The index of each row value aligns with the
                 index of each header.
    :var etag: Any RowSet returned from Synapse will contain the current etag of the change set. To update any
               rows from a RowSet the etag must be provided with the POST.

    :type headers: array of SelectColumns
    :type etag: string
    :type tableId: string
    :type rows: array of rows
    """

    @classmethod
    def from_json(cls, json):
        """Build a RowSet from a parsed JSON dict, casting each row's values per the headers."""
        headers = [SelectColumn(**header) for header in json.get('headers', [])]
        rows = [cast_row(Row(**row), headers) for row in json.get('rows', [])]
        return cls(headers=headers, rows=rows,
                   **{key: json[key] for key in json.keys() if key not in ['headers', 'rows']})

    def __init__(self, columns=None, schema=None, **kwargs):
        if 'headers' not in kwargs:
            if columns and schema:
                # BUG FIX: corrected typo in the error message ("user" -> "use").
                raise ValueError("Please only use either 'columns' or 'schema' as an argument but not both.")
            if columns:
                kwargs.setdefault('headers', []).extend([SelectColumn.from_column(column) for column in columns])
            elif schema and isinstance(schema, Schema):
                kwargs.setdefault('headers', []).extend([SelectColumn(id=id) for id in schema["columnIds"]])

        if not kwargs.get('headers', None):
            raise ValueError("Column headers must be defined to create a RowSet")
        kwargs['concreteType'] = 'org.sagebionetworks.repo.model.table.RowSet'

        super(RowSet, self).__init__(schema, **kwargs)

    def _synapse_store(self, syn):
        response = super(RowSet, self)._synapse_store(syn)
        # prefer the rowReferenceSet portion of the response when present
        return response.get('rowReferenceSet', response)

    def _synapse_delete(self, syn):
        """
        Delete the rows in the RowSet.

        Example::
            syn.delete(syn.tableQuery('select name from %s where no_good = true' % schema1.id))
        """
        row_id_vers_generator = ((row.rowId, row.versionNumber) for row in self.rows)
        _delete_rows(syn, self.tableId, row_id_vers_generator)
class Row(DictObject):
    """
    A `row <http://docs.synapse.org/rest/org/sagebionetworks/repo/model/table/Row.html>`_ in a Table.

    :param values: A list of values
    :param rowId: The immutable ID issued to a new row
    :param versionNumber: The version number of this row. Each row version is immutable, so when a row is updated a
                          new version is created.
    """
    def __init__(self, values, rowId=None, versionNumber=None, etag=None, **kwargs):
        super(Row, self).__init__()
        self.values = values
        # Record only the metadata fields that were explicitly supplied.
        for attribute, value in (('rowId', rowId), ('versionNumber', versionNumber), ('etag', etag)):
            if value is not None:
                setattr(self, attribute, value)

        # Unrecognized keyword arguments are retained only for forward compatibility.
        self.update(kwargs)
class PartialRow(DictObject):
    """A single row of cell-level changes, for use inside a :py:class:`PartialRowSet`.
    It is recommended to build these via :py:classmethod:`PartialRowSet.from_mapping` rather than by hand.

    If you want to do the tedious parts yourself:

    To change cells in the "foo"(colId:1234) and "bar"(colId:456) columns of a row with rowId=5 ::
        rowId = 5

        #pass in with columnIds as key:
        PartialRow({123: 'fooVal', 456:'barVal'}, rowId)

        #pass in with a nameToColumnId argument
        #manually define:
        nameToColumnId = {'foo':123, 'bar':456}
        #OR if you have the result of a tableQuery() you can generate nameToColumnId using:
        query_result = syn.tableQuery("SELECT * FROM syn123")
        nameToColumnId = {col.name:col.id for col in query_result.headers}

        PartialRow({'foo': 'fooVal', 'bar':'barVal'}, rowId, nameToColumnId=nameToColumnId)

    :param values: A Mapping from column name (or columnId) to the new desired value for that cell
    :param rowId: The id of the row to be updated
    :param etag: used for updating File/Project Views(::py:class:`EntityViewSchema`). Not necessary for a
                 (::py:class:`Schema`) Table
    :param nameToColumnId: Optional map from column names to column ids. When provided, the keys of the
                           `values` Mapping are translated through it; supply it whenever `values` is keyed
                           by column name instead of columnId.
    """
    def __init__(self, values, rowId, etag=None, nameToColumnId=None):
        super(PartialRow, self).__init__()
        if not isinstance(values, collections.abc.Mapping):
            raise ValueError("values must be a Mapping")

        parsed_row_id = int(rowId)

        # Translate keys through nameToColumnId when one was supplied; otherwise
        # pass keys through unchanged.
        if nameToColumnId is not None:
            resolve_key = nameToColumnId.__getitem__
        else:
            def resolve_key(column_key):
                return column_key

        self.values = [{'key': resolve_key(column_key), 'value': cell_value}
                       for column_key, cell_value in values.items()]
        self.rowId = parsed_row_id
        if etag is not None:
            self.etag = etag
def build_table(name, parent, values):
    """
    Build a Table object

    :param name: the name for the Table Schema object
    :param parent: the project in Synapse to which this table belongs
    :param values: the content of the table: either a string holding the path to a CSV file, or a
                   Pandas `DataFrame <http://pandas.pydata.org/pandas-docs/stable/api.html#dataframe>`_

    :return: a Table object suitable for storing

    Example::
        path = "/path/to/file.csv"
        table = build_table("simple_table", "syn123", path)
        table = syn.store(table)

        import pandas as pd

        df = pd.DataFrame(dict(a=[1, 2, 3], b=["c", "d", "e"]))
        table = build_table("simple_table", "syn123", df)
        table = syn.store(table)
    """
    test_import_pandas()
    import pandas as pd

    if not isinstance(values, (pd.DataFrame, str)):
        raise ValueError("Values of type %s is not yet supported." % type(values))
    columns = as_table_columns(values)
    return Table(Schema(name=name, columns=columns, parent=parent),
                 values,
                 headers=[SelectColumn.from_column(col) for col in columns])
def Table(schema, values, **kwargs):
    """
    Combine a table schema and a set of values into some type of Table object
    depending on what type of values are given.

    :param schema: a table :py:class:`Schema` object or Synapse Id of Table.
    :param values: an object that holds the content of the tables
                   - a :py:class:`RowSet`
                   - a list of lists (or tuples) where each element is a row
                   - a string holding the path to a CSV file
                   - a Pandas `DataFrame <http://pandas.pydata.org/pandas-docs/stable/api.html#dataframe>`_
                   - a dict which will be wrapped by a Pandas \
                   `DataFrame <http://pandas.pydata.org/pandas-docs/stable/api.html#dataframe>`_

    :return: a Table object suitable for storing

    Usually, the immediate next step after creating a Table object is to store it::
        table = syn.store(Table(schema, values))

    End users should not need to know the details of these Table subclasses:

    - :py:class:`TableAbstractBaseClass`
    - :py:class:`RowSetTable`
    - :py:class:`TableQueryResult`
    - :py:class:`CsvFileTable`
    """
    try:
        import pandas as pd
        pandas_available = True
    except Exception:  # noqa
        # Narrowed from a bare `except:` which would also have swallowed
        # KeyboardInterrupt/SystemExit; any failure to import pandas still
        # just disables the DataFrame/dict code paths below.
        pandas_available = False

    # a RowSet
    if isinstance(values, RowSet):
        return RowSetTable(schema, values, **kwargs)

    # a list of rows
    elif isinstance(values, (list, tuple)):
        return CsvFileTable.from_list_of_rows(schema, values, **kwargs)

    # filename of a csv file
    elif isinstance(values, str):
        return CsvFileTable(schema, filepath=values, **kwargs)

    # pandas DataFrame
    elif pandas_available and isinstance(values, pd.DataFrame):
        return CsvFileTable.from_data_frame(schema, values, **kwargs)

    # dict
    elif pandas_available and isinstance(values, dict):
        return CsvFileTable.from_data_frame(schema, pd.DataFrame(values), **kwargs)

    else:
        raise ValueError("Don't know how to make tables from values of type %s." % type(values))
class TableAbstractBaseClass(collections.abc.Iterable, collections.abc.Sized):
    """
    Abstract base class for Tables based on different data containers.
    """
    RowMetadataTuple = collections.namedtuple('RowMetadataTuple', ['row_id', 'row_version', 'row_etag'])

    def __init__(self, schema, headers=None, etag=None):
        """
        :param schema: a :py:class:`Schema` object or the Synapse ID (string) of a table entity
        :param headers: a list of :py:class:`SelectColumn`; when omitted and a Schema is given,
                        headers are derived from the schema's columnIds
        :param etag: the etag of the table's current change set, if known
        :raises ValueError: if schema is neither a Schema nor a string
        """
        if isinstance(schema, Schema):
            self.schema = schema
            self.tableId = schema.id if schema and 'id' in schema else None
            self.headers = headers if headers else [SelectColumn(id=id) for id in schema.columnIds]
            self.etag = etag
        elif isinstance(schema, str):
            self.schema = None
            self.tableId = schema
            self.headers = headers
            self.etag = etag
        else:
            # BUG FIX: the ValueError was previously constructed but never raised,
            # silently accepting invalid schema arguments.
            raise ValueError("Must provide a schema or a synapse ID of a Table Entity")

    def asDataFrame(self):
        raise NotImplementedError()

    def asInteger(self):
        """Return the single value of a count-style query result as an int."""
        try:
            first_row = next(iter(self))
            return int(first_row[0])
        except (KeyError, TypeError):
            raise ValueError("asInteger is only valid for queries such as count queries whose first value is an"
                             " integer.")

    def asRowSet(self):
        """Convert this table's rows into a :py:class:`RowSet`."""
        return RowSet(headers=self.headers,
                      tableId=self.tableId,
                      etag=self.etag,
                      rows=[row if isinstance(row, Row) else Row(row) for row in self])

    def _synapse_store(self, syn):
        raise NotImplementedError()

    def _synapse_delete(self, syn):
        """
        Delete the rows that result from a table query.

        Example::
            syn.delete(syn.tableQuery('select name from %s where no_good = true' % schema1.id))
        """
        row_id_vers_generator = ((metadata.row_id, metadata.row_version) for metadata in self.iter_row_metadata())
        _delete_rows(syn, self.tableId, row_id_vers_generator)

    @abc.abstractmethod
    def iter_row_metadata(self):
        """Iterates the table results to get row_id and row_etag. If an etag does not exist for a row, it will
        generated as (row_id, None)

        :return: a generator that gives :py:class::`collections.namedtuple` with format (row_id, row_etag)
        """
        pass
class RowSetTable(TableAbstractBaseClass):
    """
    A Table object that wraps a RowSet.
    """
    def __init__(self, schema, rowset):
        # The etag (if any) is taken from the wrapped rowset, not from the schema.
        super(RowSetTable, self).__init__(schema, etag=rowset.get('etag', None))
        self.rowset = rowset

    def _synapse_store(self, syn):
        """Store the wrapped rowset and return a new RowSetTable wrapping the returned row references."""
        row_reference_set = syn.store(self.rowset)
        return RowSetTable(self.schema, row_reference_set)

    def asDataFrame(self):
        """Convert this table's rows into a pandas DataFrame."""
        test_import_pandas()
        import pandas as pd

        # Use row-id-derived labels when at least one row carries a truthy rowId;
        # otherwise let pandas assign a default index.
        if any([row['rowId'] for row in self.rowset['rows']]):
            rownames = row_labels_from_rows(self.rowset['rows'])
        else:
            rownames = None

        series = collections.OrderedDict()
        # Build one pandas Series per column; values are collected column-wise from
        # the row-major rowset.
        for i, header in enumerate(self.rowset["headers"]):
            series[header.name] = pd.Series(name=header.name,
                                            data=[row['values'][i] for row in self.rowset['rows']],
                                            index=rownames)

        return pd.DataFrame(data=series, index=rownames)

    def asRowSet(self):
        return self.rowset

    def asInteger(self):
        """Return the first cell of the first row as an int (count-style queries)."""
        try:
            return int(self.rowset['rows'][0]['values'][0])
        except (KeyError, TypeError):
            raise ValueError("asInteger is only valid for queries such as count queries whose first value is an"
                             " integer.")

    def __iter__(self):
        def iterate_rows(rows, headers):
            for row in rows:
                # cast_values presumably converts raw cell values per the headers'
                # column types — confirm against its definition elsewhere in this file.
                yield cast_values(row, headers)
        return iterate_rows(self.rowset['rows'], self.rowset['headers'])

    def __len__(self):
        return len(self.rowset['rows'])

    def iter_row_metadata(self):
        raise NotImplementedError("iter_metadata is not supported for RowSetTable")
class TableQueryResult(TableAbstractBaseClass):
    """
    An object to wrap rows returned as a result of a table query.
    The TableQueryResult object can be used to iterate over results of a query.

    Example ::
        results = syn.tableQuery("select * from syn1234")
        for row in results:
            print(row)
    """
    def __init__(self, synapse, query, limit=None, offset=None, isConsistent=True):
        # The query is executed immediately; the first page of results is fetched here.
        self.syn = synapse

        self.query = query
        self.limit = limit
        self.offset = offset
        self.isConsistent = isConsistent

        result = self.syn._queryTable(
            query=query,
            limit=limit,
            offset=offset,
            isConsistent=isConsistent)

        self.rowset = RowSet.from_json(result['queryResult']['queryResults'])

        self.columnModels = [Column(**col) for col in result.get('columnModels', [])]
        self.nextPageToken = result['queryResult'].get('nextPageToken', None)
        self.count = result.get('queryCount', None)
        self.maxRowsPerPage = result.get('maxRowsPerPage', None)
        # Iteration cursor into the current page's rows; -1 means "before the first row".
        self.i = -1

        super(TableQueryResult, self).__init__(
            schema=self.rowset.get('tableId', None),
            headers=self.rowset.headers,
            etag=self.rowset.get('etag', None))

    def _synapse_store(self, syn):
        # Query results are read-only snapshots; storing them is always an error.
        raise SynapseError(
            "A TableQueryResult is a read only object and can't be stored in Synapse. Convert to a"
            " DataFrame or RowSet instead."
        )

    def asDataFrame(self, rowIdAndVersionInIndex=True):
        """
        Convert query result to a Pandas DataFrame.

        :param rowIdAndVersionInIndex: Make the dataframe index consist of the row_id and row_version (and row_etag
                                       if it exists)
        """
        test_import_pandas()
        import pandas as pd

        # To turn a TableQueryResult into a data frame, we add a page of rows
        # at a time on the untested theory that it's more efficient than
        # adding a single row at a time to the data frame.

        def construct_rownames(rowset, offset=0):
            try:
                return row_labels_from_rows(rowset['rows']) if rowIdAndVersionInIndex else None
            except KeyError:
                # if we don't have row id and version, just number the rows
                # python3 cast range to list for safety
                return list(range(offset, offset + len(rowset['rows'])))

        # first page of rows
        offset = 0
        rownames = construct_rownames(self.rowset, offset)
        offset += len(self.rowset['rows'])
        series = collections.OrderedDict()

        if not rowIdAndVersionInIndex:
            # Since we use an OrderedDict this must happen before we construct the other columns
            # add row id, verison, and etag as rows
            append_etag = False  # only useful when (not rowIdAndVersionInIndex), hooray for lazy variables!
            series['ROW_ID'] = pd.Series(name='ROW_ID', data=[row['rowId'] for row in self.rowset['rows']])
            series['ROW_VERSION'] = pd.Series(name='ROW_VERSION',
                                              data=[row['versionNumber'] for row in self.rowset['rows']])

            row_etag = [row.get('etag') for row in self.rowset['rows']]
            if any(row_etag):
                append_etag = True
                series['ROW_ETAG'] = pd.Series(name='ROW_ETAG', data=row_etag)

        for i, header in enumerate(self.rowset["headers"]):
            column_name = header.name
            series[column_name] = pd.Series(name=column_name,
                                            data=[row['values'][i] for row in self.rowset['rows']],
                                            index=rownames)

        # subsequent pages of rows
        while self.nextPageToken:
            result = self.syn._queryTableNext(self.nextPageToken, self.tableId)
            self.rowset = RowSet.from_json(result['queryResults'])
            self.nextPageToken = result.get('nextPageToken', None)
            self.i = 0

            rownames = construct_rownames(self.rowset, offset)
            offset += len(self.rowset['rows'])

            if not rowIdAndVersionInIndex:
                # NOTE(review): these reads use row['id'] / row['version'] while the
                # first page above reads row['rowId'] / row['versionNumber']; rows built
                # by RowSet.from_json carry rowId/versionNumber, so this branch looks
                # like it would raise KeyError on multi-page results — confirm.
                # NOTE(review): pd.Series.append was removed in pandas 2.0 — confirm the
                # project's supported pandas versions.
                series['ROW_ID'].append(pd.Series(name='ROW_ID', data=[row['id'] for row in self.rowset['rows']]))
                series['ROW_VERSION'].append(pd.Series(name='ROW_VERSION',
                                                       data=[row['version'] for row in self.rowset['rows']]))
                if append_etag:
                    series['ROW_ETAG'] = pd.Series(name='ROW_ETAG',
                                                   data=[row.get('etag') for row in self.rowset['rows']])

            for i, header in enumerate(self.rowset["headers"]):
                column_name = header.name
                series[column_name] = series[column_name].append(
                    pd.Series(name=column_name,
                              data=[row['values'][i] for row in self.rowset['rows']],
                              index=rownames),
                    # can't verify integrity when indices are just numbers instead of 'rowid_rowversion'
                    verify_integrity=rowIdAndVersionInIndex)

        return pd.DataFrame(data=series)

    def asRowSet(self):
        # Note that as of stack 60, an empty query will omit the headers field
        # see PLFM-3014
        return RowSet(headers=self.headers,
                      tableId=self.tableId,
                      etag=self.etag,
                      rows=[row for row in self])

    def asInteger(self):
        """Return the single value of a count-style query as an int."""
        try:
            return int(self.rowset['rows'][0]['values'][0])
        except (KeyError, TypeError):
            raise ValueError("asInteger is only valid for queries such as count queries whose first value is an"
                             " integer.")

    def __iter__(self):
        return self

    def next(self):
        """
        Python 2 iterator
        """
        self.i += 1
        if self.i >= len(self.rowset['rows']):
            if self.nextPageToken:
                # Current page is exhausted: fetch the next page and reset the cursor.
                result = self.syn._queryTableNext(self.nextPageToken, self.tableId)
                self.rowset = RowSet.from_json(result['queryResults'])
                self.nextPageToken = result.get('nextPageToken', None)
                self.i = 0
            else:
                raise StopIteration()
        return self.rowset['rows'][self.i]

    def __next__(self):
        """
        Python 3 iterator
        """
        return self.next()

    def __len__(self):
        # Length of the currently-loaded page only, not of the full result set.
        return len(self.rowset['rows'])

    def iter_row_metadata(self):
        """Iterates the table results to get row_id and row_etag. If an etag does not exist for a row, it will
        generated as (row_id, row_version,None)

        :return: a generator that gives :py:class::`collections.namedtuple` with format (row_id, row_version, row_etag)
        """
        for row in self:
            yield type(self).RowMetadataTuple(int(row['rowId']), int(row['versionNumber']), row.get('etag'))
class CsvFileTable(TableAbstractBaseClass):
"""
An object to wrap a CSV file that may be stored into a Synapse table or
returned as a result of a table query.
"""
@classmethod
def from_table_query(cls, synapse, query, quoteCharacter='"', escapeCharacter="\\", lineEnd=str(os.linesep),
                     separator=",", header=True, includeRowIdAndRowVersion=True, downloadLocation=None):
    """
    Create a Table object wrapping a CSV file resulting from querying a Synapse table.
    Mostly for internal use.
    """
    download_from_table_result, path = synapse._queryTableCsv(
        query=query,
        quoteCharacter=quoteCharacter,
        escapeCharacter=escapeCharacter,
        lineEnd=lineEnd,
        separator=separator,
        header=header,
        includeRowIdAndRowVersion=includeRowIdAndRowVersion,
        downloadLocation=downloadLocation,
    )

    # A dirty hack to find out if we got back row ID and Version
    # in particular, we don't get these back from aggregate queries
    with io.open(path, 'r', encoding='utf-8') as f:
        reader = csv.reader(f,
                            delimiter=separator,
                            escapechar=escapeCharacter,
                            lineterminator=lineEnd,
                            quotechar=quoteCharacter)
        first_line = next(reader)
    # Two columns beyond the declared headers means ROW_ID and ROW_VERSION were
    # prepended to the downloaded CSV.
    if len(download_from_table_result['headers']) + 2 == len(first_line):
        includeRowIdAndRowVersion = True
    else:
        includeRowIdAndRowVersion = False

    self = cls(
        filepath=path,
        schema=download_from_table_result.get('tableId', None),
        etag=download_from_table_result.get('etag', None),
        quoteCharacter=quoteCharacter,
        escapeCharacter=escapeCharacter,
        lineEnd=lineEnd,
        separator=separator,
        header=header,
        includeRowIdAndRowVersion=includeRowIdAndRowVersion,
        headers=[SelectColumn(**header) for header in download_from_table_result['headers']])

    return self
@classmethod
def from_data_frame(cls, schema, df, filepath=None, etag=None, quoteCharacter='"', escapeCharacter="\\",
                    lineEnd=str(os.linesep), separator=",", header=True, includeRowIdAndRowVersion=None,
                    headers=None, **kwargs):
    """Create a CsvFileTable by writing a pandas DataFrame out to a CSV file.

    :param schema: a :py:class:`Schema` or the Synapse ID of the target table
    :param df: the pandas DataFrame holding the table's content
    :param filepath: where to write the CSV; a temp file is created when omitted
    :param headers: optional list of :py:class:`SelectColumn`; inferred from the DataFrame when omitted

    The remaining parameters describe the CSV dialect used for the written file.
    """
    # infer columns from data frame if not specified
    if not headers:
        cols = as_table_columns(df)
        headers = [SelectColumn.from_column(col) for col in cols]

        # if the schema has no columns, use the inferred columns
        if isinstance(schema, Schema) and not schema.has_columns():
            schema.addColumns(cols)

    # convert row names in the format [row_id]_[version] or [row_id]_[version]_[etag] back to columns
    # etag is essentially a UUID
    etag_pattern = r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}'
    row_id_version_pattern = re.compile(r'(\d+)_(\d+)(_(' + etag_pattern + r'))?')

    row_id = []
    row_version = []
    row_etag = []
    for row_name in df.index.values:
        m = row_id_version_pattern.match(str(row_name))
        row_id.append(m.group(1) if m else None)
        row_version.append(m.group(2) if m else None)
        row_etag.append(m.group(4) if m else None)

    # include row ID and version, if we're asked to OR if it's encoded in row names
    if includeRowIdAndRowVersion or (includeRowIdAndRowVersion is None and any(row_id)):
        df2 = df.copy()

        cls._insert_dataframe_column_if_not_exist(df2, 0, 'ROW_ID', row_id)
        cls._insert_dataframe_column_if_not_exist(df2, 1, 'ROW_VERSION', row_version)
        if any(row_etag):
            cls._insert_dataframe_column_if_not_exist(df2, 2, 'ROW_ETAG', row_etag)

        df = df2
        includeRowIdAndRowVersion = True

    f = None
    try:
        if not filepath:
            temp_dir = tempfile.mkdtemp()
            filepath = os.path.join(temp_dir, 'table.csv')

        f = io.open(filepath, mode='w', encoding='utf-8', newline='')

        test_import_pandas()
        import pandas as pd
        if isinstance(schema, Schema):
            for col in schema.columns_to_store:
                if col['columnType'] == 'DATE':
                    # Dropping the last 3 digits of strftime('%s%f') output
                    # (seconds + microseconds) leaves a millisecond timestamp.
                    # NOTE(review): '%s' is not a portable strftime directive
                    # (platform-dependent) — confirm behavior on Windows.
                    def _trailing_date_time_millisecond(t):
                        if isinstance(t, str):
                            return t[:-3]

                    df[col.name] = pd.to_datetime(df[col.name], errors='coerce').dt.strftime('%s%f')
                    df[col.name] = df[col.name].apply(lambda x: _trailing_date_time_millisecond(x))

        # NOTE(review): `line_terminator` was renamed `lineterminator` in pandas 2.0 —
        # confirm the project's supported pandas versions.
        df.to_csv(f,
                  index=False,
                  sep=separator,
                  header=header,
                  quotechar=quoteCharacter,
                  escapechar=escapeCharacter,
                  line_terminator=lineEnd,
                  na_rep=kwargs.get('na_rep', ''),
                  float_format="%.12g")
        # NOTE: reason for flat_format='%.12g':
        # pandas automatically converts int columns into float64 columns when some cells in the column have no
        # value. If we write the whole number back as a decimal (e.g. '3.0'), Synapse complains that we are writing
        # a float into a INTEGER(synapse table type) column. Using the 'g' will strip off '.0' from whole number
        # values. pandas by default (with no float_format parameter) seems to keep 12 values after decimal, so we
        # use '%.12g'.c
        # see SYNPY-267.
    finally:
        if f:
            f.close()

    return cls(
        schema=schema,
        filepath=filepath,
        etag=etag,
        quoteCharacter=quoteCharacter,
        escapeCharacter=escapeCharacter,
        lineEnd=lineEnd,
        separator=separator,
        header=header,
        includeRowIdAndRowVersion=includeRowIdAndRowVersion,
        headers=headers)
@staticmethod
def _insert_dataframe_column_if_not_exist(dataframe, insert_index, col_name, insert_column_data):
    """Insert ``insert_column_data`` as column ``col_name`` at ``insert_index``.

    If a column with that name already exists its data must match ``insert_column_data``
    exactly; otherwise a :py:class:`SynapseError` is raised, because these columns
    (ROW_ID / ROW_VERSION / ROW_ETAG) are required for version tracking in Synapse.
    """
    # if the column already exists verify the column data is same as what we parsed
    if col_name in dataframe.columns:
        if dataframe[col_name].tolist() != insert_column_data:
            raise SynapseError(
                # BUG FIX: corrected typo in the error message ("refain" -> "refrain").
                ("A column named '{0}' already exists and does not match the '{0}' values present in"
                 " the DataFrame's row names. Please refrain from using or modifying '{0}' as a"
                 " column for your data because it is necessary for version tracking in Synapse's"
                 " tables").format(col_name)
            )
    else:
        dataframe.insert(insert_index, col_name, insert_column_data)
@classmethod
def from_list_of_rows(cls, schema, values, filepath=None, etag=None, quoteCharacter='"', escapeCharacter="\\",
                      lineEnd=str(os.linesep), separator=",", linesToSkip=0, includeRowIdAndRowVersion=None,
                      headers=None):
    """Create a CsvFileTable by writing a list of row sequences out to a CSV file.

    :param schema: a :py:class:`Schema` or the Synapse ID of the target table
    :param values: a list of lists (or tuples), one inner sequence per row
    :param filepath: where to write the CSV; a temp file is created when omitted
    """
    # create CSV file
    f = None
    try:
        if not filepath:
            temp_dir = tempfile.mkdtemp()
            filepath = os.path.join(temp_dir, 'table.csv')

        f = io.open(filepath, 'w', encoding='utf-8', newline='')

        # NOTE(review): skipinitialspace is a boolean csv dialect attribute; passing
        # the integer linesToSkip here looks like a parameter mix-up — confirm intent.
        writer = csv.writer(f,
                            quoting=csv.QUOTE_NONNUMERIC,
                            delimiter=separator,
                            escapechar=escapeCharacter,
                            lineterminator=lineEnd,
                            quotechar=quoteCharacter,
                            skipinitialspace=linesToSkip)

        # if we haven't explicitly set columns, try to grab them from
        # the schema object
        if not headers and "columns_to_store" in schema and schema.columns_to_store is not None:
            headers = [SelectColumn.from_column(col) for col in schema.columns_to_store]

        # write headers?
        if headers:
            writer.writerow([header.name for header in headers])
            header = True
        else:
            header = False

        # write row data
        for row in values:
            writer.writerow(row)
    finally:
        if f:
            f.close()

    return cls(
        schema=schema,
        filepath=filepath,
        etag=etag,
        quoteCharacter=quoteCharacter,
        escapeCharacter=escapeCharacter,
        lineEnd=lineEnd,
        separator=separator,
        header=header,
        headers=headers,
        includeRowIdAndRowVersion=includeRowIdAndRowVersion)
def __init__(self, schema, filepath, etag=None, quoteCharacter=DEFAULT_QUOTE_CHARACTER,
             escapeCharacter=DEFAULT_ESCAPSE_CHAR, lineEnd=str(os.linesep), separator=DEFAULT_SEPARATOR,
             header=True, linesToSkip=0, includeRowIdAndRowVersion=None, headers=None):
    """Wrap an existing CSV file as a table.

    :param schema: a :py:class:`Schema` or the Synapse ID (string) of the table entity
    :param filepath: path to the CSV file backing this table
    :param etag: optional etag of the table's current change set
    :param headers: optional list of :py:class:`SelectColumn` describing the columns

    The quote/escape/lineEnd/separator/header/linesToSkip parameters describe the CSV dialect.
    """
    self.filepath = filepath

    self.includeRowIdAndRowVersion = includeRowIdAndRowVersion

    # CsvTableDescriptor fields
    self.linesToSkip = linesToSkip
    self.quoteCharacter = quoteCharacter
    self.escapeCharacter = escapeCharacter
    self.lineEnd = lineEnd
    self.separator = separator
    self.header = header

    super(CsvFileTable, self).__init__(schema, headers=headers, etag=etag)

    # May prepend ROW_ID/ROW_VERSION headers depending on includeRowIdAndRowVersion.
    self.setColumnHeaders(headers)
def _synapse_store(self, syn):
    """Store via a shallow copy so this instance's own state is left untouched."""
    return copy.copy(self)._update_self(syn)
def _update_self(self, syn):
    """Store the schema if it is new, upload this table's CSV, and update our etag in place."""
    if isinstance(self.schema, Schema) and self.schema.get('id', None) is None:
        # store schema
        self.schema = syn.store(self.schema)
        self.tableId = self.schema.id

    result = syn._uploadCsv(
        self.filepath,
        self.schema if self.schema else self.tableId,
        updateEtag=self.etag,
        quoteCharacter=self.quoteCharacter,
        escapeCharacter=self.escapeCharacter,
        lineEnd=self.lineEnd,
        separator=self.separator,
        header=self.header,
        linesToSkip=self.linesToSkip)

    upload_to_table_result = result['results'][0]

    # Sanity-check the response type before trusting its fields.
    assert upload_to_table_result['concreteType'] in ('org.sagebionetworks.repo.model.table.EntityUpdateResults',
                                                      'org.sagebionetworks.repo.model.table.UploadToTableResult'),\
        "Not an UploadToTableResult or EntityUpdateResults."
    if 'etag' in upload_to_table_result:
        self.etag = upload_to_table_result['etag']
    return self
def asDataFrame(self, rowIdAndVersionInIndex=True, convert_to_datetime=False):
    """Convert query result to a Pandas DataFrame.

    :param rowIdAndVersionInIndex: Make the dataframe index consist of the row_id and row_version
                                   (and row_etag if it exists)
    :param convert_to_datetime: If set to True, will convert all Synapse DATE columns from UNIX timestamp
                                integers into UTC datetime objects

    :return:
    """
    test_import_pandas()
    import pandas as pd

    try:
        # Handle bug in pandas 0.19 requiring quotechar to be str not unicode or newstr
        quoteChar = self.quoteCharacter

        # determine which columns are DATE columns so we can convert milisecond timestamps into datetime objects
        date_columns = []
        list_columns = []
        dtype = {}

        if self.headers is not None:
            for select_column in self.headers:
                if select_column.columnType == "STRING":
                    # we want to identify string columns so that pandas doesn't try to
                    # automatically parse strings in a string column to other data types
                    dtype[select_column.name] = str
                elif select_column.columnType in LIST_COLUMN_TYPES:
                    list_columns.append(select_column.name)
                elif select_column.columnType == "DATE" and convert_to_datetime:
                    date_columns.append(select_column.name)

        return _csv_to_pandas_df(self.filepath,
                                 separator=self.separator,
                                 quote_char=quoteChar,
                                 escape_char=self.escapeCharacter,
                                 contain_headers=self.header,
                                 lines_to_skip=self.linesToSkip,
                                 date_columns=date_columns,
                                 list_columns=list_columns,
                                 rowIdAndVersionInIndex=rowIdAndVersionInIndex,
                                 dtype=dtype)
    # NOTE(review): pd.parser was removed from modern pandas (the exception is now
    # pd.errors.ParserError); on current pandas, evaluating this except clause after a
    # parse error would itself raise AttributeError — confirm the supported pandas versions.
    except pd.parser.CParserError:
        return pd.DataFrame()
def asRowSet(self):
    """Convert this CSV-backed result into a :py:class:`RowSet`, moving any
    ROW_ID / ROW_VERSION columns out of the row values and into row metadata."""
    # Locate the positions of the row-metadata columns, if present.
    meta_positions = {}
    for position, hdr in enumerate(self.headers):
        if hdr.name in ('ROW_ID', 'ROW_VERSION'):
            meta_positions[hdr.name] = position
    id_pos = meta_positions.get('ROW_ID')
    ver_pos = meta_positions.get('ROW_VERSION')
    skip = [id_pos, ver_pos]

    def build_row(raw):
        # Rows may already be Row objects; pass those through untouched.
        if isinstance(raw, Row):
            return raw
        return Row([v for i, v in enumerate(raw) if i not in skip],
                   rowId=raw[id_pos] if id_pos is not None else None,
                   versionNumber=raw[ver_pos] if ver_pos is not None else None)

    return RowSet(headers=[h for i, h in enumerate(self.headers) if i not in skip],
                  tableId=self.tableId,
                  etag=self.etag,
                  rows=[build_row(r) for r in self])
def setColumnHeaders(self, headers):
    """
    Set the list of :py:class:`synapseclient.table.SelectColumn` objects that will be used to convert
    fields to the appropriate data types.

    Column headers are automatically set when querying.
    """
    if self.includeRowIdAndRowVersion:
        supplied = {header.name for header in headers}
        # Prepend the row-metadata columns unless the caller already included them.
        if not supplied & {"ROW_ID", "ROW_VERSION"}:
            headers = [SelectColumn(name="ROW_ID", columnType="STRING"),
                       SelectColumn(name="ROW_VERSION", columnType="STRING")] + headers
    self.headers = headers
def __iter__(self):
    """Iterate over the data rows, yielding value lists cast per self.headers."""
    def row_generator(path, headers):
        # These checks run lazily, on first iteration, because this is a
        # generator — matching the original semantics.
        if not self.header or not self.headers:
            raise ValueError("Iteration not supported for table without headers.")

        metadata_names = {'ROW_ID', 'ROW_VERSION', 'ROW_ETAG'}
        declared_names = {h.name for h in headers}
        declared_meta_count = len(declared_names & metadata_names)

        with io.open(path, encoding='utf-8', newline=self.lineEnd) as csv_file:
            reader = csv.reader(csv_file,
                                delimiter=self.separator,
                                escapechar=self.escapeCharacter,
                                lineterminator=self.lineEnd,
                                quotechar=self.quoteCharacter)
            file_meta_count = len(set(next(reader)) & metadata_names)
            # Number of leading row-metadata columns present in the file but
            # absent from self.headers; these get sliced off before casting.
            extra_meta = file_meta_count - declared_meta_count
            # Only two layouts are supported:
            #   1. file and self.headers agree on row metadata (extra_meta == 0), or
            #   2. self.headers carries no row metadata at all.
            if extra_meta != 0 and declared_meta_count != 0:
                raise ValueError("There is mismatching row metadata in the csv file and in headers.")
            for raw_row in reader:
                yield cast_values(raw_row[extra_meta:], headers)

    return row_generator(self.filepath, self.headers)
def __len__(self):
    """Return the number of data rows in the backing CSV file."""
    with io.open(self.filepath, encoding='utf-8', newline=self.lineEnd) as csv_file:
        if self.header:
            # Skip the header line so only data rows are counted.
            csv_file.readline()
        count = 0
        for _ in csv_file:
            count += 1
        return count
def iter_row_metadata(self):
    """Iterate over the table results yielding only row metadata.

    Yields one :py:class:`collections.namedtuple` per row, built from
    (row_id, row_version, row_etag) in that order; row_etag is None when
    the table has no ROW_ETAG column.
    """
    with io.open(self.filepath, encoding='utf-8', newline=self.lineEnd) as csv_file:
        reader = csv.reader(csv_file,
                            delimiter=self.separator,
                            escapechar=self.escapeCharacter,
                            lineterminator=self.lineEnd,
                            quotechar=self.quoteCharacter)
        first_line = next(reader)
        # The ROW_... headers are always in a predefined order; ROW_ETAG
        # may be absent entirely.
        id_idx = first_line.index('ROW_ID')
        version_idx = first_line.index('ROW_VERSION')
        etag_idx = first_line.index('ROW_ETAG') if 'ROW_ETAG' in first_line else None
        for record in reader:
            etag = record[etag_idx] if etag_idx is not None else None
            yield type(self).RowMetadataTuple(int(record[id_idx]),
                                              int(record[version_idx]),
                                              etag)
| {
"content_hash": "372d2bf0b6c2dd965a7d9831676da120",
"timestamp": "",
"source": "github",
"line_count": 2351,
"max_line_length": 120,
"avg_line_length": 41.51127179923437,
"alnum_prop": 0.6041416904900966,
"repo_name": "thomasyu888/synapsePythonClient",
"id": "f9a2f539d8ff7fde8fead2663766400b0b068119",
"size": "97593",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "synapseclient/table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "420"
},
{
"name": "Python",
"bytes": "1573386"
}
],
"symlink_target": ""
} |
"""
Unofficial Python API for retrieving data from Delicious.com.
This module provides the following features plus some more:
* retrieving a URL's full public bookmarking history including
* users who bookmarked the URL including tags used for such bookmarks
and the creation time of the bookmark (up to YYYY-MM-DD granularity)
* top tags (up to a maximum of 10) including tag count
* title as stored on Delicious.com
* total number of bookmarks/users for this URL at Delicious.com
* retrieving a user's full bookmark collection, including any private bookmarks
if you know the corresponding password
* retrieving a user's full public tagging vocabulary, i.e. tags and tag counts
* retrieving a user's network information (network members and network fans)
* HTTP proxy support
* updated to support Delicious.com "version 2" (mini-relaunch as of August 2008)
The official Delicious.com API and the JSON/RSS feeds do not provide all
the functionality mentioned above, and in such cases this module will query
the Delicious.com *website* directly and extract the required information
by parsing the HTML code of the resulting Web pages (a kind of poor man's
web mining). The module is able to detect IP throttling, which is employed
by Delicious.com to temporarily block abusive HTTP request behavior, and
will raise a custom Python error to indicate that. Please be a nice netizen
and do not stress the Delicious.com service more than necessary.
It is strongly advised that you read the Delicious.com Terms of Use
before using this Python module. In particular, read section 5
'Intellectual Property'.
The code is licensed to you under version 2 of the GNU General Public
License.
More information about this module can be found at
http://www.michael-noll.com/wiki/Del.icio.us_Python_API
Changelog is available at
http://code.michael-noll.com/?p=deliciousapi;a=log
Copyright 2006-2010 Michael G. Noll <http://www.michael-noll.com/>
"""
__author__ = "Michael G. Noll"
__copyright__ = "(c) 2006-2010 Michael G. Noll"
__description__ = "Unofficial Python API for retrieving data from Delicious.com"
__email__ = "coding[AT]michael-REMOVEME-noll[DOT]com"
__license__ = "GPLv2"
__maintainer__ = "Michael G. Noll"
__status__ = "Development"
__url__ = "http://www.michael-noll.com/"
__version__ = "1.6.3"
import base64
import cgi
import datetime
import hashlib
from operator import itemgetter
import re
import socket
import time
import urllib2
import xml.dom.minidom
try:
from BeautifulSoup import BeautifulSoup
except:
print "ERROR: could not import BeautifulSoup Python module"
print
print "You can download BeautifulSoup from the Python Cheese Shop at"
print "http://cheeseshop.python.org/pypi/BeautifulSoup/"
print "or directly from http://www.crummy.com/software/BeautifulSoup/"
print
raise
try:
from app.lib import simplejson
except:
print "ERROR: could not import simplejson module"
print
print "Since version 1.5.0, DeliciousAPI requires the simplejson module."
print "You can download simplejson from the Python Cheese Shop at"
print "http://pypi.python.org/pypi/simplejson"
print
raise
class DeliciousUser(object):
    """Aggregates all available information about a Delicious.com user.

    Variables:
        bookmarks:
            A list of (url, tags, title, comment, timestamp) tuples that
            make up the user's bookmark collection.

            url is a 'unicode'
            tags is a 'list' of 'unicode' ([] if no tags)
            title is a 'unicode'
            comment is a 'unicode' (u"" if no comment)
            timestamp is a 'datetime.datetime'

        tags (read-only property):
            A dictionary mapping each tag to its tag count, aggregated over
            all of the user's retrieved bookmarks. Represents the user's
            tagging vocabulary.

        username:
            The Delicious.com account name of the user.
    """

    def __init__(self, username, bookmarks=None):
        assert username
        self.username = username
        self.bookmarks = bookmarks or []

    def __str__(self):
        tag_total = 0
        distinct_tags = set()
        for _url, tags, _title, _comment, _timestamp in self.bookmarks:
            if tags:
                tag_total += len(tags)
                distinct_tags.update(tags)
        return "[%s] %d bookmarks, %d tags (%d unique)" % \
                    (self.username, len(self.bookmarks), tag_total, len(distinct_tags))

    def __repr__(self):
        return self.username

    def get_tags(self):
        """Returns a dictionary mapping tags to their tag count.

        For example, if the tag count of tag 'foo' is 23, then
        23 bookmarks were annotated with 'foo'.
        """
        counts = {}
        for _url, tags, _title, _comment, _timestamp in self.bookmarks:
            for tag in tags:
                counts[tag] = counts.get(tag, 0) + 1
        return counts
    tags = property(fget=get_tags, doc="Returns a dictionary mapping tags to their tag count")
class DeliciousURL(object):
    """Aggregates all available information about a web document on Delicious.com.

    Variables:
        bookmarks:
            A list of (user, tags, comment, timestamp) tuples representing
            the document's bookmark history. Generally populated via
            get_url(), so how many bookmarks appear here depends on the
            parameters passed to get_url().

            user is a 'unicode'
            tags is a 'list' of 'unicode's ([] if no tags)
            comment is a 'unicode' (u"" if no comment)
            timestamp is a 'datetime.datetime' (granularity: creation *day*,
                i.e. the day but not the time of day)

        tags (read-only property):
            A dictionary mapping each tag to its tag count, aggregated over
            all of the document's retrieved bookmarks.

        top_tags:
            A list of (tag, tag_count) tuples: the document's "top tags",
            i.e. the up to 10 most popular tags for this document.

        url:
            The URL of the document.

        hash (read-only property):
            The MD5 hash of the URL.

        title:
            The document's title.

        total_bookmarks:
            The total number of bookmarks (posts) of the document as
            reported by Delicious.com itself. This can exceed
            len(bookmarks): "bookmarks" only holds what could actually be
            scraped, while total_bookmarks is the ground truth reported by
            the service.
    """

    def __init__(self, url, top_tags=None, bookmarks=None, title=u"", total_bookmarks=0):
        assert url
        self.url = url
        self.top_tags = top_tags or []
        self.bookmarks = bookmarks or []
        self.title = title
        self.total_bookmarks = total_bookmarks

    def __str__(self):
        tag_total = 0
        distinct_tags = set()
        for _user, tags, _comment, _timestamp in self.bookmarks:
            if tags:
                tag_total += len(tags)
                distinct_tags.update(tags)
        return "[%s] %d total bookmarks (= users), %d tags (%d unique), %d out of 10 max 'top' tags" % \
                    (self.url, self.total_bookmarks, tag_total,
                     len(distinct_tags), len(self.top_tags))

    def __repr__(self):
        return self.url

    def get_tags(self):
        """Returns a dictionary mapping tags to their tag count.

        For example, if the tag count of tag 'foo' is 23, then 23 users
        used the tag 'foo' when bookmarking the URL.

        @return: Dictionary mapping tags to their tag count.
        """
        counts = {}
        for _user, tags, _comment, _timestamp in self.bookmarks:
            for tag in tags:
                counts[tag] = counts.get(tag, 0) + 1
        return counts
    tags = property(fget=get_tags, doc="Returns a dictionary mapping tags to their tag count")

    def get_hash(self):
        digest = hashlib.md5()
        digest.update(self.url)
        return digest.hexdigest()
    hash = property(fget=get_hash, doc="Returns the MD5 hash of the URL of this document")
class DeliciousAPI(object):
"""
This class provides a custom, unofficial API to the Delicious.com service.
Instead of using just the functionality provided by the official
Delicious.com API (which has limited features), this class retrieves
information from the Delicious.com website directly and extracts data from
the Web pages.
Note that Delicious.com will block clients with too many queries in a
certain time frame (similar to their API throttling). So be a nice citizen
and don't stress their website.
"""
def __init__(self,
             http_proxy="",
             tries=3,
             wait_seconds=3,
             user_agent="DeliciousAPI/%s (+http://www.michael-noll.com/wiki/Del.icio.us_Python_API)" % __version__,
             timeout=30,
             ):
    """Initialize the API wrapper with its networking options.

    @param http_proxy: Optional, default: "".
        HTTP proxy to use for HTTP connections, in "hostname:port"
        format (e.g., "localhost:8080"). Proxy support for HTTPS is
        not available yet.
    @type http_proxy: str

    @param tries: Optional, default: 3.
        Number of attempts made when downloading a monitored document
        fails. Must be >= 1. See also wait_seconds.
    @type tries: int

    @param wait_seconds: Optional, default: 3.
        Number of seconds to wait before re-trying to download a
        monitored document. Must be >= 0. See also tries.
    @type wait_seconds: int

    @param user_agent: Optional, default: "DeliciousAPI/<version>
        (+http://www.michael-noll.com/wiki/Del.icio.us_Python_API)".
        Value of the User-Agent HTTP header sent when querying
        Delicious.com.
    @type user_agent: str

    @param timeout: Optional, default: 30.
        Network timeout in seconds. Must be >= 0.
    @type timeout: int

    """
    # Reject nonsensical retry/timeout configurations up front.
    assert tries >= 1
    assert wait_seconds >= 0
    assert timeout >= 0
    self.http_proxy = http_proxy
    self.tries = tries
    self.wait_seconds = wait_seconds
    self.user_agent = user_agent
    self.timeout = timeout
    # NOTE: a process-global socket timeout is intentionally not installed.
    #socket.setdefaulttimeout(self.timeout)
def _query(self, path, host="delicious.com", user=None, password=None, use_ssl=False):
    """Queries Delicious.com for information, specified by (query) path.

    Builds a urllib2 opener (optionally with HTTP Basic authentication
    and/or an HTTP proxy), fetches the URL, and retries up to self.tries
    times on network-level failures (URLError / socket.error), sleeping
    self.wait_seconds between attempts. HTTP error statuses are mapped to
    this module's Delicious* exceptions and raised immediately, without
    retrying.

    @param path: The HTTP query path.
    @type path: str

    @param host: The host to query, default: "delicious.com".
    @type host: str

    @param user: The Delicious.com username if any, default: None.
    @type user: str

    @param password: The Delicious.com password of user, default: None.
    @type password: unicode/str

    @param use_ssl: Whether to use SSL encryption or not, default: False.
    @type use_ssl: bool

    @return: None on errors (i.e. on all HTTP status other than 200).
        On success, returns the content of the HTML response.

    """
    opener = None
    handlers = []

    # add HTTP Basic authentication if available
    if user and password:
        pwd_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        pwd_mgr.add_password(None, host, user, password)
        basic_auth_handler = urllib2.HTTPBasicAuthHandler(pwd_mgr)
        handlers.append(basic_auth_handler)

    # add proxy support if requested
    # NOTE(review): only the 'http' scheme is proxied; HTTPS requests
    # bypass the proxy (matches the documented HTTPS-proxy limitation).
    if self.http_proxy:
        proxy_handler = urllib2.ProxyHandler({'http': 'http://%s' % self.http_proxy})
        handlers.append(proxy_handler)

    if handlers:
        opener = urllib2.build_opener(*handlers)
    else:
        opener = urllib2.build_opener()
    # identify ourselves to the service with the configured User-Agent
    opener.addheaders = [('User-agent', self.user_agent)]

    data = None
    tries = self.tries

    if use_ssl:
        protocol = "https"
    else:
        protocol = "http"
    url = "%s://%s%s" % (protocol, host, path)
    while tries > 0:
        try:
            f = opener.open(url)
            data = f.read()
            f.close()
            break
        except urllib2.HTTPError, e:
            # Map HTTP status codes to module-specific exceptions; each
            # branch raises, so HTTP errors are never retried.
            if e.code == 301:
                raise DeliciousMovedPermanentlyWarning, "Delicious.com status %s - url moved permanently" % e.code
            if e.code == 302:
                raise DeliciousMovedTemporarilyWarning, "Delicious.com status %s - url moved temporarily" % e.code
            elif e.code == 401:
                raise DeliciousUnauthorizedError, "Delicious.com error %s - unauthorized (authentication failed?)" % e.code
            elif e.code == 403:
                raise DeliciousForbiddenError, "Delicious.com error %s - forbidden" % e.code
            elif e.code == 404:
                raise DeliciousNotFoundError, "Delicious.com error %s - url not found" % e.code
            elif e.code == 500:
                raise Delicious500Error, "Delicious.com error %s - server problem" % e.code
            elif e.code == 503 or e.code == 999:
                # 503/999 indicate IP throttling by Delicious.com
                raise DeliciousThrottleError, "Delicious.com error %s - unable to process request (your IP address has been throttled/blocked)" % e.code
            else:
                raise DeliciousUnknownError, "Delicious.com error %s - unknown error" % e.code
            break  # unreachable: every branch above raises
        except urllib2.URLError, e:
            # network-level problem (DNS failure, unreachable host, ...):
            # wait and let the while loop retry
            time.sleep(self.wait_seconds)
        except socket.error, msg:
            # sometimes we get a "Connection Refused" error
            # wait a bit and then try again
            time.sleep(self.wait_seconds)
        #finally:
        #    f.close()
        tries -= 1
    return data
def get_url(self, url, max_bookmarks=50, sleep_seconds=1):
    """
    Returns a DeliciousURL instance representing the Delicious.com history of url.

    Generally, this method is what you want for getting title, bookmark, tag,
    and user information about a URL.

    Delicious only returns up to 50 bookmarks per URL. This means that
    we have to do subsequent queries plus parsing if we want to retrieve
    more than 50. Roughly speaking, the processing time of get_url()
    increases linearly with the number of 50-bookmarks-chunks; i.e.
    it will take 10 times longer to retrieve 500 bookmarks than 50.

    @param url: The URL of the web document to be queried for.
    @type url: str

    @param max_bookmarks: Optional, default: 50.
        See the documentation of get_bookmarks() for more information
        as get_url() uses get_bookmarks() to retrieve a url's
        bookmarking history.
    @type max_bookmarks: int

    @param sleep_seconds: Optional, default: 1.
        See the documentation of get_bookmarks() for more information
        as get_url() uses get_bookmarks() to retrieve a url's
        bookmarking history. sleep_seconds must be >= 1 to comply with
        Delicious.com's Terms of Use.
    @type sleep_seconds: int

    @return: DeliciousURL instance representing the Delicious.com history
        of url.

    """
    # we must wait at least 1 second between subsequent queries to
    # comply with Delicious.com's Terms of Use
    assert sleep_seconds >= 1

    document = DeliciousURL(url)

    # the JSON urlinfo feed is keyed by the MD5 hash of the URL
    m = hashlib.md5()
    m.update(url)
    hash = m.hexdigest()

    path = "/v2/json/urlinfo/%s" % hash
    data = self._query(path, host="feeds.delicious.com")
    if data:
        # the feed returns a JSON list containing (at most) one info dict
        urlinfo = {}
        try:
            urlinfo = simplejson.loads(data)
            if urlinfo:
                urlinfo = urlinfo[0]
            else:
                urlinfo = {}
        except TypeError:
            pass
        # every key is optional in the feed, so each lookup falls back to
        # the DeliciousURL defaults when missing
        try:
            document.title = urlinfo['title'] or u""
        except KeyError:
            pass
        try:
            top_tags = urlinfo['top_tags'] or {}
            if top_tags:
                # sort (tag, count) pairs by count, most popular first
                document.top_tags = sorted(top_tags.iteritems(), key=itemgetter(1), reverse=True)
            else:
                document.top_tags = []
        except KeyError:
            pass
        try:
            document.total_bookmarks = int(urlinfo['total_posts'])
        except (KeyError, ValueError):
            pass
        # the full bookmark history is scraped from the website rather
        # than read from the (limited) feed
        document.bookmarks = self.get_bookmarks(url=url, max_bookmarks=max_bookmarks, sleep_seconds=sleep_seconds)

    return document
def get_network(self, username):
    """
    Returns the user's list of followees and followers.

    Followees are users in his Delicious "network", i.e. those users whose
    bookmark streams he's subscribed to. Followers are his Delicious.com
    "fans", i.e. those users who have subscribed to the given user's
    bookmark stream).

    Example:

            A -------->           --------> C
            D --------> B --------> E
            F -------->           --------> F

            followers               followees
            of B                    of B

    Arrows from user A to user B denote that A has subscribed to B's
    bookmark stream, i.e. A is "following" or "tracking" B.

    Note that user F is both a followee and a follower of B, i.e. F tracks
    B and vice versa. In Delicious.com terms, F is called a "mutual fan"
    of B.

    Comparing this network concept to information retrieval, one could say
    that followers are incoming links and followees outgoing links of B.

    @param username: Delicous.com username for which network information is
        retrieved.
    @type username: unicode/str

    @return: Tuple of two lists ([<followees>, [<followers>]), where each list
        contains tuples of (username, tracking_since_timestamp).
        If a network is set as private, i.e. hidden from public view,
        (None, None) is returned.
        If a network is public but empty, ([], []) is returned.

    """
    assert username

    def _fetch_network_feed(feed_name):
        # Shared downloader/parser for the two JSON network feeds
        # ('networkmembers' -> followees, 'networkfans' -> followers);
        # both feeds have an identical format, so the previously duplicated
        # parsing code now lives here once.
        # Returns a list of (username, tracking_since) tuples, or None if
        # the feed could not be retrieved (e.g. the network is private).
        data = None
        try:
            data = self._query("/v2/json/%s/%s" % (feed_name, username), host="feeds.delicious.com")
        except DeliciousForbiddenError:
            # a private network responds with HTTP 403
            pass
        if not data:
            return None

        users = []
        try:
            users = simplejson.loads(data)
        except TypeError:
            pass

        entries = []
        # NOTE(review): uname/tracking_since deliberately persist across
        # loop iterations (preserves the original behavior): an entry
        # missing its 'user' or 'dt' key reuses the previous entry's value.
        uname = tracking_since = None
        for user in users:
            # username of the network member/fan
            try:
                uname = user['user']
            except KeyError:
                pass
            # try to convert uname to Unicode
            if uname:
                try:
                    # we assume UTF-8 encoding
                    uname = uname.decode('utf-8')
                except UnicodeDecodeError:
                    pass
            # time when the tracking relationship was established
            try:
                tracking_since = datetime.datetime.strptime(user['dt'], "%Y-%m-%dT%H:%M:%SZ")
            except KeyError:
                pass
            if uname:
                entries.append( (uname, tracking_since) )
        return entries

    # followees (network members) and followers (network fans)
    followees = _fetch_network_feed("networkmembers")
    followers = _fetch_network_feed("networkfans")

    return ( followees, followers )
def get_bookmarks(self, url=None, username=None, max_bookmarks=50, sleep_seconds=1):
    """
    Returns the bookmarks of url or user, respectively.

    Delicious.com only returns up to 50 bookmarks per URL on its website.
    This means that we have to do subsequent queries plus parsing if
    we want to retrieve more than 50. Roughly speaking, the processing
    time of get_bookmarks() increases linearly with the number of
    50-bookmarks-chunks; i.e. it will take 10 times longer to retrieve
    500 bookmarks than 50.

    @param url: The URL of the web document to be queried for.
        Cannot be used together with 'username'.
    @type url: str

    @param username: The Delicious.com username to be queried for.
        Cannot be used together with 'url'.
    @type username: str

    @param max_bookmarks: Optional, default: 50.
        Maximum number of bookmarks to retrieve. Set to 0 to disable
        this limitation/the maximum and retrieve all available
        bookmarks of the given url.

        Bookmarks are sorted so that newer bookmarks are first.
        Setting max_bookmarks to 50 means that get_bookmarks() will retrieve
        the 50 most recent bookmarks of the given url.

        In the case of getting bookmarks of a URL (url is set),
        get_bookmarks() will take *considerably* longer to run
        for pages with lots of bookmarks when setting max_bookmarks
        to a high number or when you completely disable the limit.
        Delicious returns only up to 50 bookmarks per result page,
        so for example retrieving 250 bookmarks requires 5 HTTP
        connections and parsing 5 HTML pages plus wait time between
        queries (to comply with delicious' Terms of Use; see
        also parameter 'sleep_seconds').

        In the case of getting bookmarks of a user (username is set),
        the same restrictions as for a URL apply with the exception
        that we can retrieve up to 100 bookmarks per HTTP query
        (instead of only up to 50 per HTTP query for a URL).
    @type max_bookmarks: int

    @param sleep_seconds: Optional, default: 1.
        Wait the specified number of seconds between subsequent
        queries in case that there are multiple pages of bookmarks
        for the given url. sleep_seconds must be >= 1 to comply with
        Delicious.com's Terms of Use.
        See also parameter 'max_bookmarks'.
    @type sleep_seconds: int

    @return: Returns the bookmarks of url or user, respectively.
        For urls, it returns a list of (user, tags, comment, timestamp)
        tuples.
        For users, it returns a list of (url, tags, title, comment,
        timestamp) tuples.
        Bookmarks are sorted "descendingly" by creation time, i.e. newer
        bookmarks come first.

    """
    # we must wait at least 1 second between subsequent queries to
    # comply with delicious' Terms of Use
    assert sleep_seconds >= 1

    # url XOR username: exactly one of the two must be provided
    assert bool(username) is not bool(url)

    # maximum number of urls/posts Delicious.com will display
    # per page on its website
    max_html_count = 100
    # maximum number of pages that Delicious.com will display;
    # currently, the maximum number of pages is 20. Delicious.com
    # allows to go beyond page 20 via pagination, but page N (for
    # N > 20) will always display the same content as page 20.
    max_html_pages = 20

    path = None
    if url:
        # URL histories are addressed by the MD5 hash of the URL
        m = hashlib.md5()
        m.update(url)
        hash = m.hexdigest()

        # path will change later on if there are multiple pages of bookmarks
        # for the given url
        path = "/url/%s" % hash
    elif username:
        # path will change later on if there are multiple pages of bookmarks
        # for the given username
        path = "/%s?setcount=%d" % (username, max_html_count)
    else:
        raise Exception('You must specify either url or user.')

    page_index = 1
    bookmarks = []
    # Scrape page by page; each iteration clears 'path' and only sets it
    # again if a "next" pagination link is found.
    while path and page_index <= max_html_pages:
        data = self._query(path)
        path = None
        if data:
            # extract bookmarks from current page
            if url:
                bookmarks.extend(self._extract_bookmarks_from_url_history(data))
            else:
                bookmarks.extend(self._extract_bookmarks_from_user_history(data))

            # stop scraping if we already have as many bookmarks as we want
            if (len(bookmarks) >= max_bookmarks) and max_bookmarks != 0:
                break
            else:
                # check if there are multiple pages of bookmarks for this
                # url on Delicious.com
                soup = BeautifulSoup(data)
                paginations = soup.findAll("div", id="pagination")
                if paginations:
                    # find next path
                    nexts = paginations[0].findAll("a", attrs={ "class": "pn next" })
                    if nexts and (max_bookmarks == 0 or len(bookmarks) < max_bookmarks) and len(bookmarks) > 0:
                        # e.g. /url/2bb293d594a93e77d45c2caaf120e1b1?show=all&page=2
                        path = nexts[0]['href']
                        if username:
                            path += "&setcount=%d" % max_html_count
                        page_index += 1
                        # wait one second between queries to be compliant with
                        # delicious' Terms of Use
                        time.sleep(sleep_seconds)
    if max_bookmarks > 0:
        # trim any overshoot from the last scraped page
        return bookmarks[:max_bookmarks]
    else:
        return bookmarks
def _extract_bookmarks_from_url_history(self, data):
    """
    Extracts user bookmarks from a URL's history page on Delicious.com.

    The Python library BeautifulSoup is used to parse the HTML page.

    @param data: The HTML source of a URL history Web page on Delicious.com.
    @type data: str

    @return: list of (user, tags, comment, timestamp) bookmarks of the
        corresponding URL; entries without an extractable username are
        skipped.
    """
    bookmarks = []

    soup = BeautifulSoup(data)
    bookmark_elements = soup.findAll("div", attrs={"class": re.compile(r"^bookmark\s*")})
    timestamp = None
    for bookmark_element in bookmark_elements:

        # extract bookmark creation time
        #
        # this timestamp has to "persist" until a new timestamp is
        # found (delicious only provides the creation time data for the
        # first bookmark in the list of bookmarks for a given day
        dategroups = bookmark_element.findAll("div", attrs={"class": "dateGroup"})
        if dategroups:
            spans = dategroups[0].findAll('span')
            if spans:
                date_str = spans[0].contents[0].strip()
                timestamp = datetime.datetime.strptime(date_str, '%d %b %y')

        # extract comments
        comment = u""
        datas = bookmark_element.findAll("div", attrs={"class": "data"})
        if datas:
            divs = datas[0].findAll("div", attrs={"class": "description"})
            if divs:
                comment = divs[0].contents[0].strip()

        # extract tags
        user_tags = []
        tagdisplays = bookmark_element.findAll("div", attrs={"class": "tagdisplay"})
        if tagdisplays:
            spans = tagdisplays[0].findAll("span", attrs={"class": "tagItem"})
            for span in spans:
                tag = span.contents[0]
                user_tags.append(tag)

        # extract user information
        #
        # FIX: 'user' is now reset for every bookmark element. Previously it
        # was never initialized, so a bookmark without extractable user info
        # either raised a NameError (first element) or silently reused the
        # username of a *previous* bookmark, attributing tags/comments to
        # the wrong user.
        user = None
        metas = bookmark_element.findAll("div", attrs={"class": "meta"})
        if metas:
            links = metas[0].findAll("a", attrs={"class": "user user-tag"})
            if links:
                user_a = links[0]
                spans = user_a.findAll('span')
                if spans:
                    try:
                        user = spans[0].contents[0]
                    except IndexError:
                        # WORKAROUND: it seems there is a bug on Delicious.com where
                        # sometimes a bookmark is shown in a URL history without any
                        # associated Delicious username (username is empty); this could
                        # be caused by special characters in the username or other things
                        #
                        # this problem of Delicious is very rare, so we just skip such
                        # entries until they find a fix
                        pass
        if user is None:
            # no username could be extracted -> skip this entry, as the
            # workaround comment above always intended
            continue

        bookmarks.append( (user, user_tags, comment, timestamp) )

    return bookmarks
def _extract_bookmarks_from_user_history(self, data):
    """
    Extracts a user's bookmarks from his user page on Delicious.com.

    The Python library BeautifulSoup is used to parse the HTML page.

    @param data: The HTML source of a user page on Delicious.com.
    @type data: str

    @return: list of bookmarks of the corresponding user
    """
    bookmarks = []

    soup = BeautifulSoup(data)

    ul = soup.find("ul", id="bookmarklist")
    if ul:
        bookmark_elements = ul.findAll("div", attrs={"class": re.compile("^bookmark\s*")})
        timestamp = None
        for bookmark_element in bookmark_elements:

            # extract bookmark creation time
            #
            # this timestamp has to "persist" until a new timestamp is
            # found (delicious only provides the creation time data for the
            # first bookmark in the list of bookmarks for a given day
            dategroups = bookmark_element.findAll("div", attrs={"class": "dateGroup"})
            if dategroups:
                spans = dategroups[0].findAll('span')
                if spans:
                    date_str = spans[0].contents[0].strip()
                    # e.g. "13 Apr 09" -> datetime with day granularity only
                    timestamp = datetime.datetime.strptime(date_str, '%d %b %y')

            # extract url, title and comments; defaults are reset for every
            # bookmark so values cannot leak over from a previous iteration
            url = u""
            title = u""
            comment = u""
            datas = bookmark_element.findAll("div", attrs={"class": "data"})
            if datas:
                links = datas[0].findAll("a", attrs={"class": re.compile("^taggedlink\s*")})
                if links:
                    title = links[0].contents[0].strip()
                    url = links[0]['href']
                divs = datas[0].findAll("div", attrs={"class": "description"})
                if divs:
                    comment = divs[0].contents[0].strip()

            # extract tags
            url_tags = []
            tagdisplays = bookmark_element.findAll("div", attrs={"class": "tagdisplay"})
            if tagdisplays:
                spans = tagdisplays[0].findAll("span", attrs={"class": "tagItem"})
                for span in spans:
                    tag = span.contents[0]
                    url_tags.append(tag)

            bookmarks.append( (url, url_tags, title, comment, timestamp) )
    return bookmarks
def get_user(self, username, password=None, max_bookmarks=50, sleep_seconds=1):
    """Retrieves a user's bookmarks from Delicious.com.

    If a correct username AND password are supplied, a user's *full*
    bookmark collection (which also includes private bookmarks) is
    retrieved. Data communication is encrypted using SSL in this case.

    If no password is supplied, only the *public* bookmarks of the user
    are retrieved. Here, the parameter 'max_bookmarks' specifies how
    many public bookmarks will be retrieved (default: 50). Set the
    parameter to 0 to retrieve all public bookmarks.

    This function can be used to backup all of a user's bookmarks if
    called with a username and password.

    @param username: The Delicious.com username.
    @type username: str

    @param password: Optional, default: None.
        The user's Delicious.com password. If password is set,
        all communication with Delicious.com is SSL-encrypted.
    @type password: unicode/str

    @param max_bookmarks: Optional, default: 50.
        See the documentation of get_bookmarks() for more
        information as get_url() uses get_bookmarks() to
        retrieve a url's bookmarking history.
        The parameter is NOT used when a password is specified
        because in this case the *full* bookmark collection of
        a user will be retrieved.
    @type max_bookmarks: int

    @param sleep_seconds: Optional, default: 1.
        See the documentation of get_bookmarks() for more information as
        get_url() uses get_bookmarks() to retrieve a url's bookmarking
        history. sleep_seconds must be >= 1 to comply with Delicious.com's
        Terms of Use.
    @type sleep_seconds: int

    @return: DeliciousUser instance
    """
    assert username
    user = DeliciousUser(username)
    bookmarks = []
    if password:
        # We have username AND password, so we call
        # the official Delicious.com API.
        path = "/v1/posts/all"
        data = self._query(path, host="api.del.icio.us", use_ssl=True, user=username, password=password)
        if data:
            # The v1 API returns XML; each bookmark is one <post> element.
            soup = BeautifulSoup(data)
            elements = soup.findAll("post")
            for element in elements:
                url = element["href"]
                title = element["description"] or u""
                comment = element["extended"] or u""
                tags = []
                if element["tag"]:
                    # Tags are space-separated in the XML attribute.
                    tags = element["tag"].split()
                timestamp = datetime.datetime.strptime(element["time"], "%Y-%m-%dT%H:%M:%SZ")
                bookmarks.append( (url, tags, title, comment, timestamp) )
            user.bookmarks = bookmarks
    else:
        # We have only the username, so we extract data from
        # the user's JSON feed. However, the feed is restricted
        # to the most recent public bookmarks of the user, which
        # is about 100 if any. So if we need more than 100, we start
        # scraping the Delicious.com website directly
        if max_bookmarks > 0 and max_bookmarks <= 100:
            # NOTE(review): this feed path filters the user's bookmarks by
            # the 'stackoverflow' tag (/v2/json/<user>/stackoverflow), which
            # is narrower than the docstring's "public bookmarks of the
            # user" -- looks like a project-specific customization; confirm.
            path = "/v2/json/%s/stackoverflow?count=100" % username
            data = self._query(path, host="feeds.delicious.com", user=username)
            if data:
                posts = []
                try:
                    posts = simplejson.loads(data)
                except TypeError:
                    pass
                # NOTE(review): these accumulators are initialized once
                # before the loop, so a post missing a key (e.g. 'u' or 'd')
                # silently reuses the value from the previous post -- verify
                # this carry-over is intended.
                url = timestamp = None
                title = comment = u""
                tags = []
                for post in posts:
                    # url
                    try:
                        url = post['u']
                    except KeyError:
                        pass
                    # title
                    try:
                        title = post['d']
                    except KeyError:
                        pass
                    # tags
                    try:
                        tags = post['t']
                    except KeyError:
                        pass
                    if not tags:
                        # Delicious' canonical marker for untagged bookmarks.
                        tags = [u"system:unfiled"]
                    # comment / notes
                    try:
                        comment = post['n']
                    except KeyError:
                        pass
                    # bookmark creation time
                    try:
                        timestamp = datetime.datetime.strptime(post['dt'], "%Y-%m-%dT%H:%M:%SZ")
                    except KeyError:
                        pass
                    bookmarks.append( (url, tags, title, comment, timestamp) )
                user.bookmarks = bookmarks[:max_bookmarks]
        else:
            # TODO: retrieve the first 100 bookmarks via JSON before
            # falling back to scraping the delicous.com website
            user.bookmarks = self.get_bookmarks(username=username, max_bookmarks=max_bookmarks, sleep_seconds=sleep_seconds)
    return user
def get_urls(self, tag=None, popular=True, max_urls=100, sleep_seconds=1):
    """
    Returns the list of recent URLs (of web documents) tagged with a given tag.

    This is very similar to parsing Delicious' RSS/JSON feeds directly,
    but this function will return up to 2,000 links compared to a maximum
    of 100 links when using the official feeds (with query parameter
    count=100).

    The return list of links will be sorted by recency in descending order,
    i.e. newest items first.

    Note that even when setting max_urls, get_urls() cannot guarantee that
    it can retrieve *at least* this many URLs. It is really just an upper
    bound.

    @param tag: Retrieve links which have been tagged with the given tag.
        If tag is not set (default), links will be retrieved from the
        Delicious.com front page (aka "delicious hotlist").
    @type tag: unicode/str

    @param popular: If true (default), retrieve only popular links (i.e.
        /popular/<tag>). Otherwise, the most recent links tagged with
        the given tag will be retrieved (i.e. /tag/<tag>).
        As of January 2009, it seems that Delicious.com modified the list
        of popular tags to contain only up to a maximum of 15 URLs.
        This also means that setting max_urls to values larger than 15
        will not change the results of get_urls().
        So if you are interested in more URLs, set the "popular" parameter
        to false.
        Note that if you set popular to False, the returned list of URLs
        might contain duplicate items. This is due to the way Delicious.com
        creates its /tag/<tag> Web pages. So if you need a certain
        number of unique URLs, you have to take care of that in your
        own code.
    @type popular: bool

    @param max_urls: Retrieve at most max_urls links. The default is 100,
        which is the maximum number of links that can be retrieved by
        parsing the official JSON feeds. The maximum value of max_urls
        in practice is 2000 (currently). If it is set higher, Delicious
        will return the same links over and over again, giving lots of
        duplicate items.
    @type max_urls: int

    @param sleep_seconds: Optional, default: 1.
        Wait the specified number of seconds between subsequent queries in
        case that there are multiple pages of bookmarks for the given url.
        Must be greater than or equal to 1 to comply with Delicious.com's
        Terms of Use.
        See also parameter 'max_urls'.
    @type sleep_seconds: int

    @return: The list of recent URLs (of web documents) tagged with a given tag.
    """
    assert sleep_seconds >= 1
    urls = []
    path = None
    if tag is None or (tag is not None and max_urls > 0 and max_urls <= 100):
        # use official JSON feeds
        max_json_count = 100
        if tag:
            # tag-specific JSON feed
            if popular:
                path = "/v2/json/popular/%s?count=%d" % (tag, max_json_count)
            else:
                path = "/v2/json/tag/%s?count=%d" % (tag, max_json_count)
        else:
            # Delicious.com hotlist
            path = "/v2/json/?count=%d" % (max_json_count)
        data = self._query(path, host="feeds.delicious.com")
        if data:
            posts = []
            try:
                posts = simplejson.loads(data)
            except TypeError:
                pass
            for post in posts:
                # url
                try:
                    url = post['u']
                    if url:
                        urls.append(url)
                except KeyError:
                    pass
    else:
        # maximum number of urls/posts Delicious.com will display
        # per page on its website
        max_html_count = 100
        # maximum number of pages that Delicious.com will display;
        # currently, the maximum number of pages is 20. Delicious.com
        # allows to go beyond page 20 via pagination, but page N (for
        # N > 20) will always display the same content as page 20.
        max_html_pages = 20
        if popular:
            path = "/popular/%s?setcount=%d" % (tag, max_html_count)
        else:
            path = "/tag/%s?setcount=%d" % (tag, max_html_count)
        page_index = 1
        urls = []
        while path and page_index <= max_html_pages:
            data = self._query(path)
            path = None
            if data:
                # Parse the page ONCE and reuse the parse tree for both the
                # link extraction and the pagination lookup (previously the
                # same document was fed to BeautifulSoup twice per page).
                soup = BeautifulSoup(data)
                # extract urls from current page; raw string avoids the
                # invalid "\s" escape in a plain string literal
                links = soup.findAll("a", attrs={"class": re.compile(r"^taggedlink\s*")})
                for link in links:
                    try:
                        url = link['href']
                        if url:
                            urls.append(url)
                    except KeyError:
                        pass
                # check if there are multiple pages of urls
                paginations = soup.findAll("div", id="pagination")
                if paginations:
                    # find next path
                    nexts = paginations[0].findAll("a", attrs={ "class": "pn next" })
                    if nexts and (max_urls == 0 or len(urls) < max_urls) and len(urls) > 0:
                        # e.g. /url/2bb293d594a93e77d45c2caaf120e1b1?show=all&page=2
                        path = nexts[0]['href']
                        path += "&setcount=%d" % max_html_count
                        page_index += 1
            # wait between queries to Delicious.com to be
            # compliant with its Terms of Use
            time.sleep(sleep_seconds)
    if max_urls > 0:
        return urls[:max_urls]
    else:
        return urls
def get_tags_of_user(self, username):
    """
    Retrieves user's public tags and their tag counts from Delicious.com.
    The tags represent a user's full public tagging vocabulary.

    DeliciousAPI uses the official JSON feed of the user. We could use
    RSS here, but the JSON feed has proven to be faster in practice.

    @param username: The Delicious.com username.
    @type username: str

    @return: Dictionary mapping tags to their tag counts.
    """
    feed = self._query("/v2/json/tags/%s" % username, host="feeds.delicious.com")
    # No response (or an empty one) means no tags could be retrieved.
    if not feed:
        return {}
    try:
        return simplejson.loads(feed)
    except TypeError:
        return {}
def get_number_of_users(self, url):
    """get_number_of_users() is obsolete and has been removed. Please use get_url() instead."""
    # Kept only so that legacy callers fail loudly with a pointer to get_url().
    raise Exception(
        "get_number_of_users() is obsolete and has been removed. "
        "Please use get_url() instead.")
def get_common_tags_of_url(self, url):
    """get_common_tags_of_url() is obsolete and has been removed. Please use get_url() instead."""
    # Kept only so that legacy callers fail loudly with a pointer to get_url().
    raise Exception(
        "get_common_tags_of_url() is obsolete and has been removed. "
        "Please use get_url() instead.")
def _html_escape(self, s):
    """HTML-escape a string or object.

    This converts any non-string objects passed into it to strings
    (actually, using unicode()). All values returned are
    non-unicode strings (using "&#num;" entities for all non-ASCII
    characters).

    None is treated specially, and returns the empty string.

    NOTE: this helper relies on Python 2-only built-ins (basestring,
    unicode) and on cgi.escape, which was removed in Python 3.8; it
    would need porting (e.g. to html.escape) to run under Python 3.

    @param s: The string that needs to be escaped.
    @type s: str

    @return: The escaped string.
    """
    if s is None:
        return ''
    if not isinstance(s, basestring):
        # Coerce arbitrary objects to text, preferring their unicode form.
        if hasattr(s, '__unicode__'):
            s = unicode(s)
        else:
            s = str(s)
    # quote=True also escapes double-quote characters, besides &, <, >.
    s = cgi.escape(s, True)
    if isinstance(s, unicode):
        # Force a plain byte string; non-ASCII chars become numeric entities.
        s = s.encode('ascii', 'xmlcharrefreplace')
    return s
class DeliciousError(Exception):
    """Used to indicate that an error occurred when trying to access Delicious.com via its API.

    Common base class for the Delicious.com-specific error types defined
    below, so callers can catch DeliciousError to handle any of them.
    """
class DeliciousWarning(Exception):
    """Used to indicate a warning when trying to access Delicious.com via its API.

    Warnings are raised when it is useful to alert the user of some condition
    where that condition doesn't warrant raising an exception and terminating
    the program. For example, we issue a warning when Delicious.com returns a
    HTTP status code for redirections (3xx).

    NOTE: despite its name this derives from Exception rather than Warning,
    so it is raised and caught like an error, not emitted via the warnings
    module.
    """
class DeliciousThrottleError(DeliciousError):
    """Used to indicate that the client computer (i.e. its IP address) has been temporarily blocked by Delicious.com."""
    # Redundant 'pass' removed: a docstring alone is a valid class body.
class DeliciousUnknownError(DeliciousError):
    """Used to indicate that Delicious.com returned an (HTTP) error which we don't know how to handle yet."""
    # Redundant 'pass' removed: a docstring alone is a valid class body.
class DeliciousUnauthorizedError(DeliciousError):
    """Used to indicate that Delicious.com returned a 401 Unauthorized error.

    Most of the time, the user credentials for accessing restricted functions
    of the official Delicious.com API are incorrect.
    """
    # Redundant 'pass' removed: a docstring alone is a valid class body.
class DeliciousForbiddenError(DeliciousError):
    """Used to indicate that Delicious.com returned a 403 Forbidden error."""
    # Redundant 'pass' removed: a docstring alone is a valid class body.
class DeliciousNotFoundError(DeliciousError):
    """Used to indicate that Delicious.com returned a 404 Not Found error.

    Most of the time, retrying some seconds later fixes the problem
    (because we only query existing pages with this API).
    """
    # Redundant 'pass' removed: a docstring alone is a valid class body.
class Delicious500Error(DeliciousError):
    """Used to indicate that Delicious.com returned a 500 error.

    Most of the time, retrying some seconds later fixes the problem.
    """
    # Redundant 'pass' removed: a docstring alone is a valid class body.
class DeliciousMovedPermanentlyWarning(DeliciousWarning):
    """Used to indicate that Delicious.com returned a 301 Found (Moved Permanently) redirection."""
    # Redundant 'pass' removed: a docstring alone is a valid class body.
class DeliciousMovedTemporarilyWarning(DeliciousWarning):
    """Used to indicate that Delicious.com returned a 302 Found (Moved Temporarily) redirection."""
    # Redundant 'pass' removed: a docstring alone is a valid class body.
# Public API of this module. Previously DeliciousWarning,
# DeliciousForbiddenError and DeliciousMovedPermanentlyWarning were defined
# in this file but missing from __all__; adding them is backward-compatible
# (it only widens what "from deliciousapi import *" exposes).
__all__ = [
    'DeliciousAPI',
    'DeliciousURL',
    'DeliciousError',
    'DeliciousWarning',
    'DeliciousThrottleError',
    'DeliciousUnauthorizedError',
    'DeliciousUnknownError',
    'DeliciousNotFoundError',
    'DeliciousForbiddenError',
    'Delicious500Error',
    'DeliciousMovedPermanentlyWarning',
    'DeliciousMovedTemporarilyWarning',
]
if __name__ == "__main__":
    # Usage demo / manual smoke test (Python 2 print statements): fetch and
    # print the bookmarking history of a single hard-coded URL.
    d = DeliciousAPI()
    max_bookmarks = 50
    url = 'http://www.michael-noll.com/wiki/Del.icio.us_Python_API'
    print "Retrieving Delicious.com information about url"
    print "'%s'" % url
    print "Note: This might take some time..."
    print "========================================================="
    # get_url() queries Delicious.com over the network, so this can be slow.
    document = d.get_url(url, max_bookmarks=max_bookmarks)
    print document
| {
"content_hash": "287d7e7dc8beeba84f4ba8c7611178f1",
"timestamp": "",
"source": "github",
"line_count": 1253,
"max_line_length": 225,
"avg_line_length": 40.2633679169992,
"alnum_prop": 0.5654509415262636,
"repo_name": "jorik041/stackprinter",
"id": "b183ee9f5cdc7c0f2a6a9a0c48ae1309c0de510b",
"size": "50450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/lib/deliciousapi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "83166"
},
{
"name": "CSS",
"bytes": "10317"
},
{
"name": "HTML",
"bytes": "6115383"
},
{
"name": "JavaScript",
"bytes": "113153"
},
{
"name": "Python",
"bytes": "586590"
}
],
"symlink_target": ""
} |
import csv
import math  # NOTE(review): appears unused in this file -- confirm before removing.

# Define some constants that we'll want to have for our project.
FEATURES = 784  # pixels per image; matches the 28 x 28 grid used in get_batch()
OUTPUTS = 10    # number of classification labels (digits 0-9)
def read_file(file_name):
    """Read a CSV file with a header row.

    Returns a tuple (rows, row_count), where each row is a dict mapping
    column header to the raw string value (csv.DictReader does not convert
    types).
    """
    # Renamed the handle from 'file' to 'csv_file': 'file' shadows a builtin.
    with open(file_name) as csv_file:
        rows = list(csv.DictReader(csv_file))
    # NOTE(review): the csv docs recommend open(..., newline='') on Python 3;
    # left unchanged here to avoid altering behavior on other interpreters.
    return rows, len(rows)
def get_batch(input_data, start, end):
    """Build one batch from rows start..end-1 of input_data.

    Returns three parallel lists:
      - ids: one-element lists holding the 1-based image ID,
      - images: per-image lists of FEATURES raw pixel values,
      - labels: one-hot vectors of length OUTPUTS (all zeros when the row
        has no 'label' column, e.g. for test data).
    """
    ids = []
    images = []
    labels = []
    for index in range(start, end):
        row = input_data[index]
        # Image IDs start at 1, hence the +1.
        ids.append([index + 1])
        # One-hot encode the expected label when present.
        one_hot = [0] * OUTPUTS
        if 'label' in row:
            one_hot[int(row['label'])] = 1
        labels.append(one_hot)
        # Columns are named pixel0..pixel783; the original nested x/y loops
        # enumerated exactly the indices 0..FEATURES-1 (x + y * 28).
        images.append([row['pixel' + str(p)] for p in range(FEATURES)])
    return ids, images, labels
| {
"content_hash": "4f5e77ae5a781512dc1347cc946afd05",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 30,
"alnum_prop": 0.5840909090909091,
"repo_name": "GEMISIS/machine-learning",
"id": "09dbb501868da69f53b189dcf4906da05a114253",
"size": "1370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Kaggle/Digit Recognizer/data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "650"
},
{
"name": "JavaScript",
"bytes": "4336"
},
{
"name": "Python",
"bytes": "97891"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the visit_planning app.

    Creates the PatientVisit and VisitReason models and wires PatientVisit's
    relations to Employee (human_resources) and Patient (patients).
    """

    initial = True

    # Employee and Patient models must exist before the foreign keys below.
    dependencies = [
        ('human_resources', '0015_employee_second_phone'),
        ('patients', '0004_auto_20160827_1808'),
    ]

    operations = [
        migrations.CreateModel(
            name='PatientVisit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('time', models.TimeField()),
                ('duration_in_minutes', models.SmallIntegerField()),
                ('is_durations_auto_calculated', models.BooleanField(default=False)),
                ('is_first_visit', models.BooleanField(default=False)),
                # NOTE(review): 'NEED_TO_CANCELL'/'cancell' spelling is kept as-is;
                # fixing it would require a data migration for existing rows.
                ('status', models.CharField(choices=[('PLANNED', 'visit status planned'), ('CANCELLED', 'visit status cancelled'), ('NEED_TO_TRANSFER', 'visit status need to transfer'), ('NEED_TO_CANCELL', 'visit status need to cancell'), ('LATENESS', 'visit status lateness'), ('LATE_ARRIVALS_TRANSFER', 'visit status late arrivals transfer'), ('TRANSFER', 'visit status transfer'), ('ABSENCE', 'visit status absence'), ('CLIENT_WAIT', 'visit status client wait'), ('IN_PROCESS', 'visit status in process'), ('CLIENT_READY', 'visit status client ready')], default='PLANNED', max_length=30, verbose_name='visit status')),
                ('status_comment', models.TextField(blank=True)),
                ('patient_is_notified', models.BooleanField(default=False)),
                ('comment', models.TextField(blank=True)),
                ('wait_duration', models.SmallIntegerField(null=True)),
                ('wait_status_start_time', models.DateTimeField(null=True)),
                ('procedure_duration', models.SmallIntegerField(null=True)),
                ('procedure_status_start_time', models.DateTimeField(null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('post_price', models.SmallIntegerField(null=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_by_employee_id', to='apps.human_resources.Employee')),
                ('doctor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='apps.human_resources.Employee')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='apps.patients.Patient')),
            ],
            options={
                'verbose_name_plural': 'patient visits',
                'verbose_name': 'patient visit',
            },
        ),
        migrations.CreateModel(
            name='VisitReason',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='visit reason name')),
                ('average_duration_minutes', models.SmallIntegerField(null=True, verbose_name='average durations in minutes')),
                ('average_price', models.SmallIntegerField(null=True, verbose_name='average price')),
            ],
            options={
                'verbose_name_plural': 'visit reasons',
                'verbose_name': 'visit reason',
            },
        ),
        # M2M and the self-referencing FK are added after both models exist.
        migrations.AddField(
            model_name='patientvisit',
            name='reasons',
            field=models.ManyToManyField(to='apps.visit_planning.VisitReason'),
        ),
        migrations.AddField(
            model_name='patientvisit',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='updated_by_employee_id', to='apps.human_resources.Employee'),
        ),
    ]
| {
"content_hash": "f152a7bb17bdb2e417b3d93d82824a42",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 621,
"avg_line_length": 57.333333333333336,
"alnum_prop": 0.6038928210313448,
"repo_name": "kirillmakhonin/med",
"id": "71ac3b629617ae61332fba847da8b4b6216fa0ec",
"size": "4027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MED.Server/apps/visit_planning/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "417564"
},
{
"name": "HTML",
"bytes": "1760"
},
{
"name": "JavaScript",
"bytes": "2902001"
},
{
"name": "Python",
"bytes": "82164"
},
{
"name": "Shell",
"bytes": "59"
}
],
"symlink_target": ""
} |
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DeleteVolumesAction(BaseAction):
    """Implements the 'delete-volumes' CLI sub-command (DeleteVolumes API action)."""

    action = 'DeleteVolumes'
    command = 'delete-volumes'
    usage = '%(prog)s -v "volume_id,..." [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        # Register the sub-command-specific flag on the shared parser.
        parser.add_argument(
            '-v', '--volumes',
            dest='volumes',
            action='store',
            type=str,
            default='',
            help='the comma separated IDs of volumes you want to delete.',
        )

    @classmethod
    def build_directive(cls, options):
        # Translate parsed CLI options into the API request payload;
        # None signals the caller that required input was missing.
        if options.volumes:
            return {'volumes': explode_array(options.volumes)}
        print('error: [volumes] should be specified')
        return None
| {
"content_hash": "398313c3f6fc6a6328a32daea72e1ce8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 33.52173913043478,
"alnum_prop": 0.6277561608300908,
"repo_name": "yunify/qingcloud-cli",
"id": "72f074bb82273465ba15dbe332155e2682b7f927",
"size": "1604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qingcloud/cli/iaas_client/actions/volume/delete_volumes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "852"
},
{
"name": "Python",
"bytes": "607642"
}
],
"symlink_target": ""
} |
# Local path of the EVS 4.1 WSDL service description to generate from.
WSDL_FILE = "conf/evsapi41.wsdl"

# Root package for the resulting API
ROOT_PACKAGE = "cabig.evs"

# Mapping from Java to Python packages
# The ROOT_PACKAGE is prepended to each Python package
PACKAGE_MAPPING = {
    'gov.nih.nci.evs.domain' : 'domain',
}

# Name of the generated service entry-point class.
SERVICE_CLASS_NAME = "EVSApplicationService"

# Directory where generated output is written.
OUTPUT_DIR = "output"
| {
"content_hash": "5226d4ee7c592051ee5a5046a3beff43",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 54,
"avg_line_length": 23.357142857142858,
"alnum_prop": 0.7247706422018348,
"repo_name": "NCIP/python-api",
"id": "69fd1a010ffcd0f9491f918afe46ffc50d981ce2",
"size": "577",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyevs/tags/0.1.0/settings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "4926"
},
{
"name": "Python",
"bytes": "792077"
},
{
"name": "Shell",
"bytes": "165"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.