Each record below is a source file (the `code` column: string, 3–1.05M characters) followed by a row of its metadata:

| repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_set_service_properties_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01"
queue_service_name = "default"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"queueServiceName": _SERIALIZER.url("queue_service_name", queue_service_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_service_properties_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01"
queue_service_name = "default"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"queueServiceName": _SERIALIZER.url("queue_service_name", queue_service_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class QueueServicesOperations(object):
"""QueueServicesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.ListQueueServices":
"""List all queue services for the storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListQueueServices, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_06_01.models.ListQueueServices
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListQueueServices"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListQueueServices', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices'} # type: ignore
@distributed_trace
def set_service_properties(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.QueueServiceProperties",
**kwargs: Any
) -> "_models.QueueServiceProperties":
"""Sets the properties of a storage account’s Queue service, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The properties of a storage account’s Queue service, only properties for
Storage Analytics and CORS (Cross-Origin Resource Sharing) rules can be specified.
:type parameters: ~azure.mgmt.storage.v2021_06_01.models.QueueServiceProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: QueueServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_06_01.models.QueueServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.QueueServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'QueueServiceProperties')
request = build_set_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.set_service_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('QueueServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}'} # type: ignore
@distributed_trace
def get_service_properties(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.QueueServiceProperties":
"""Gets the properties of a storage account’s Queue service, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: QueueServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_06_01.models.QueueServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.QueueServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.get_service_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('QueueServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/queueServices/{queueServiceName}'} # type: ignore
| Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_06_01/operations/_queue_services_operations.py | Python | mit | 15,335 |
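For context, here is a minimal usage sketch of the operation group above, assuming the track-2 `azure-mgmt-storage` client and an `azure-identity` credential; the subscription, resource group, and account names are placeholders, not values from the file:

```python
# Hedged sketch: assumes azure-identity and azure-mgmt-storage are installed and
# that StorageManagementClient exposes this operation group as `queue_services`.
from azure.identity import DefaultAzureCredential
from azure.mgmt.storage import StorageManagementClient

client = StorageManagementClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",          # placeholder
)

# Internally this goes through build_list_request() / QueueServicesOperations.list() above.
queue_services = client.queue_services.list(
    resource_group_name="<resource-group>",       # placeholder
    account_name="<storage-account>",             # placeholder
)
for item in queue_services.value:
    print(item.name, item.cors)
```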
"""
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
This tests whether the evoked response is significantly different
between conditions. The multiple comparisons problem is addressed
with a cluster-level permutation test.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=1,
out_type='mask')
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (s)")  # epochs.times is in seconds
plt.ylabel("f-values")
plt.show()
| kambysese/mne-python | examples/stats/plot_cluster_stats_evoked.py | Python | bsd-3-clause | 3,021 |
#__ LGPL 3.0, 2014 Alexander Soloviev (no.friday@yandex.ru)
from re import match, VERBOSE
from comine.iface.maps import *
class Maps(object):
__PL = '''
\s*([\w]+)\s*-\s*([\w]+)
\s+([rwxps-]{4})
\s+(\w+)
\s+(\d+):(\d+)
\s+(\d+)
\s+(.*)
'''
__FSYM = { 'r' : IMaps.FLAG_READ, 'w' : IMaps.FLAG_WRITE,
'x' : IMaps.FLAG_EXEC, 's' : IMaps.FLAG_SHARED,
'p' : 0, '-' : 0 }
@classmethod
def pid(cls, pid):
if not isinstance(pid, int):
raise TypeError('invalid pid type=%s' % pid)
for rg in cls.read('/proc/%u/maps' % pid): yield rg
@classmethod
def read(cls, path, require = False):
with open(path, 'r') as F:
for rg in map(cls.parse, F):
if rg is not None:
yield rg
elif require is True:
raise ValueError('maps file is damaged')
@classmethod
def parse(cls, line): # -> (rg, perm, offset, inode, desc)
g = match(cls.__PL, line, VERBOSE)
if g is not None:
g = g.groups()
flags = reduce(lambda x, y: x | y,
map(lambda z: cls.__FSYM[z], g[2]))
entity = cls.__entity(g[4:7], g[3], g[7].strip())
return ((int(g[0], 16), int(g[1], 16)), flags, entity)
@classmethod
def __entity(cls, inode, offset, desc):
if len(desc) < 1:
return Anonymous()
elif desc[0] == '/':
inode = tuple(map(int, inode))
return Mapped(inode, int(offset,16), desc)
elif len(desc) > 2:
g = match('^\[(?:(stack)(?::(\d+))?|(.*))\]$', desc)
if g is not None:
if g.group(1) is not None:
return Stack(ppid = g.group(2) and int(g.group(2)))
else:
return Special(kind = g.group(3))
| for-the-repose/comine | source/comine/arch/proc.py | Python | lgpl-3.0 | 1,973 |
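For illustration, a self-contained Python 3 sketch of the same `/proc/<pid>/maps` parsing approach used by `Maps.parse` above; the sample line and printed fields are illustrative, and the `IMaps` flag constants from `comine.iface.maps` are not reproduced here:

```python
# Standalone sketch mirroring the Maps.__PL pattern above; not the comine code itself.
import re

MAPS_LINE = re.compile(r'''
    \s*([0-9a-f]+)-([0-9a-f]+)   # start-end addresses (hex)
    \s+([rwxps-]{4})             # permission flags
    \s+([0-9a-f]+)               # file offset
    \s+(\d+):(\d+)               # device major:minor
    \s+(\d+)                     # inode
    \s+(.*)                      # path or [special] description
''', re.VERBOSE)

sample = "7f2c3a000000-7f2c3a021000 r-xp 00000000 08:01 1048601  /usr/lib/libc.so.6"
m = MAPS_LINE.match(sample)
if m is not None:
    start, end = int(m.group(1), 16), int(m.group(2), 16)
    print(hex(start), hex(end), m.group(3), m.group(8).strip())
```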
"""1074. Number of Submatrices That Sum to Target
https://leetcode.com/problems/number-of-submatrices-that-sum-to-target/
Given a matrix and a target, return the number of non-empty submatrices that
sum to target.
A submatrix x1, y1, x2, y2 is the set of all cells matrix[x][y] with x1 <= x
<= x2 and y1 <= y <= y2.
Two submatrices (x1, y1, x2, y2) and (x1', y1', x2', y2') are different if
they have some coordinate that is different: for example, if x1 != x1'.
Example 1:
Input: matrix = [[0,1,0],[1,1,1],[0,1,0]], target = 0
Output: 4
Explanation: The four 1x1 submatrices that only contain 0.
Example 2:
Input: matrix = [[1,-1],[-1,1]], target = 0
Output: 5
Explanation: The two 1x2 submatrices, plus the two 2x1 submatrices, plus the
2x2 submatrix.
Example 3:
Input: matrix = [[904]], target = 0
Output: 0
Constraints:
1 <= matrix.length <= 100
1 <= matrix[0].length <= 100
-1000 <= matrix[i][j] <= 1000
-10^8 <= target <= 10^8
"""
from typing import List
from collections import Counter
class Solution:
def numSubmatrixSumTarget(self, matrix: List[List[int]], target: int) -> int:
rows, cols = len(matrix), len(matrix[0])
pre_sum = [[0] * cols for _ in range(rows)]
for i in range(rows):
for j in range(cols):
pre_sum[i][j] = matrix[i][j] + (pre_sum[i][j - 1] if j > 0 else 0)
ans = 0
for y0 in range(cols):
for y1 in range(y0, cols):
counter = Counter()
counter[0] = 1
cur_sum = 0
for x in range(rows):
cur_sum += pre_sum[x][y1] - (pre_sum[x][y0 - 1] if y0 > 0 else 0)
ans += counter[cur_sum - target]
counter[cur_sum] += 1
return ans
| isudox/leetcode-solution | python-algorithm/leetcode/problem_1074.py | Python | mit | 1,769 |
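A quick sanity check of the `Solution` class above against the three examples in the problem statement:

```python
# Assumes the Solution class defined above is available in the same module.
if __name__ == "__main__":
    s = Solution()
    assert s.numSubmatrixSumTarget([[0, 1, 0], [1, 1, 1], [0, 1, 0]], 0) == 4
    assert s.numSubmatrixSumTarget([[1, -1], [-1, 1]], 0) == 5
    assert s.numSubmatrixSumTarget([[904]], 0) == 0
    print("all three examples pass")
```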
""" Base worker module """
import abc
class BaseWorker(metaclass=abc.ABCMeta):
""" Base worker
Inherit from this class to add a worker
"""
need_session = False
def __init__(self, server):
self.server = server
self.log = server.log
@abc.abstractmethod
def handle(self, data, token=None, *, session=None):
""" Handle an event
:param dict data: data in the event
:param str token: token that sent the data
"""
| ningirsu/stepmania-server | smserver/listener/workers/base.py | Python | mit | 503 |
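As a rough sketch of how the abstract base above is meant to be used, here is a minimal concrete worker; the `server` object's `log` attribute is assumed to be a standard logger, and the payload keys are purely illustrative:

```python
# Hedged sketch of a concrete worker; EchoWorker and the payload shape are
# illustrative, not part of the smserver codebase shown above.
class EchoWorker(BaseWorker):
    """Example worker that simply logs the event it receives."""

    need_session = False  # this worker does not need a database session

    def handle(self, data, token=None, *, session=None):
        # `data` is the event dict and `token` identifies the sender (see base class).
        self.log.info("EchoWorker got %r from token=%s", data, token)
        return data
```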
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
class Command(BaseCommand):
help = "TEST i18n"
def add_arguments(self, parser):
parser.add_argument('action', nargs='?', help='action ?')
def handle(self, *args, **options):
from ... import tests
action = options.get('action')
print("Unknown action:", action)
| gzqichang/wa | qi18n/qi18n/management/commands/test_i18n.py | Python | mit | 418 |
{
'name': 'Discount prepaid order',
'version': '1.0',
'category': 'Custom',
'description': """
Order Discount when it's prepaid and margin is between specific values
""",
'author': 'Nadia Ferreyra',
'website': '',
'depends': ['base',
'sale',
'product',
'sale_promotions_extend',
'commercial_rules',
'flask_middleware_connector',
'sale_custom'
],
'data': ['data/product_data.xml',
'data/parameters.xml',
'views/sale_order_view.xml',
'views/account_view.xml'
],
'installable': True
}
| Comunitea/CMNT_004_15 | project-addons/prepaid_order_discount/__manifest__.py | Python | agpl-3.0 | 697 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from html import escape
# Bokeh imports
from bokeh.models import Paragraph
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
)
text = """
Your <a href="https://en.wikipedia.org/wiki/HTML">HTML</a>-supported text is initialized with the <b>text</b> argument. The
remaining div arguments are <b>width</b> and <b>height</b>. For this example, those values
are <i>200</i> and <i>100</i> respectively."""
@pytest.mark.selenium
class Test_TextParagraph(object):
def test_displays_div_as_text(self, bokeh_model_page) -> None:
para = Paragraph(text=text, css_classes=["foo"])
page = bokeh_model_page(para)
el = page.driver.find_element_by_css_selector('.foo div p')
assert el.get_attribute("innerHTML") == escape(text, quote=None)
assert page.has_no_console_errors()
def test_set_style(self, bokeh_model_page) -> None:
para = Paragraph(text=text, css_classes=["foo"], style={'font-size': '26px'})
page = bokeh_model_page(para)
el = page.driver.find_element_by_css_selector('.foo div')
assert 'font-size: 26px;' in el.get_attribute('style')
assert page.has_no_console_errors()
| ericmjl/bokeh | tests/integration/widgets/test_paragraph.py | Python | bsd-3-clause | 2,083 |
import sys
# Module containing non-deprecated functions borrowed from Numeric.
__docformat__ = "restructuredtext en"
# functions that are now methods
__all__ = ['take', 'reshape', 'choose', 'repeat', 'put',
'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin',
'searchsorted', 'alen',
'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape',
'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue',
'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim',
'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze',
'amax', 'amin',
]
import multiarray as mu
import umath as um
import numerictypes as nt
from numeric import asarray, array, asanyarray, concatenate
_dt_ = nt.sctype2char
import types
try:
_gentype = types.GeneratorType
except AttributeError:
_gentype = types.NoneType
# save away Python sum
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj),method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
This function does the same thing as "fancy" indexing (indexing arrays
using arrays); however, it can be easier to use if you need elements
along a given axis.
Parameters
----------
a : array_like
The source array.
indices : array_like
The indices of the values to extract.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
See Also
--------
ndarray.take : equivalent method
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
"""
try:
take = a.take
except AttributeError:
return _wrapit(a, 'take', indices, axis, out, mode)
return take(indices, axis, out, mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is inferred
from the length of the array and remaining dimensions.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) order or FORTRAN (column-major) order.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised if the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose make the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying the
# initial object.
>>> c = b.view()
>>> c.shape = (20)
AttributeError: incompatible shape for a non-contiguous array
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
try:
reshape = a.reshape
except AttributeError:
return _wrapit(a, 'reshape', newshape, order=order)
return reshape(newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of `n` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each `i`. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode=raise`` (the default), then, first of all, each element of
`a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
`i` (in that range) is the value at the `(j0, j1, ..., jm)` position
in `Ba` - then the value at the same position in the new array is the
value in `Bchoices[i]` at that same position;
* if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
integer; negative integers are mapped to 0; values greater than `n-1`
are mapped to `n-1`; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in `[0, n-1]`, where `n` is the number
of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod `n`
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
try:
choose = a.choose
except AttributeError:
return _wrapit(a, 'choose', choices, out=out, mode=mode)
return choose(choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : {int, array of ints}
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
try:
repeat = a.repeat
except AttributeError:
return _wrapit(a, 'repeat', repeats, axis)
return repeat(repeats, axis)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
See Also
--------
putmask, place
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
return a.put(ind, v, mode)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
If `a` is an ndarray, then a view of `a` is returned; otherwise
a new array is created.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
try:
swapaxes = a.swapaxes
except AttributeError:
return _wrapit(a, 'swapaxes', axis1, axis2)
return swapaxes(axis1, axis2)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
rollaxis
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
try:
transpose = a.transpose
except AttributeError:
return _wrapit(a, 'transpose', axes)
return transpose(axes)
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The three available algorithms have the following
properties:
=========== ======= ============= ============ =======
kind speed worst case work space stable
=========== ======= ============= ============ =======
'quicksort' 1 O(n^2) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
=========== ======= ============= ============ =======
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy()
a.sort(axis, kind, order)
return a
def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
try:
argsort = a.argsort
except AttributeError:
return _wrapit(a, 'argsort', axis, kind, order)
return argsort(axis, kind, order)
def argmax(a, axis=None):
"""
Indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
"""
try:
argmax = a.argmax
except AttributeError:
return _wrapit(a, 'argmax', axis)
return argmax(axis)
def argmin(a, axis=None):
"""
Return the indices of the minimum values along an axis.
See Also
--------
argmax : Similar function. Please refer to `numpy.argmax` for detailed
documentation.
"""
try:
argmin = a.argmin
except AttributeError:
return _wrapit(a, 'argmin', axis)
return argmin(axis)
def searchsorted(a, v, side='left'):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the corresponding
elements in `v` were inserted before the indices, the order of `a` would
be preserved.
Parameters
----------
a : 1-D array_like
Input array, sorted in ascending order.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given. If
'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
try:
searchsorted = a.searchsorted
except AttributeError:
return _wrapit(a, 'searchsorted', v, side)
return searchsorted(v, side)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new array
is filled with repeated copies of `a`. Note that this behavior is different
from a.resize(new_shape) which fills with zeros instead of repeated
copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : {tuple, int}
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The data
are repeated in the order that the data are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
if not Na: return mu.zeros(new_shape, a.dtype.char)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
if total_size == 0:
return a[:0]
if extra != 0:
n_copies = n_copies+1
extra = Na-extra
a = concatenate( (a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
def squeeze(a):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
Returns
-------
squeezed : ndarray
The input array, but with all dimensions of length 1
removed. Whenever possible, a view on `a` is returned.
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
return squeeze()
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form `a[i,i+offset]`.
If `a` has more than two dimensions, then the axes specified
by `axis1` and `axis2` are used to determine the 2-D subarray
whose diagonal is returned. The shape of the resulting array
can be determined by removing `axis1` and `axis2` and appending
an index to the right equal to the size of the resulting diagonals.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D subarrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D subarrays from which
the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, a 1-D array containing the diagonal is
returned. If `a` has larger dimensions, then an array of
diagonals is returned.
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : Matlab workalike for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
>>> a = np.arange(8).reshape(2,2,2)
>>> a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0,-2,-1)
array([[0, 3],
[4, 7]])
"""
return asarray(a).diagonal(offset, axis1, axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
return asarray(a).trace(offset, axis1, axis2, dtype, out)
def ravel(a, order='C'):
"""
Return a flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F'}, optional
The elements of `a` are read in this order. It can be either
'C' for row-major order, or 'F' for column-major order.
By default, row-major order is used.
Returns
-------
1d_array : ndarray
Output of the same dtype as `a`, and of shape ``(a.size(),)``.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
Notes
-----
In row-major order, the row index varies the slowest, and the column
index the quickest. This can be generalized to multiple dimensions,
where row-major order implies that the index along the first axis
varies slowest, and the index along the last quickest. The opposite holds
for Fortran-, or column-major, mode.
Examples
--------
If an array is in C-order (default), then `ravel` is equivalent
to ``reshape(-1)``:
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print x.reshape(-1)
[1 2 3 4 5 6]
>>> print np.ravel(x)
[1 2 3 4 5 6]
When flattening using Fortran-order, however, we see
>>> print np.ravel(x, order='F')
[1 4 2 5 3 6]
"""
return asarray(a).ravel(order)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`, containing
the indices of the non-zero elements in that dimension. The
corresponding non-zero values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
Examples
--------
>>> x = np.eye(3)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> np.nonzero(x)
(array([0, 1, 2]), array([0, 1, 2]))
>>> x[np.nonzero(x)]
array([ 1., 1., 1.])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of `a` where the condition is true.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the boolean array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
try:
nonzero = a.nonzero
except AttributeError:
res = _wrapit(a, 'nonzero')
else:
res = nonzero()
return res
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
alen
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method.
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
try:
compress = a.compress
except AttributeError:
return _wrapit(a, 'compress', condition, axis, out)
return compress(condition, axis, out)
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : scalar or array_like
Minimum value.
a_max : scalar or array_like
Maximum value. If `a_min` or `a_max` are array_like, then they will
be broadcasted to the shape of `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
try:
clip = a.clip
except AttributeError:
return _wrapit(a, 'clip', a_min, a_max, out)
return clip(a_min, a_max, out)
def sum(a, axis=None, dtype=None, out=None):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : integer, optional
Axis over which the sum is taken. By default `axis` is None,
and all elements are summed.
dtype : dtype, optional
The type of the returned array and of the accumulator in which
the elements are summed. By default, the dtype of `a` is used.
An exception is when `a` has an integer type with less precision
than the default platform integer. In that case, the default
platform integer is used instead.
out : ndarray, optional
Array into which the output is placed. By default, a new array is
created. If `out` is given, it must be of the appropriate shape
(the shape of `a` with `axis` removed, i.e.,
``numpy.delete(a.shape, axis)``). Its type is preserved. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
"""
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
try:
sum = a.sum
except AttributeError:
return _wrapit(a, 'sum', axis, dtype, out)
return sum(axis, dtype, out)
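# Illustrative note (not part of the original module): generator inputs take
# the special-cased branch above and are summed with the builtin sum, e.g.
# >>> np.sum(x * x for x in range(4))
# 14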
def product (a, axis=None, dtype=None, out=None):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
try:
prod = a.prod
except AttributeError:
return _wrapit(a, 'prod', axis, dtype, out)
return prod(axis, dtype, out)
def sometrue(a, axis=None, out=None):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function
"""
try:
any = a.any
except AttributeError:
return _wrapit(a, 'any', axis, out)
return any(axis, out)
def alltrue (a, axis=None, out=None):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
try:
all = a.all
except AttributeError:
return _wrapit(a, 'all', axis, out)
return all(axis, out)
def any(a,axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
    Returns a single boolean unless `axis` is not ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which a logical OR is performed.
The default (`axis` = `None`) is to perform a logical OR
over a flattened input array. `axis` may be negative, in which
case it counts from the last to the first axis.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape as the expected output and
the type is preserved. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
any : bool, ndarray
A new boolean or `ndarray` is returned unless `out` is
specified, in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all array elements along a given axis evaluate
to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False], dtype=bool)
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array([False])
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array([ True], dtype=bool), array([ True], dtype=bool))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
try:
any = a.any
except AttributeError:
return _wrapit(a, 'any', axis, out)
return any(axis, out)
def all(a,axis=None, out=None):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which a logical AND is performed.
The default (`axis` = `None`) is to perform a logical AND
over a flattened input array. `axis` may be negative, in which
case it counts from the last to the first axis.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape as the expected output and
the type is preserved. See `doc.ufuncs` (Section "Output
arguments") for more details.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is
specified, in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any array element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False], dtype=bool)
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array([False])
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z # doctest: +SKIP
(28293632, 28293632, array([ True], dtype=bool))
"""
try:
all = a.all
except AttributeError:
return _wrapit(a, 'all', axis, out)
return all(axis, out)
def cumsum (a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
try:
cumsum = a.cumsum
except AttributeError:
return _wrapit(a, 'cumsum', axis, dtype, out)
return cumsum(axis, dtype, out)
def cumproduct(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ptp(a, axis=None, out=None):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
Parameters
----------
a : array_like
Input values.
axis : int, optional
Axis along which to find the peaks. By default, flatten the
array.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.ptp(x, axis=0)
array([2, 2])
>>> np.ptp(x, axis=1)
array([1, 1])
"""
try:
ptp = a.ptp
except AttributeError:
return _wrapit(a, 'ptp', axis, out)
return ptp(axis, out)
def amax(a, axis=None, out=None):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
amax : ndarray
A new array or a scalar array with the result.
See Also
--------
nanmax : nan values are ignored instead of being propagated
fmax : same behavior as the C99 fmax function
argmax : Indices of the maximum values.
Notes
-----
NaN values are propagated, that is if at least one item is nan, the
corresponding max value will be nan as well. To ignore NaN values (matlab
behavior), please use nanmax.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a)
3
>>> np.amax(a, axis=0)
array([2, 3])
>>> np.amax(a, axis=1)
array([1, 3])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.nanmax(b)
4.0
"""
try:
amax = a.max
except AttributeError:
return _wrapit(a, 'max', axis, out)
return amax(axis, out)
def amin(a, axis=None, out=None):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default a flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
amin : ndarray
A new array or a scalar array with the result.
See Also
--------
nanmin: nan values are ignored instead of being propagated
fmin: same behavior as the C99 fmin function
argmin: Return the indices of the minimum values.
amax, nanmax, fmax
Notes
-----
NaN values are propagated, that is if at least one item is nan, the
corresponding min value will be nan as well. To ignore NaN values (matlab
behavior), please use nanmin.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.nanmin(b)
0.0
"""
try:
amin = a.min
except AttributeError:
return _wrapit(a, 'min', axis, out)
return amin(axis, out)
def alen(a):
"""
Return the length of the first dimension of the input array.
Parameters
----------
a : array_like
Input array.
Returns
-------
l : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
try:
return len(a)
except TypeError:
return len(array(a,ndmin=1))
def prod(a, axis=None, dtype=None, out=None):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis over which the product is taken. By default, the product
of all elements is calculated.
dtype : data-type, optional
The data-type of the returned array, as well as of the accumulator
in which the elements are multiplied. By default, if `a` is of
integer type, `dtype` is the default platform integer. (Note: if
the type of `a` is unsigned, then so is `dtype`.) Otherwise,
the dtype is the same as that of `a`.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the
output values will be cast if necessary.
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x) #random
16
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == np.int
True
"""
try:
prod = a.prod
except AttributeError:
return _wrapit(a, 'prod', axis, dtype, out)
return prod(axis, dtype, out)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default the
input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If dtype is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a precision
less than that of the default platform integer. In that case, the
default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
    The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
    The cumulative product for each row (i.e., over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def rank(a):
"""
Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in Numpy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, Numpy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
Refer to `around` for full documentation.
See Also
--------
around : equivalent function
"""
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def mean(a, axis=None, dtype=None, out=None):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken
over the flattened array by default, otherwise over the specified
axis. float64 intermediate and return values are used for integer
inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : dtype, optional
Type to use in computing the mean. For integer inputs, the default
        is float64; for floating point inputs, it is the same as the input
dtype.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary. See `doc.ufuncs` for details.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([ 2., 3.])
>>> np.mean(a, axis=1)
array([ 1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.546875
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806
"""
try:
mean = a.mean
except AttributeError:
return _wrapit(a, 'mean', axis, dtype, out)
return mean(axis, dtype, out)
def std(a, axis=None, dtype=None, out=None, ddof=0):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : int, optional
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as ``x.sum() / N``, where
``N = len(x)``. If, however, `ddof` is specified, the divisor ``N - ddof``
is used instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of the infinite population. ``ddof=0``
provides a maximum likelihood estimate of the variance for normally
distributed variables. The standard deviation computed in this function
is the square root of the estimated variance, so even with ``ddof=1``, it
will not be an unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949
>>> np.std(a, axis=0)
array([ 1., 1.])
>>> np.std(a, axis=1)
array([ 0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2,512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.std(a)
0.45172946707416706
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925552653
"""
try:
std = a.std
except AttributeError:
return _wrapit(a, 'std', axis, dtype, out, ddof)
return std(axis, dtype, out, ddof)
def var(a, axis=None, dtype=None, out=None, ddof=0):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by default,
otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the variance is computed. The default is to compute
the variance of the flattened array.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
        the default is float64; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
Returns
-------
variance : ndarray, see dtype parameter above
If out=None, returns a new array containing the variance; otherwise
a reference to the output array is returned.
See Also
--------
std : Standard deviation
mean : Average
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of the infinite population. ``ddof=0``
provides a maximum likelihood estimate of the variance for normally
distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1,2],[3,4]])
>>> np.var(a)
1.25
>>> np.var(a,0)
array([ 1., 1.])
>>> np.var(a,1)
array([ 0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2,512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.var(a)
0.20405951142311096
    Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932997387
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.20250000000000001
"""
try:
var = a.var
except AttributeError:
return _wrapit(a, 'var', axis, dtype, out, ddof)
return var(axis, dtype, out, ddof)
|
teoliphant/numpy-refactor
|
numpy/core/fromnumeric.py
|
Python
|
bsd-3-clause
| 71,781
|
import sys
from omniORB import CORBA, PortableServer
import Example, Example__POA
class EchoServer (Example__POA.Echo):
def echoString(self, mesg):
print "Got \"%s\", sending it back."%mesg
return mesg
orb = CORBA.ORB_init(sys.argv, CORBA.ORB_ID)
poa = orb.resolve_initial_references("RootPOA")
ei = EchoServer()
eo = ei._this()
print orb.object_to_string(eo)
poaManager = poa._get_the_POAManager()
poaManager.activate()
orb.run()
|
amonmoce/corba_examples
|
python/server.py
|
Python
|
mit
| 447
|
'''
Defines each of the benchmark problems used as well as the function sets
for those problems.
'''
from operator import or_, and_, add, sub, mul, div, xor
import itertools
import random
import math
def nand(x, y):
'''
Simple Nand function for inclusion in function sets.
'''
return not (x and y)
def nor(x, y):
'''
Simple Nor function for inclusion in function sets.
'''
return not (x or y)
def and_neg_in(x, y):
return (not x) and y
def protected(function):
'''
Decorator that ensures decorated functions always have a valid output.
If an exception occurs or infinity is returned, the first argument of the
function will be returned.
Parameters:
- ``function``: The function to be decorated.
'''
def inner(*args):
try:
# Call the function on the arguments
value = function(*args)
if math.isinf(value):
return args[0]
return value
except (ValueError, OverflowError, ZeroDivisionError):
return args[0]
inner.__name__ = function.__name__
return inner
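# Illustrative sketch (not part of the original module): ``protected`` falls
# back to the first argument whenever the wrapped call raises or overflows.
# >>> safe_div = protected(div)    # operator.div, imported above
# >>> safe_div(1.0, 0.0)           # ZeroDivisionError is caught -> args[0]
# 1.0
# >>> safe_div(6.0, 3.0)
# 2.0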
def arity_controlled(desired):
'''
Decorator used to make functions take any number of inputs while only
using the first ``desired`` number of arguments. For example, you can
pass 10 arguments to a function that takes only 1 if ``desired=1`` and
the first of the arguments will actually be used.
Parameters:
- ``desired``: The actual arity of the wrapped function.
'''
def wrap(function):
def inner(*args):
return function(*args[:desired])
inner.__name__ = function.__name__
return inner
return wrap
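# Illustrative sketch (not part of the original module): ``arity_controlled``
# lets a unary function ignore the extra arguments a node may receive.
# >>> sin1 = arity_controlled(1)(math.sin)
# >>> sin1(0.0, 123, 456)          # only the first argument is used
# 0.0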
# Standard lists of operators for different problems to use
binary_operators = [or_, and_, nand, nor]
regression_operators = [add, sub,
mul, div]
# for unary in [math.sin, math.cos, math.exp, math.log]:
# regression_operators.append(arity_controlled(1)(unary))
# Ensures all regression operators are numerically protected
regression_operators = [protected(op) for op in regression_operators]
class Problem(object):
'''
The abstract base of a problem
'''
def __init__(self, config):
'''
Designed to force children of this class to implement this function.
Children use this function to set up problem specific initialization
from configuration information.
'''
raise NotImplementedError()
def get_fitness(self, individual):
'''
Designed to force children of this class to implement this function.
        Children use this function to evaluate an individual and
return its fitness.
'''
raise NotImplementedError()
class Bounded_Problem(object):
'''
Base object for any problem with a known set of test cases. Stores a
map for all possible inputs to their correct outputs so they only
have to be evaluated once.
'''
def __init__(self, config):
'''
Create a new problem.
Parameters:
- ``config``: A dictionary containing the configuration information
required to fully initialize the problem. Should include values
for:
- Any configuration information required to construct the problem
range.
- ``epsilon``: The amount of allowed error on each test.
'''
self.config = config
self.training = [(inputs, self.problem_function(inputs))
for inputs in self.data_range(config)]
self.epsilon = config['epsilon']
def get_fitness(self, individual):
'''
Return the fitness of an individual as applied to this problem.
Parameters:
- ``individual``: The individual to be evaluated.
'''
score = 0
for inputs, outputs in self.training:
answers = individual.evaluate(inputs)
# Finds the average number of outputs more than epsilon away from
# the correct output
score += (sum(float(abs(answer - output) > self.epsilon)
for answer, output in zip(answers, outputs))
/ len(outputs))
# Returns the percentage of correct answers
return 1 - (score / float(len(self.training)))
def problem_function(self, _):
'''
Designed to force children of this class to implement this function.
Children use this function to define how to translate an input value
into an output value for their problem.
'''
raise NotImplementedError()
def binary_range(config):
'''
Given a dictionary specifying the ``input_length``, returns all binary
values of that length.
'''
return itertools.product((0, 1), repeat=config['input_length'])
def single_bit_set(config):
'''
Creates the list of all possible binary strings of specified length
with exactly one set bit. ``config`` should specify the ``input_length``.
'''
return [tuple(map(int,
'1'.rjust(i + 1, '0').ljust(config['input_length'], '0')
)
)
for i in range(config['input_length'])]
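# Illustrative sketch (not part of the original module): for input_length=3
# the generated test cases are exactly the one-hot bit strings.
# >>> single_bit_set({'input_length': 3})
# [(1, 0, 0), (0, 1, 0), (0, 0, 1)]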
def float_samples(config):
'''
Returns random samples of the input space.
Parameters:
- ``config``: A dictionary containing information about the input space.
- ``min``: The minimum valid value in the space.
- ``max``: The maximum valid value in the space.
- ``input_length``: The number of input variables.
- ``samples``: The number of samples to draw.
'''
return ([random.uniform(config['min'], config['max'])
for _ in xrange(config['input_length'])]
for _ in xrange(config['samples']))
def float_range(config):
'''
    Returns an incremental range of a floating point value. Like range() for
floats.
Parameters:
- ``config``: A dictionary containing information about the input space.
- ``min``: The minimum valid value in the space.
- ``max``: The maximum valid value in the space.
- ``step``: The distance between sample points.
'''
counter = 0
while True:
value = counter * config['step'] + config['min']
if value > config['max']:
break
yield value
counter += 1
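# Illustrative sketch (not part of the original module): ``float_range`` is
# inclusive of ``max`` when it falls exactly on a step boundary.
# >>> list(float_range({'min': 0.0, 'max': 1.0, 'step': 0.5}))
# [0.0, 0.5, 1.0]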
def n_dimensional_grid(config):
'''
Returns a multidimensional grid of points in the input space.
Parameters:
- ``config``: A dictionary containing information about the input space.
- All configuration information required by ``float_range``.
- ``input_length``: How many dimensions are in the input space.
'''
return itertools.product(float_range(config),
repeat=config['input_length'])
class Binary_Mixin(object):
'''
Inheritance mixin useful for setting the class attributes of
binary problems.
'''
data_range = staticmethod(binary_range)
operators = binary_operators
max_arity = 2
class Regression_Mixin(object):
'''
Inheritance mixin useful for setting the class attributes of
regression problems.
'''
data_range = staticmethod(float_range)
operators = regression_operators
max_arity = 2
class Neutral(Problem):
'''
Defines the Neutral problem, in which all individuals receive the same
    fitness. The only operator in this problem's function set is 'None',
    meaning only connection genes actually evolve.
'''
operators = [None]
max_arity = 2
def __init__(self, _):
'''
        Doesn't require initialization, but must be implemented.
'''
pass
def get_fitness(self, _):
'''
        Returns the fitness of the passed-in individual, which is always 0.
'''
return 0
class Even_Parity(Bounded_Problem, Binary_Mixin):
'''
Defines the Even Parity problem.
'''
def problem_function(self, inputs):
'''
Return the even parity of a list of boolean values.
'''
return [(sum(inputs) + 1) % 2]
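# Illustrative worked example (not part of the original module): Even_Parity
# outputs 1 when the number of set input bits is even, so (1, 0, 1) -> [1]
# because the sum is 2, and (1, 1, 1) -> [0] because the sum is 3.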
class Binary_Multiply(Bounded_Problem, Binary_Mixin):
'''
Defines the Binary Multiplier problem.
'''
def problem_function(self, inputs):
'''
Return the result of performing a binary multiplication of the first
half of the inputs with the second half. Will always have the same
number of output bits as input bits.
'''
# convert the two binary numbers to integers
joined = ''.join(map(str, inputs))
middle = len(joined) / 2
a, b = joined[:middle], joined[middle:]
# multiply the two numbers and convert back to binary
multiplied = bin(int(a, 2) * int(b, 2))[2:]
# pad the result to have enough bits
extended = multiplied.rjust(len(inputs), '0')
return map(int, extended)
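# Illustrative worked example (not part of the original module): with inputs
# (1, 1, 0, 1, 1, 0) the halves are '110' and '110' (6 * 6 = 36), and the
# padded binary result '100100' is returned as [1, 0, 0, 1, 0, 0].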
class Binary_Multiply_Miller(Binary_Multiply):
operators = [and_, and_neg_in, xor, or_]
class Binary_Multiply_Torresen(Binary_Multiply):
operators = [and_, xor]
class Multiplexer(Bounded_Problem, Binary_Mixin):
'''
Defines the Multiplexer (MUX) Problem.
'''
def problem_function(self, inputs):
'''
Uses the first k bits as a selector for which of the remaining bits to
return.
'''
k = int(math.log(len(inputs), 2))
index = int(''.join(map(str, inputs[:k])), 2) + k
return [inputs[index]]
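# Illustrative worked example (not part of the original module): with six
# inputs, k = int(log2(6)) = 2 selector bits; for (1, 0, 0, 0, 1, 0) the
# selector '10' is 2, so index = 2 + 2 = 4 and the output is [inputs[4]] = [1].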
class Demultiplexer(Bounded_Problem, Binary_Mixin):
'''
Defines the Demultiplexer (DEMUX) Problem.
'''
def problem_function(self, inputs):
'''
Returns the last input bit on the output line specified by the binary
index encoded on all inputs except the last bit.
'''
k = int(math.log(len(inputs) - 1, 2))
index = int(''.join(map(str, inputs[:k])), 2) + k
return [inputs[index]]
class Binary_Encode(Bounded_Problem, Binary_Mixin):
'''
Defines the Binary Encode problem.
'''
# Set the data range to be all possible inputs with a single set bit.
data_range = staticmethod(single_bit_set)
def problem_function(self, inputs):
'''
Returns the binary encoding of which input line contains a one.
'''
oneat = inputs.index(1)
binary = bin(oneat)[2:]
width = math.log(len(inputs), 2)
return map(int, binary.zfill(int(width)))
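# Illustrative worked example (not part of the original module): with four
# input lines, (0, 1, 0, 0) has its set bit at position 1, so the two-bit
# encoding returned is [0, 1].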
class Binary_Decode(Bounded_Problem, Binary_Mixin):
'''
Defines the Binary Decode problem.
'''
def problem_function(self, inputs):
'''
Returns a 1 on the output line specified by the binary input index
'''
combined = ''.join(map(str, inputs))
width = 2 ** len(inputs)
base = [0] * width
base[int(combined, 2)] = 1
return base
class Breadth(Bounded_Problem, Binary_Mixin):
'''
Defines the Breadth problem.
'''
# Set the data range to be all possible inputs with a single set bit.
data_range = staticmethod(single_bit_set)
# Set the list of possible operators to just be OR.
operators = [or_]
def problem_function(self, inputs):
'''
Returns true as long as at least one input is true.
'''
return [sum(inputs) > 0]
class TwoFloor(Bounded_Problem, Binary_Mixin):
'''
Defines the Two Floor Problem.
'''
# Set the data range to be all possible inputs with a single set bit.
data_range = staticmethod(single_bit_set)
# Set the list of possible operators to just be OR.
operators = [or_]
def problem_function(self, inputs):
'''
        Returns a string of bits half as long as the input string, where the
        only set output bit is at ``index // 2``, with ``index`` being the
        position of the set input bit.
'''
results = [0] * (len(inputs) // 2)
results[inputs.index(1) // 2] = 1
return results
class Depth(Problem):
'''
Defines the Depth problem.
'''
    # Set the list of possible operators to just min(X, Y) + 1.
operators = [lambda X, Y: min(X, Y) + 1]
max_arity = 2
def __init__(self, config):
'''
Saves configuration for use during evaluation.
'''
self.config = config
def get_fitness(self, individual):
'''
Returns the fitness of the individual as a percentage of maximum
fitness.
'''
score = individual.evaluate((0,))[0]
return score / float(self.config['graph_length'])
class Flat(Problem):
'''
Defines the Flat problem, in which all individuals receive fitness
based on how many connection genes are connected to the input.
The only operator in this function is 'None', meaning only
connection genes actually evolve.
'''
operators = [None]
max_arity = 2
def __init__(self, _):
'''
        Doesn't require initialization, but must be implemented.
'''
pass
def get_fitness(self, individual):
'''
Returns the percentage of connection genes connected to the input.
'''
correct, total = 0, 0
for gene in individual.genes:
if gene is not None:
if gene < 0:
correct += 1
total += 1
return correct / float(total)
class Novel(Problem, Binary_Mixin):
'''
Defines the Novel problem, which evaluates individuals based on how many
unique footprints the individual can create.
'''
def __init__(self, config):
        self.config = config
        complete = float(2 ** 2 ** config['input_length'])
self.best = float(min(complete, config['graph_length']))
def get_fitness(self, individual):
for inputs in binary_range(self.config):
individual.evaluate(inputs)
return len(set(individual.footprint)) / self.best
class Active(Problem):
'''
Defines the Active problem, in which all individuals receive fitness
based on how many active nodes they have.
The only operator in this function is 'None', meaning only
connection genes actually evolve.
'''
operators = [None]
max_arity = 2
def __init__(self, config):
'''
Saves configuration for use during evaluation.
'''
self.config = config
def get_fitness(self, individual):
'''
Returns the percentage of nodes that are active.
'''
return len(individual.active) / float(self.config['graph_length'])
class Koza_1(Bounded_Problem, Regression_Mixin):
'''
Defines the Koza-1 problem.
'''
    def problem_function(self, inputs):
'''
Return the result of Koza-1 on the specified input. Expects the input
as a single element list and returns a single element list.
'''
x = inputs[0]
return [x ** 4 + x ** 3 + x ** 2 + x]
class Pagie_1(Bounded_Problem, Regression_Mixin):
'''
Defines the Pagie-1 problem.
'''
# Set the data range to be an n dimensional grid.
data_range = staticmethod(n_dimensional_grid)
    def problem_function(self, inputs):
'''
Returns the result of Pagie-1 on the specified inputs.
'''
x, y = inputs
return [1.0 / (1 + x ** -4) + 1.0 / (1 + y ** -4)]
|
brianwgoldman/cgp
|
problems.py
|
Python
|
bsd-2-clause
| 15,308
|
#!/usr/bin/env python
# add species name to m8
import sys
from Bio import SeqIO
if len(sys.argv[1:]) < 1:
print 'python this.py foo.fsa'
raise SystemExit()
fasta = sys.argv[1]
f = open(fasta, 'r')
for i in f:
if i.startswith('>'):
j = i[i.find('../')+3: i.find('|')]
hd = i.split(' ')[0][1:]
print '>'+j+'|'+hd
else:
print i[:-1]
f.close()
|
Rinoahu/fastclust
|
example/deprecate/add_taxon2fas.py
|
Python
|
gpl-3.0
| 390
|
# Copyright (C) 2009 - TODAY Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import _, fields, models
from odoo.exceptions import UserError
from odoo.addons.l10n_br_fiscal.constants.fiscal import SITUACAO_EDOC_EM_DIGITACAO
class FiscalDocument(models.Model):
_inherit = "l10n_br_fiscal.document"
invoice_ids = fields.One2many(
comodel_name="account.invoice",
inverse_name="fiscal_document_id",
string="Invoices",
)
def unlink(self):
non_draft_documents = self.filtered(
lambda d: d.state != SITUACAO_EDOC_EM_DIGITACAO
)
if non_draft_documents:
            raise UserError(
                _("You cannot delete a fiscal document which is not in draft state.")
            )
return super().unlink()
|
OCA/l10n-brazil
|
l10n_br_account/models/fiscal_document.py
|
Python
|
agpl-3.0
| 832
|
'''
Structure definitions for the OSX MachO binary format.
'''
import vstruct
import vstruct.primitives as vs_prim
from vstruct.defs.macho.const import *
from vstruct.defs.macho.fat import *
from vstruct.defs.macho.loader import *
|
AbhishekShah212/School_Projects
|
esDay/rfcat_150225/vstruct/defs/macho/__init__.py
|
Python
|
mit
| 235
|
"""Module will not import due to invalid blahblah import."""
import blahblah.blah
print("I dont do much")
def arbmodinvalid_attribute():
"""Can't ever run on account of how blah doesn't exist."""
blahblah.blah.blah()
|
pypyr/pypyr-cli
|
tests/arbpack/arbinvalidimportmod.py
|
Python
|
apache-2.0
| 229
|
from flask import Blueprint, request
from skylines import api
from .json import jsonify
from .parser import parse_location
waves_blueprint = Blueprint('waves', 'skylines')
@waves_blueprint.route('/mountain_wave_project/')
@waves_blueprint.route('/mountain_wave_project', endpoint='list')
def _list():
location = parse_location(request.args)
return jsonify(api.get_waves_by_location(location))
|
kerel-fs/skylines
|
skylines/api/views/waves.py
|
Python
|
agpl-3.0
| 405
|
#-*- coding: utf-8 -*-
#Venom.
from resources.lib.gui.hoster import cHosterGui
from resources.lib.handler.hosterHandler import cHosterHandler
from resources.lib.gui.gui import cGui
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.config import cConfig
from resources.lib.parser import cParser
from resources.lib.util import cUtil
import urllib
import re
SITE_IDENTIFIER = 'full_streaming_org'
SITE_NAME = 'Full-Streaming.org'
SITE_DESC = 'films en streaming, vk streaming, youwatch, vimple , streaming hd , streaming 720p , streaming sans limite'
URL_MAIN = 'http://full-streaming.org/'
MOVIE_NEWS = ('http://full-streaming.org/index.php?dlenewssortby=date', 'showMovies')
MOVIE_VIEWS = ('http://full-streaming.org/index.php?dlenewssortby=news_read', 'showMovies')
MOVIE_COMMENTS = ('http://full-streaming.org/index.php?dlenewssortby=comm_num', 'showMovies')
MOVIE_NOTES = ('http://full-streaming.org/index.php?dlenewssortby=rating', 'showMovies')
MOVIE_GENRES = (True, 'showGenre')
SERIE_SERIES = ('http://full-streaming.org/series/', 'showMovies')
SERIE_VFS = ('http://full-streaming.org/series-fr/', 'showMovies')
SERIE_VOSTFRS = ('http://full-streaming.org/series-vostfr/', 'showMovies')
URL_SEARCH = ('http://full-streaming.biz/index.php?do=xfsearch&xf=', 'showMovies')
FUNCTION_SEARCH = 'showMovies'
def load():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', 'http://venom/')
oGui.addDir(SITE_IDENTIFIER, 'showSearch', 'Recherche', 'search.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Films Nouveautés', 'news.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', MOVIE_VIEWS[0])
oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Films Les plus vues', 'films.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', MOVIE_COMMENTS[0])
oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Films Les plus commentés', 'films.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', MOVIE_NOTES[0])
oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Films Les mieux notés', 'films.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', 'http://venom')
oGui.addDir(SITE_IDENTIFIER, 'showGenre', 'Films Genre', 'genres.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', 'http://full-streaming.org/index.php?do=search')
oGui.addDir(SITE_IDENTIFIER, 'showQlt', 'Films Qualités', 'films.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', SERIE_SERIES[0])
oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Series Nouveautés', 'series.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0])
oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Series VF', 'series.png', oOutputParameterHandler)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFRS[0])
oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Series VOSTFR', 'series.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showSearch():
oGui = cGui()
sSearchText = oGui.showKeyBoard()
if (sSearchText != False):
sUrl = "http://full-streaming.biz/index.php?do=xfsearch&xf="+sSearchText
#sUrl = 'http://full-streaming.org/xfsearch/'+urllib.quote(sSearchText)
showMovies(sUrl)
oGui.setEndOfDirectory()
return
def showGenre():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
liste = []
liste.append( ['Action','http://full-streaming.org/action/'] )
liste.append( ['Animation','http://full-streaming.org/animation/'] )
liste.append( ['Arts Martiaux','http://full-streaming.org/arts-martiaux/'] )
liste.append( ['Aventure','http://full-streaming.org/aventure/'] )
liste.append( ['Biopic','http://full-streaming.org/biopic/'] )
liste.append( ['Comedie','http://full-streaming.org/comedie/'] )
liste.append( ['Comedie Dramatique','http://full-streaming.org/comedie-dramatique/'] )
liste.append( ['Comedie Musicale','http://full-streaming.org/comedie-musicale/'] )
liste.append( ['Documentaire','http://full-streaming.org/documentaire/'] )
liste.append( ['Drame','http://full-streaming.org/drame/'] )
liste.append( ['Epouvante Horreur','http://full-streaming.org/epouvante-horreur/'] )
liste.append( ['Erotique','http://full-streaming.org/erotique'] )
liste.append( ['Espionnage','http://full-streaming.org/espionnage/'] )
liste.append( ['Famille','http://full-streaming.org/famille/'] )
liste.append( ['Fantastique','http://full-streaming.org/fantastique/'] )
liste.append( ['Guerre','http://full-streaming.org/guerre/'] )
liste.append( ['Historique','http://full-streaming.org/historique/'] )
liste.append( ['Musical','http://full-streaming.org/musical/'] )
liste.append( ['Policier','http://full-streaming.org/policier/'] )
liste.append( ['Peplum','http://full-streaming.org/peplum/'] )
liste.append( ['Romance','http://full-streaming.org/romance/'] )
liste.append( ['Science Fiction','http://full-streaming.org/science-fiction/'] )
liste.append( ['Spectacle','http://full-streaming.org/spectacle/'] )
liste.append( ['Thriller','http://full-streaming.org/thriller/'] )
liste.append( ['Western','http://full-streaming.org/western/'] )
liste.append( ['Divers','http://full-streaming.org/divers/'] )
for sTitle,sUrl in liste:
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showQlt():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
liste = []
liste.append( ['HD','http://full-streaming.org/hd/'] )
liste.append( ['BDrip Dvdrip','http://full-streaming.org/bdrip-dvdrip/'] )
liste.append( ['DvdScr R5','http://full-streaming.org/dvdscr-r5/'] )
liste.append( ['TS Cam','http://full-streaming.org/ts-cam/'] )
for sTitle,sUrl in liste:
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'films.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMovies(sSearch = ''):
oGui = cGui()
if sSearch:
sUrl = sSearch
else:
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request();
sHtmlContent = sHtmlContent.replace('<span class="likeThis">', '').replace('</span>','')
sPattern = 'class="movie movie-block">[ ]*<img src="([^<]+)" alt=".+?" title="([^<]+)"[ ]*/>.+?<h2 onclick="window.location.href=\'([^<]+)\'">.+?<div style="color:#F29000">.+?<div.+?>(.+?)</div>'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
oGui.addNone(SITE_IDENTIFIER)
if (aResult[0] == True):
total = len(aResult[1])
dialog = cConfig().createDialog(SITE_NAME)
for aEntry in aResult[1]:
cConfig().updateDialog(dialog, total)
if dialog.iscanceled():
break
sTitle = aEntry[1]
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', str(aEntry[2]))
oOutputParameterHandler.addParameter('sMovieTitle', str(aEntry[1]))
oOutputParameterHandler.addParameter('sThumbnail', str(aEntry[0]))
if '/series' in sUrl or '-saison-' in aEntry[2]:
oGui.addTV(SITE_IDENTIFIER, 'seriesHosters', sTitle,'', aEntry[0], aEntry[3], oOutputParameterHandler)
else:
oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sTitle, '', aEntry[0], aEntry[3], oOutputParameterHandler)
cConfig().finishDialog(dialog)
sNextPage = __checkForNextPage(sHtmlContent)
if (sNextPage != False):
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sNextPage)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Next >>>[/COLOR]', 'next.png', oOutputParameterHandler)
if not sSearch:
oGui.setEndOfDirectory()
def __checkForNextPage(sHtmlContent):
sPattern = '<div class="navigation".+? <span.+? <a href="(.+?)">'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
return aResult[1][0]
return False
def showHosters():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumbnail = oInputParameterHandler.getValue('sThumbnail')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request();
sHtmlContent = sHtmlContent.replace('<iframe src="//www.facebook.com/','').replace('<iframe src=\'http://creative.rev2pub.com','').replace('<iframe src=\'http://creative.ad120m.com/', '')
sPattern = '<iframe.+?src="(.+?)"'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
total = len(aResult[1])
dialog = cConfig().createDialog(SITE_NAME)
for aEntry in aResult[1]:
cConfig().updateDialog(dialog, total)
if dialog.iscanceled():
break
sHosterUrl = str(aEntry)
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
try:
oHoster.setHD(sHosterUrl)
except: pass
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumbnail)
cConfig().finishDialog(dialog)
oGui.setEndOfDirectory()
def seriesHosters():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumbnail = oInputParameterHandler.getValue('sThumbnail')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request();
sHtmlContent = sHtmlContent.replace('<iframe src="//www.facebook.com/','').replace('<iframe src=\'http://creative.rev2pub.com','').replace('<iframe src=\'http://creative.ad120m.com/', '')
sPattern = '<dd><a href="([^<]+)" class="zoombox.+?" title="(.+?)"><button class="btn">.+?</button></a></dd>'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
total = len(aResult[1])
dialog = cConfig().createDialog(SITE_NAME)
for aEntry in aResult[1]:
cConfig().updateDialog(dialog, total)
if dialog.iscanceled():
break
sHosterUrl = str(aEntry[0])
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
sTitle=re.sub(r'\[.*\]',r'',aEntry[1])
oHoster.setDisplayName(sTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumbnail)
cConfig().finishDialog(dialog)
oGui.setEndOfDirectory()
|
Brahimbaz/venom-xbmc-addons-beta
|
plugin.video.vstream/resources/sites/trash/full_streaming_org.py
|
Python
|
gpl-2.0
| 12,837
|
#
# Copyright (C) 2012 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
""""linkState event plugin"""
import copy
from nav.config import ConfigurationError
from nav.eventengine.alerts import AlertGenerator
from nav.eventengine.plugins import delayedstate
from nav.models.manage import Interface, Netbox
from nav.models.event import EventQueue as Event, EventQueueVar as EventVar
class LinkStateHandler(delayedstate.DelayedStateHandler):
"""Accepts linkState events"""
HAS_WARNING_ALERT = False
ALERT_WAIT_TIME = 'linkDown.alert'
handled_types = ('linkState',)
_target = None
def __init__(self, *args, **kwargs):
super(LinkStateHandler, self).__init__(*args, **kwargs)
self.config = LinkStateConfiguration(self.engine.config)
def get_target(self):
if not self._target:
self._target = Interface.objects.get(id=self.event.subid)
assert self._target.netbox_id == self.event.netbox.id
return self._target
def get_link_partner(self):
"""Returns the link partner of the target interface"""
return self.get_target().to_netbox
def handle(self):
if self._is_a_master_for_virtualized_instances():
self._copy_event_for_instances()
return super(LinkStateHandler, self).handle()
def _handle_end(self):
self._post_event_if_aggregate_restored() # always verify aggregates
return super(LinkStateHandler, self)._handle_end()
def _set_internal_state_down(self):
self._set_ifoperstatus(Interface.OPER_DOWN)
def _set_internal_state_up(self):
self._set_ifoperstatus(Interface.OPER_UP)
def _set_ifoperstatus(self, ifoperstatus):
ifc = self.get_target()
if ifc.ifoperstatus != ifoperstatus:
ifc.ifoperstatus = ifoperstatus
Interface.objects.filter(id=ifc.id).update(ifoperstatus=ifoperstatus)
def _get_up_alert(self):
alert = AlertGenerator(self.event)
alert.alert_type = "linkUp"
self._logger.info(
"Posting %s alert for %s", alert.alert_type, self.get_target()
)
return alert
def _get_down_alert(self):
self._post_event_if_aggregate_degraded() # always verify aggregates
alert = AlertGenerator(self.event)
alert.alert_type = "linkDown"
if any(
(
self._hold_back_alert_due_to_vlan_mismatch(),
self._hold_back_alert_due_to_redundancy_limit(),
)
):
self._logger.info(
"%s: withholding %s alert because of unmatched " "criteria",
self.get_target(),
alert.alert_type,
)
return None
return alert
def _post_down_warning(self):
pass
def _hold_back_alert_due_to_redundancy_limit(self):
if self.config.alert_only_on_redundancy_loss():
partner = self.get_link_partner()
redundancy_loss = partner and partner.up == Netbox.UP_UP
if redundancy_loss:
self._logger.info(
"likely link redundancy degradation: %s is "
"down, but link partner %s is still up",
self.get_target(),
partner,
)
else:
return True
return False
def _hold_back_alert_due_to_vlan_mismatch(self):
limited_to_vlans = self.config.get_vlan_limit_set()
if limited_to_vlans:
vlans = self._get_target_vlans()
if vlans.intersection(limited_to_vlans):
self._logger.info(
"%s vlans %r intersects with list of " "limited vlans %r",
self.get_target(),
vlans,
limited_to_vlans,
)
elif vlans:
self._logger.info(
"%s vlans %r does not intersect with list " "of limited vlans %r",
self.get_target(),
vlans,
limited_to_vlans,
)
return True
return False
def _get_target_vlans(self):
"""Returns the set of untagged/tagged vlans configured on the target
interface.
"""
ifc = self.get_target()
vlans = ifc.swportvlan_set.values('vlan__vlan')
vlans = {row['vlan__vlan'] for row in vlans}
return vlans
#
# Methods to handle aggregateLinkState event posting if this interface is
# part of an aggregate
#
def _post_event_if_aggregate_degraded(self):
if self.get_target().get_aggregator():
self._logger.info(
"down event for %s, posting linkDegraded event for %s",
self.get_target(),
self.get_target().get_aggregator(),
)
return self._get_aggregate_link_event(start=True)
def _post_event_if_aggregate_restored(self):
if self.get_target().get_aggregator():
self._logger.info(
"up event for %s, posting linkRestored event for %s",
self.get_target(),
self.get_target().get_aggregator(),
)
return self._get_aggregate_link_event(start=False)
def _get_aggregate_link_event(self, start):
target = self.get_target()
aggregator = target.get_aggregator()
event = Event()
event.source_id = event.target_id = 'eventEngine'
event.netbox_id = aggregator.netbox_id
event.subid = aggregator.id
event.event_type_id = 'aggregateLinkState'
event.state = event.STATE_START if start else event.STATE_END
event.save()
EventVar(
event_queue=event,
variable='alerttype',
value='linkDegraded' if start else 'linkRestored',
).save()
EventVar(event_queue=event, variable='aggregate', value=target.id).save()
EventVar(
event_queue=event, variable='aggregate_ifname', value=target.ifname
).save()
EventVar(
event_queue=event, variable='aggregate_ifalias', value=target.ifalias or ''
).save()
#
# Methods to handle duplication of events for virtualized netbox instances
#
def _is_a_master_for_virtualized_instances(self):
ifc = self.get_target()
return ifc and ifc.netbox and ifc.netbox.instances.count() > 0
def _copy_event_for_instances(self):
ifc = self.get_target()
netbox = ifc.netbox
for instance in netbox.instances.all():
self._copy_event_for_instance(netbox, instance, ifc)
def _copy_event_for_instance(self, netbox, instance, ifc):
try:
other_ifc = Interface.objects.get(netbox=instance, ifname=ifc.ifname)
except Interface.DoesNotExist:
self._logger.info(
"interface %s does not exist on instance %s", ifc.ifname, instance
)
return
new_event = copy.copy(self.event) # type: nav.models.event.EventQueue
new_event.pk = None
new_event.netbox = instance
new_event.device = None
new_event.subid = other_ifc.pk
self._logger.info('duplicating linkState event for %s to %s', ifc, instance)
new_event.save()
class LinkStateConfiguration(object):
"""Retrieves configuration options for the LinkStateHandler"""
def __init__(self, config):
self.config = config
def get_vlan_limit_set(self):
"""Returns a set of VLAN IDs to limit linkState alerts to"""
opt = ("linkdown", "limit_to_vlans")
if self.config.has_option(*opt):
vlanstring = self.config.get(*opt)
try:
vlans = [int(vlan) for vlan in vlanstring.split()]
except (TypeError, ValueError):
raise ConfigurationError("Invalid config value for %s" % opt)
return set(vlans)
else:
return set()
def alert_only_on_redundancy_loss(self):
"""Returns True if linkState alerts are only to be sent on linkDown's
that degrade a redundant link setup
"""
opt = ("linkdown", "only_redundant")
if self.config.has_option(*opt):
return self.config.getboolean(*opt)
else:
return True
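# Illustrative eventengine.conf options read by LinkStateConfiguration above
# (the section and option names come from the code; the values are hypothetical):
#
#   [linkdown]
#   limit_to_vlans = 10 20 30
#   only_redundant = yes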
|
hmpf/nav
|
python/nav/eventengine/plugins/linkstate.py
|
Python
|
gpl-3.0
| 8,992
|
"""
Facade methods to WFS HERBIE
Note: Herbie Species is a WFS 'feature' with the following structure (as of 18/08/2016).
All species information is in the properties field.
{
'geometry': None,
'id': 'herbie_hbvspecies_public.fid--57d0ec5d_156a0d39c8d_-2266',
    'type': 'Feature',
'properties': {
'added_on': '1997-11-02Z',
'all_vernaculars': None,
'author': '(C.E.Hubb.) Lazarides',
'comments': None,
'consv_code': None,
'editor': None,
'family_code': '031',
'family_nid': 22751,
'genus': 'Triodia',
'informal': None,
'infra_name': None,
'infra_name2': None,
'infra_rank': None,
'infra_rank2': None,
'is_current': 'Y',
'is_eradicated': None,
'kingdom_id': 3,
'linear_sequence': None,
'md5_rowhash': '0bb686e50b38be0ffc2d5b4f0f4870ff',
'name_id': 17879,
'naturalised': None,
'naturalised_certainty': None,
'naturalised_comments': None,
'naturalised_status': 'N',
'ogc_fid': 100,
'rank_id': 220,
'rank_name': 'Species',
'reference': 'Austral.Syst.Bot. 10:434 (1997)',
'species': 'helmsii',
'species_code': 'TRIHEL',
'species_name': 'Triodia helmsii',
'updated_on': '2004-12-09Z',
'vernacular': None
},
}
"""
from __future__ import absolute_import, unicode_literals, print_function, division
import logging
import requests
from confy import env
logger = logging.getLogger(__name__)
def get_key_for_value(dict_, value, default=None):
"""
A util function for dict reverse lookup.
Mostly used to find the species name from nameId given a species_name -> nameId dict
:param dict_:
:param value:
:param default:
    :return: the key for the given value, or default if not found
"""
for k, v in dict_.items():
if v == value:
return k
return default
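# Illustrative reverse lookups (values taken from the docstring example above):
#   get_key_for_value({'Triodia helmsii': 17879}, 17879)                -> 'Triodia helmsii'
#   get_key_for_value({'Triodia helmsii': 17879}, 0, default='unknown') -> 'unknown'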
class HerbieError(Exception):
pass
class Property:
def __init__(self, herbie_name):
self.herbie_name = herbie_name
class SpeciesFacade(object):
PROPERTY_SPECIES_NAME = Property('species_name')
PROPERTY_NAME_ID = Property('name_id')
def name_id_by_species_name(self):
"""
:return: a dict where key is species_name and the value is name_id
"""
species = self.get_all_species([self.PROPERTY_SPECIES_NAME, self.PROPERTY_NAME_ID])
return dict(
[(sp[self.PROPERTY_SPECIES_NAME.herbie_name], sp[self.PROPERTY_NAME_ID.herbie_name]) for sp in species]
)
def get_all_species(self, properties=None):
"""
:param properties: a sequence of Property, e.g [PROPERTY_SPECIES_NAME, PROPERTY_NAME_ID] or None for all
attributes
:return: Return a list of species properties (see structure above) but with only the specified attributes.
        NOTE: limiting the number of properties speeds up the request.
"""
        raise NotImplementedError('`get_all_species(properties)` must be implemented.')
class HerbieFacade(SpeciesFacade):
BASE_URL = env('HERBIE_SPECIES_WFS_URL',
'https://kmi.dbca.wa.gov.au/geoserver/ows?service=wfs&version=1.1.0'
'&request=GetFeature&typeNames=public:herbie_hbvspecies_public&outputFormat=application/json')
@staticmethod
def _add_attributes_filter_to_params(properties, params=None):
"""
:param properties: a sequence of Property
:param params: an initial parameter dictionary
:return:
"""
if properties:
if params is None:
params = {}
# WFS spec: {'propertyName': (p1,p2,..)}
params['propertyName'] = "({csv})".format(
csv=','.join([a.herbie_name for a in properties])
)
return params
@staticmethod
def _query_species(params=None):
r = requests.get(HerbieFacade.BASE_URL, params=params)
r.raise_for_status()
try:
return [f['properties'] for f in r.json()['features']]
except Exception as e:
# If we have an exception here it's probably because the request is not correct (XML error from geoserver)
message = 'Herbie returned an error: {}. \nURL: {}. \nResponse: {}'.format(e, r.url, r.content)
logger.warning(message)
raise HerbieError(message)
def name_id_by_species_name(self):
"""
:return: a dict where key is species_name and the value is name_id
"""
species = self.get_all_species([self.PROPERTY_SPECIES_NAME, self.PROPERTY_NAME_ID])
return dict(
[(sp[self.PROPERTY_SPECIES_NAME.herbie_name], sp[self.PROPERTY_NAME_ID.herbie_name]) for sp in species]
)
def get_all_species(self, properties=None):
"""
:param properties: a sequence of Property, e.g [PROPERTY_SPECIES_NAME, PROPERTY_NAME_ID] or None for all
attributes
:return: Return a list of species properties (see structure above) but with only the specified attributes.
        NOTE: limiting the number of properties speeds up the request.
"""
return self._query_species(self._add_attributes_filter_to_params(properties))
class NoSpeciesFacade(SpeciesFacade):
def get_all_species(self, properties=None):
return []
# TODO: implement a cached version of Herbie (memory/disk or db?). Cache renewal policy?
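# A minimal usage sketch (not part of the original module). It assumes network
# access to the HERBIE endpoint and only runs when this file is executed directly:
if __name__ == '__main__':
    facade = HerbieFacade()
    name_ids = facade.name_id_by_species_name()  # e.g. {'Triodia helmsii': 17879, ...}
    print(get_key_for_value(name_ids, 17879))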
|
gaiaresources/biosys
|
biosys/apps/main/utils_species.py
|
Python
|
apache-2.0
| 5,514
|
#!/usr/bin/python
import sys, os, threading, re
import tornado.web
import tornado.gen
import logging
import random
reload(sys)
sys.setdefaultencoding('utf8')
sys.path.append('..')
import common
import config
from handler import RequestHandler
from db import MySQLHelper
logger = logging.getLogger('web')
class AuthKeyHandler(RequestHandler):
@tornado.gen.coroutine
@common.request_log('POST')
@common.json_loads_body
def post(self):
if self.body_json_object is None:
self.exception_handle(
'Request data format exception, %s' % self.request.uri)
return
tel = self.body_json_object.get('tel')
if tel is None or len(tel) == 0:
self.exception_handle('Missing argument \'tel\'')
return
if not re.match(r'^[1][0-9]{10}$', tel):
self.exception_handle('\'tel\' format is not correct')
return
code = random.randint(100000, 999999)
# TODO Send SMS message
print 'Your auth code is %s' % code
logger.debug('Your auth code is %s' % code)
r = common.get_redis_0()
if r is None:
self.exception_handle('Invalid Redis connection')
return
try:
r.set(tel, code, ex=config.AuthCode_ExpireTime) # Block ?
except Exception, e:
self.exception_handle('The database operation failed (Redis.Set)')
return
self.write(self.gen_result(0, 'Successfully sent', 'ok'))
return
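# Illustrative request handled by the handler above (the phone number is
# hypothetical and must match ^[1][0-9]{10}$):
#
#   POST {"tel": "13800138000"}
#   -> a 6-digit code is generated and stored in Redis under the tel key,
#      expiring after config.AuthCode_ExpireTime (the ex= argument, in seconds).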
|
codemeow5/travellers-acs
|
travellers/authkey.py
|
Python
|
gpl-2.0
| 1,320
|
#!/usr/bin/env python
#
# Tests for the dakotathon.method.vector_parameter_study module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import raises, assert_is_instance, assert_true, assert_equal
from dakotathon.method.vector_parameter_study import VectorParameterStudy
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print("\n*** " + __name__)
global v
v = VectorParameterStudy()
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
def test_init_no_params():
"""Test creating an instance with no parameters."""
v1 = VectorParameterStudy()
assert_is_instance(v1, VectorParameterStudy)
def test_method_attr():
"""Test the value of the method attribute."""
assert_equal(v.method, "vector_parameter_study")
def test_get_final_point():
"""Test getting the final_point property."""
assert_true(type(v.final_point) is tuple)
def test_set_final_point():
"""Test setting the final_point property."""
point = (42,)
v.final_point = point
assert_equal(v.final_point, point)
@raises(TypeError)
def test_set_final_point_fails_if_scalar():
"""Test that the final_point property fails with scalar."""
point = 42
v.final_point = point
def test_get_n_steps():
"""Test getting the n_steps property."""
assert_true(type(v.n_steps) is int)
def test_set_n_steps():
"""Test setting the n_steps property."""
n = 42
v.n_steps = n
assert_equal(v.n_steps, n)
@raises(TypeError)
def test_n_steps_fails_if_not_int():
"""Test that setting n_steps to a non-int fails."""
value = 42.0
v.n_steps = value
def test_str_special():
"""Test type of __str__ method results."""
s = str(v)
assert_true(type(s) is str)
def test_str_length():
"""Test the default length of __str__."""
v1 = VectorParameterStudy()
s = str(v1)
n_lines = len(s.splitlines())
assert_equal(n_lines, 5)
|
csdms/dakota
|
dakotathon/tests/test_method_vector_parameter_study.py
|
Python
|
mit
| 2,144
|
import os
import sys
import tempfile
import shutil
import pyfastaq
import pysam
from iva import common, mapping
class Error (Exception): pass
def _head_fastaq(reads1, reads2, outfile, count):
'''Takes first N sequences from a pair of interleaved fasta/q files. Output is in FASTA format. Returns hash of read length distribution (key=read length, value=count)'''
seq_reader1 = pyfastaq.sequences.file_reader(reads1)
if reads2 is not None:
seq_reader2 = pyfastaq.sequences.file_reader(reads2)
f = pyfastaq.utils.open_file_write(outfile)
lengths = {}
original_line_length = pyfastaq.sequences.Fasta.line_length
pyfastaq.sequences.Fasta.line_length = 0
i = 0
for seq1 in seq_reader1:
if reads2 is not None:
seq2 = next(seq_reader2)
else:
seq2 = None
for seq in (seq1, seq2):
if seq is None:
continue
lengths[len(seq)] = lengths.get(len(seq), 0) + 1
if type(seq) == pyfastaq.sequences.Fastq:
print(pyfastaq.sequences.Fasta(seq.id, seq.seq), file=f)
else:
print(seq, file=f)
i += 1
if i >= count:
break
pyfastaq.utils.close(f)
pyfastaq.sequences.Fasta.line_length = original_line_length
return lengths
def _median(d):
'''Returns the median key from histogram (as a dict with values=counts) of frequencies'''
assert(len(d))
count = 0
total = sum(d.values())
for key in sorted(d.keys()):
count += d[key]
if count >= 0.5 * total:
return key
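# Illustrative behaviour of the histogram median above (hypothetical counts):
#   _median({100: 3, 150: 2}) -> 100   (cumulative count 3 >= 0.5 * 5)
#   _median({100: 1, 150: 1}) -> 100   (ties resolve to the smaller key)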
def _run_kmc_with_script(script, reads, outfile, kmer, min_count, max_count, m_option, verbose, allow_fail, threads=1):
f = pyfastaq.utils.open_file_write(script)
print('set -e', file=f)
kmc_command = ''.join([
'kmc -fa',
' -m', str(m_option),
' -k', str(kmer),
' -sf', str(threads),
' -ci', str(min_count),
' -cs', str(max_count),
' -cx', str(max_count),
' ', reads,
' kmc_out',
' $PWD'
])
print(kmc_command, end='', file=f)
if verbose >= 2:
print('', file=f)
print('run kmc:', os.getcwd(), kmc_command)
else:
print(' > /dev/null', file=f)
print('kmc_dump', 'kmc_out', 'kmc_out.dump', file=f)
print('sort -k2nr', 'kmc_out.dump >', outfile, file=f)
pyfastaq.utils.close(f)
return common.syscall('bash ' + script, allow_fail=allow_fail)
def _run_kmc(reads, outprefix, kmer, min_count, max_count, verbose=0, threads=1):
'''Runs the kmer counting program kmc on a FASTA file. Returns filename made by kmc of the counts of kmers'''
reads = os.path.abspath(reads)
tmpdir = tempfile.mkdtemp(prefix='tmp.run_kmc.', dir=os.getcwd())
original_dir = os.getcwd()
kmer_counts_file = os.path.abspath(outprefix + '.kmer_counts')
os.chdir(tmpdir)
    # KMC seems a bit flaky with the -m RAM option and dies straight away.
    # The range is 4-32 (GB).
    # Try 32 (the default) first, then 4, then give up. This seems to make a
    # difference, regardless of the RAM available on the machine.
ran_ok = _run_kmc_with_script('run_kmc.sh', reads, kmer_counts_file, kmer, min_count, max_count, 32, verbose, True, threads=threads)
if not ran_ok:
if verbose:
print('First try of running kmc failed. Trying again with -m4 instead of -m32...', flush=True)
ran_ok = _run_kmc_with_script('run_kmc.sh', reads, kmer_counts_file, kmer, min_count, max_count, 4, verbose, False, threads=threads)
os.chdir(original_dir)
shutil.rmtree(tmpdir)
if not ran_ok:
raise Error('Error running kmc. Cannot continue')
return kmer_counts_file
def _kmc_to_kmer_counts(infile, number, kmers_to_ignore=None, contigs_to_check=None, verbose=0, threads=1):
'''Makes a dict of the most common kmers from the kmer counts output file of kmc'''
counts = {}
if os.path.getsize(infile) == 0:
return counts
tmpdir = tempfile.mkdtemp(prefix='tmp.common_kmers.', dir=os.getcwd())
ref_seqs_file = os.path.join(tmpdir, 'ref.fa')
counts_fasta_file = os.path.join(tmpdir, 'counts.fa')
using_refs = _write_ref_seqs_to_be_checked(ref_seqs_file, kmers_to_ignore=kmers_to_ignore, contigs_to_check=contigs_to_check)
if not using_refs:
if verbose > 2:
print('No existing kmers or contigs to check against. Using most common kmer for seed', flush=True)
f = pyfastaq.utils.open_file_read(infile)
for line in f:
if len(counts) >= number:
break
try:
kmer, count = line.rstrip().split()
count = int(count)
except:
raise Error('Error getting kmer info from this line:\n' + line)
counts[kmer] = count
pyfastaq.utils.close(f)
else:
if verbose > 2:
print('Existing kmers or contigs to check against. Running mapping', flush=True)
mapping_prefix = os.path.join(tmpdir, 'map')
bam = mapping_prefix + '.bam'
_counts_file_to_fasta(infile, counts_fasta_file)
mapping.map_reads(counts_fasta_file, None, ref_seqs_file, mapping_prefix, minid=0.9, index_k=9, index_s=1, sort=False, verbose=verbose, required_flag='0x4', threads=threads)
sam_reader = pysam.Samfile(bam, "rb")
for sam in sam_reader.fetch(until_eof=True):
if len(counts) >= number:
break
try:
count = sam.qname.split('_')[1]
except:
raise Error('Error getting count from sequence name in bam:\n' + sam.qname)
nucleotides = common.decode(sam.seq)
if nucleotides not in kmers_to_ignore:
counts[nucleotides] = count
elif verbose >= 4:
print('Skipping seed already found:', nucleotides)
sam_reader.close()
shutil.rmtree(tmpdir)
return counts
def _write_ref_seqs_to_be_checked(outfile, kmers_to_ignore=None, contigs_to_check=None):
if (kmers_to_ignore is None or len(kmers_to_ignore) == 0) and (contigs_to_check is None or len(contigs_to_check) == 0):
return False
f = pyfastaq.utils.open_file_write(outfile)
i = 1
if kmers_to_ignore is not None:
for kmer in kmers_to_ignore:
print('>', i, sep='', file=f)
print(kmer, file=f)
i += 1
if contigs_to_check is not None and len(contigs_to_check) > 0:
original_line_length = pyfastaq.sequences.Fasta.line_length
pyfastaq.sequences.Fasta.line_length = 0
for name in contigs_to_check:
if len(contigs_to_check[name].fa) > 20:
print(contigs_to_check[name].fa, file=f)
pyfastaq.sequences.Fasta.line_length = original_line_length
pyfastaq.utils.close(f)
return True
def _counts_file_to_fasta(infile, outfile):
fin = pyfastaq.utils.open_file_read(infile)
fout = pyfastaq.utils.open_file_write(outfile)
i = 1
for line in fin:
try:
kmer, count = line.rstrip().split()
count = int(count)
except:
raise Error('Error getting kmer info from this line:\n' + line)
print('>', i, '_', count, sep='', file=fout)
print(kmer, file=fout)
i += 1
pyfastaq.utils.close(fin)
pyfastaq.utils.close(fout)
def get_most_common_kmers(reads1, reads2, kmer_length=None, head=100000, min_count=10, max_count=100000000, most_common=100, method='kmc', verbose=0, ignore_seqs=None, contigs_to_check=None, kmc_threads=1, map_threads=1):
'''Gets the most common kmers from a pair of interleaved read FASTA or FASTQ files. Takes the first N sequences (determined by head). Returns a dict of kmer=>frequency. If kmer length is not given, use min(0.8 * median read length, 95)'''
tmpdir = tempfile.mkdtemp(prefix='tmp.common_kmers.', dir=os.getcwd())
counts = {}
reads = os.path.join(tmpdir, 'reads.fa')
read_lengths = _head_fastaq(reads1, reads2, reads, head)
if len(read_lengths) == 0:
shutil.rmtree(tmpdir)
return counts
if kmer_length is None:
kmer_length = min(int(0.8 * _median(read_lengths)), 95)
if method == 'kmc':
counts_file = _run_kmc(reads, os.path.join(tmpdir, 'out'), kmer_length, min_count, max_count, verbose=verbose, threads=kmc_threads)
counts = _kmc_to_kmer_counts(counts_file, most_common, kmers_to_ignore=ignore_seqs, contigs_to_check=contigs_to_check, verbose=verbose, threads=map_threads)
else:
raise Error('Method "' + method + '" not supported in kcount.get_most_common_kmers(). Cannot continue.')
shutil.rmtree(tmpdir)
return counts
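# A minimal usage sketch (not part of the original module). It assumes kmc and
# kmc_dump are on PATH, the iva mapping dependencies are installed, and the
# read file names are hypothetical:
if __name__ == '__main__':
    seeds = get_most_common_kmers('reads_1.fa', 'reads_2.fa', most_common=10)
    print(seeds)  # dict mapping each candidate seed kmer to its count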
|
satta/iva
|
iva/kcount.py
|
Python
|
gpl-3.0
| 8,789
|
# -*- coding: utf-8 -*-
#
# django-dynamic-scraper documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 5 15:05:19 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-dynamic-scraper'
copyright = u'2015, Holger Drewes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7'
# The full version, including alpha/beta/rc tags.
release = '0.7-beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-dynamic-scraperdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-dynamic-scraper.tex', u'django-dynamic-scraper Documentation',
u'Holger Drewes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-dynamic-scraper', u'django-dynamic-scraper Documentation',
[u'Holger Drewes'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-dynamic-scraper', u'django-dynamic-scraper Documentation',
u'Holger Drewes', 'django-dynamic-scraper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
c3pko/GovScrape
|
docs/conf.py
|
Python
|
bsd-3-clause
| 7,910
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .webrisk import (
ComputeThreatListDiffRequest,
ComputeThreatListDiffResponse,
RawHashes,
RawIndices,
RiceDeltaEncoding,
SearchHashesRequest,
SearchHashesResponse,
SearchUrisRequest,
SearchUrisResponse,
ThreatEntryAdditions,
ThreatEntryRemovals,
CompressionType,
ThreatType,
)
__all__ = (
"ComputeThreatListDiffRequest",
"ComputeThreatListDiffResponse",
"RawHashes",
"RawIndices",
"RiceDeltaEncoding",
"SearchHashesRequest",
"SearchHashesResponse",
"SearchUrisRequest",
"SearchUrisResponse",
"ThreatEntryAdditions",
"ThreatEntryRemovals",
"CompressionType",
"ThreatType",
)
|
googleapis/python-webrisk
|
google/cloud/webrisk_v1beta1/types/__init__.py
|
Python
|
apache-2.0
| 1,284
|
# coding=utf-8
"""Mixins used to give common functionality to NimLime commands."""
import inspect
import sublime
from NimLime.core import settings
SUBLIME_VERSION = int(sublime.version())
EXE_NOT_FOUND_MSG = ('Unable to run command, the following executables could '
'not be found: ')
NO_SETTINGS_SELECTOR_MSG = ("NimLime command {0} in {1} has no settings "
"selector.")
class NimLimeMixin(object):
"""
Mixin class for commands and event listeners.
Implements additional functionality for setting loading, requirements,
etc.
Note: The docstring for the command functions as the command description.
"""
# Executable requirements.
# Set these to 'true' in the implementing class in order to specify that
# the command should only be visible/enabled when the stated condition is
# true
requires_nim_syntax = False # The current view must be using Nim syntax.
# Setting entries associated with the command or event listener.
# Each entry should either be a tuple of the form
# (attribute_name, setting_name, default_value)
# or a tuple containing sub-entries of the same form.
settings_selector = None
setting_entries = (
('enabled', '{0}.enabled', False),
)
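    # Illustrative (hypothetical) subclass entries following the format
    # described above -- the inherited entries plus one flat entry:
    #
    #     setting_entries = (
    #         NimLimeMixin.setting_entries,
    #         ('timeout', '{0}.timeout', 30),
    #     )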
def __getattr__(self, item):
"""Used to satisfy static checkers."""
pass
def __init__(self):
# print("Creating", self)
settings.notify_on_change(self._load_settings)
self._load_settings()
def _get_setting(self, key, default):
"""
Retrieve a setting value.
Retrieve the setting value associated with the given key, returning the
given default if the setting doesn't exist.
The key must have a format specifier of '{0}'!
"""
# import traceback;traceback.print_stack()
if self.settings_selector is None:
raise Exception(NO_SETTINGS_SELECTOR_MSG.format(
self.__class__.__name__, inspect.getfile(self.__class__)
))
formatted_key = key.format(self.settings_selector)
result = settings.get(formatted_key, default)
return result
def _load_settings(self):
# Recursively load settings
def _is_setting_entry(entry):
return (
len(entry) == 3 and
isinstance(entry[0], str) and
isinstance(entry[1], str)
)
def _load_entry(entry):
if _is_setting_entry(entry):
attr_name, setting_name, default = entry
value = self._get_setting(setting_name, default)
setattr(self, attr_name, value)
elif isinstance(entry, tuple):
for sub_entry in entry:
_load_entry(sub_entry)
else:
raise Exception('Bad setting entry type')
_load_entry(self.setting_entries)
def is_enabled(self, view=None):
if not self.enabled:
return False
return True
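        # NOTE: the early "return True" above makes the view/syntax checks
        # below unreachable as written.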
if view is None:
view = sublime.active_window().active_view()
syntax = view.settings().get('syntax', '')
result = True
if self.requires_nim_syntax and not syntax.find('Nim.'):
result = False
return result
def is_visible(self):
return self.is_enabled()
def description(self, *args, **kwargs):
return self.__doc__
del NimLimeMixin.__getattr__
class NimLimeOutputMixin(NimLimeMixin):
"""A mixin for commands that generate output."""
output_panel = None
output_panel_tag = None
setting_entries = (
NimLimeMixin.setting_entries,
# ('output_limit', '{0}.output.limit', 0),
('show_output', '{0}.output.show', True),
('send_output', '{0}.output.send', True),
('output_tag', '{0}.output.tag', 'nimlime'),
('output_panel_theme', '{0}.output.panel_theme', '')
)
def _refresh_output_panel(self, view):
window = view.window()
tag = self.output_tag.format(
view_id=view.id(),
buffer_id=view.id(),
file_name=view.file_name(),
view_name=view.name(),
window_id=window.id()
)
self.output_panel = window.create_output_panel(tag)
# TODO Find a way to be more reactive
output_panel_theme = self.output_panel_theme
if not output_panel_theme:
output_panel_theme = view.settings().get("color_scheme")
self.output_panel.settings().set('color_scheme', output_panel_theme)
self.output_panel.settings().set('word_wrap', True)
return tag
def write_to_output(self, content, view):
"""Write the given content to an output view."""
window = view.window()
tag = self._refresh_output_panel(view)
# Output to the view
# if False and self.clear_output:
# self.output_panel.run_command(
# 'nimlime_output',
# dict(
# action='erase',
# args=(0, self.output_panel.size())
# )
# )
self.output_panel.run_command(
'append',
dict(characters = content)
)
# Show the view
if self.show_output:
window.run_command(
'show_panel',
{'panel': 'output.' + tag}
)
|
Varriount/NimLime
|
core/utils/mixins.py
|
Python
|
mit
| 5,478
|
import os
from azure.storage.common import CloudStorageAccount
class DownLoadJob():
"This is the class for download job"
def __init__(self, jobSession, targetFolder, sourceShare, sourceDir=None):
print('+++++ Create download job +++++')
self.jobSession = jobSession
self.sourceShare = sourceShare
self.sourceDir = sourceDir
self.targetFolder = targetFolder
def startdownload(self):
"method for downloading the data from share"
if self.sourceDir != None:
print('----- Checking folder - ' + self.sourceDir)
generator = self.jobSession.list_directories_and_files(
self.sourceShare, self.sourceDir)
else:
print('----- Checking folder - ' + self.sourceShare)
generator = self.jobSession.list_directories_and_files(
self.sourceShare)
for file_or_dir in generator:
classstr = str(type(file_or_dir))
if 'Directory' in classstr:
dirpath = self.targetFolder + '/' + file_or_dir.name
try:
os.makedirs(dirpath)
except OSError:
if not os.path.isdir(dirpath):
raise
if self.sourceDir != None:
subdirpath = self.sourceDir + "/" + file_or_dir.name
else:
subdirpath = file_or_dir.name
print('--------- Enumerating subfolder - ' + file_or_dir.name)
innerjob = DownLoadJob(
self.jobSession, dirpath, self.sourceShare, subdirpath)
innerjob.startdownload()
else:
destfilepath = self.targetFolder + '/' + file_or_dir.name
print('-------- Downloading file - ' + file_or_dir.name)
self.jobSession.get_file_to_path(
self.sourceShare, self.sourceDir, file_or_dir.name, destfilepath)
STORAGE_ACCOUNT_NAME = ''
STORAGE_ACCOUNT_KEY = ''
print('Download the files from Azure File Share')
try:
sourceaccount = CloudStorageAccount(
STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY)
session = sourceaccount.create_file_service()
job = DownLoadJob(session, "~/temp", 'dir')
job.startdownload()
except Exception as e:
print('Error occurred in the sample.', e)
|
nazheng/Scrach
|
Download.py
|
Python
|
mit
| 2,495
|
# Copyright (c) 2019-2020 Alexander Todorov <atodorov@MrSenko.com>
# Licensed under the GPL 2.0: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
from django.urls import re_path
from . import views
urlpatterns = [
re_path(r"^(?P<pk>\d+)/$", views.Get.as_view(), name="bugs-get"),
re_path(r"^new/$", views.New.as_view(), name="bugs-new"),
re_path(r"^(?P<pk>\d+)/edit/$", views.Edit.as_view(), name="bugs-edit"),
re_path(r"^search/$", views.Search.as_view(), name="bugs-search"),
re_path(r"^comment/$", views.AddComment.as_view(), name="bugs-comment"),
]
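# Illustrative lookups for the routes above (assuming the URLConf is included
# without a namespace; the pk value is hypothetical):
#   reverse('bugs-get', args=[1])  -> '<prefix>/1/'
#   reverse('bugs-new')            -> '<prefix>/new/'
# where <prefix> is whatever path this URLConf is mounted under.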
|
kiwitcms/Kiwi
|
tcms/bugs/urls.py
|
Python
|
gpl-2.0
| 583
|
#!/usr/bin/python
### TBLASTX formatting
### MATCH = Only the first match kept
MATCH = 0 # Only first match wanted
#MATCH = 1 # All matches wanted
### SUBMATCH = several parts of the same sequence match the query
SUBMATCH = 0 # SUBMATCH NOT WANTED (ONLY FIRST HIT)
#SUBMATCH = 1 # SUBMATCH WANTED
### NAME FORMATTING:
# [A] FORMAT QUERY NAME 1st STEP [IN DEF1]
# [B] FORMAT MATCH NAME 1st STEP [IN DEF2.1]
# [C] FORMAT MATCH NAME 2nd STEP [MIDDLE of DEF 2.3]
# [D] FORMAT QUERY NAME 2nd STEP [END of DEF 2.3]
# [E] FORMAT MATCH NAME 3rd STEP [END of DEF 2.3]
### SPECIFICITY TBLASTX (/BLASTN) formatting:
## 1/ "TBLASTX" formatting => At start of "RUN RUN RUN" change the keyword
## 2/ change line "if keyword in nextline:" in function "split_file"
## 3/ change "Strand" by "Frame" in function "get_information_on_matches"
########################################
########################################
### DEF 1. Split each "BLASTN" event ###
########################################
########################################
#def split_file(file_in, file_out, keyword):
def split_file(path_in, keyword):
print path_in
file_in = open(path_in, "r")
RUN = ''
BASH1={}
while 1:
nextline = file_in.readline()
#if not nextline:
# break
##################################
##################################
### [A] FORMATTING QUERY NAME ###
##################################
##################################
### Get query name ###
if nextline[0:6]=='Query=':
print "[%s]" %nextline[:-1]
L1 = string.split(nextline, "||")
L2 = string.split(L1[0], " ")
query = L2[1]
if query[-1] == "\n":
query = query[:-1]
print "[%s]" %query
######################################
######################################
### [A] END FORMATTING QUERY NAME ###
######################################
######################################
### split the file with keyword ###
if keyword in nextline:
#if nextline[:6] == keyword: # start of a "RUN" block // Treatment of lines starting with the keyword ("BLASTN") only
# Two cases here:
            #1# If it is the first "RUN" in the block (i.e. the first occurrence of "BLASTN" in the file), we just add the new lines to "RUN"; second, we also detect the 'key' of bash1, which is the "query" name; and third, we save this "RUN" in bash1 once we detect a new "RUN" (i.e. a new line beginning with "BLASTN").
            #2# If it isn't the first run, we save the previous "RUN" in "bash1", then re-initialize RUN (RUN = '') before appending lines to the new "RUN"
if RUN == '': # case #1#
RUN = RUN + nextline # we just added the first line of the file
else: # case #2# (there was a run before)
BASH1[query] = RUN # add the previous run to the bash
RUN = '' # re-initialize the "RUN"
RUN = RUN + nextline # add the line starting with the keyword ("BLASTN") (except the first line of the file (the first "RUN")
else: # Treatment of the subsequent lines of the one starting with the keyword ("BLASTN") (which is not treated here but previously)
RUN = RUN + nextline
if not nextline: # when no more line, we should record the last "RUN" in the bash1
#print "END = %s" %query
BASH1[query] = RUN # add the last "RUN"
break
file_in.close()
#file_out.close()
#print "BASH1.keys() = %s" %BASH1.keys()
return(BASH1)
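# Shape of the returned BASH1 (hypothetical key and truncated value): each query
# name maps to the raw text of its TBLASTX block, e.g.
#   {'contig_001': 'TBLASTX 2.2.26 ...\nQuery= contig_001 ...'}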
#########################################################
#########################################################
### 2. Parse blast output for each query
#########################################################
### 2.1. detect matches (i.e. 'Sequences producing significant alignments:' ###
def detect_Matches(query, MATCH, WORK_DIR):
F5 = open("%s/tmp/blastRun2.tmp" %WORK_DIR, 'w')
F5.write(bash1[query])
F5.close()
F6 = open("%s/tmp/blastRun2.tmp" %WORK_DIR, 'r')
list1 =[]
list2 =[]
while 1:
nexteu = F6.readline()
if not nexteu : break
if "***** No hits found ******" in nexteu :
hit = 0
#print "NO HITS FOUND"
break
if 'Sequences producing significant alignments:' in nexteu:
hit = 1
#print "HITS FOUND!!!!!!!!!!"
F6.readline() # jump a line
while 1:
nexteu2 = F6.readline()
if nexteu2[0]==">": break
######################################
######################################
### [B] FORMAT MATCH NAME 1st STEP ###
######################################
######################################
if nexteu2 != '\n':
LL1 = string.split(nexteu2, " ") # specific NORTH database names !!!!!!!
match = LL1[0] #### SOUTH databank // NORTH will have "|" separators
list1.append(match)
match2 = ">" + LL1[0] # more complete name // still specific NORTH database names !!!!!!!
list2.append(match2) #### SOUTH databank // NORTH will have "|" separators
if MATCH == 0: ## Only read the 1rst line (i.e. the First Match)
break
else: ## Read the other lines (i.e. All the Matches)
continue
##########################################
##########################################
### [B] END FORMAT MATCH NAME 1st STEP ###
##########################################
##########################################
break
##############################################
## [OPTION 1st HIT] ONLY KEEP THE FIRST ENTRY FOR EACH QUERY
#if list1 != []: # when no match, this list is empty
# hit1 = list1[0]
# list1 = [hit1]
#if list2 != []: # when no match this list is empty
# hit2 = list2[0]
# list2 = [hit2]
## [/OPTION 1st HIT]
##############################################
#print "LIST1 = %s" %list1
#print "LIST2 = %s" %list2
F6.close()
return(list1, list2, hit) # list1 = short name // list2 = more complete name
############################################
### 2.2. Get Information on matches ###
### Function used in the next function (2.3.)
#############################################
def get_information_on_matches(list_of_line):
for line in list_of_line:
## Score and Expect
if "Score" in line:
#print line
line = line[:-1] # remove "\n"
S_line = string.split(line, " = ")
Expect = S_line[-1] ## ***** Expect
S_line2 = string.split(S_line[1], " bits ")
Score = string.atof(S_line2[0])
## Identities/gaps/percent/divergence/length_matched
elif "Identities" in line:
line = line[:-1] # remove "\n"
g = 0
if "Gaps" in line:
#print "HIT!!!"
pre_S_line = string.split(line, ",")
identity_line = pre_S_line[0]
gaps_line = pre_S_line[1]
g = 1
else:
identity_line = line
g = 0
## treat identity line
S_line = string.split(identity_line, " ")
identities = S_line[-2] ## ***** identities
#print "\t\tIdentities = %s" %identities
S_line2 = string.split(identities, "/")
hits = string.atof(S_line2[0]) ## ***** hits
length_matched = string.atof(S_line2[1]) ## ***** length_matched
abs_nb_differences = length_matched - hits ## ***** abs_nb_differences
## identity_percent = S_line[-1]
identity_percent = hits/length_matched * 100 ## ***** identity_percent
#print "\t\tIdentity (percent) = %.2f" %identity_percent
divergence_percent = abs_nb_differences/length_matched*100 ## ***** divergence_percent
#print "\t\tDivergence (percent) = %.2f" %divergence_percent
## treat gap line if any
if g ==1: # means there are gaps
S_line3 = string.split(gaps_line, " ")
gaps_part = S_line3[-2]
S_line4 = string.split(gaps_part, "/")
gaps_number = string.atoi(S_line4[0]) ## ***** gaps_number
#print "\t\tGaps number = %s" %gaps_number
real_differences = abs_nb_differences - gaps_number ## ***** real_differences
real_divergence_percent = (real_differences/length_matched)*100 ## ***** real_divergence_percent
#print "\t\tReal divergence (percent)= %.2f" %real_divergence_percent
else:
gaps_number = 0
#print "\t\tGaps number = %s" %gaps_number
real_differences = 0
real_divergence_percent = divergence_percent
## Strand
#elif "Strand" in line:
# line = line[:-1] # remove "\n"
# S_line = string.split(line, " = ")
# strand = S_line[1]
# print "\t\tStrand = %s" %strand
## Frame
elif "Frame" in line:
line = line[:-1] # remove "\n"
S_line = string.split(line, " = ")
frame = S_line[1]
#print "\t\tFrame = %s" %frame
list_informations=[length_matched, Expect, Score, identities, hits, identity_percent, divergence_percent,gaps_number, real_divergence_percent, frame, length_matched]
return(list_informations)
###############################################
###############################################
### 2.3. get sequences ###
### [+ get informations from the function 2.2.]
###############################################
###############################################
def get_sequences(query, list2, SUBMATCHEU, WORK_DIR):
#print "\t[get_sequence BEGIN]"
list_Pairwise = []
F7 = open("%s/tmp/blastRun3.tmp" %WORK_DIR, 'w')
F7.write(bash1[query]) # bash1[query] ==> blast output for each query
F7.close()
F8 = open("%s/tmp/blastRun3.tmp" %WORK_DIR, 'r')
text1 = F8.readlines()
miniList = []
for name in list2: # "list2" contains name of matched sequences (long version! the list1 is the same list but for short version names). It was previously generated by "detect_Matches" function
l = -1
for n in text1:
l = l+1
if name in n:
i = l
miniList.append(i) # content positions in the list "text1", of all begining of match (e.g. >gnl|UG|Apo#S51012099 [...])
miniList.reverse()
if miniList != []:
length = len(miniList)
ii = 0
Listing1 = []
while ii < length:
iii = miniList[ii]
entry = text1[iii:]
text1 = text1[:iii]
Listing1.append(entry) # each "entry" = list of thing beginning by ">"
ii = ii+1 # Listing1 is a table of table!!
Listing1.append(text1) # "text1" = the first lines (begin with "BLASTN 2.2.1 ...]"
Listing1.reverse()
Listing2 = Listing1[1:] # remove the first thing ("BLASTN ...") and keep only table beginning with a line with ">"
SEK = len(Listing2)
NB_SEK = 0
for e1 in Listing2: # "Listing2" contents all the entries begining with ">"
NB_SEK = NB_SEK + 1
list51 = []
l = -1
for line in e1:
l = l+1
if "Score =" in line:
index = l
list51.append(l) # index of the lines with score
list51.reverse()
Listing3 = []
for i5 in list51:
e2 = e1[i5:]
Listing3.append(e2)
e1 = e1[:i5]
######################################
######################################
### [C] FORMAT MATCH NAME 2nd STEP ###
######################################
######################################
BigFastaName = e1 ### LIST OF LINES <=> What is remaining after removing all the hit with "Score =", so all the text comprise between ">" and the first "Score =" ==> Include Match name & "Length & empty lines
SmallFastaName = BigFastaName[0] ## First line <=> MATCH NAME
SmallFastaName = SmallFastaName[1:-2] ### remove ">" and "\n"
S1 = string.split(SmallFastaName, "||")
S2 = string.split(S1[0], " ")
PutInFastaName1 = S2[0]
##########################################
##########################################
### [C] END FORMAT MATCH NAME 2nd STEP ###
##########################################
##########################################
SUBSEK = len(Listing3)
NB_SUBSEK = 0
list_inBatch = []
### IF NO SUBMATCH WANTED !!!! => ONLY KEEP THE FIRST HIT OF "LISTING3":
if SUBMATCHEU == 0: # NO SUBMATCH WANTED !!!!
Listing4 = []
Listing4.append(Listing3[-1]) # Remove this line if submatch wanted!!!
elif SUBMATCHEU == 1:
Listing4 = Listing3
for l in Listing4: ## "listing3" contents
NB_SUBSEK = NB_SUBSEK+1
ll1 = string.replace(l[0], " ", "")
ll2 = string.replace(l[1], " ", "")
ll3 = string.replace(l[2], " ", "")
PutInFastaName2 = ll1[:-1] + "||" + ll2[:-1] + "||" + ll3[:-1] # match information
#print PutInFastaName2
seq_query = ""
pos_query = []
seq_match = ""
pos_match = []
for line in l:
if "Query:" in line:
line = string.replace(line, " ", " ") # remove multiple spaces in line
line = string.replace(line, " ", " ")
line = string.replace(line, " ", " ")
lll1 = string.split(line, " ") # split the line, 0: "Query=", 1:start, 2:seq, 3:end
pos1 = lll1[1]
pos1 = string.atoi(pos1)
pos_query.append(pos1)
pos2 = lll1[3][:-1]
pos2 = string.atoi(pos2)
pos_query.append(pos2)
seq = lll1[2]
seq_query = seq_query + seq
if "Sbjct:" in line:
line = string.replace(line, " ", " ") # remove multiple spaces in line
line = string.replace(line, " ", " ")
line = string.replace(line, " ", " ")
lll2 = string.split(line, " ") # split the line, 0: "Query=", 1:start, 2:seq, 3:end
pos1 = lll2[1]
pos1 = string.atoi(pos1)
pos_match.append(pos1)
pos2 = lll2[3][:-1]
pos2 = string.atoi(pos2)
pos_match.append(pos2)
seq = lll2[2]
seq_match = seq_match + seq
## Get the query and matched sequences and the corresponding positions
pos_query.sort() # rank small to big
pos_query_start = pos_query[0] # get the smaller
pos_query_end = pos_query[-1] # get the bigger
PutInFastaName3 = "%d...%d" %(pos_query_start, pos_query_end)
######################################
######################################
### [D] FORMAT QUERY NAME 2nd STEP ###
######################################
######################################
FINAL_fasta_Name_Query = ">" + query + "||"+ PutInFastaName3 + "||[[%d/%d]][[%d/%d]]" %(NB_SEK, SEK, NB_SUBSEK,SUBSEK)
##########################################
##########################################
### [D] END FORMAT QUERY NAME 2nd STEP ###
##########################################
##########################################
pos_match.sort()
pos_match_start = pos_match[0]
pos_match_end = pos_match[-1]
PutInFastaName4 = "%d...%d" %(pos_match_start, pos_match_end)
######################################
######################################
### [E] FORMAT MATCH NAME 3rd STEP ###
######################################
######################################
FINAL_fasta_Name_Match = ">" + PutInFastaName1 + "||" + PutInFastaName4 + "||[[%d/%d]][[%d/%d]]" %(NB_SEK, SEK, NB_SUBSEK,SUBSEK)
#FINAL_fasta_Name_Match = ">" + PutInFastaName1 + "||" + PutInFastaName4 + "||" + PutInFastaName2+ "||[[%d/%d]][[%d/%d]]" %(NB_SEK, SEK, NB_SUBSEK,SUBSEK)
##########################################
##########################################
### [E] END FORMAT MATCH NAME 3rd STEP ###
##########################################
##########################################
Pairwise = [FINAL_fasta_Name_Query , seq_query , FINAL_fasta_Name_Match , seq_match] # list with 4 members
list_Pairwise.append(Pairwise)
### Get informations about matches
list_info = get_information_on_matches(l) ### DEF 2.2. ###
#divergence = list_info[6]
F8.close()
#print "\t[get_sequence CLOSE]"
#print list_Pairwise
return(list_Pairwise, list_info)
#########################################
######################
### 2. RUN RUN RUN ###
######################
import string, os, time, re, sys
WORK_DIR = sys.argv[1]
path_in = "%s/11_outputBlast.txt" %WORK_DIR
file_out = open("%s/13_PairwiseMatch.fasta" %WORK_DIR,"w")
file_out3 = open("%s/13_PairwiseNames.csv" %WORK_DIR, "w")
file_out4 = open("%s/13_PairwiseNames_long_names.csv" %WORK_DIR, "w")
file_log = open("%s/13_ParseBLASToutput_ALL.log" %WORK_DIR, "w")
## create Bash1 ##
bash1 = split_file(path_in, "TBLASTX") ### FUNCTION ###
print bash1.keys()
## detect and save match ##
list_hits =[]
list_no_hits = []
j= 0
k = 0
lene = len(bash1.keys())
for query in bash1.keys():
j = j+1
print "\n\n***************** Nb: %d/%d *********************" %(j,lene)
print query
#print bash1[query]
## 2.1. detect matches ##
#print "\n#######"
print "QUERY = <%s>"%query
list_match, list_match2, hit=detect_Matches(query, MATCH, WORK_DIR) ### FUNCTION ###
#print "TEST"
#print "%s" %list_match
#print "OK"
if hit == 1: # match(es)
list_hits.append(query)
if hit == 0: # no match for that sequence
list_no_hits.append(query)
## 2.2. get sequences ##
if hit ==1:
#print ""
list_pairwiseMatch, list_info = get_sequences(query, list_match2, SUBMATCH, WORK_DIR) ### FUNCTION ###
        # divergence
divergence = list_info[6]
#print "Divergence = %s" %divergence
# gap number
gap_number = list_info[7]
#print "Gap number = %s" %gap_number
# real divergence (divergence without accounting INDELs)
real_divergence = list_info[8]
#print "Real Divergence = %s" %real_divergence
# length matched
length_matched = list_info[10]
#print "Length mathced = %s" %length_matched
### WRITE PAIRWISE ALIGNMENT IN OUTPUT FILES
for pairwise in list_pairwiseMatch:
k = k+1
query_name = pairwise[0]
query_seq = pairwise[1]
match_name = pairwise[2]
match_seq = pairwise[3]
len_query_seq = len(query_seq)
# Lis1 = string.split(query_name, "||")
# List11 = string.split(Lis1[0], " ")
# short_query_name = List11[0]
# Lis2 = string.split(match_name, "||")
# List22 = string.split(Lis2[0], " ")
# short_match_name = List22[0]
# If NO CONTROL FOR LENGTH, USE THE FOLLOWING LINES INSTEAD:
file_out.write("%s||%s||%s||%s||%s" %(query_name,divergence,gap_number,real_divergence,length_matched))
file_out.write("\n")
file_out.write("%s" %query_seq)
file_out.write("\n")
#file_out.write("\n")
file_out.write("%s||%s||%s||%s||%s" %(match_name,divergence,gap_number,real_divergence,length_matched))
file_out.write("\n")
file_out.write("%s" %match_seq)
file_out.write("\n")
#file_out.write("\n")
#file_out2.write(match_name)
#file_out2.write("\n")
#file_out2.write(match_seq)
#file_out2.write("\n")
#file_out2.write("\n")
file_out3.write("%s,%s,%s,%s,%s,%s\n" %(query_name[1:], match_name[1:], divergence,gap_number,real_divergence,length_matched))
file_out4.write("%s,%s,%s,%s,%s,%s\n" %(query_name[1:], match_name[1:], divergence,gap_number,real_divergence,length_matched))
            # [/CONTROL FOR LENGTH]
#file_out.write("")
#print "\n#######\n"
### Write Summary ###
file_log.write("\n\n************************************************\n")
file_log.write("******************* SUMMARY ********************\n")
file_log.write("************************************************\n\n")
file_log.write("\nNumber of sequences matching something = %d\n" % len(list_hits))
for hiti in list_hits:
file_log.write(hiti)
file_log.write("\n")
file_log.write("\nNumber of sequences matching nothing = %d\n" % len(list_no_hits))
for no_hiti in list_no_hits:
file_log.write(no_hiti)
file_log.write("\n")
file_log.write("\n\n************************************************\n\n")
nb_pairwiseMatches = k
file_log.write("Total number of pairwise matches = %d\n" %nb_pairwiseMatches)
file_out.close()
#file_out2.close()
#file_out3.close()
file_log.close()
#file_out4.close()
os.system("rm %s/tmp/*" %WORK_DIR)
|
abice-sbr/adaptsearch
|
12_scriptExtractMatch_v20_BLASTX.py
|
Python
|
gpl-3.0
| 24,382
|
"""Helpers for config validation using voluptuous."""
from datetime import (timedelta, datetime as datetime_sys,
time as time_sys, date as date_sys)
import os
import re
from urllib.parse import urlparse
from socket import _GLOBAL_DEFAULT_TIMEOUT
import logging
import inspect
from typing import Any, Union, TypeVar, Callable, Sequence, Dict
import voluptuous as vol
from homeassistant.const import (
CONF_PLATFORM, CONF_SCAN_INTERVAL, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_ALIAS, CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, WEEKDAYS,
CONF_CONDITION, CONF_BELOW, CONF_ABOVE, CONF_TIMEOUT, SUN_EVENT_SUNSET,
SUN_EVENT_SUNRISE, CONF_UNIT_SYSTEM_IMPERIAL, CONF_UNIT_SYSTEM_METRIC,
ENTITY_MATCH_ALL)
from homeassistant.core import valid_entity_id, split_entity_id
from homeassistant.exceptions import TemplateError
import homeassistant.util.dt as dt_util
from homeassistant.util import slugify as util_slugify
from homeassistant.helpers import template as template_helper
# pylint: disable=invalid-name
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM' or 'HH:MM:SS'"
# Home Assistant types
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
latitude = vol.All(vol.Coerce(float), vol.Range(min=-90, max=90),
msg='invalid latitude')
longitude = vol.All(vol.Coerce(float), vol.Range(min=-180, max=180),
msg='invalid longitude')
gps = vol.ExactSequence([latitude, longitude])
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
port = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
# typing typevar
T = TypeVar('T')
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys: str) -> Callable:
"""Validate that at least one key exists."""
def validate(obj: Dict) -> Dict:
"""Test keys exist in dict."""
if not isinstance(obj, dict):
raise vol.Invalid('expected dictionary')
for k in obj.keys():
if k in keys:
return obj
raise vol.Invalid('must contain one of {}.'.format(', '.join(keys)))
return validate
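# Illustrative use of the validator above (the key names are hypothetical):
#   has_at_least_one_key('days', 'hours')({'days': 1})  -> {'days': 1}
#   has_at_least_one_key('days', 'hours')({})            -> raises vol.Invalid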
def boolean(value: Any) -> bool:
"""Validate and coerce a boolean value."""
if isinstance(value, str):
value = value.lower()
if value in ('1', 'true', 'yes', 'on', 'enable'):
return True
if value in ('0', 'false', 'no', 'off', 'disable'):
return False
raise vol.Invalid('invalid boolean value {}'.format(value))
return bool(value)
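# Examples of the coercion above:
#   boolean('YES') -> True, boolean('off') -> False, boolean(0) -> False
#   boolean('maybe') raises vol.Invalid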
def isdevice(value):
"""Validate that value is a real device."""
try:
os.stat(value)
return str(value)
except OSError:
raise vol.Invalid('No device at {} found'.format(value))
def matches_regex(regex):
"""Validate that the value is a string that matches a regex."""
regex = re.compile(regex)
def validator(value: Any) -> str:
"""Validate that value matches the given regex."""
if not isinstance(value, str):
raise vol.Invalid('not a string value: {}'.format(value))
if not regex.match(value):
raise vol.Invalid('value {} does not match regular expression {}'
.format(value, regex.pattern))
return value
return validator
def is_regex(value):
"""Validate that a string is a valid regular expression."""
try:
r = re.compile(value)
return r
except TypeError:
raise vol.Invalid("value {} is of the wrong type for a regular "
"expression".format(value))
except re.error:
raise vol.Invalid("value {} is not a valid regular expression".format(
value))
def isfile(value: Any) -> str:
"""Validate that the value is an existing file."""
if value is None:
raise vol.Invalid('None is not file')
file_in = os.path.expanduser(str(value))
if not os.path.isfile(file_in):
raise vol.Invalid('not a file')
if not os.access(file_in, os.R_OK):
raise vol.Invalid('file not readable')
return file_in
def isdir(value: Any) -> str:
"""Validate that the value is an existing dir."""
if value is None:
raise vol.Invalid('not a directory')
dir_in = os.path.expanduser(str(value))
if not os.path.isdir(dir_in):
raise vol.Invalid('not a directory')
if not os.access(dir_in, os.R_OK):
raise vol.Invalid('directory not readable')
return dir_in
def ensure_list(value: Union[T, Sequence[T]]) -> Sequence[T]:
"""Wrap value in list if it is not one."""
if value is None:
return []
return value if isinstance(value, list) else [value]
def entity_id(value: Any) -> str:
"""Validate Entity ID."""
value = string(value).lower()
if valid_entity_id(value):
return value
raise vol.Invalid('Entity ID {} is an invalid entity id'.format(value))
def entity_ids(value: Union[str, Sequence]) -> Sequence[str]:
"""Validate Entity IDs."""
if value is None:
raise vol.Invalid('Entity IDs can not be None')
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(',')]
return [entity_id(ent_id) for ent_id in value]
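# Illustrative sketch (not part of the original module): entity_ids accepts
# either a comma-separated string or a list and normalises it to a list of
# validated, lower-cased entity IDs, e.g.
#
#   entity_ids('light.Kitchen, switch.hall')
#   # -> ['light.kitchen', 'switch.hall']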
comp_entity_ids = vol.Any(
vol.All(vol.Lower, ENTITY_MATCH_ALL),
entity_ids
)
def entity_domain(domain: str):
"""Validate that entity belong to domain."""
def validate(value: Any) -> str:
"""Test if entity domain is domain."""
ent_domain = entities_domain(domain)
return ent_domain(value)[0]
return validate
def entities_domain(domain: str):
"""Validate that entities belong to domain."""
def validate(values: Union[str, Sequence]) -> Sequence[str]:
"""Test if entity domain is domain."""
values = entity_ids(values)
for ent_id in values:
if split_entity_id(ent_id)[0] != domain:
raise vol.Invalid(
"Entity ID '{}' does not belong to domain '{}'"
.format(ent_id, domain))
return values
return validate
def enum(enumClass):
"""Create validator for specified enum."""
return vol.All(vol.In(enumClass.__members__), enumClass.__getitem__)
def icon(value):
"""Validate icon."""
value = str(value)
if value.startswith('mdi:'):
return value
raise vol.Invalid('Icons should start with prefix "mdi:"')
time_period_dict = vol.All(
dict, vol.Schema({
'days': vol.Coerce(int),
'hours': vol.Coerce(int),
'minutes': vol.Coerce(int),
'seconds': vol.Coerce(int),
'milliseconds': vol.Coerce(int),
}),
has_at_least_one_key('days', 'hours', 'minutes',
'seconds', 'milliseconds'),
lambda value: timedelta(**value))
def time(value) -> time_sys:
"""Validate and transform a time."""
if isinstance(value, time_sys):
return value
try:
time_val = dt_util.parse_time(value)
except TypeError:
raise vol.Invalid('Not a parseable type')
if time_val is None:
raise vol.Invalid('Invalid time specified: {}'.format(value))
return time_val
def date(value) -> date_sys:
"""Validate and transform a date."""
if isinstance(value, date_sys):
return value
try:
date_val = dt_util.parse_date(value)
except TypeError:
raise vol.Invalid('Not a parseable type')
if date_val is None:
raise vol.Invalid("Could not parse date")
return date_val
def time_period_str(value: str) -> timedelta:
"""Validate and transform time offset."""
if isinstance(value, int):
raise vol.Invalid('Make sure you wrap time values in quotes')
elif not isinstance(value, str):
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
negative_offset = False
if value.startswith('-'):
negative_offset = True
value = value[1:]
elif value.startswith('+'):
value = value[1:]
try:
parsed = [int(x) for x in value.split(':')]
except ValueError:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
if len(parsed) == 2:
hour, minute = parsed
second = 0
elif len(parsed) == 3:
hour, minute, second = parsed
else:
raise vol.Invalid(TIME_PERIOD_ERROR.format(value))
offset = timedelta(hours=hour, minutes=minute, seconds=second)
if negative_offset:
offset *= -1
return offset
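# Illustrative sketch (not part of the original module): time_period_str turns
# signed 'HH:MM' or 'HH:MM:SS' strings into timedeltas, e.g.
#
#   time_period_str('01:30')      # -> timedelta(hours=1, minutes=30)
#   time_period_str('-00:00:30')  # -> timedelta(seconds=-30)
#   time_period_str(90)           # -> raises vol.Invalid (quote the value)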
def time_period_seconds(value: Union[int, str]) -> timedelta:
"""Validate and transform seconds to a time offset."""
try:
return timedelta(seconds=int(value))
except (ValueError, TypeError):
raise vol.Invalid('Expected seconds, got {}'.format(value))
time_period = vol.Any(time_period_str, time_period_seconds, timedelta,
time_period_dict)
def match_all(value):
"""Validate that matches all values."""
return value
def positive_timedelta(value: timedelta) -> timedelta:
"""Validate timedelta is positive."""
if value < timedelta(0):
raise vol.Invalid('Time period should be positive')
return value
def service(value):
"""Validate service."""
# Services use same format as entities so we can use same helper.
if valid_entity_id(value):
return value
raise vol.Invalid('Service {} does not match format <domain>.<name>'
.format(value))
def slug(value):
"""Validate value is a valid slug."""
if value is None:
raise vol.Invalid('Slug should not be None')
value = str(value)
slg = util_slugify(value)
if value == slg:
return value
raise vol.Invalid('invalid slug {} (try {})'.format(value, slg))
def slugify(value):
"""Coerce a value to a slug."""
if value is None:
raise vol.Invalid('Slug should not be None')
slg = util_slugify(str(value))
if slg:
return slg
raise vol.Invalid('Unable to slugify {}'.format(value))
def string(value: Any) -> str:
"""Coerce value to string, except for None."""
if value is None:
raise vol.Invalid('string value is None')
if isinstance(value, (list, dict)):
raise vol.Invalid('value should be a string')
return str(value)
def temperature_unit(value) -> str:
"""Validate and transform temperature unit."""
value = str(value).upper()
if value == 'C':
return TEMP_CELSIUS
if value == 'F':
return TEMP_FAHRENHEIT
raise vol.Invalid('invalid temperature unit (expected C or F)')
unit_system = vol.All(vol.Lower, vol.Any(CONF_UNIT_SYSTEM_METRIC,
CONF_UNIT_SYSTEM_IMPERIAL))
def template(value):
"""Validate a jinja2 template."""
if value is None:
raise vol.Invalid('template value is None')
elif isinstance(value, (list, dict, template_helper.Template)):
raise vol.Invalid('template value should be a string')
value = template_helper.Template(str(value))
try:
value.ensure_valid()
return value
except TemplateError as ex:
raise vol.Invalid('invalid template ({})'.format(ex))
def template_complex(value):
"""Validate a complex jinja2 template."""
if isinstance(value, list):
for idx, element in enumerate(value):
value[idx] = template_complex(element)
return value
if isinstance(value, dict):
for key, element in value.items():
value[key] = template_complex(element)
return value
return template(value)
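# Illustrative sketch (not part of the original module): template_complex
# recurses into lists and dicts and validates each leaf with template(), e.g.
#
#   template_complex({'msg': '{{ 1 + 1 }}', 'items': ['{{ now() }}']})
#   # -> same structure, with each string replaced by a compiled Template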
def datetime(value):
"""Validate datetime."""
if isinstance(value, datetime_sys):
return value
try:
date_val = dt_util.parse_datetime(value)
except TypeError:
date_val = None
if date_val is None:
raise vol.Invalid('Invalid datetime specified: {}'.format(value))
return date_val
def time_zone(value):
"""Validate timezone."""
if dt_util.get_time_zone(value) is not None:
return value
raise vol.Invalid(
'Invalid time zone passed in. Valid options can be found here: '
'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
weekdays = vol.All(ensure_list, [vol.In(WEEKDAYS)])
def socket_timeout(value):
"""Validate timeout float > 0.0.
None coerced to socket._GLOBAL_DEFAULT_TIMEOUT bare object.
"""
if value is None:
return _GLOBAL_DEFAULT_TIMEOUT
try:
float_value = float(value)
if float_value > 0.0:
return float_value
raise vol.Invalid('Invalid socket timeout value.'
' float > 0.0 required.')
except Exception as _:
raise vol.Invalid('Invalid socket timeout: {err}'.format(err=_))
# pylint: disable=no-value-for-parameter
def url(value: Any) -> str:
"""Validate an URL."""
url_in = str(value)
if urlparse(url_in).scheme in ['http', 'https']:
return vol.Schema(vol.Url())(url_in)
raise vol.Invalid('invalid url')
def x10_address(value):
"""Validate an x10 address."""
regex = re.compile(r'([A-Pa-p]{1})(?:[2-9]|1[0-6]?)$')
if not regex.match(value):
raise vol.Invalid('Invalid X10 Address')
return str(value).lower()
def ensure_list_csv(value: Any) -> Sequence:
"""Ensure that input is a list or make one from comma-separated string."""
if isinstance(value, str):
return [member.strip() for member in value.split(',')]
return ensure_list(value)
def deprecated(key):
"""Log key as deprecated."""
module_name = inspect.getmodule(inspect.stack()[1][0]).__name__
def validator(config):
"""Check if key is in config and log warning."""
if key in config:
logging.getLogger(module_name).warning(
"The '%s' option (with value '%s') is deprecated, please "
"remove it from your configuration.", key, config[key])
return config
return validator
# Validator helpers
def key_dependency(key, dependency):
"""Validate that all dependencies exist for key."""
def validator(value):
"""Test dependencies."""
if not isinstance(value, dict):
raise vol.Invalid('key dependencies require a dict')
if key in value and dependency not in value:
raise vol.Invalid('dependency violation - key "{}" requires '
'key "{}" to exist'.format(key, dependency))
return value
return validator
# Schemas
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): string,
vol.Optional(CONF_SCAN_INTERVAL): time_period
}, extra=vol.ALLOW_EXTRA)
EVENT_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required('event'): string,
vol.Optional('event_data'): dict,
vol.Optional('event_data_template'): {match_all: template_complex}
})
SERVICE_SCHEMA = vol.All(vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Exclusive('service', 'service name'): service,
vol.Exclusive('service_template', 'service name'): template,
vol.Optional('data'): dict,
vol.Optional('data_template'): {match_all: template_complex},
vol.Optional(CONF_ENTITY_ID): entity_ids,
}), has_at_least_one_key('service', 'service_template'))
NUMERIC_STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'numeric_state',
vol.Required(CONF_ENTITY_ID): entity_id,
CONF_BELOW: vol.Coerce(float),
CONF_ABOVE: vol.Coerce(float),
vol.Optional(CONF_VALUE_TEMPLATE): template,
}), has_at_least_one_key(CONF_BELOW, CONF_ABOVE))
STATE_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'state',
vol.Required(CONF_ENTITY_ID): entity_id,
vol.Required('state'): str,
vol.Optional('for'): vol.All(time_period, positive_timedelta),
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional('from'): str,
}), key_dependency('for', 'state'))
SUN_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'sun',
vol.Optional('before'): sun_event,
vol.Optional('before_offset'): time_period,
vol.Optional('after'): vol.All(vol.Lower, vol.Any(
SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE)),
vol.Optional('after_offset'): time_period,
}), has_at_least_one_key('before', 'after'))
TEMPLATE_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'template',
vol.Required(CONF_VALUE_TEMPLATE): template,
})
TIME_CONDITION_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_CONDITION): 'time',
'before': time,
'after': time,
'weekday': weekdays,
}), has_at_least_one_key('before', 'after', 'weekday'))
ZONE_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'zone',
vol.Required(CONF_ENTITY_ID): entity_id,
'zone': entity_id,
# To support use_trigger_value in automation
# Deprecated 2016/04/25
vol.Optional('event'): vol.Any('enter', 'leave'),
})
AND_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'and',
vol.Required('conditions'): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
)
})
OR_CONDITION_SCHEMA = vol.Schema({
vol.Required(CONF_CONDITION): 'or',
vol.Required('conditions'): vol.All(
ensure_list,
# pylint: disable=unnecessary-lambda
[lambda value: CONDITION_SCHEMA(value)],
)
})
CONDITION_SCHEMA = vol.Any(
NUMERIC_STATE_CONDITION_SCHEMA,
STATE_CONDITION_SCHEMA,
SUN_CONDITION_SCHEMA,
TEMPLATE_CONDITION_SCHEMA,
TIME_CONDITION_SCHEMA,
ZONE_CONDITION_SCHEMA,
AND_CONDITION_SCHEMA,
OR_CONDITION_SCHEMA,
)
_SCRIPT_DELAY_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required("delay"): vol.Any(
vol.All(time_period, positive_timedelta),
template, template_complex)
})
_SCRIPT_WAIT_TEMPLATE_SCHEMA = vol.Schema({
vol.Optional(CONF_ALIAS): string,
vol.Required("wait_template"): template,
vol.Optional(CONF_TIMEOUT): vol.All(time_period, positive_timedelta),
vol.Optional("continue_on_timeout"): boolean,
})
SCRIPT_SCHEMA = vol.All(
ensure_list,
[vol.Any(SERVICE_SCHEMA, _SCRIPT_DELAY_SCHEMA,
_SCRIPT_WAIT_TEMPLATE_SCHEMA, EVENT_SCHEMA, CONDITION_SCHEMA)],
)
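# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The platform
# name, port value and config dict below are hypothetical; they only show how
# the validators above compose into schemas and are applied to raw config.
if __name__ == '__main__':
    _demo_schema = PLATFORM_SCHEMA.extend({vol.Optional('port'): port})
    print(_demo_schema({CONF_PLATFORM: 'demo',
                        CONF_SCAN_INTERVAL: '00:05',
                        'port': '8123'}))
    # -> {'platform': 'demo', 'scan_interval': timedelta(minutes=5), 'port': 8123}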
|
tinloaf/home-assistant
|
homeassistant/helpers/config_validation.py
|
Python
|
apache-2.0
| 18,532
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from quarry.net.server import ServerFactory, ServerProtocol
from random import randint
import packet as p
import permissions as perms
import randomdata as dats
import commands as cmd
import json
from sys import exit as sysex
from server_core.chat import pushChat, pushChatCall
eobj_byid = {}
anim_i = 0
class Mineserver(ServerProtocol):
plugins = ["Mineserver Core", "TotallyNotAPlugin", "example"]
def packet_login_start(self, buff):
if not options.down:
ServerProtocol.packet_login_start(self, buff)
else:
buff.discard()
self.close(options.downmsg)
def player_joined(self):
ServerProtocol.player_joined(self)
self.ip = self.remote_addr.host
self.eid = dats.getFreeId()
self.fquid = self.username + "[/" + self.ip + "](" + str(self.uuid) + ")"
self.base_scba_split = list("PyMINESERVER")
        self.anim_i = 0
        self.keepalive_miss = 0  # missed keep-alive counter, checked in packet_keep_alive
eobj_byid[self.eid] = self
self.logger.info("UUID of player Dragon5232 is " + str(self.uuid))
p.game(self, self.eid, 1, 0, 1, options.maxplayers, "default", False)
p.spawn_pos(self, 0, 66, 0)
p.abilities(self, True, True, True, True, 0.2, 0.2)
p.pos_look(self, 0, 66, 0, 0, 0, False)
p.rain(self, True)
p.empty_chunk(self, 0, 0)
p.block_change(self, 0, 64, 0, 1)
if self.protocol_version == 47: p.plist_head_foot(self, u"§6P§2yMINESERVER§r", u"§eEnjoy the Test§r")
if self.protocol_version == 47: self.tasks.add_loop(1.0/20, self.anim_frame_scb)
relayPlayerList()
self.logger.info(self.username + "[/" + self.ip + "] logged in with entity id " + str(self.eid) + " at ([nonexistent](0.0, 64.0, 0.0))")
# Schedule 6-second sending of keep-alive packets.
self.tasks.add_loop(6, self.keepalive_send)
self.eobj_byid = eobj_byid
pushChat(self, "\u00A7e" + self.username + " has joined the game\u00A7r", 1)
# Send welcome title and subtitle
p.title(self, options.wtitle)
p.subtitle(self, options.wst)
p.chat_json(self, dats.join_json(self), 1)
def player_left(self):
ServerProtocol.player_left(self)
self.eobj_byid = eobj_byid
pushChatCall(self, "\u00A7e" + self.username + " has left the game\u00A7r", 1, self.destroy)
def keepalive_send(self):
self.last_keepalive = random_digits(randint(4, 9))
p.keep_alive(self, self.last_keepalive)
def packet_keep_alive(self, buff):
        if buff.unpack_varint() == self.last_keepalive:
            self.keepalive_miss = 0  # valid response, reset the miss counter
else:
if self.keepalive_miss < 4:
self.keepalive_miss += 1
else:
buff.discard()
self.logger.info("Kicking player " + self.username + " for not responding to keepalives for 24 seconds.")
self.close("Timed out: did not ping for 24 seconds.")
def packet_chat_message(self, buff):
_atmp = buff.unpack_string()
self.eobj_byid = eobj_byid
cmd.handle(self, _atmp) if _atmp[0] == "/" else pushChat(self, "<" + self.username + "> " + _atmp.replace("\u00A7", ""), 0)
def destroy(self):
eobj_byid[self.eid] = None
def nothing(self):
random = "2random4me"
    def anim_frame_scb(self):
        # Animate the player-list header by highlighting one character per tick.
        self.sstmp = list(self.base_scba_split)  # copy so the base list is not mutated
        if self.anim_i >= len(self.sstmp): self.anim_i = 0
        self.sstmp[self.anim_i] = u"§6" + self.sstmp[self.anim_i] + u"§2"
        p.plist_head_foot(self, u"§2" + u"".join(self.sstmp) + u"§r", u"§eEnjoy the Test§r")
        self.anim_i += 1
class MineFactory(ServerFactory):
protocol = Mineserver
def random_digits(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return randint(range_start, range_end)
def relayPlayerList():
return None
def main(args):
# Parse options
import optparse
parser = optparse.OptionParser(usage="usage: %prog [options]")
parser.add_option("-a", "--host", dest="host", default="", help="address to listen on")
parser.add_option("-p", "--port", dest="port", default="25565", type="int", help="port to listen on")
parser.add_option("-m", "--motd", dest="motd", default="PyMineserver: Test. Hello! Now with joining! §b§l\\o/§r", type="string", help="motd to send to clients")
parser.add_option("-o", "--offline", action="store_false", dest="auth", default=True, help="offline mode does not authenticate players!")
parser.add_option("-k", "--downtime", action="store_true", dest="down", default=False, help="kick players with downtimemsg")
parser.add_option("-q", "--downtimemsg", dest="downmsg", default="Sorry, but this server is currently down for maintenance. Check back soon!", help="message to kick for downtime with")
parser.add_option("-w", "--wtitle", dest="wtitle", default="Welcome to Mineserver!", help="title to display on join")
parser.add_option("-s", "--wsubtitle", dest="wst", default="Enjoy this test server!", help="subtitle to display on join")
parser.add_option("-l", "--max-players", dest="maxplayers", default=20, help="max player count/limit")
parser.add_option("-f", "--favicon", dest="favicon", default="creeper.png", help="relative path to server favicon in png")
global options
(options, args) = parser.parse_args(args)
# Warn about auth mode
if options.auth:
print("Mineserver (warn/INFO)> Mineserver is running in online mode. All players must be authenticated to join. *(ONLINE)*")
else:
print("Mineserver (WARN/info)> Mineserver is running in offline mode. Players can join with fake UUIDs and names without authentication! *[OFFLINE]*!")
# Create factory
factory = MineFactory()
factory.motd = options.motd
factory.online_mode = options.auth
factory.favicon = options.favicon
# Listen
factory.listen(options.host, options.port)
factory.run()
if __name__ == "__main__":
print("Mineserver (warn/CRIT)> You should NOT be invoking this directly! Use mineserver.py.")
sysex(1)
|
Armored-Dragon/pymineserver
|
server_core/server.py
|
Python
|
bsd-2-clause
| 6,160
|
edad = 24
mensaje = "¡Feliz cumpleaños número " + str(edad) + "!"
print(mensaje)
|
byque/programacion_en_python
|
b-variables_y_tipos_simples_de_datos/bf-imprimir_numeros_sin_error.py
|
Python
|
gpl-3.0
| 85
|
#!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_host_record
version_added: "2.5"
author: "Peter Sprygada (@privateip)"
short_description: Configure Infoblox NIOS host records
description:
- Adds and/or removes instances of host record objects from
Infoblox NIOS servers. This module manages NIOS C(record:host) objects
using the Infoblox WAPI interface over REST.
- Updates instances of host record object from Infoblox NIOS servers.
requirements:
- infoblox-client
extends_documentation_fragment: nios
options:
name:
description:
      - Specifies the fully qualified hostname to add or remove from
        the system. The hostname can also be updated by passing a dict
        containing I(new_name) and I(old_name). See examples.
required: true
view:
description:
- Sets the DNS view to associate this host record with. The DNS
view must already be configured on the system
required: true
default: default
aliases:
- dns_view
configure_for_dns:
version_added: "2.7"
description:
      - Determines whether the host record is configured for DNS under its
        parent. Set this value to false to bypass DNS.
type: bool
required: false
default: true
aliases:
- dns
ipv4addrs:
description:
- Configures the IPv4 addresses for this host record. This argument
accepts a list of values (see suboptions)
aliases:
- ipv4
suboptions:
ipv4addr:
description:
- Configures the IPv4 address for the host record
required: true
aliases:
- address
configure_for_dhcp:
description:
          - Configures the host record over DHCP instead of DNS. When set to
            true, a MAC address must also be provided.
required: false
aliases:
- dhcp
mac:
description:
          - Configures the hardware MAC address for the host record. A MAC
            address is required when I(configure_for_dhcp) is set to true.
required: false
aliases:
- mac
ipv6addrs:
description:
      - Configures the IPv6 addresses for the host record. This argument
        accepts a list of values (see suboptions)
aliases:
- ipv6
suboptions:
ipv6addr:
description:
- Configures the IPv6 address for the host record
required: true
aliases:
- address
configure_for_dhcp:
description:
          - Configures the host record over DHCP instead of DNS. When set to
            true, a MAC address must also be provided.
required: false
aliases:
- dhcp
aliases:
version_added: "2.6"
description:
- Configures an optional list of additional aliases to add to the host
record. These are equivalent to CNAMEs but held within a host
record. Must be in list format.
ttl:
description:
- Configures the TTL to be associated with this host record
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: configure an ipv4 host record
nios_host_record:
name: host.ansible.com
ipv4:
- address: 192.168.10.1
aliases:
- cname.ansible.com
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: add a comment to an existing host record
nios_host_record:
name: host.ansible.com
ipv4:
- address: 192.168.10.1
comment: this is a test comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove a host record from the system
nios_host_record:
name: host.ansible.com
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: update an ipv4 host record
nios_host_record:
name: {new_name: host-new.ansible.com, old_name: host.ansible.com}
ipv4:
- address: 192.168.10.1
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: create an ipv4 host record bypassing DNS
nios_host_record:
name: new_host
ipv4:
- address: 192.168.10.1
dns: false
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: create an ipv4 host record over DHCP
nios_host_record:
name: host.ansible.com
ipv4:
- address: 192.168.10.1
dhcp: true
mac: 00-80-C8-E3-4C-BD
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.net_tools.nios.api import NIOS_HOST_RECORD
def ipaddr(module, key, filtered_keys=None):
''' Transforms the input value into a struct supported by WAPI
This function will transform the input from the playbook into a struct
that is valid for WAPI in the form of:
{
ipv4addr: <value>,
mac: <value>
}
This function does not validate the values are properly formatted or in
the acceptable range, that is left to WAPI.
'''
filtered_keys = filtered_keys or list()
objects = list()
for item in module.params[key]:
objects.append(dict([(k, v) for k, v in iteritems(item) if v is not None and k not in filtered_keys]))
return objects
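# Illustrative sketch (not part of the original module): given the playbook
# value ipv4addrs=[{'ipv4addr': '192.168.10.1', 'mac': None, 'address': None}],
# ipaddr(module, 'ipv4addrs', filtered_keys=['address', 'dhcp']) would return
# [{'ipv4addr': '192.168.10.1'}] -- None values and filtered keys are dropped.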
def ipv4addrs(module):
return ipaddr(module, 'ipv4addrs', filtered_keys=['address', 'dhcp'])
def ipv6addrs(module):
return ipaddr(module, 'ipv6addrs', filtered_keys=['address', 'dhcp'])
def main():
''' Main entry point for module execution
'''
ipv4addr_spec = dict(
ipv4addr=dict(required=True, aliases=['address'], ib_req=True),
configure_for_dhcp=dict(type='bool', required=False, aliases=['dhcp'], ib_req=True),
mac=dict(required=False, aliases=['mac'], ib_req=True)
)
ipv6addr_spec = dict(
ipv6addr=dict(required=True, aliases=['address'], ib_req=True),
configure_for_dhcp=dict(type='bool', required=False, aliases=['configure_for_dhcp'], ib_req=True),
mac=dict(required=False, aliases=['mac'], ib_req=True)
)
ib_spec = dict(
name=dict(required=True, ib_req=True),
view=dict(default='default', aliases=['dns_view'], ib_req=True),
ipv4addrs=dict(type='list', aliases=['ipv4'], elements='dict', options=ipv4addr_spec, transform=ipv4addrs),
ipv6addrs=dict(type='list', aliases=['ipv6'], elements='dict', options=ipv6addr_spec, transform=ipv6addrs),
configure_for_dns=dict(type='bool', default=True, required=False, aliases=['dns'], ib_req=True),
aliases=dict(type='list'),
ttl=dict(type='int'),
extattrs=dict(type='dict'),
comment=dict(),
)
argument_spec = dict(
provider=dict(required=True),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(ib_spec)
argument_spec.update(WapiModule.provider_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
wapi = WapiModule(module)
result = wapi.run(NIOS_HOST_RECORD, ib_spec)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
trondhindenes/ansible
|
lib/ansible/modules/net_tools/nios/nios_host_record.py
|
Python
|
gpl-3.0
| 8,805
|
TThostFtdcTraderIDType = "string"
TThostFtdcInvestorIDType = "string"
TThostFtdcBrokerIDType = "string"
TThostFtdcBrokerAbbrType = "string"
TThostFtdcBrokerNameType = "string"
TThostFtdcExchangeInstIDType = "string"
TThostFtdcOrderRefType = "string"
TThostFtdcParticipantIDType = "string"
TThostFtdcUserIDType = "string"
TThostFtdcPasswordType = "string"
TThostFtdcClientIDType = "string"
TThostFtdcInstrumentIDType = "string"
TThostFtdcInstrumentCodeType = "string"
TThostFtdcMarketIDType = "string"
TThostFtdcProductNameType = "string"
TThostFtdcExchangeIDType = "string"
TThostFtdcExchangeNameType = "string"
TThostFtdcExchangeAbbrType = "string"
TThostFtdcExchangeFlagType = "string"
TThostFtdcMacAddressType = "string"
TThostFtdcSystemIDType = "string"
TThostFtdcExchangePropertyType = "char"
TThostFtdcDateType = "string"
TThostFtdcTimeType = "string"
TThostFtdcLongTimeType = "string"
TThostFtdcInstrumentNameType = "string"
TThostFtdcSettlementGroupIDType = "string"
TThostFtdcOrderSysIDType = "string"
TThostFtdcTradeIDType = "string"
TThostFtdcCommandTypeType = "string"
TThostFtdcIPAddressType = "string"
TThostFtdcIPPortType = "int"
TThostFtdcProductInfoType = "string"
TThostFtdcProtocolInfoType = "string"
TThostFtdcBusinessUnitType = "string"
TThostFtdcDepositSeqNoType = "string"
TThostFtdcIdentifiedCardNoType = "string"
TThostFtdcIdCardTypeType = "char"
TThostFtdcOrderLocalIDType = "string"
TThostFtdcUserNameType = "string"
TThostFtdcPartyNameType = "string"
TThostFtdcErrorMsgType = "string"
TThostFtdcFieldNameType = "string"
TThostFtdcFieldContentType = "string"
TThostFtdcSystemNameType = "string"
TThostFtdcContentType = "string"
TThostFtdcInvestorRangeType = "char"
TThostFtdcDepartmentRangeType = "char"
TThostFtdcDataSyncStatusType = "char"
TThostFtdcBrokerDataSyncStatusType = "char"
TThostFtdcExchangeConnectStatusType = "char"
TThostFtdcTraderConnectStatusType = "char"
TThostFtdcFunctionCodeType = "char"
TThostFtdcBrokerFunctionCodeType = "char"
TThostFtdcOrderActionStatusType = "char"
TThostFtdcOrderStatusType = "char"
TThostFtdcOrderSubmitStatusType = "char"
TThostFtdcPositionDateType = "char"
TThostFtdcPositionDateTypeType = "char"
TThostFtdcTradingRoleType = "char"
TThostFtdcProductClassType = "char"
TThostFtdcInstLifePhaseType = "char"
TThostFtdcDirectionType = "char"
TThostFtdcPositionTypeType = "char"
TThostFtdcPosiDirectionType = "char"
TThostFtdcSysSettlementStatusType = "char"
TThostFtdcRatioAttrType = "char"
TThostFtdcHedgeFlagType = "char"
TThostFtdcBillHedgeFlagType = "char"
TThostFtdcClientIDTypeType = "char"
TThostFtdcOrderPriceTypeType = "char"
TThostFtdcOffsetFlagType = "char"
TThostFtdcForceCloseReasonType = "char"
TThostFtdcOrderTypeType = "char"
TThostFtdcTimeConditionType = "char"
TThostFtdcVolumeConditionType = "char"
TThostFtdcContingentConditionType = "char"
TThostFtdcActionFlagType = "char"
TThostFtdcTradingRightType = "char"
TThostFtdcOrderSourceType = "char"
TThostFtdcTradeTypeType = "char"
TThostFtdcPriceSourceType = "char"
TThostFtdcInstrumentStatusType = "char"
TThostFtdcInstStatusEnterReasonType = "char"
TThostFtdcOrderActionRefType = "int"
TThostFtdcInstallCountType = "int"
TThostFtdcInstallIDType = "int"
TThostFtdcErrorIDType = "int"
TThostFtdcSettlementIDType = "int"
TThostFtdcVolumeType = "int"
TThostFtdcFrontIDType = "int"
TThostFtdcSessionIDType = "int"
TThostFtdcSequenceNoType = "int"
TThostFtdcCommandNoType = "int"
TThostFtdcMillisecType = "int"
TThostFtdcVolumeMultipleType = "int"
TThostFtdcTradingSegmentSNType = "int"
TThostFtdcRequestIDType = "int"
TThostFtdcYearType = "int"
TThostFtdcMonthType = "int"
TThostFtdcBoolType = "int"
TThostFtdcPriceType = "double"
TThostFtdcCombOffsetFlagType = "string"
TThostFtdcCombHedgeFlagType = "string"
TThostFtdcRatioType = "double"
TThostFtdcMoneyType = "double"
TThostFtdcLargeVolumeType = "double"
TThostFtdcSequenceSeriesType = "int"
TThostFtdcCommPhaseNoType = "int"
TThostFtdcSequenceLabelType = "string"
TThostFtdcUnderlyingMultipleType = "double"
TThostFtdcPriorityType = "int"
TThostFtdcContractCodeType = "string"
TThostFtdcCityType = "string"
TThostFtdcIsStockType = "string"
TThostFtdcChannelType = "string"
TThostFtdcAddressType = "string"
TThostFtdcZipCodeType = "string"
TThostFtdcTelephoneType = "string"
TThostFtdcFaxType = "string"
TThostFtdcMobileType = "string"
TThostFtdcEMailType = "string"
TThostFtdcMemoType = "string"
TThostFtdcCompanyCodeType = "string"
TThostFtdcWebsiteType = "string"
TThostFtdcTaxNoType = "string"
TThostFtdcBatchStatusType = "char"
TThostFtdcPropertyIDType = "string"
TThostFtdcPropertyNameType = "string"
TThostFtdcLicenseNoType = "string"
TThostFtdcAgentIDType = "string"
TThostFtdcAgentNameType = "string"
TThostFtdcAgentGroupIDType = "string"
TThostFtdcAgentGroupNameType = "string"
TThostFtdcReturnStyleType = "char"
TThostFtdcReturnPatternType = "char"
TThostFtdcReturnLevelType = "char"
TThostFtdcReturnStandardType = "char"
TThostFtdcMortgageTypeType = "char"
TThostFtdcInvestorSettlementParamIDType = "char"
TThostFtdcExchangeSettlementParamIDType = "char"
TThostFtdcSystemParamIDType = "char"
TThostFtdcTradeParamIDType = "char"
TThostFtdcSettlementParamValueType = "string"
TThostFtdcCounterIDType = "string"
TThostFtdcInvestorGroupNameType = "string"
TThostFtdcBrandCodeType = "string"
TThostFtdcWarehouseType = "string"
TThostFtdcProductDateType = "string"
TThostFtdcGradeType = "string"
TThostFtdcClassifyType = "string"
TThostFtdcPositionType = "string"
TThostFtdcYieldlyType = "string"
TThostFtdcWeightType = "string"
TThostFtdcSubEntryFundNoType = "int"
TThostFtdcFileIDType = "char"
TThostFtdcFileNameType = "string"
TThostFtdcFileTypeType = "char"
TThostFtdcFileFormatType = "char"
TThostFtdcFileUploadStatusType = "char"
TThostFtdcTransferDirectionType = "char"
TThostFtdcUploadModeType = "string"
TThostFtdcAccountIDType = "string"
TThostFtdcBankFlagType = "string"
TThostFtdcBankAccountType = "string"
TThostFtdcOpenNameType = "string"
TThostFtdcOpenBankType = "string"
TThostFtdcBankNameType = "string"
TThostFtdcPublishPathType = "string"
TThostFtdcOperatorIDType = "string"
TThostFtdcMonthCountType = "int"
TThostFtdcAdvanceMonthArrayType = "string"
TThostFtdcDateExprType = "string"
TThostFtdcInstrumentIDExprType = "string"
TThostFtdcInstrumentNameExprType = "string"
TThostFtdcSpecialCreateRuleType = "char"
TThostFtdcBasisPriceTypeType = "char"
TThostFtdcProductLifePhaseType = "char"
TThostFtdcDeliveryModeType = "char"
TThostFtdcLogLevelType = "string"
TThostFtdcProcessNameType = "string"
TThostFtdcOperationMemoType = "string"
TThostFtdcFundIOTypeType = "char"
TThostFtdcFundTypeType = "char"
TThostFtdcFundDirectionType = "char"
TThostFtdcFundStatusType = "char"
TThostFtdcBillNoType = "string"
TThostFtdcBillNameType = "string"
TThostFtdcPublishStatusType = "char"
TThostFtdcEnumValueIDType = "string"
TThostFtdcEnumValueTypeType = "string"
TThostFtdcEnumValueLabelType = "string"
TThostFtdcEnumValueResultType = "string"
TThostFtdcSystemStatusType = "char"
TThostFtdcSettlementStatusType = "char"
TThostFtdcRangeIntTypeType = "string"
TThostFtdcRangeIntFromType = "string"
TThostFtdcRangeIntToType = "string"
TThostFtdcFunctionIDType = "string"
TThostFtdcFunctionValueCodeType = "string"
TThostFtdcFunctionNameType = "string"
TThostFtdcRoleIDType = "string"
TThostFtdcRoleNameType = "string"
TThostFtdcDescriptionType = "string"
TThostFtdcCombineIDType = "string"
TThostFtdcCombineTypeType = "string"
TThostFtdcInvestorTypeType = "char"
TThostFtdcBrokerTypeType = "char"
TThostFtdcRiskLevelType = "char"
TThostFtdcFeeAcceptStyleType = "char"
TThostFtdcPasswordTypeType = "char"
TThostFtdcAlgorithmType = "char"
TThostFtdcIncludeCloseProfitType = "char"
TThostFtdcAllWithoutTradeType = "char"
TThostFtdcCommentType = "string"
TThostFtdcVersionType = "string"
TThostFtdcTradeCodeType = "string"
TThostFtdcTradeDateType = "string"
TThostFtdcTradeTimeType = "string"
TThostFtdcTradeSerialType = "string"
TThostFtdcTradeSerialNoType = "int"
TThostFtdcFutureIDType = "string"
TThostFtdcBankIDType = "string"
TThostFtdcBankBrchIDType = "string"
TThostFtdcBankBranchIDType = "string"
TThostFtdcOperNoType = "string"
TThostFtdcDeviceIDType = "string"
TThostFtdcRecordNumType = "string"
TThostFtdcFutureAccountType = "string"
TThostFtdcFuturePwdFlagType = "char"
TThostFtdcTransferTypeType = "char"
TThostFtdcFutureAccPwdType = "string"
TThostFtdcCurrencyCodeType = "string"
TThostFtdcRetCodeType = "string"
TThostFtdcRetInfoType = "string"
TThostFtdcTradeAmtType = "string"
TThostFtdcUseAmtType = "string"
TThostFtdcFetchAmtType = "string"
TThostFtdcTransferValidFlagType = "char"
TThostFtdcCertCodeType = "string"
TThostFtdcReasonType = "char"
TThostFtdcFundProjectIDType = "string"
TThostFtdcSexType = "char"
TThostFtdcProfessionType = "string"
TThostFtdcNationalType = "string"
TThostFtdcProvinceType = "string"
TThostFtdcRegionType = "string"
TThostFtdcCountryType = "string"
TThostFtdcLicenseNOType = "string"
TThostFtdcCompanyTypeType = "string"
TThostFtdcBusinessScopeType = "string"
TThostFtdcCapitalCurrencyType = "string"
TThostFtdcUserTypeType = "char"
TThostFtdcBranchIDType = "string"
TThostFtdcRateTypeType = "char"
TThostFtdcNoteTypeType = "char"
TThostFtdcSettlementStyleType = "char"
TThostFtdcBrokerDNSType = "string"
TThostFtdcSentenceType = "string"
TThostFtdcSettlementBillTypeType = "char"
TThostFtdcUserRightTypeType = "char"
TThostFtdcMarginPriceTypeType = "char"
TThostFtdcBillGenStatusType = "char"
TThostFtdcAlgoTypeType = "char"
TThostFtdcHandlePositionAlgoIDType = "char"
TThostFtdcFindMarginRateAlgoIDType = "char"
TThostFtdcHandleTradingAccountAlgoIDType = "char"
TThostFtdcPersonTypeType = "char"
TThostFtdcQueryInvestorRangeType = "char"
TThostFtdcInvestorRiskStatusType = "char"
TThostFtdcLegIDType = "int"
TThostFtdcLegMultipleType = "int"
TThostFtdcImplyLevelType = "int"
TThostFtdcClearAccountType = "string"
TThostFtdcOrganNOType = "string"
TThostFtdcClearbarchIDType = "string"
TThostFtdcUserEventTypeType = "char"
TThostFtdcUserEventInfoType = "string"
TThostFtdcCloseStyleType = "char"
TThostFtdcStatModeType = "char"
TThostFtdcParkedOrderStatusType = "char"
TThostFtdcParkedOrderIDType = "string"
TThostFtdcParkedOrderActionIDType = "string"
TThostFtdcVirDealStatusType = "char"
TThostFtdcOrgSystemIDType = "char"
TThostFtdcVirTradeStatusType = "char"
TThostFtdcVirBankAccTypeType = "char"
TThostFtdcVirementStatusType = "char"
TThostFtdcVirementAvailAbilityType = "char"
TThostFtdcVirementTradeCodeType = "char"
TThostFtdcPhotoTypeNameType = "string"
TThostFtdcPhotoTypeIDType = "string"
TThostFtdcPhotoNameType = "string"
TThostFtdcTopicIDType = "int"
TThostFtdcReportTypeIDType = "string"
TThostFtdcCharacterIDType = "string"
TThostFtdcAMLParamIDType = "string"
TThostFtdcAMLInvestorTypeType = "string"
TThostFtdcAMLIdCardTypeType = "string"
TThostFtdcAMLTradeDirectType = "string"
TThostFtdcAMLTradeModelType = "string"
TThostFtdcAMLParamIDType = "string"
TThostFtdcAMLOpParamValueType = "double"
TThostFtdcAMLCustomerCardTypeType = "string"
TThostFtdcAMLInstitutionNameType = "string"
TThostFtdcAMLDistrictIDType = "string"
TThostFtdcAMLRelationShipType = "string"
TThostFtdcAMLInstitutionTypeType = "string"
TThostFtdcAMLInstitutionIDType = "string"
TThostFtdcAMLAccountTypeType = "string"
TThostFtdcAMLTradingTypeType = "string"
TThostFtdcAMLTransactClassType = "string"
TThostFtdcAMLCapitalIOType = "string"
TThostFtdcAMLSiteType = "string"
TThostFtdcAMLCapitalPurposeType = "string"
TThostFtdcAMLReportTypeType = "string"
TThostFtdcAMLSerialNoType = "string"
TThostFtdcAMLStatusType = "string"
TThostFtdcAMLGenStatusType = "char"
TThostFtdcAMLSeqCodeType = "string"
TThostFtdcAMLFileNameType = "string"
TThostFtdcAMLMoneyType = "double"
TThostFtdcAMLFileAmountType = "int"
TThostFtdcCFMMCKeyType = "string"
TThostFtdcCFMMCTokenType = "string"
TThostFtdcCFMMCKeyKindType = "char"
TThostFtdcAMLReportNameType = "string"
TThostFtdcIndividualNameType = "string"
TThostFtdcCurrencyIDType = "string"
TThostFtdcCustNumberType = "string"
TThostFtdcOrganCodeType = "string"
TThostFtdcOrganNameType = "string"
TThostFtdcSuperOrganCodeType = "string"
TThostFtdcSubBranchIDType = "string"
TThostFtdcSubBranchNameType = "string"
TThostFtdcBranchNetCodeType = "string"
TThostFtdcBranchNetNameType = "string"
TThostFtdcOrganFlagType = "string"
TThostFtdcBankCodingForFutureType = "string"
TThostFtdcBankReturnCodeType = "string"
TThostFtdcPlateReturnCodeType = "string"
TThostFtdcBankSubBranchIDType = "string"
TThostFtdcFutureBranchIDType = "string"
TThostFtdcReturnCodeType = "string"
TThostFtdcOperatorCodeType = "string"
TThostFtdcClearDepIDType = "string"
TThostFtdcClearBrchIDType = "string"
TThostFtdcClearNameType = "string"
TThostFtdcBankAccountNameType = "string"
TThostFtdcInvDepIDType = "string"
TThostFtdcInvBrchIDType = "string"
TThostFtdcMessageFormatVersionType = "string"
TThostFtdcDigestType = "string"
TThostFtdcAuthenticDataType = "string"
TThostFtdcPasswordKeyType = "string"
TThostFtdcFutureAccountNameType = "string"
TThostFtdcMobilePhoneType = "string"
TThostFtdcFutureMainKeyType = "string"
TThostFtdcFutureWorkKeyType = "string"
TThostFtdcFutureTransKeyType = "string"
TThostFtdcBankMainKeyType = "string"
TThostFtdcBankWorkKeyType = "string"
TThostFtdcBankTransKeyType = "string"
TThostFtdcBankServerDescriptionType = "string"
TThostFtdcAddInfoType = "string"
TThostFtdcDescrInfoForReturnCodeType = "string"
TThostFtdcCountryCodeType = "string"
TThostFtdcSerialType = "int"
TThostFtdcPlateSerialType = "int"
TThostFtdcBankSerialType = "string"
TThostFtdcCorrectSerialType = "int"
TThostFtdcFutureSerialType = "int"
TThostFtdcApplicationIDType = "int"
TThostFtdcBankProxyIDType = "int"
TThostFtdcFBTCoreIDType = "int"
TThostFtdcServerPortType = "int"
TThostFtdcRepealedTimesType = "int"
TThostFtdcRepealTimeIntervalType = "int"
TThostFtdcTotalTimesType = "int"
TThostFtdcFBTRequestIDType = "int"
TThostFtdcTIDType = "int"
TThostFtdcTradeAmountType = "double"
TThostFtdcCustFeeType = "double"
TThostFtdcFutureFeeType = "double"
TThostFtdcSingleMaxAmtType = "double"
TThostFtdcSingleMinAmtType = "double"
TThostFtdcTotalAmtType = "double"
TThostFtdcCertificationTypeType = "char"
TThostFtdcFileBusinessCodeType = "char"
TThostFtdcCashExchangeCodeType = "char"
TThostFtdcYesNoIndicatorType = "char"
TThostFtdcBanlanceTypeType = "char"
TThostFtdcGenderType = "char"
TThostFtdcFeePayFlagType = "char"
TThostFtdcPassWordKeyTypeType = "char"
TThostFtdcFBTPassWordTypeType = "char"
TThostFtdcFBTEncryModeType = "char"
TThostFtdcBankRepealFlagType = "char"
TThostFtdcBrokerRepealFlagType = "char"
TThostFtdcInstitutionTypeType = "char"
TThostFtdcLastFragmentType = "char"
TThostFtdcBankAccStatusType = "char"
TThostFtdcMoneyAccountStatusType = "char"
TThostFtdcManageStatusType = "char"
TThostFtdcSystemTypeType = "char"
TThostFtdcTxnEndFlagType = "char"
TThostFtdcProcessStatusType = "char"
TThostFtdcCustTypeType = "char"
TThostFtdcFBTTransferDirectionType = "char"
TThostFtdcOpenOrDestroyType = "char"
TThostFtdcAvailabilityFlagType = "char"
TThostFtdcOrganTypeType = "char"
TThostFtdcOrganLevelType = "char"
TThostFtdcProtocalIDType = "char"
TThostFtdcConnectModeType = "char"
TThostFtdcSyncModeType = "char"
TThostFtdcBankAccTypeType = "char"
TThostFtdcFutureAccTypeType = "char"
TThostFtdcOrganStatusType = "char"
TThostFtdcCCBFeeModeType = "char"
TThostFtdcCommApiTypeType = "char"
TThostFtdcServiceIDType = "int"
TThostFtdcServiceLineNoType = "int"
TThostFtdcServiceNameType = "string"
TThostFtdcLinkStatusType = "char"
TThostFtdcCommApiPointerType = "int"
TThostFtdcPwdFlagType = "char"
TThostFtdcSecuAccTypeType = "char"
TThostFtdcTransferStatusType = "char"
TThostFtdcSponsorTypeType = "char"
TThostFtdcReqRspTypeType = "char"
TThostFtdcFBTUserEventTypeType = "char"
TThostFtdcBankIDByBankType = "string"
TThostFtdcBankOperNoType = "string"
TThostFtdcBankCustNoType = "string"
TThostFtdcDBOPSeqNoType = "int"
TThostFtdcTableNameType = "string"
TThostFtdcPKNameType = "string"
TThostFtdcPKValueType = "string"
TThostFtdcDBOperationType = "char"
TThostFtdcSyncFlagType = "char"
TThostFtdcTargetIDType = "string"
TThostFtdcSyncTypeType = "char"
TThostFtdcFBETimeType = "string"
TThostFtdcFBEBankNoType = "string"
TThostFtdcFBECertNoType = "string"
TThostFtdcExDirectionType = "char"
TThostFtdcFBEBankAccountType = "string"
TThostFtdcFBEBankAccountNameType = "string"
TThostFtdcFBEAmtType = "double"
TThostFtdcFBEBusinessTypeType = "string"
TThostFtdcFBEPostScriptType = "string"
TThostFtdcFBERemarkType = "string"
TThostFtdcExRateType = "double"
TThostFtdcFBEResultFlagType = "char"
TThostFtdcFBERtnMsgType = "string"
TThostFtdcFBEExtendMsgType = "string"
TThostFtdcFBEBusinessSerialType = "string"
TThostFtdcFBESystemSerialType = "string"
TThostFtdcFBETotalExCntType = "int"
TThostFtdcFBEExchStatusType = "char"
TThostFtdcFBEFileFlagType = "char"
TThostFtdcFBEAlreadyTradeType = "char"
TThostFtdcFBEOpenBankType = "string"
TThostFtdcFBEUserEventTypeType = "char"
TThostFtdcFBEFileNameType = "string"
TThostFtdcFBEBatchSerialType = "string"
TThostFtdcFBEReqFlagType = "char"
TThostFtdcNotifyClassType = "char"
TThostFtdcRiskNofityInfoType = "string"
TThostFtdcForceCloseSceneIdType = "string"
TThostFtdcForceCloseTypeType = "char"
TThostFtdcInstrumentIDsType = "string"
TThostFtdcRiskNotifyMethodType = "char"
TThostFtdcRiskNotifyStatusType = "char"
TThostFtdcRiskUserEventType = "char"
TThostFtdcParamIDType = "int"
TThostFtdcParamNameType = "string"
TThostFtdcParamValueType = "string"
TThostFtdcConditionalOrderSortTypeType = "char"
TThostFtdcSendTypeType = "char"
TThostFtdcClientIDStatusType = "char"
TThostFtdcIndustryIDType = "string"
TThostFtdcQuestionIDType = "string"
TThostFtdcQuestionContentType = "string"
TThostFtdcOptionIDType = "string"
TThostFtdcOptionContentType = "string"
TThostFtdcQuestionTypeType = "char"
TThostFtdcProcessIDType = "string"
TThostFtdcSeqNoType = "int"
TThostFtdcUOAProcessStatusType = "string"
TThostFtdcProcessTypeType = "string"
TThostFtdcBusinessTypeType = "char"
TThostFtdcCfmmcReturnCodeType = "char"
TThostFtdcExReturnCodeType = "int"
TThostFtdcClientTypeType = "char"
TThostFtdcExchangeIDTypeType = "char"
TThostFtdcExClientIDTypeType = "char"
TThostFtdcClientClassifyType = "string"
TThostFtdcUOAOrganTypeType = "string"
TThostFtdcUOACountryCodeType = "string"
TThostFtdcAreaCodeType = "string"
TThostFtdcFuturesIDType = "string"
TThostFtdcCffmcDateType = "string"
TThostFtdcCffmcTimeType = "string"
TThostFtdcNocIDType = "string"
TThostFtdcUpdateFlagType = "char"
TThostFtdcApplyOperateIDType = "char"
TThostFtdcApplyStatusIDType = "char"
TThostFtdcSendMethodType = "char"
TThostFtdcEventTypeType = "string"
TThostFtdcEventModeType = "char"
TThostFtdcUOAAutoSendType = "char"
TThostFtdcQueryDepthType = "int"
TThostFtdcDataCenterIDType = "int"
TThostFtdcFlowIDType = "char"
TThostFtdcCheckLevelType = "char"
TThostFtdcCheckNoType = "int"
TThostFtdcCheckStatusType = "char"
TThostFtdcUsedStatusType = "char"
TThostFtdcRateTemplateNameType = "string"
TThostFtdcPropertyStringType = "string"
TThostFtdcBankAcountOriginType = "char"
TThostFtdcMonthBillTradeSumType = "char"
TThostFtdcFBTTradeCodeEnumType = "char"
TThostFtdcRateTemplateIDType = "string"
TThostFtdcRiskRateType = "string"
TThostFtdcTimestampType = "int"
TThostFtdcInvestorIDRuleNameType = "string"
TThostFtdcInvestorIDRuleExprType = "string"
TThostFtdcLastDriftType = "int"
TThostFtdcLastSuccessType = "int"
TThostFtdcAuthKeyType = "string"
TThostFtdcSerialNumberType = "string"
TThostFtdcOTPTypeType = "char"
TThostFtdcOTPVendorsIDType = "string"
TThostFtdcOTPVendorsNameType = "string"
TThostFtdcOTPStatusType = "char"
TThostFtdcBrokerUserTypeType = "char"
TThostFtdcFutureTypeType = "char"
TThostFtdcFundEventTypeType = "char"
TThostFtdcAccountSourceTypeType = "char"
TThostFtdcCodeSourceTypeType = "char"
TThostFtdcUserRangeType = "char"
TThostFtdcTimeSpanType = "string"
TThostFtdcImportSequenceIDType = "string"
TThostFtdcByGroupType = "char"
TThostFtdcTradeSumStatModeType = "char"
TThostFtdcComTypeType = "int"
TThostFtdcUserProductIDType = "string"
TThostFtdcUserProductNameType = "string"
TThostFtdcUserProductMemoType = "string"
TThostFtdcCSRCCancelFlagType = "string"
TThostFtdcCSRCDateType = "string"
TThostFtdcCSRCInvestorNameType = "string"
TThostFtdcCSRCOpenInvestorNameType = "string"
TThostFtdcCSRCInvestorIDType = "string"
TThostFtdcCSRCIdentifiedCardNoType = "string"
TThostFtdcCSRCClientIDType = "string"
TThostFtdcCSRCBankFlagType = "string"
TThostFtdcCSRCBankAccountType = "string"
TThostFtdcCSRCOpenNameType = "string"
TThostFtdcCSRCMemoType = "string"
TThostFtdcCSRCTimeType = "string"
TThostFtdcCSRCTradeIDType = "string"
TThostFtdcCSRCExchangeInstIDType = "string"
TThostFtdcCSRCMortgageNameType = "string"
TThostFtdcCSRCReasonType = "string"
TThostFtdcIsSettlementType = "string"
TThostFtdcCSRCMoneyType = "double"
TThostFtdcCSRCPriceType = "double"
TThostFtdcCSRCOptionsTypeType = "string"
TThostFtdcCSRCStrikePriceType = "double"
TThostFtdcCSRCTargetProductIDType = "string"
TThostFtdcCSRCTargetInstrIDType = "string"
TThostFtdcCommModelNameType = "string"
TThostFtdcCommModelMemoType = "string"
TThostFtdcExprSetModeType = "char"
TThostFtdcRateInvestorRangeType = "char"
TThostFtdcAgentBrokerIDType = "string"
TThostFtdcDRIdentityIDType = "int"
TThostFtdcDRIdentityNameType = "string"
TThostFtdcDBLinkIDType = "string"
TThostFtdcSyncDataStatusType = "char"
TThostFtdcTradeSourceType = "char"
TThostFtdcFlexStatModeType = "char"
TThostFtdcByInvestorRangeType = "char"
TThostFtdcSRiskRateType = "string"
TThostFtdcSequenceNo12Type = "int"
TThostFtdcPropertyInvestorRangeType = "char"
TThostFtdcFileStatusType = "char"
TThostFtdcFileGenStyleType = "char"
TThostFtdcSysOperModeType = "char"
TThostFtdcSysOperTypeType = "char"
TThostFtdcCSRCDataQueyTypeType = "char"
TThostFtdcFreezeStatusType = "char"
TThostFtdcStandardStatusType = "char"
TThostFtdcCSRCFreezeStatusType = "string"
TThostFtdcRightParamTypeType = "char"
TThostFtdcRightTemplateIDType = "string"
TThostFtdcRightTemplateNameType = "string"
TThostFtdcDataStatusType = "char"
TThostFtdcAMLCheckStatusType = "char"
TThostFtdcAmlDateTypeType = "char"
TThostFtdcAmlCheckLevelType = "char"
TThostFtdcAmlCheckFlowType = "string"
TThostFtdcDataTypeType = "string"
TThostFtdcExportFileTypeType = "char"
TThostFtdcSettleManagerTypeType = "char"
TThostFtdcSettleManagerIDType = "string"
TThostFtdcSettleManagerNameType = "string"
TThostFtdcSettleManagerLevelType = "char"
TThostFtdcSettleManagerGroupType = "char"
TThostFtdcCheckResultMemoType = "string"
TThostFtdcFunctionUrlType = "string"
TThostFtdcAuthInfoType = "string"
TThostFtdcAppIDType = "string"
TThostFtdcAuthCodeType = "string"
TThostFtdcAppTypeType = "char"
TThostFtdcLimitUseTypeType = "char"
TThostFtdcDataResourceType = "char"
TThostFtdcMarginTypeType = "char"
TThostFtdcActiveTypeType = "char"
TThostFtdcMarginRateTypeType = "char"
TThostFtdcBackUpStatusType = "char"
TThostFtdcInitSettlementType = "char"
TThostFtdcReportStatusType = "char"
TThostFtdcSaveStatusType = "char"
TThostFtdcSettArchiveStatusType = "char"
TThostFtdcCTPTypeType = "char"
TThostFtdcToolIDType = "string"
TThostFtdcToolNameType = "string"
TThostFtdcCloseDealTypeType = "char"
TThostFtdcMortgageFundUseRangeType = "char"
TThostFtdcCurrencyUnitType = "double"
TThostFtdcExchangeRateType = "double"
TThostFtdcSpecProductTypeType = "char"
TThostFtdcFundMortgageTypeType = "char"
TThostFtdcAccountSettlementParamIDType = "char"
TThostFtdcCurrencyNameType = "string"
TThostFtdcCurrencySignType = "string"
TThostFtdcFundMortDirectionType = "char"
TThostFtdcBusinessClassType = "char"
TThostFtdcSwapSourceTypeType = "char"
TThostFtdcCurrExDirectionType = "char"
TThostFtdcCurrencySwapStatusType = "char"
TThostFtdcCurrExchCertNoType = "string"
TThostFtdcBatchSerialNoType = "string"
TThostFtdcReqFlagType = "char"
TThostFtdcResFlagType = "char"
TThostFtdcPageControlType = "string"
TThostFtdcRecordCountType = "int"
TThostFtdcCurrencySwapMemoType = "string"
TThostFtdcExStatusType = "char"
TThostFtdcClientRegionType = "char"
TThostFtdcWorkPlaceType = "string"
TThostFtdcBusinessPeriodType = "string"
TThostFtdcWebSiteType = "string"
TThostFtdcUOAIdCardTypeType = "string"
TThostFtdcClientModeType = "string"
TThostFtdcInvestorFullNameType = "string"
TThostFtdcUOABrokerIDType = "string"
TThostFtdcUOAZipCodeType = "string"
TThostFtdcUOAEMailType = "string"
TThostFtdcOldCityType = "string"
TThostFtdcCorporateIdentifiedCardNoType = "string"
TThostFtdcHasBoardType = "char"
TThostFtdcStartModeType = "char"
TThostFtdcTemplateTypeType = "char"
TThostFtdcLoginModeType = "char"
TThostFtdcPromptTypeType = "char"
TThostFtdcLedgerManageIDType = "string"
TThostFtdcInvestVarietyType = "string"
TThostFtdcBankAccountTypeType = "string"
TThostFtdcLedgerManageBankType = "string"
TThostFtdcCffexDepartmentNameType = "string"
TThostFtdcCffexDepartmentCodeType = "string"
TThostFtdcHasTrusteeType = "char"
TThostFtdcCSRCMemo1Type = "string"
TThostFtdcAssetmgrCFullNameType = "string"
TThostFtdcAssetmgrApprovalNOType = "string"
TThostFtdcAssetmgrMgrNameType = "string"
TThostFtdcAmTypeType = "char"
TThostFtdcCSRCAmTypeType = "string"
TThostFtdcCSRCFundIOTypeType = "char"
TThostFtdcCusAccountTypeType = "char"
TThostFtdcCSRCNationalType = "string"
TThostFtdcCSRCSecAgentIDType = "string"
TThostFtdcLanguageTypeType = "char"
TThostFtdcAmAccountType = "string"
TThostFtdcAssetmgrClientTypeType = "char"
TThostFtdcAssetmgrTypeType = "char"
TThostFtdcUOMType = "string"
TThostFtdcSHFEInstLifePhaseType = "string"
TThostFtdcSHFEProductClassType = "string"
TThostFtdcPriceDecimalType = "string"
TThostFtdcInTheMoneyFlagType = "string"
TThostFtdcCheckInstrTypeType = "char"
TThostFtdcDeliveryTypeType = "char"
TThostFtdcBigMoneyType = "double"
TThostFtdcMaxMarginSideAlgorithmType = "char"
TThostFtdcDAClientTypeType = "char"
TThostFtdcCombinInstrIDType = "string"
TThostFtdcCombinSettlePriceType = "string"
TThostFtdcDCEPriorityType = "int"
TThostFtdcTradeGroupIDType = "int"
TThostFtdcIsCheckPrepaType = "int"
TThostFtdcUOAAssetmgrTypeType = "char"
TThostFtdcDirectionEnType = "char"
TThostFtdcOffsetFlagEnType = "char"
TThostFtdcHedgeFlagEnType = "char"
TThostFtdcFundIOTypeEnType = "char"
TThostFtdcFundTypeEnType = "char"
TThostFtdcFundDirectionEnType = "char"
TThostFtdcFundMortDirectionEnType = "char"
TThostFtdcSwapBusinessTypeType = "string"
TThostFtdcOptionsTypeType = "char"
TThostFtdcStrikeModeType = "char"
TThostFtdcStrikeTypeType = "char"
TThostFtdcApplyTypeType = "char"
TThostFtdcGiveUpDataSourceType = "char"
TThostFtdcExecOrderSysIDType = "string"
TThostFtdcExecResultType = "char"
TThostFtdcStrikeSequenceType = "int"
TThostFtdcStrikeTimeType = "string"
TThostFtdcCombinationTypeType = "char"
TThostFtdcOptionRoyaltyPriceTypeType = "char"
TThostFtdcBalanceAlgorithmType = "char"
TThostFtdcActionTypeType = "char"
TThostFtdcForQuoteStatusType = "char"
TThostFtdcValueMethodType = "char"
TThostFtdcExecOrderPositionFlagType = "char"
TThostFtdcExecOrderCloseFlagType = "char"
TThostFtdcProductTypeType = "char"
TThostFtdcCZCEUploadFileNameType = "char"
TThostFtdcDCEUploadFileNameType = "char"
TThostFtdcSHFEUploadFileNameType = "char"
TThostFtdcCFFEXUploadFileNameType = "char"
TThostFtdcCombDirectionType = "char"
TThostFtdcStrikeOffsetTypeType = "char"
TThostFtdcReserveOpenAccStasType = "char"
TThostFtdcLoginRemarkType = "string"
TThostFtdcInvestUnitIDType = "string"
TThostFtdcBulletinIDType = "int"
TThostFtdcNewsTypeType = "string"
TThostFtdcNewsUrgencyType = "char"
TThostFtdcAbstractType = "string"
TThostFtdcComeFromType = "string"
TThostFtdcURLLinkType = "string"
TThostFtdcLongIndividualNameType = "string"
TThostFtdcLongFBEBankAccountNameType = "string"
TThostFtdcDateTimeType = "string"
TThostFtdcWeakPasswordSourceType = "char"
TThostFtdcRandomStringType = "string"
TThostFtdcOptSelfCloseFlagType = "char"
TThostFtdcBizTypeType = "char"
|
bigdig/vnpy
|
vnpy/api/nh/generator/futures/nh_typedef.py
|
Python
|
mit
| 27,858
|
import praw
import re
from re import findall
def deletemsg():
r = praw.Reddit(user_agent="Samachar Bot for /r/india by /u/sallurocks")
r.login("samacharbot2", "prawisverygood")
unread = r.get_unread(limit=None)
print "here"
for msg in unread:
print msg.body
if msg.body.lower() == 'delete':
try:
print "found one"
idd = msg.id
idd = 't1_' + idd
print idd
comment = r.get_info(thing_id=idd)
parentid = comment.parent_id
print parentid
comment_parent = r.get_info(thing_id=parentid)
sublink = comment_parent.link_id
author1 = r.get_info(thing_id=sublink)
print author1.author
print msg.author.name
if (str(msg.author.name) == str(author1.author)):
comment_parent.delete()
print "deletedd"
msg.mark_as_read()
else:
msg.mark_as_read()
continue
except Exception as e:
print "5Unknown ERROR"
print type(e)
print e.args
print e
print "\n"
# continue
msg.mark_as_read()
continue
if __name__ == '__main__':
deletemsg()
|
SalmaanP/samacharbot2
|
WIP/deletemsg.py
|
Python
|
gpl-3.0
| 1,429
|
#from plfit import discrete_likelihood
from plfit import plfit
from plfit import test_fitter,pl_inv,plexp_inv,plexp_cdf, plexp_pdf, plfit_lsq
from plfit_py import plfit as plfit_py
|
giserh/plfit
|
plfit/__init__.py
|
Python
|
mit
| 181
|
#!/usr/bin/env python
"""
distutils setup (setup.py).
This is just boilerplate code, since we do like to try to keep data separate
from code as much as possible. The customizable information really comes
from file __pkginfo__.py.
"""
import os, sys
if not ((2, 4) <= sys.version_info[0:2] < (3, 0)):
mess = "Only Python Versions 2.4 to 2.7 are supported in this package."
if (3, 2) <= sys.version_info[0:2] < (3, 7):
mess += "\nFor your Python, version %s, see trepan3k" % sys.version[0:3]
elif sys.version_info[0:2] < (2, 6):
mess += "\nFor your Python, version %s, see pydbgr" % sys.version[0:3]
raise Exception(mess)
elif ((2, 4) <= sys.version_info[0:2] < (2, 6)) and not os.path.exists(
"gitbranch-master"
):
raise Exception("You have the wrong code or git branch for Python 2.4, 2.5")
# Get the package information used in setup().
from __pkginfo__ import (
author,
author_email,
classifiers,
entry_points,
install_requires,
license,
long_description,
modname,
packages,
py_modules,
short_desc,
__version__,
web,
zip_safe,
)
__import__("pkg_resources")
from setuptools import setup
setup(
author=author,
author_email=author_email,
classifiers=classifiers,
data_files=[
(
"trepan/processor/command/help",
[
"trepan/processor/command/help/arange.rst",
"trepan/processor/command/help/command.rst",
"trepan/processor/command/help/examples.rst",
"trepan/processor/command/help/filename.rst",
"trepan/processor/command/help/location.rst",
"trepan/processor/command/help/range.rst",
"trepan/processor/command/help/suffixes.rst",
],
)
],
description=short_desc,
entry_points=entry_points,
install_requires=install_requires,
license=license,
long_description=long_description,
py_modules=py_modules,
name=modname,
packages=packages,
test_suite="nose.collector",
url=web,
version=__version__,
zip_safe=zip_safe,
)
|
rocky/python2-trepan
|
setup.py
|
Python
|
gpl-3.0
| 2,146
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
import os
import json
import time
import logging
import signal
import pytest
from PyQt5.QtCore import QFileSystemWatcher
from qutebrowser.commands import userscripts
from qutebrowser.utils import utils
@pytest.mark.posix
class TestQtFIFOReader:
@pytest.fixture
def reader(self, tmpdir, qapp):
fifo_path = str(tmpdir / 'fifo')
os.mkfifo(fifo_path) # pylint: disable=no-member,useless-suppression
reader = userscripts._QtFIFOReader(fifo_path)
yield reader
if reader._notifier.isEnabled():
reader.cleanup()
def test_single_line(self, reader, qtbot):
"""Test QSocketNotifier with a single line of data."""
with qtbot.waitSignal(reader.got_line) as blocker:
with open(reader._filepath, 'w', encoding='utf-8') as f:
f.write('foobar\n')
assert blocker.args == ['foobar']
def test_cleanup(self, reader):
assert not reader._fifo.closed
reader.cleanup()
assert reader._fifo.closed
@pytest.fixture(params=[
userscripts._POSIXUserscriptRunner,
userscripts._WindowsUserscriptRunner,
])
def runner(request, runtime_tmpdir):
if (not utils.is_posix and
request.param is userscripts._POSIXUserscriptRunner):
pytest.skip("Requires a POSIX os")
raise utils.Unreachable
return request.param()
def test_command(qtbot, py_proc, runner):
cmd, args = py_proc(r"""
import os
with open(os.environ['QUTE_FIFO'], 'w') as f:
f.write('foo\n')
""")
with qtbot.waitSignal(runner.finished, timeout=10000):
with qtbot.waitSignal(runner.got_cmd, timeout=10000) as blocker:
runner.prepare_run(cmd, *args)
runner.store_html('')
runner.store_text('')
assert blocker.args == ['foo']
def test_custom_env(qtbot, monkeypatch, py_proc, runner):
monkeypatch.setenv('QUTEBROWSER_TEST_1', '1')
env = {'QUTEBROWSER_TEST_2': '2'}
cmd, args = py_proc(r"""
import os
import json
env = dict(os.environ)
with open(os.environ['QUTE_FIFO'], 'w') as f:
json.dump(env, f)
f.write('\n')
""")
with qtbot.waitSignal(runner.finished, timeout=10000):
with qtbot.waitSignal(runner.got_cmd, timeout=10000) as blocker:
runner.prepare_run(cmd, *args, env=env)
runner.store_html('')
runner.store_text('')
data = blocker.args[0]
ret_env = json.loads(data)
assert 'QUTEBROWSER_TEST_1' in ret_env
assert 'QUTEBROWSER_TEST_2' in ret_env
def test_source(qtbot, py_proc, runner):
"""Make sure the page source is read and cleaned up correctly."""
cmd, args = py_proc(r"""
import os
import json
data = {
'html_file': os.environ['QUTE_HTML'],
'text_file': os.environ['QUTE_TEXT'],
}
with open(os.environ['QUTE_HTML'], 'r') as f:
data['html'] = f.read()
with open(os.environ['QUTE_TEXT'], 'r') as f:
data['text'] = f.read()
with open(os.environ['QUTE_FIFO'], 'w') as f:
json.dump(data, f)
f.write('\n')
""")
with qtbot.waitSignal(runner.finished, timeout=10000):
with qtbot.waitSignal(runner.got_cmd, timeout=10000) as blocker:
runner.prepare_run(cmd, *args)
runner.store_html('This is HTML')
runner.store_text('This is text')
data = blocker.args[0]
parsed = json.loads(data)
assert parsed['text'] == 'This is text'
assert parsed['html'] == 'This is HTML'
assert not os.path.exists(parsed['text_file'])
assert not os.path.exists(parsed['html_file'])
def test_command_with_error(qtbot, py_proc, runner, caplog):
cmd, args = py_proc(r"""
import sys, os, json
with open(os.environ['QUTE_FIFO'], 'w') as f:
json.dump(os.environ['QUTE_TEXT'], f)
f.write('\n')
sys.exit(1)
""")
with caplog.at_level(logging.ERROR):
with qtbot.waitSignal(runner.finished, timeout=10000):
with qtbot.waitSignal(runner.got_cmd, timeout=10000) as blocker:
runner.prepare_run(cmd, *args)
runner.store_text('Hello World')
runner.store_html('')
data = json.loads(blocker.args[0])
assert not os.path.exists(data)
def test_killed_command(qtbot, tmpdir, py_proc, runner, caplog):
data_file = tmpdir / 'data'
watcher = QFileSystemWatcher()
watcher.addPath(str(tmpdir))
cmd, args = py_proc(r"""
import os
import time
import sys
import json
data = {
'pid': os.getpid(),
'text_file': os.environ['QUTE_TEXT'],
}
# We can't use QUTE_FIFO to transmit the PID because that wouldn't work
# on Windows, where QUTE_FIFO is only monitored after the script has
# exited.
with open(sys.argv[1], 'w') as f:
json.dump(data, f)
time.sleep(30)
""")
args.append(str(data_file))
with qtbot.waitSignal(watcher.directoryChanged, timeout=10000):
runner.prepare_run(cmd, *args)
runner.store_text('Hello World')
runner.store_html('')
# Make sure the PID was written to the file, not just the file created
time.sleep(0.5)
data = json.load(data_file)
with caplog.at_level(logging.ERROR):
with qtbot.waitSignal(runner.finished):
os.kill(int(data['pid']), signal.SIGTERM)
assert not os.path.exists(data['text_file'])
def test_temporary_files_failed_cleanup(caplog, qtbot, py_proc, runner):
"""Delete a temporary file from the script so cleanup fails."""
cmd, args = py_proc(r"""
import os
os.remove(os.environ['QUTE_HTML'])
""")
with caplog.at_level(logging.ERROR):
with qtbot.waitSignal(runner.finished, timeout=10000):
runner.prepare_run(cmd, *args)
runner.store_text('')
runner.store_html('')
assert len(caplog.records) == 1
expected = "Failed to delete tempfile"
assert caplog.messages[0].startswith(expected)
def test_unicode_error(caplog, qtbot, py_proc, runner):
cmd, args = py_proc(r"""
import os
with open(os.environ['QUTE_FIFO'], 'wb') as f:
f.write(b'\x80')
""")
with caplog.at_level(logging.ERROR):
with qtbot.waitSignal(runner.finished, timeout=10000):
runner.prepare_run(cmd, *args)
runner.store_text('')
runner.store_html('')
assert len(caplog.records) == 1
expected = "Invalid unicode in userscript output: "
assert caplog.messages[0].startswith(expected)
@pytest.mark.fake_os('unknown')
def test_unsupported(tabbed_browser_stubs):
with pytest.raises(userscripts.UnsupportedError, match="Userscripts are "
"not supported on this platform!"):
userscripts.run_async(tab=None, cmd=None, win_id=0, env=None)
|
fiete201/qutebrowser
|
tests/unit/commands/test_userscripts.py
|
Python
|
gpl-3.0
| 7,833
|
import textwrap
def parse_script_args(args):
"""
Separate the command line arguments into arguments for pip
and arguments to Python.
>>> parse_script_args(['foo', '--', 'bar'])
(['foo'], ['bar'])
>>> parse_script_args(['foo', 'bar'])
(['foo', 'bar'], [])
"""
try:
pivot = args.index('--')
except ValueError:
pivot = len(args)
return args[:pivot], args[pivot + 1:]
help_doc = textwrap.dedent("""
Usage:
Arguments to pip-run prior to `--` are used to specify the requirements
to make available, just as arguments to pip install. For example,
pip-run -r requirements.txt "requests>=2.0"
That will launch python after installing the deps in requirements.txt
and also a recent version of requests. Packages are always installed to a temporary
location and cleaned up when the process exits.
Arguments after `--` are passed to the Python interpreter. So to launch
`script.py`:
pip-run -- script.py
If the `--` is omitted or nothing is passed, the python interpreter
will be launched in interactive mode:
pip-run
>>>
For more examples and details, see https://pypi.org/project/pip-run.
""").lstrip()
def intercept(args):
"""
Detect certain args and intercept them.
"""
if '--help' in args or '-h' in args:
print(help_doc)
raise SystemExit(0)
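# Illustrative sketch, not part of pip-run itself: how a caller might wire
# intercept() and parse_script_args() together. The __main__ block is an
# assumption made for the example; the real console entry point lives
# elsewhere in the package.
if __name__ == '__main__':
    import sys
    cli_args = sys.argv[1:]
    intercept(cli_args)  # prints the help text and exits on -h/--help
    pip_args, py_args = parse_script_args(cli_args)
    print('arguments for pip:', pip_args)
    print('arguments for the interpreter:', py_args)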
|
jaraco/rwt
|
pip_run/commands.py
|
Python
|
mit
| 1,282
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibCheck task
This task will check the records against a defined set of rules.
"""
import sys
import getopt
import os
import traceback
import time
import inspect
import itertools
import collections
import functools
from collections import namedtuple
from ConfigParser import RawConfigParser
from datetime import datetime
from tempfile import mkstemp
from invenio.bibtask import \
task_init, \
task_set_option, \
task_has_option, \
task_get_option, write_message, \
task_update_progress, \
task_sleep_now_if_required, \
get_modified_records_since, \
task_low_level_submission, \
task_get_task_param
from invenio.config import \
CFG_VERSION, \
CFG_ETCDIR, \
CFG_PYLIBDIR, \
CFG_SITE_URL, \
CFG_TMPSHAREDDIR, \
CFG_CERN_SITE, \
CFG_SITE_RECORD
from invenio.search_engine import \
perform_request_search, \
search_unit_in_bibxxx, \
search_pattern
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_xml_output, record_add_field
from invenio.pluginutils import PluginContainer
from invenio.intbitset import intbitset
from invenio.dbquery import run_sql
from invenio.bibcatalog import BIBCATALOG_SYSTEM
from invenio.shellutils import split_cli_ids_arg
from invenio.jsonutils import json
from invenio.websearch_webcoll import get_cache_last_updated_timestamp
CFG_BATCH_SIZE = 1000
class RulesParseError(Exception):
""" An exception indicating an error in the rules definition """
def __init__(self, rule_name, error):
Exception.__init__(self, "Invalid rule '%s': %s." % (rule_name,
error))
class Tickets(object):
"""Handle ticket accumulation and dispatching."""
def __init__(self, records):
self.records = records
self.policy_method = None
self.ticket_creation_policy = \
task_get_option('ticket_creation_policy', 'per-record')
def resolve_ticket_creation_policy(self):
"""Resolve the policy for creating tickets."""
known_policies = ('per-rule',
'per-record',
'per-rule-per-record',
'no-tickets')
if self.ticket_creation_policy not in known_policies:
raise Exception("Invalid ticket_creation_policy in config '{0}'".
format(self.ticket_creation_policy))
if task_get_option('no_tickets', False):
self.ticket_creation_policy = 'no-tickets'
policy_translator = {
'per-rule': self.tickets_per_rule,
'per-record': self.tickets_per_record,
'per-rule-per-record': self.tickets_per_rule_per_record
}
self.policy_method = policy_translator[self.ticket_creation_policy]
@staticmethod
def submit_ticket(msg_subject, msg, record_id):
"""Submit a single ticket."""
if isinstance(msg, unicode):
msg = msg.encode("utf-8")
submit = functools.partial(BIBCATALOG_SYSTEM.ticket_submit,
subject=msg_subject, text=msg,
queue=task_get_option("queue", "Bibcheck"))
if record_id is not None:
submit = functools.partial(submit, recordid=record_id)
res = submit()
write_message("Bibcatalog returned %s" % res)
if res > 0:
BIBCATALOG_SYSTEM.ticket_comment(None, res, msg)
def submit(self):
"""Generate and submit tickets for the bibcatalog system."""
self.resolve_ticket_creation_policy()
for ticket_information in self.policy_method():
self.submit_ticket(*ticket_information)
def _generate_subject(self, issue_type, record_id, rule_name):
"""Generate a fitting subject based on what information is given."""
assert any((i is not None for i in (issue_type, record_id, rule_name)))
return "[BibCheck{issue_type}]{record_id}{rule_name}".format(
issue_type=":" + issue_type if issue_type else "",
record_id=" [ID:" + record_id + "]" if self.ticket_creation_policy
in ("per-record", "per-rule-per-record") else "",
rule_name=" [Rule:" + rule_name + "]" if self.ticket_creation_policy
in ("per-rule", "per-rule-per-record") else "")
@staticmethod
def _get_url(record):
"""Resolve the URL required to edit a record."""
return "%s/%s/%s/edit" % (CFG_SITE_URL, CFG_SITE_RECORD,
record.record_id)
def tickets_per_rule(self):
"""Generate with the `per-rule` policy."""
output = collections.defaultdict(list)
for record in self.records:
for issue in record.issues:
output[issue.rule].append((record, issue.nature, issue.msg))
for rule_name in output.iterkeys():
msg = []
for record, issue_nature, issue_msg in output[rule_name]:
msg.append("{issue_nature}: {issue_msg}".format(
issue_nature=issue_nature, issue_msg=issue_msg))
msg.append("Edit record ({record_id}) {url}\n".format(
record_id=record.record_id, url=self._get_url(record)))
msg_subject = self._generate_subject(None, None, rule_name)
yield (msg_subject, "\n".join(msg), None)
def tickets_per_record(self):
"""Generate with the `per-record` policy."""
output = collections.defaultdict(list)
for record in self.records:
for issue in record.issues:
output[record].append((issue.nature, issue.msg))
for record in output.iterkeys():
msg = []
for issue in output[record]:
issue_nature, issue_msg = issue
msg.append("{issue_type}: {rule_messages}".
format(record_id=record.record_id,
issue_type=issue_nature,
rule_messages=issue_msg))
msg.append("Edit record: {url}".format(url=self._get_url(record)))
msg_subject = self._generate_subject(None, record.record_id, None)
yield (msg_subject, "\n".join(msg), record.record_id)
def tickets_per_rule_per_record(self):
"""Generate with the `per-rule-per-record` policy."""
output = collections.defaultdict(list)
for record in self.records:
for issue in record.issues:
output[(issue.rule, record)].append((issue.nature, issue.msg))
for issue_rule, record in output.iterkeys():
msg = []
for issue_nature, issue_msg in output[(issue_rule, record)]:
msg.append("{issue_message}".format(issue_message=issue_msg))
msg.append("Edit record ({record_id}): {url}".format(url=self._get_url(record),
record_id=record.record_id))
msg_subject = self._generate_subject(issue_nature, record.record_id,
issue_rule)
yield (msg_subject, "\n".join(msg), record.record_id)
class Issue(object):
"""Holds information about a single record issue."""
def __init__(self, nature, rule, msg):
self._nature = None
self.nature = nature
self.rule = rule
self.msg = msg
@property
def nature(self):
return self._nature
@nature.setter
def nature(self, value):
assert value in ('error', 'amendment', 'warning')
self._nature = value
class AmendableRecord(dict):
""" Class that wraps a record (recstruct) to pass to a plugin """
def __init__(self, record):
dict.__init__(self, record)
self.issues = []
self.valid = True
self.amended = False
self.holdingpen = False
self.rule = None
self.record_id = self["001"][0][3]
@property
def _errors(self):
return [i for i in self.issues if i.nature == 'error']
@property
def _amendments(self):
return [i for i in self.issues if i.nature == 'amendment']
@property
def _warnings(self):
return [i for i in self.issues if i.nature == 'warning']
def iterfields(self, fields, subfield_filter=(None, None)):
"""
Iterates over marc tags that match a marc expression.
This function accepts a list of marc tags (a 6 character string
containing a 3 character tag, two 1 character indicators and a 1
character subfield code) and yields tuples of marc tags
(without wildcards) and the field value. Optionally filters for subfield
values.
Examples:
record.iterfields(["%%%%%%", "%%%%%_"])
--> Iterator of all the field and subfield values.
('_' is for control fields that have no codes)
record.iterfields(["100__a"])
--> The author of record
record.iterfields(["%%%%%u"])
--> All "u" subfields
:param fields: marc tags (accepts wildcards)
:type fields: list of str
:param subfield_filter: filter for a specific subfield
:type subfield_filter: (str, str)
:yields: (position, field_value)
`position` is (tag, localpos, fieldpos) if filter was disabled, or
(tag, localpos, fieldpos, filterpos) if filter was enabled
"""
for field in fields:
for res in self.iterfield(field, subfield_filter=subfield_filter):
yield res
def iterfield(self, field, subfield_filter=(None, None)):
"""Like iterfields for a single field."""
assert len(field) == 6
field = field.replace("_", " ")
ind1, ind2, code = field[3:]
assert len(subfield_filter) == 2
SubfieldFilter = namedtuple('SubfieldFilter', ['code', 'value'])
subfield_filter = SubfieldFilter(*subfield_filter)
filter_enabled = subfield_filter.code is not None
def filter_passes(subfield_code, result):
return subfield_filter.code in ('%', subfield_code) and \
subfield_filter.value == result
for tag in self.itertags(field[:3]):
for (local_position, field_obj) in enumerate(self[tag]):
if ind1 in ('%', field_obj[1]) and ind2 in ('%', field_obj[2]):
field_name = tag + field_obj[1] + field_obj[2]
field_name = field_name.replace(' ', '_')
if code == " " and field_obj[3]:
position = field_name + "_", local_position, None
value = field_obj[3]
yield position, value
else:
# `code` is code from `field`
# `subfield_code` is from `field_obj` (storage)
if filter_enabled:
for subfield_position, subfield_tuple in enumerate(field_obj[0]):
subfield_code, value = subfield_tuple
filter_position = None # Until challenged
if filter_passes(subfield_code, value):
filter_position = subfield_position
break
if not filter_enabled or filter_position is not None:
for subfield_position, subfield_tuple in enumerate(field_obj[0]):
subfield_code, value = subfield_tuple
if code in ("%", subfield_code):
position = field_name + subfield_code, local_position, \
subfield_position
if filter_enabled:
position = position + (filter_position,)
yield position, value
def _query(self, position):
""" Return a position """
tag = position[0].replace("_", " ")
res = self[tag[0:3]]
if position[1] is not None:
res = res[position[1]]
assert res[1] == tag[3] and res[2] == tag[4] # Check indicators
if position[2] is not None:
res = res[0][position[2]]
assert res[0] == tag[5]
return res
def _queryval(self, position):
""" Like _query() but return the value """
if position[2] is None:
return self._query(position)[3]
else:
return self._query(position)[1]
def amend_field(self, position, new_value, message=""):
"""
Changes the value of the field in the specified position to new value
and marks the record as amended.
Optional message to explain what was changed.
"""
tag, localpos, subfieldpos = position
tag = tag.replace("_", " ")
try:
old_value = self._queryval(position)
if new_value != old_value:
if position[2] is None:
fields = self[tag[0:3]]
fields[localpos] = fields[localpos][0:3] + (new_value,)
else:
self._query(position[:2] + (None,))[0][subfieldpos] = (tag[5], new_value)
if message == '':
message = u"Changed field %s from '%s' to '%s'" % (position[0],
old_value.decode('utf-8'), new_value.decode('utf-8'))
self.set_amended(message)
except Exception as err:
self.set_invalid("Error when trying to amend the record at position %s: %s. Maybe there is an empty subfield code?" % (position, err))
def delete_field(self, position, message=""):
"""
Delete a field or subfield. Returns the deleted field or subfield
"""
if message == "":
message = u"Deleted field %s" % (position[0])
self.set_amended(message)
if position[2] is None:
return self._query(position[:1] + (None, None)).pop(position[1])
else:
return self._query(position[:2] + (None,))[0].pop(position[2])
def add_field(self, tag, value, subfields=None):
""" Add a field """
tag = tag.replace("_", " ")
record_add_field(self, tag[:3], tag[3], tag[4], value, subfields)
self.set_amended("Added field %s" % tag)
def add_subfield(self, position, code, value):
""" Add a subfield to the field in the specified field """
self._query(position[:2] + (None,))[0].append((code, value))
self.set_amended("Added subfield %s='%s' to field %s" % (code, value,
position[0][:5]))
def set_amended(self, message):
""" Mark the record as amended """
write_message("Amended record %s by rule %s: %s" %
(self.record_id, self.rule["name"], message))
self.issues.append(Issue('amendment', self.rule['name'], message))
self.amended = True
if self.rule["holdingpen"]:
self.holdingpen = True
def set_invalid(self, reason):
""" Mark the record as invalid """
url = "{site}/{record}/{record_id}".format(site=CFG_SITE_URL,
record=CFG_SITE_RECORD,
record_id=self.record_id)
write_message("Record {url} marked as invalid by rule {name}: {reason}".
format(url=url, name=self.rule["name"], reason=reason))
self.issues.append(Issue('error', self.rule['name'], reason))
self.valid = False
def warn(self, msg):
""" Add a warning to the record """
self.issues.append(Issue('warning', self.rule['name'], msg))
write_message("[WARN] record %s by rule %s: %s" %
(self.record_id, self.rule["name"], msg))
def set_rule(self, rule):
""" Set the current rule the record is been checked against """
self.rule = rule
def itertags(self, tag):
"""
Yields the tags of the record that match the given tag pattern
@param tag: tag with wildcards
@yields tags without wildcards
"""
if "%" in tag:
for key in self.iterkeys():
if ((tag[0] in ("%", key[0])) and
(tag[1] in ("%", key[1])) and
(tag[2] in ("%", key[2]))):
yield key
else:
if tag in self:
yield tag
def is_dummy(self):
return len(list(self.iterfield("001%%_"))) == 1 and \
len(self.keys()) == 1
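# Illustrative sketch, not part of the BibCheck sources: how a checker might
# drive AmendableRecord.iterfields() and amend_field(). The "100__a" tag and
# the upper-casing rule are invented for the example; real checkers live in
# bibcheck plugin modules, not in this file.
def _example_uppercase_first_author(record):
    """Uppercase every 100__a value of an AmendableRecord (demo only).
    Assumes set_rule() has already been called on the record."""
    for position, value in record.iterfields(["100__a"]):
        if value != value.upper():
            record.amend_field(position, value.upper(),
                               "Uppercased author name (example rule)")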
def task_parse_options(key, val, *_):
""" Must be defined for bibtask to create a task """
if key in ("--all", "-a"):
task_set_option("reset_rules", set(val.split(",")))
elif key in ("--enable-rules", "-e"):
task_set_option("enabled_rules", set(val.split(",")))
elif key in ("--id", "-i"):
task_set_option("record_ids", intbitset(split_cli_ids_arg(val)))
elif key in ("--queue", "-q"):
task_set_option("queue", val)
elif key in ("--no-tickets", "-t"):
task_set_option("no_tickets", True)
elif key in ("--ticket-creation-policy", "-p"):
task_set_option("ticket_creation_policy", val)
elif key in ("--no-upload", "-b"):
task_set_option("no_upload", True)
elif key in ("--dry-run", "-n"):
task_set_option("no_upload", True)
task_set_option("no_tickets", True)
elif key in ("--config", "-c"):
task_set_option("config", val)
elif key in ("--notimechange", ):
task_set_option("notimechange", True)
else:
raise StandardError("Error: Unrecognised argument '%s'." % key)
return True
def task_run_core():
"""
Main daemon task.
Returns True when run successfully. False otherwise.
"""
rules_to_reset = task_get_option("reset_rules")
if rules_to_reset:
write_message("Resetting the following rules: %s" % rules_to_reset)
for rule in rules_to_reset:
reset_rule_last_run(rule)
plugins = load_plugins()
rules = load_rules(plugins)
write_message("Loaded rules: %s" % rules, verbose=9)
task_set_option('plugins', plugins)
recids_for_rules = get_recids_for_rules(rules)
write_message("recids for rules: %s" % recids_for_rules, verbose=9)
update_database = not (task_has_option('record_ids') or
task_get_option('no_upload', False) or
task_get_option('no_tickets', False))
if update_database:
next_starting_dates = {}
for rule_name, rule in rules.iteritems():
next_starting_dates[rule_name] = get_next_starting_date(rule)
all_recids = intbitset([])
single_rules = set()
batch_rules = set()
for rule_name, rule_recids in recids_for_rules.iteritems():
all_recids.union_update(rule_recids)
if plugins[rules[rule_name]["check"]]["batch"]:
batch_rules.add(rule_name)
else:
single_rules.add(rule_name)
records_to_upload_holdingpen = []
records_to_upload_replace = []
records_to_submit_tickets = []
for batch in iter_batches(all_recids, CFG_BATCH_SIZE):
for rule_name in batch_rules:
rule = rules[rule_name]
rule_recids = recids_for_rules[rule_name]
task_sleep_now_if_required(can_stop_too=True)
records = []
for i, record_id, record in batch:
if record_id in rule_recids:
records.append(record)
if len(records):
check_records(rule, records)
# Then run them through normal rules
for i, record_id, record in batch:
progress_percent = int(float(i) / len(all_recids) * 100)
task_update_progress("Processing record %s/%s (%i%%)." %
(i, len(all_recids), progress_percent))
write_message("Processing record %s" % record_id)
for rule_name in single_rules:
rule = rules[rule_name]
rule_recids = recids_for_rules[rule_name]
task_sleep_now_if_required(can_stop_too=True)
if record_id in rule_recids:
check_record(rule, record)
if record.amended:
if record.holdingpen:
records_to_upload_holdingpen.append(record)
else:
records_to_upload_replace.append(record)
if not record.valid:
records_to_submit_tickets.append(record)
if len(records_to_submit_tickets) >= CFG_BATCH_SIZE:
Tickets(records_to_submit_tickets).submit()
records_to_submit_tickets = []
if len(records_to_upload_holdingpen) >= CFG_BATCH_SIZE:
upload_amendments(records_to_upload_holdingpen, True)
records_to_upload_holdingpen = []
if len(records_to_upload_replace) >= CFG_BATCH_SIZE:
upload_amendments(records_to_upload_replace, False)
records_to_upload_replace = []
## In case there are still some remaining amended records
if records_to_submit_tickets:
Tickets(records_to_submit_tickets).submit()
if records_to_upload_holdingpen:
upload_amendments(records_to_upload_holdingpen, True)
if records_to_upload_replace:
upload_amendments(records_to_upload_replace, False)
# Update the database with the last time each rule was ran
if update_database:
for rule_name, rule in rules.iteritems():
update_rule_last_run(rule_name, next_starting_dates[rule_name])
return True
def upload_amendments(records, holdingpen):
""" Upload a modified record """
if task_get_option("no_upload", False) or len(records) == 0:
return
xml = '<collection xmlns="http://www.loc.gov/MARC21/slim">'
for record in records:
xml += record_xml_output(record)
xml += "</collection>"
tmp_file_fd, tmp_file = mkstemp(
suffix='.xml',
prefix="bibcheckfile_%s" % time.strftime("%Y-%m-%d_%H:%M:%S"),
dir=CFG_TMPSHAREDDIR
)
os.write(tmp_file_fd, xml)
os.close(tmp_file_fd)
os.chmod(tmp_file, 0644)
if holdingpen:
flag = "-o"
else:
flag = "-r"
if task_get_option("notimechange"):
task = task_low_level_submission('bibupload', 'bibcheck', flag,
tmp_file, "--notimechange")
else:
task = task_low_level_submission('bibupload', 'bibcheck', flag,
tmp_file)
write_message("Submitted bibupload task %s" % task)
def check_record(rule, record):
"""
Check a record against a rule
"""
plugins = task_get_option("plugins")
record.set_rule(rule)
plugin = plugins[rule["check"]]
if not record.is_dummy():
return plugin["check_record"](record, **rule["checker_params"])
def check_records(rule, records):
"""
Check a set of records against a batch rule
"""
plugins = task_get_option("plugins")
for record in records:
record.set_rule(rule)
plugin = plugins[rule["check"]]
return plugin["check_records"](records, **rule["checker_params"])
def get_rule_lastrun(rule_name):
"""
Get the last time a rule was run, or the oldest representable datetime
if the rule was never run.
"""
res = run_sql("SELECT last_run FROM bibcheck_rules WHERE name=%s;",
(rule_name,))
if len(res) == 0 or res[0][0] is None:
return datetime(1900, 1, 1)
else:
return res[0][0]
def get_next_starting_date(rule):
"""Calculate the date the next bibcheck run should consider as initial.
If no filter has been specified, the starting date is the time the task was
started. Otherwise, it is set to the earliest of the last webcoll run, the
oldest bibindex last_updated value and the task starting time, to prevent
records that have yet to be categorized from being perpetually ignored.
"""
def dt(t):
return datetime.strptime(t, "%Y-%m-%d %H:%M:%S")
# Upper limit
task_starting_time = dt(task_get_task_param('task_starting_time'))
for key, val in rule.iteritems():
if key.startswith("filter_") and val:
break
else:
return task_starting_time
# Lower limit
min_last_updated = run_sql("select min(last_updated) from idxINDEX")[0][0]
cache_last_updated = dt(get_cache_last_updated_timestamp())
return min(min_last_updated, task_starting_time, cache_last_updated)
def update_rule_last_run(rule_name, next_starting_date):
"""
Set the last time a rule was run.
This function should be called after a rule has been run.
"""
next_starting_date_str = datetime.strftime(next_starting_date,
"%Y-%m-%d %H:%M:%S")
run_sql("""INSERT INTO bibcheck_rules(name, last_run) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE last_run=%s""",
(rule_name, next_starting_date_str, next_starting_date_str))
def reset_rule_last_run(rule_name):
"""
Reset the last time a rule was run. This will cause the rule to be
run on all matching records (not only modified ones)
"""
run_sql("DELETE FROM bibcheck_rules WHERE name=%s", (rule_name,))
def load_plugins():
"""
Will load all the plugins found under the bibcheck_plugins folder.
Returns a list of plugin objects.
"""
plugin_dir = os.path.join(CFG_PYLIBDIR, "invenio/bibcheck_plugins/*.py")
# Load plugins
plugins = PluginContainer(plugin_dir,
plugin_builder=_bibcheck_plugin_builder)
# Check for broken plug-ins
broken = plugins.get_broken_plugins()
if broken:
for plugin, info in broken.items():
print "Failed to load %s:\n" % plugin
print "".join(traceback.format_exception(*info))
enabled = plugins.get_enabled_plugins()
enabled.pop("__init__", None)
return enabled
def load_rule(config, plugins, rule_name):
"""Read rule 'rule_name' from the config file """
checker_params = {}
rule = {
"checker_params": checker_params,
"holdingpen": False,
"name": rule_name
}
def encode(obj):
""" Encode a decoded json object strings """
if isinstance(obj, dict):
return dict([(encode(key), encode(value)) for key, value in
obj.iteritems()])
elif isinstance(obj, list):
return [encode(element) for element in obj]
elif isinstance(obj, unicode):
return obj.encode('utf-8')
else:
return obj
def parse_arg(argument_str, arg_name):
try:
return encode(json.loads(argument_str))
except ValueError:
raise RulesParseError(rule_name, "Invalid value in argument '%s'" %
arg_name)
for key, val in config.items(rule_name):
if key in ("filter_pattern",
"filter_field",
"filter_collection",
"filter_limit"):
rule[key] = val
elif key in ("holdingpen",
"consider_deleted_records"):
rule[key] = val.lower() in ("true", "1", "yes", "on")
elif key == "check":
rule["check"] = val
if val not in plugins:
raise RulesParseError(rule_name, "Invalid checker '%s'" % val)
elif key.startswith("check."):
checker_params[key[len("check."):]] = parse_arg(val, key)
else:
raise RulesParseError(rule_name, "Invalid rule option '%s'" % key)
if "check" not in rule:
raise RulesParseError(rule_name, "Doesn't have a checker")
plugin = plugins[rule["check"]]
if not plugin["mandatory_args"].issubset(checker_params.keys()):
raise RulesParseError(rule_name, "Plugin mandatory argument not specified")
if not plugin["all_args"].issuperset(checker_params.keys()):
raise RulesParseError(rule_name, "Unknown plugin argument")
return rule
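# Illustrative sketch, not part of the BibCheck sources: a minimal in-memory
# rules configuration that load_rule() accepts. The rule name "demo_rule",
# the checker name "demo_checker" and its "check.force" parameter are
# assumptions made for the example; real rules live in rules.cfg.
def _example_rule_config():
    config = RawConfigParser()
    config.add_section("demo_rule")
    config.set("demo_rule", "check", "demo_checker")         # plugin to run (mandatory)
    config.set("demo_rule", "filter_pattern", "demo query")  # optional search filter
    config.set("demo_rule", "holdingpen", "true")            # route amendments to the holding pen
    config.set("demo_rule", "check.force", "true")           # parsed as JSON into checker_params["force"]
    return config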
def load_rules(plugins):
"""
Load the rules and return a dict with the rules
"""
config = task_get_option("config", "rules.cfg")
filename = os.path.join(CFG_ETCDIR, "bibcheck/", config)
config = RawConfigParser()
config.readfp(open(filename))
rules = {}
rule_names = config.sections()
enabled = task_get_option("enabled_rules", None)
if enabled is not None:
rule_names = enabled.intersection(rule_names)
for rule_name in rule_names:
try:
rules[rule_name] = load_rule(config, plugins, rule_name)
except RulesParseError, ex:
print ex
write_message(ex)
return rules
def get_recids_for_rules(rules):
"""
Generates the final list of record IDs to load.
@param rules dict of rules {rule_name: rule_dict}
@type rules: dict of rules
@return dict {rule_name: array of record IDs}
"""
override_record_ids = task_get_option("record_ids")
recids = {}
for rule_name, rule in rules.iteritems():
if "filter_pattern" in rule:
query = rule["filter_pattern"]
if "filter_collection" in rule:
collections = rule["filter_collection"].split()
else:
collections = None
write_message("Performing given search query: '%s'" % query)
if collections:
result = perform_request_search(
p=query,
of='intbitset',
wl=rule.get('filter_limit', 0),
f=rule.get('filter_field', None),
c=collections
)
else:
result = search_pattern(
p=query,
wl=rule.get('filter_limit', 0),
f=rule.get('filter_field', None),
)
else:
result = intbitset(trailing_bits=True)
if override_record_ids is not None:
result.intersection_update(override_record_ids)
else:
last_run = get_rule_lastrun(rule_name)
modified_recids = get_modified_records_since(last_run)
if not "consider_deleted_records" in rule:
modified_recids -= search_unit_in_bibxxx(p='DELETED', f='980__%', type='e')
if CFG_CERN_SITE:
modified_recids -= search_unit_in_bibxxx(p='DUMMY', f='980__%', type='e')
result.intersection_update(modified_recids)
recids[rule_name] = result
return recids
def iter_batches(records, batch_size):
"""
Like enumerate_records(), but yields batches of records of size
batch_size instead of records
"""
iterator = enumerate_records(records)
while True:
batch = list(itertools.islice(iterator, batch_size))
if len(batch) > 0:
yield batch
else:
return
def enumerate_records(records):
"""
Given an array of record IDs this function will yield a
triplet of the count (starting from 0), the record ID and
the record object.
@param records: iterable of record IDs
@type records: iterable of int
@yield: tuple (count, recordId, record structure (dict))
"""
for i, recid in enumerate(records):
record = get_bibrecord(int(recid))
if not record:
write_message("Error: could not load record '%s'." % (recid,))
continue
yield i, int(recid), AmendableRecord(record)
def _bibcheck_plugin_builder(plugin_name, plugin_code):
"""
Custom builder for pluginutils.
@param plugin_name: the name of the plugin.
@type plugin_name: string
@param plugin_code: the code of the module as just read from
filesystem.
@type plugin_code: module
@return: the plugin
"""
if plugin_name == "__init__":
return
plugin = {}
plugin["check_record"] = getattr(plugin_code, "check_record", None)
plugin["check_records"] = getattr(plugin_code, "check_records", None)
plugin["name"] = plugin_name
if (plugin["check_record"] is None) == (plugin["check_records"] is None):
raise Exception("Plugin doesn't implement one check_record method")
plugin["batch"] = plugin["check_records"] is not None
argspec = inspect.getargspec(plugin["check_record"] or plugin["check_records"])
args, defaults = argspec[0], argspec[3]
if len(args) == 0:
msg = "Plugin %s: check_record must accept at least one argument"
raise Exception(msg % plugin_name)
mandatory_args = args[1:len(args)-len(defaults or [])]
plugin["mandatory_args"] = set(mandatory_args)
plugin["all_args"] = set(args[1:])
return plugin
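# Illustrative sketch, not an actual BibCheck plugin: the function shape that
# _bibcheck_plugin_builder() expects. In a real plugin module the function
# would be named check_record (or check_records for batch plugins); arguments
# after the record without defaults become mandatory check.* parameters and
# the ones with defaults become optional. The field-filling logic is invented
# for the example.
def _example_plugin_check_record(record, field, replacement=""):
    """Fill empty values of `field` with `replacement` (demo only)."""
    for position, value in record.iterfield(field):
        if not value.strip():
            record.amend_field(position, replacement,
                               "Filled empty %s (example rule)" % field)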
def print_rules():
"""Prints the valid rules to stdout"""
plugins = load_plugins()
for rule_name, rule in load_rules(plugins).items():
print "Rule %s:" % rule_name
if "filter_pattern" in rule:
print " - Filter: %s" % rule["filter_pattern"]
if "filter_collection" in rule:
print " - Filter collection: %s" % rule["filter_collection"]
print " - Checker: %s" % rule["check"]
if len(rule["checker_params"]) > 0:
print " Parameters:"
for param, val in rule["checker_params"].items():
print " %s = %s" % (param, json.dumps(val))
print
def print_plugins():
"""Prints the enabled plugins to stdout"""
all_plugins = load_plugins()
print "Enabled plugins:"
for plugin in all_plugins.values():
print " -%s" % plugin["name"]
optional_args = plugin["all_args"].difference(plugin["mandatory_args"])
if len(plugin["mandatory_args"]):
print " Mandatory args: ", ", ".join(plugin["mandatory_args"])
if len(optional_args):
print " Optional args: ", ", ".join(optional_args)
print
def main():
"""Constructs the BibCheck bibtask."""
usage = """
Scheduled (daemon) options:
-l, --list-plugins List all plugins and exit
-r, --list-rules List all rules and exit
-e, --enable-rules=rules Enable only some rules (comma separated)
-a, --all=rules Run the specified rules in all matching records (not
only modified ones)
-i, --id=ids Run only in the specified record ids or ranges (comma
separated), ignoring all other filters
-q, --queue=queue Create tickets in the specified RT Queue (Default
Bibcheck)
-t, --no-tickets Don't create any ticket in RT. Useful for debugging
-b, --no-upload Don't upload changes to the database
-n, --dry-run Like --no-tickets and --no-upload
-c, --config By default bibcheck reads the file rules.cfg. This
allows specifying a different config file
--notimechange schedules bibuploads with the option --notimechange
(useful not to trigger reindexing)
If any of the options --id, --no-tickets, --no-upload or --dry-run is enabled,
bibcheck won't update the last-run-time of a task in the database.
Examples:
(run a periodical daemon job that checks the rules from rules.cfg)
bibcheck -s1d
(Run bibcheck on records 1, 2, 3, 5, 6, 7, 8, 9 and 10)
bibcheck -i 1,2,3,5-10
(Run only the rule foobar in all the records)
bibcheck -a foobar -e foobar
(Run only the rules foo and bar on modified records)
bibcheck -e foo,bar
"""
try:
opts = getopt.getopt(sys.argv[1:], "lr",
["list-plugins", "list-rules"])[0]
except getopt.GetoptError:
opts = []
for opt, dummy in opts:
if opt in ["-l", "--list-plugins"]:
print_plugins()
return
elif opt in ["-r", "--list-rules"]:
print_rules()
return
# Build and submit the task
task_init(authorization_action='runbibcheck',
authorization_msg="BibCheck Task Submission",
description="",
help_specific_usage=usage,
version="Invenio v%s" % CFG_VERSION,
specific_params=("hvtbnV:e:a:i:q:c:p:", ["help", "version",
"verbose=", "enable-rules=", "all=", "id=", "queue=",
"no-tickets", "no-upload", "dry-run", "config",
"notimechange", "ticket-creation-policy="]),
task_submit_elaborate_specific_parameter_fnc=task_parse_options,
task_run_fnc=task_run_core)
|
CERNDocumentServer/invenio
|
modules/bibcheck/lib/bibcheck_task.py
|
Python
|
gpl-2.0
| 37,836
|
from django.conf import settings
from django.utils.text import slugify
from django.test import Client, override_settings
from django.urls import reverse
from evennia.utils import class_from_module
from evennia.utils.test_resources import EvenniaTest
class EvenniaWebTest(EvenniaTest):
# Use the same classes the views are expecting
account_typeclass = settings.BASE_ACCOUNT_TYPECLASS
object_typeclass = settings.BASE_OBJECT_TYPECLASS
character_typeclass = settings.BASE_CHARACTER_TYPECLASS
exit_typeclass = settings.BASE_EXIT_TYPECLASS
room_typeclass = settings.BASE_ROOM_TYPECLASS
script_typeclass = settings.BASE_SCRIPT_TYPECLASS
channel_typeclass = settings.BASE_CHANNEL_TYPECLASS
# Default named url
url_name = "index"
# Response to expect for unauthenticated requests
unauthenticated_response = 200
# Response to expect for authenticated requests
authenticated_response = 200
def setUp(self):
super(EvenniaWebTest, self).setUp()
# Add chars to account rosters
self.account.db._playable_characters = [self.char1]
self.account2.db._playable_characters = [self.char2]
for account in (self.account, self.account2):
# Demote accounts to Player permissions
account.permissions.add("Player")
account.permissions.remove("Developer")
# Grant permissions to chars
for char in account.db._playable_characters:
char.locks.add("edit:id(%s) or perm(Admin)" % account.pk)
char.locks.add("delete:id(%s) or perm(Admin)" % account.pk)
char.locks.add("view:all()")
def test_valid_chars(self):
"Make sure account has playable characters"
self.assertTrue(self.char1 in self.account.db._playable_characters)
self.assertTrue(self.char2 in self.account2.db._playable_characters)
def get_kwargs(self):
return {}
def test_get(self):
# Try accessing page while not logged in
response = self.client.get(reverse(self.url_name, kwargs=self.get_kwargs()))
self.assertEqual(response.status_code, self.unauthenticated_response)
def login(self):
return self.client.login(username="TestAccount", password="testpassword")
def test_get_authenticated(self):
logged_in = self.login()
self.assertTrue(logged_in, "Account failed to log in!")
# Try accessing page while logged in
response = self.client.get(reverse(self.url_name, kwargs=self.get_kwargs()), follow=True)
self.assertEqual(response.status_code, self.authenticated_response)
# ------------------------------------------------------------------------------
class AdminTest(EvenniaWebTest):
url_name = "django_admin"
unauthenticated_response = 302
class IndexTest(EvenniaWebTest):
url_name = "index"
class RegisterTest(EvenniaWebTest):
url_name = "register"
class LoginTest(EvenniaWebTest):
url_name = "login"
class LogoutTest(EvenniaWebTest):
url_name = "logout"
class PasswordResetTest(EvenniaWebTest):
url_name = "password_change"
unauthenticated_response = 302
class WebclientTest(EvenniaWebTest):
url_name = "webclient:index"
@override_settings(WEBCLIENT_ENABLED=True)
def test_get(self):
self.authenticated_response = 200
self.unauthenticated_response = 200
super(WebclientTest, self).test_get()
@override_settings(WEBCLIENT_ENABLED=False)
def test_get_disabled(self):
self.authenticated_response = 404
self.unauthenticated_response = 404
super(WebclientTest, self).test_get()
class ChannelListTest(EvenniaWebTest):
url_name = "channels"
class ChannelDetailTest(EvenniaWebTest):
url_name = "channel-detail"
def setUp(self):
super(ChannelDetailTest, self).setUp()
klass = class_from_module(self.channel_typeclass,
fallback=settings.FALLBACK_CHANNEL_TYPECLASS)
# Create a channel
klass.create("demo")
def get_kwargs(self):
return {"slug": slugify("demo")}
class CharacterCreateView(EvenniaWebTest):
url_name = "character-create"
unauthenticated_response = 302
@override_settings(MULTISESSION_MODE=0)
def test_valid_access_multisession_0(self):
"Account1 with no characters should be able to create a new one"
self.account.db._playable_characters = []
# Login account
self.login()
# Post data for a new character
data = {"db_key": "gannon", "desc": "Some dude."}
response = self.client.post(reverse(self.url_name), data=data, follow=True)
self.assertEqual(response.status_code, 200)
# Make sure the character was actually created
self.assertTrue(
len(self.account.db._playable_characters) == 1,
"Account only has the following characters attributed to it: %s"
% self.account.db._playable_characters,
)
@override_settings(MULTISESSION_MODE=2)
@override_settings(MAX_NR_CHARACTERS=10)
def test_valid_access_multisession_2(self):
"Account1 should be able to create a new character"
# Login account
self.login()
# Post data for a new character
data = {"db_key": "gannon", "desc": "Some dude."}
response = self.client.post(reverse(self.url_name), data=data, follow=True)
self.assertEqual(response.status_code, 200)
# Make sure the character was actually created
self.assertTrue(
len(self.account.db._playable_characters) > 1,
"Account only has the following characters attributed to it: %s"
% self.account.db._playable_characters,
)
class CharacterPuppetView(EvenniaWebTest):
url_name = "character-puppet"
unauthenticated_response = 302
def get_kwargs(self):
return {"pk": self.char1.pk, "slug": slugify(self.char1.name)}
def test_invalid_access(self):
"Account1 should not be able to puppet Account2:Char2"
# Login account
self.login()
# Try to access puppet page for char2
kwargs = {"pk": self.char2.pk, "slug": slugify(self.char2.name)}
response = self.client.get(reverse(self.url_name, kwargs=kwargs), follow=True)
self.assertTrue(
response.status_code >= 400,
"Invalid access should return a 4xx code-- either obj not found or permission denied! (Returned %s)"
% response.status_code,
)
class CharacterListView(EvenniaWebTest):
url_name = "characters"
unauthenticated_response = 302
class CharacterManageView(EvenniaWebTest):
url_name = "character-manage"
unauthenticated_response = 302
class CharacterUpdateView(EvenniaWebTest):
url_name = "character-update"
unauthenticated_response = 302
def get_kwargs(self):
return {"pk": self.char1.pk, "slug": slugify(self.char1.name)}
def test_valid_access(self):
"Account1 should be able to update Account1:Char1"
# Login account
self.login()
# Try to access update page for char1
response = self.client.get(reverse(self.url_name, kwargs=self.get_kwargs()), follow=True)
self.assertEqual(response.status_code, 200)
# Try to update char1 desc
data = {"db_key": self.char1.db_key, "desc": "Just a regular type of dude."}
response = self.client.post(
reverse(self.url_name, kwargs=self.get_kwargs()), data=data, follow=True
)
self.assertEqual(response.status_code, 200)
# Make sure the change was made successfully
self.assertEqual(self.char1.db.desc, data["desc"])
def test_invalid_access(self):
"Account1 should not be able to update Account2:Char2"
# Login account
self.login()
# Try to access update page for char2
kwargs = {"pk": self.char2.pk, "slug": slugify(self.char2.name)}
response = self.client.get(reverse(self.url_name, kwargs=kwargs), follow=True)
self.assertEqual(response.status_code, 403)
class CharacterDeleteView(EvenniaWebTest):
url_name = "character-delete"
unauthenticated_response = 302
def get_kwargs(self):
return {"pk": self.char1.pk, "slug": slugify(self.char1.name)}
def test_valid_access(self):
"Account1 should be able to delete Account1:Char1"
# Login account
self.login()
# Try to access delete page for char1
response = self.client.get(reverse(self.url_name, kwargs=self.get_kwargs()), follow=True)
self.assertEqual(response.status_code, 200)
# Proceed with deleting it
data = {"value": "yes"}
response = self.client.post(
reverse(self.url_name, kwargs=self.get_kwargs()), data=data, follow=True
)
self.assertEqual(response.status_code, 200)
# Make sure it deleted
self.assertFalse(
self.char1 in self.account.db._playable_characters,
"Char1 is still in Account playable characters list.",
)
def test_invalid_access(self):
"Account1 should not be able to delete Account2:Char2"
# Login account
self.login()
# Try to access delete page for char2
kwargs = {"pk": self.char2.pk, "slug": slugify(self.char2.name)}
response = self.client.get(reverse(self.url_name, kwargs=kwargs), follow=True)
self.assertEqual(response.status_code, 403)
|
jamesbeebop/evennia
|
evennia/web/website/tests.py
|
Python
|
bsd-3-clause
| 9,576
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-09-14 11:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0028_uniqueify_rfid'),
]
operations = [
migrations.AlterField(
model_name='onlineuser',
name='field_of_study',
field=models.SmallIntegerField(choices=[(0, 'Gjest'), (1, 'Bachelor i Informatikk'), (10, 'Programvaresystemer'), (11, 'Databaser og søk'), (12, 'Algoritmer og datamaskiner'), (13, 'Spillteknologi'), (14, 'Kunstig intelligens'), (15, 'Helseinformatikk'), (16, 'Interaksjonsdesign, spill- og læringsteknologi'), (30, 'Annen mastergrad'), (40, 'Sosialt medlem'), (80, 'PhD'), (90, 'International'), (100, 'Annet Onlinemedlem')], default=0, verbose_name='studieretning'),
),
migrations.AlterField(
model_name='position',
name='committee',
field=models.CharField(choices=[('hs', 'Hovedstyret'), ('appkom', 'Applikasjonskomiteen'), ('arrkom', 'Arrangementskomiteen'), ('bankom', 'Bank- og økonomikomiteen'), ('bedkom', 'Bedriftskomiteen'), ('dotkom', 'Drifts- og utviklingskomiteen'), ('ekskom', 'Ekskursjonskomiteen'), ('fagkom', 'Fag- og kurskomiteen'), ('jubkom', 'Jubileumskomiteen'), ('pangkom', 'Pensjonistkomiteen'), ('prokom', 'Profil-og aviskomiteen'), ('redaksjonen', 'Redaksjonen'), ('seniorkom', 'Seniorkomiteen'), ('trikom', 'Trivselskomiteen'), ('velkom', 'Velkomstkomiteen')], default='hs', max_length=20, verbose_name='komite'),
),
]
|
dotKom/onlineweb4
|
apps/authentication/migrations/0029_auto_20180914_1305.py
|
Python
|
mit
| 1,623
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Handling of proxies."""
from PyQt5.QtNetwork import QNetworkProxy, QNetworkProxyFactory
from qutebrowser.config import config, configtypes
from qutebrowser.utils import objreg
from qutebrowser.browser.network import pac
def init():
"""Set the application wide proxy factory."""
proxy_factory = ProxyFactory()
objreg.register('proxy-factory', proxy_factory)
QNetworkProxyFactory.setApplicationProxyFactory(proxy_factory)
class ProxyFactory(QNetworkProxyFactory):
"""Factory for proxies to be used by qutebrowser."""
def get_error(self):
"""Check if proxy can't be resolved.
Return:
None if proxy is correct, otherwise an error message.
"""
proxy = config.get('network', 'proxy')
if isinstance(proxy, pac.PACFetcher):
return proxy.fetch_error()
else:
return None
def queryProxy(self, query):
"""Get the QNetworkProxies for a query.
Args:
query: The QNetworkProxyQuery to get a proxy for.
Return:
A list of QNetworkProxy objects in order of preference.
"""
proxy = config.get('network', 'proxy')
if proxy is configtypes.SYSTEM_PROXY:
proxies = QNetworkProxyFactory.systemProxyForQuery(query)
elif isinstance(proxy, pac.PACFetcher):
proxies = proxy.resolve(query)
else:
proxies = [proxy]
for p in proxies:
if p.type() != QNetworkProxy.NoProxy:
capabilities = p.capabilities()
if config.get('network', 'proxy-dns-requests'):
capabilities |= QNetworkProxy.HostNameLookupCapability
else:
capabilities &= ~QNetworkProxy.HostNameLookupCapability
p.setCapabilities(capabilities)
return proxies
|
lahwaacz/qutebrowser
|
qutebrowser/browser/network/proxy.py
|
Python
|
gpl-3.0
| 2,666
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Deuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import DeuscoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(DeuscoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value, tests will fail;
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
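# (illustration: if min_relay_tx_fee were 0.00001 per kB, the per-byte fee
#  would be 0.00000001, so feeTolerance = 2 * 0.00000001 = 0.00000002)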
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = 200
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_equal(len(dec_tx['vin']) > 0, True) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_equal(len(dec_tx['vin']) > 0, True) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_equal(len(dec_tx['vin']) > 0, True)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(len(dec_tx['vin']) > 0, True)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 5.0:
utx = aUtx
break
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction which will not get a change output      #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 5.0:
utx = aUtx
break
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 1.0:
utx = aUtx
break
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
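        # the splice above replaces the empty scriptSig length byte (0x00) with
        # "01" (script length 1) followed by "00" (a single OP_0 byte), which is
        # why the decoded vin below is expected to show scriptSig hex "00"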
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 1.0:
utx = aUtx
if aUtx['amount'] == 5.0:
utx2 = aUtx
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 1.0:
utx = aUtx
if aUtx['amount'] == 5.0:
utx2 = aUtx
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
errorString = ""
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
except JSONRPCException,e:
errorString = e.error['message']
assert("Insufficient" in errorString)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
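        # the funded tx should pay at least as much fee as the wallet-built one,
        # and no more than feeTolerance above it (feeTolerance is defined earlier
        # in the test); the same pattern is repeated for the cases below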
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
        #compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 DEUS to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_deuscoinds()
self.nodes = start_nodes(4, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
error = False
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2)
except:
error = True
assert(error)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert_equal("fee" in result.keys(), True)
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main()
|
deuscoin-org/deuscoin-core
|
qa/rpc-tests/fundrawtransaction.py
|
Python
|
mit
| 24,552
|
from tempfile import TemporaryFile
from time import sleep
from subprocess import Popen, PIPE
from ..shell import BaseShellExec
from ....util import Bunch, kill_pid
from logging import getLogger
log = getLogger(__name__)
TIMEOUT_ERROR_MESSAGE = u'Execution timed out'
TIMEOUT_RETURN_CODE = -1
DEFAULT_TIMEOUT = 60
DEFAULT_TIMEOUT_CHECK_INTERVAL = 3
class LocalShell(BaseShellExec):
"""
>>> shell = LocalShell()
>>> def exec_python(script, **kwds): return shell.execute('python -c "%s"' % script, **kwds)
>>> exec_result = exec_python("from __future__ import print_function; print('Hello World')")
>>> exec_result.stderr == u''
True
>>> exec_result.stdout.strip() == u'Hello World'
True
>>> exec_result = exec_python("import time; time.sleep(90)", timeout=1, timeout_check_interval=.1)
>>> exec_result.stdout == u''
True
    >>> exec_result.stderr == u'Execution timed out'
True
>>> exec_result.returncode == TIMEOUT_RETURN_CODE
True
"""
def __init__(self, **kwds):
pass
def execute(self, cmd, persist=False, timeout=DEFAULT_TIMEOUT, timeout_check_interval=DEFAULT_TIMEOUT_CHECK_INTERVAL, **kwds):
outf = TemporaryFile()
p = Popen(cmd, shell=True, stdin=None, stdout=outf, stderr=PIPE)
# poll until timeout
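        # note the for/else below: the else branch runs only if the loop never
        # breaks, i.e. the child process is still alive after `timeout` seconds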
for i in range(int(timeout / timeout_check_interval)):
r = p.poll()
if r is not None:
break
sleep(timeout_check_interval)
else:
kill_pid(p.pid)
return Bunch(stdout=u'', stderr=TIMEOUT_ERROR_MESSAGE, returncode=TIMEOUT_RETURN_CODE)
outf.seek(0)
return Bunch(stdout=_read_str(outf), stderr=_read_str(p.stderr), returncode=p.returncode)
def _read_str(stream):
contents = stream.read()
return contents.decode('UTF-8') if isinstance(contents, bytes) else contents
__all__ = ('LocalShell',)
|
ssorgatem/pulsar
|
pulsar/managers/util/cli/shell/local.py
|
Python
|
apache-2.0
| 1,918
|
"""
This file is part of DeepConvSep.
Copyright (c) 2014-2017 Marius Miron <miron.marius at gmail.com>
DeepConvSep is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepConvSep is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the Affero GPL License
along with DeepConvSep. If not, see <http://www.gnu.org/licenses/>.
"""
import os,sys
import transform
import util
from transform import transformFFT
import numpy as np
import re
import itertools as it
from scipy.signal import blackmanharris as blackmanharris
import climate
if __name__ == "__main__":
if len(sys.argv)>-1:
climate.add_arg('--db', help="the Bach10 Sibelius dataset path")
climate.add_arg('--feature_path', help="the path where to save the features")
climate.add_arg('--gt', help="compute features for the ground truth aligned rendition or the others")
db=None
kwargs = climate.parse_args()
if kwargs.__getattribute__('db'):
db = kwargs.__getattribute__('db')
else:
db='/home/marius/Documents/Database/Bach10/Source separation/'
# db='/Volumes/Macintosh HD 2/Documents/Database/Bach10/Source separation/'
if kwargs.__getattribute__('feature_path'):
feature_path = kwargs.__getattribute__('feature_path')
else:
feature_path=os.path.join(db,'transforms','t3_synth_aug_more')
if kwargs.__getattribute__('gt'):
gt = int(kwargs.__getattribute__('gt'))
else:
gt = True
assert os.path.isdir(db), "Please input the directory for the Bach10 Sibelius dataset with --db path_to_Bach10"
sources = ['bassoon','clarinet','saxophone','violin']
sources_midi = ['bassoon','clarinet','saxophone','violin']
if gt:
style = ['gt']
style_midi = ['']
time_shifts=[0.]
intensity_shifts=[1.]
else:
style = ['fast','slow','original']
style_midi = ['_fast20','_slow20','_original']
time_shifts=[0.,0.1,0.2]
intensity_shifts=[1.]
cc=[(time_shifts[i], intensity_shifts[j]) for i in xrange(len(time_shifts)) for j in xrange(len(intensity_shifts))]
if len(cc)<len(sources):
combo1 = list(it.product(cc,repeat=len(sources)))
combo = []
for i in range(len(combo1)):
c = np.array(combo1[i])
if (len(intensity_shifts)==1 and not(all(x == c[0,0] for x in c[:,0]))) \
or (len(time_shifts)==1 and not(all(x == c[0,1] for x in c[:,1]))):
combo.append(c)
else:
combo = list(it.permutations(cc,len(sources)))
if len(combo)==0:
combo = [[[time_shifts[0],intensity_shifts[0]] for s in sources]]
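    # combo holds one (time_shift, intensity_shift) pair per source; when only one
    # of the two shift lists varies, the filtering above keeps just the combinations
    # whose varying shift actually differs between sources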
#print len(combo)
#compute transform
for f in os.listdir(db):
if os.path.isdir(os.path.join(db,f)) and f[0].isdigit() :
if not f.startswith('.'):
for s in range(len(style)):
if not os.path.exists(os.path.join(feature_path,style[s])):
os.makedirs(os.path.join(feature_path,style[s]))
for co in combo:
c = np.array(co)
for i in range(len(sources)):
#read the audio file
sounds,sampleRate,bitrate = util.readAudioScipy(os.path.join(db,f,f+'_'+style[s]+'_'+sources[i]+'.wav'))
if sampleRate != 44100:
print 'sample rate is not consistent'
if i==0:
tt=transformFFT(frameSize=4096, hopSize=512, sampleRate=44100, window=blackmanharris)
nframes = int(np.ceil(len(sounds) / np.double(tt.hopSize))) + 2
size = int(len(sounds)-int(np.max(np.array(c[:,0]))*sampleRate))
audio = np.zeros((size,len(sources)+1))
if c[i,0] == 0:
if len(sounds) > size:
segment = sounds[:size]
else:
segment = np.zeros(size)
segment[:len(sounds)] = sounds
elif c[i,0] < 0:
seg_idx = int(abs(c[i,0]*sampleRate))
segment = np.pad(sounds,((0,seg_idx+np.maximum(0,size-len(sounds)))), mode='constant')
if len(segment)<(size+seg_idx):
segment = np.pad(segment,((0,size+seg_idx - len(segment))), mode='constant')
segment = segment[seg_idx:size+seg_idx]
else:
segment = np.pad(sounds,((int(c[i,0]*sampleRate),0)), mode='constant')
if len(segment)<size:
segment = np.pad(segment,((0,size - len(segment))), mode='constant')
segment = segment[:size]
audio[:,0] = audio[:,0] + c[i,1] * segment[:size]
audio[:,i+1] = c[i,1] * segment[:size]
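                            # column 0 accumulates the mixture of all shifted and
                            # scaled sources; columns 1..len(sources) keep the stems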
segment = None
sounds = None
tt.compute_transform(audio,os.path.join(feature_path,style[s],f+'_'+str(c).encode('base64','strict')+'.data'),phase=False)
|
gerruz/DeepConvSep
|
examples/bach10/compute_features_bach10sibelius.py
|
Python
|
agpl-3.0
| 5,850
|
import argparse, os, sys, subprocess
from pathlib import Path
from distutils.spawn import find_executable
from . import alias, autocomplete
BASH_LOGIN_FILES = ['~/.bash_profile', '~/.bash_login', '~/.profile', '~/.bashrc']
def get_bash_file() -> str:
paths = [str(Path(_).expanduser()) for _ in BASH_LOGIN_FILES]
result = [_ for _ in paths if os.path.exists(_) and os.path.isfile(_) and os.access(_, os.R_OK)]
if not result:
default = paths[0]
open(default, 'w').close()
result = [default]
return result[0]
def get_zsh_file() -> str:
z_dot_dir = os.environ.get('ZDOTDIR', '~')
zsh_file = str(Path(z_dot_dir + '/.zshenv').expanduser())
if not os.path.exists(zsh_file) or not os.path.isfile(zsh_file):
open(zsh_file, 'w').close()
return zsh_file
def get_fish_functions_file() -> str:
fish_functions = str(Path('~/.config/fish/functions/').expanduser())
if not os.path.exists(str(fish_functions)):
os.makedirs(str(fish_functions))
fish_file = str(Path(fish_functions + '/awsume.fish').expanduser())
if not os.path.exists(fish_file) or not os.path.isfile(fish_file):
open(fish_file, 'w').close()
return fish_file
def get_fish_completions_file() -> str:
fish_completions = str(Path('~/.config/fish/completions/').expanduser())
if not os.path.exists(str(fish_completions)):
os.makedirs(str(fish_completions))
fish_file = str(Path(fish_completions + '/awsume.fish').expanduser())
if not os.path.exists(fish_file) or not os.path.isfile(fish_file):
open(fish_file, 'w').close()
return fish_file
def get_powershell_file() -> str:
(powershell_file, _) = subprocess.Popen(['powershell', 'Write-Host $profile'], stdout=subprocess.PIPE, shell=True).communicate()
if powershell_file:
powershell_file = str(powershell_file.decode('ascii')).replace('\r\n', '').replace('\n', '')
return powershell_file
return None
def install(shell: str, alias_file: str, autocomplete_file: str):
if alias_file:
alias.main(shell, alias_file)
if autocomplete_file:
autocomplete.main(shell, autocomplete_file)
def setup_bash(alias_file: str, autocomplete_file: str):
print('===== Setting up bash =====')
bash_file = get_bash_file()
alias_file = alias_file or bash_file
autocomplete_file = autocomplete_file or bash_file
if not bash_file:
print('===== Could not locate bash file =====')
install('bash', alias_file, autocomplete_file)
def setup_zsh(alias_file: str, autocomplete_file: str):
print('===== Setting up zsh =====')
zsh_file = get_zsh_file()
alias_file = alias_file or zsh_file
autocomplete_file = autocomplete_file or zsh_file
if not zsh_file:
print('===== Could not locate zsh file =====')
install('zsh', alias_file, autocomplete_file)
def setup_fish(alias_file: str, autocomplete_file: str):
print('===== Setting up fish =====')
fish_functions_file = get_fish_functions_file()
fish_completions_file = get_fish_completions_file()
alias_file = alias_file or fish_functions_file
autocomplete_file = autocomplete_file or fish_completions_file
install('fish', alias_file, autocomplete_file)
def setup_powershell(alias_file: str, autocomplete_file: str):
print('===== Setting up powershell =====')
powershell_file = get_powershell_file()
autocomplete_file = autocomplete_file or powershell_file
if not powershell_file:
print('===== Could not locate powershell file =====')
install('powershell', None, autocomplete_file)
def parse_args(argv: sys.argv) -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('--shell',
default=None,
dest='shell',
metavar='shell',
help='The shell you will use awsume under',
required=False,
choices=['bash', 'zsh', 'fish', 'powershell']
)
parser.add_argument('--autocomplete-file',
default=None,
dest='autocomplete_file',
metavar='autocomplete_file',
required=False,
help='The file you want the autocomplete script to be defined in',
)
parser.add_argument('--alias-file',
default=None,
dest='alias_file',
metavar='alias_file',
required=False,
help='The file you want the alias to be defined in',
)
args = parser.parse_args(argv)
if args.shell in ['powershell'] and args.alias_file:
parser.error('No alias file is needed for shell: powershell')
if not args.shell and (args.autocomplete_file or args.alias_file):
parser.error('Cannot specify autocomplete file or alias file when not specifying shell')
return args
def run(shell: str = None, alias_file: str = None, autocomplete_file: str = None):
if os.environ.get('AWSUME_SKIP_ALIAS_SETUP'):
print('===== Skipping Alias Setup =====')
return
setup_functions = {
'bash': setup_bash,
'zsh': setup_zsh,
'powershell': setup_powershell,
'fish': setup_fish,
}
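    # with no explicit --shell, configure every supported shell whose executable
    # is found on PATH; otherwise configure only the requested one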
if not shell:
if find_executable('bash'):
setup_functions['bash'](None, None)
if find_executable('zsh'):
setup_functions['zsh'](None, None)
if find_executable('fish'):
setup_functions['fish'](None, None)
if find_executable('powershell'):
setup_functions['powershell'](None, None)
else:
setup_functions[shell](alias_file, autocomplete_file)
print('===== Finished setting up =====')
def main():
args = parse_args(sys.argv[1:])
run(args.shell, args.alias_file, args.autocomplete_file)
|
trek10inc/awsume
|
awsume/configure/main.py
|
Python
|
mit
| 5,698
|
'''
This script enables Kafka listening
for rPi captured data
@author: devopsec
'''
import subprocess
def kafkaCapture():
#allow traffic on port 6667
subprocess.run("ufw allow 6667", shell=True)
#start kafka producer
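    # as written, pycapa publishes packets captured on interface eno1 to the
    # Kafka broker at node1:6667 under the "pcap" topic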
subprocess.run("pycapa --producer --kafka node1:6667 --topic pcap -i eno1", shell=True)
return None
|
flyballlabs/threatdetectionservice
|
agents/host/EnableKafkaCapture.py
|
Python
|
apache-2.0
| 353
|
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['ProdUpgrade']
# DEPRECATED
from pyasm.search.upgrade.project import *
class ProdUpgrade(BaseUpgrade):
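    # method names follow upgrade_v<major>_<minor>_<patch>_<release>_<seq>
    # (or upgrade_YYYYMMDD for the oldest entries); BaseUpgrade presumably
    # discovers and runs them in version order (assumed from the naming
    # convention, not verified here)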
def upgrade_v3_8_0_v01_001(my):
my.run_sql('''
alter table submission add column "code" varchar(256);
''')
def upgrade_v2_6_0_v01_001(my):
my.run_sql('''
alter table instance drop constraint shot_code_fkey;
''')
#
# 2.6.0.rc02 again
#
def upgrade_v2_6_0_rc02_002(my):
my.run_sql('''
ALTER TABLE asset ALTER COLUMN asset_type DROP NOT NULL;
''')
def upgrade_v2_6_0_rc02_001(my):
my.run_sql('''
ALTER TABLE asset ALTER COLUMN name DROP NOT NULL;
''')
#
# 2.5.0.v01 again
#
def upgrade_v2_5_0_v01_001(my):
my.run_sql('''
ALTER TABLE shot_texture add constraint shot_texture_code_unique UNIQUE
(code);
''')
def upgrade_v2_5_0_rc09_002(my):
my.run_sql('''
CREATE INDEX texture_asset_code_idx ON texture(asset_code);
''')
def upgrade_v2_5_0_rc09_001(my):
my.run_sql('''
CREATE INDEX texture_code_idx ON texture(code);
''')
#
# 2.5.0.b07 again
#
def upgrade_v2_5_0_b07_001(my):
my.run_sql('''
ALTER TABLE layer ADD COLUMN sort_order integer;
''')
#
# 2.5.0.b04
#
def upgrade_v2_5_0_b04_002(my):
my.run_sql('''
ALTER TABLE texture alter column code type varchar(256);
''')
def upgrade_v2_5_0_b04_001(my):
my.run_sql('''
ALTER TABLE shot_texture alter column code type varchar(256);
''')
#
# 2.5.0.b03
#
def upgrade_v2_5_0_b03_003(my):
my.run_sql('''
ALTER TABLE asset DROP COLUMN images
''')
def upgrade_v2_5_0_b03_002(my):
my.run_sql('''
ALTER TABLE asset DROP COLUMN snapshot
''')
def upgrade_v2_5_0_b03_001(my):
my.run_sql('''
ALTER TABLE asset DROP COLUMN retire_status
''')
#
# 2.5.0.a01
#
def upgrade_v2_5_0_a01_006(my):
my.run_sql('''
ALTER TABLE asset ALTER COLUMN code DROP NOT NULL;
''')
def upgrade_v2_5_0_a01_005(my):
my.run_sql('''
alter table asset add primary key (id);
''')
def upgrade_v2_5_0_a01_004(my):
my.run_sql('''
alter table asset drop constraint asset_pkey;
''')
def upgrade_v2_5_0_a01_003(my):
#"$2" FOREIGN KEY (asset_code) REFERENCES asset(code) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED
my.run_sql('''
alter table layer_instance drop constraint "$2";
''')
def upgrade_v2_5_0_a01_002(my):
#"asset_code_fkey" FOREIGN KEY (asset_code) REFERENCES asset(code) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED
my.run_sql('''
alter table instance drop constraint asset_code_fkey;
''')
def upgrade_v2_5_0_a01_001(my):
my.run_sql('''
CREATE TABLE custom_script (
id serial PRIMARY KEY,
code varchar(256),
title varchar(256),
description text,
folder varchar(1024),
script text,
login varchar(256),
"timestamp" timestamp,
s_status varchar(256)
);
''')
#
# 2.4.0.a01
#
def upgrade_v2_4_0_a01_009(my):
my.run_sql('''
alter table shot alter column code drop not null;
''')
def upgrade_v2_4_0_a01_008(my):
my.run_sql('''
alter table shot add primary key (id);
''')
def upgrade_v2_4_0_a01_007(my):
my.run_sql('''
alter table shot drop constraint shot_pkey;
''')
def upgrade_v2_4_0_a01_006(my):
my.run_sql('''
create unique index naming_code_idx on naming (code);
''')
def upgrade_v2_4_0_a01_005(my):
my.run_sql('''
alter table naming add column code varchar(256);
''')
def upgrade_v2_4_0_a01_004(my):
my.run_sql('''
create unique index pipeline_code_idx on pipeline (code);
''')
def check_v2_4_0_a01_003(my):
my.table_exists("pipeline")
def upgrade_v2_4_0_a01_003(my):
my.run_sql('''
CREATE TABLE pipeline (
id serial,
code varchar(256),
pipeline text,
"timestamp" timestamp DEFAULT now(),
search_type varchar(256),
description text,
s_status varchar(32),
PRIMARY KEY (id)
);
''')
def upgrade_v2_4_0_a01_002(my):
my.run_sql('''
create unique index widget_config_code_idx on widget_config (code);
''')
def upgrade_v2_4_0_a01_001(my):
my.run_sql('''
CREATE TABLE widget_config (
id serial,
code character varying(256),
"view" character varying(256),
category character varying(256),
search_type character varying(256),
"login" character varying(256),
config text,
"timestamp" timestamp without time zone DEFAULT now(),
s_status character varying(32),
PRIMARY KEY (id)
);
''')
#
# 2.2.0.rc03
#
def upgrade_v2_2_0_rc03_003(my):
my.run_sql('''
CREATE INDEX texture_asset_code_idx ON texture(asset_code);
''')
def upgrade_v2_2_0_rc03_002(my):
my.run_sql('''
CREATE INDEX texture_code_idx ON texture(code);
''')
def upgrade_v2_2_0_rc03_001(my):
my.run_sql('''
ALTER TABLE layer ADD COLUMN sort_order integer;
''')
#
# 2.1.0.b01
#
def upgrade_v2_1_0_b01_009(my):
my.run_sql('''
ALTER TABLE plate ADD COLUMN file_range text;
''')
def upgrade_v2_1_0_b01_008(my):
my.run_sql('''
ALTER TABLE plate ADD COLUMN client_name varchar(256);
''')
def upgrade_v2_1_0_b01_007(my):
my.run_sql('''
ALTER TABLE plate ADD COLUMN link text;
''')
def upgrade_v2_1_0_b01_006(my):
my.run_sql('''
ALTER TABLE plate ADD COLUMN login varchar(256);
''')
def upgrade_v2_1_0_b01_005(my):
my.run_sql('''
ALTER TABLE plate ADD COLUMN name varchar(256);
''')
def upgrade_v2_1_0_b01_004(my):
my.run_sql('''
ALTER TABLE plate ADD COLUMN search_id int4;
''')
def upgrade_v2_1_0_b01_003(my):
my.run_sql('''
ALTER TABLE plate ADD COLUMN search_type varchar(256);
''')
def upgrade_v2_1_0_b01_002(my):
my.run_sql('''
ALTER TABLE render ADD COLUMN pipeline_code varchar(256);
''')
def upgrade_v2_1_0_b01_001(my):
my.run_sql('''
ALTER TABLE render ADD COLUMN link text;
''')
#
# 2.1.0.a01
#
"""
def upgrade_v2_1_0_a01_001(my):
my.run_sql('''
CREATE TABLE snapshot_type (
id integer NOT NULL,
code character varying(256),
pipeline_code text,
"timestamp" timestamp without time zone DEFAULT now(),
"login" character varying(256),
s_status character varying(30),
relpath text,
refile text,
project_code character varying(256),
subcontext text,
snapshot_flavor text
);
''')
"""
#
# 2.1.0.a01
#
def upgrade_v2_1_0_a01_001(my):
my.run_sql('''
ALTER TABLE session_contents ADD COLUMN session text;
''')
def upgrade_v2_0_0_b03_001(my):
my.run_sql('''
ALTER TABLE asset alter column code TYPE varchar(256);
''')
#
# 2.0.0.b01
#
def upgrade_v2_0_0_b01_001(my):
my.run_sql('''
ALTER TABLE instance ADD COLUMN code VARCHAR(256);
''')
#
# 1.9.1.a07
#
def upgrade_v1_9_1_a06_016(my):
my.run_sql('''
ALTER TABLE render ADD COLUMN name varchar(256);
''')
def upgrade_v1_9_1_a06_015(my):
my.run_sql('''
ALTER TABLE render RENAME COLUMN context to type;
''')
def upgrade_v1_9_1_a07_014(my):
my.run_sql('''
ALTER TABLE render ALTER COLUMN login DROP NOT NULL;
''')
def upgrade_v1_9_1_a07_013(my):
my.run_sql('''
ALTER TABLE naming ADD COLUMN context varchar(256);
''')
def upgrade_v1_9_1_a07_012(my):
my.run_sql('''
ALTER TABLE naming ADD COLUMN snapshot_type varchar(256);
''')
def upgrade_v1_9_1_a07_011(my):
my.run_sql('''
CREATE UNIQUE INDEX render_code_idx on render (code);
''')
def upgrade_v1_9_1_a07_010(my):
my.run_sql('''
ALTER TABLE render ADD COLUMN type varchar(256);
''')
def upgrade_v1_9_1_a07_009(my):
my.run_sql('''
ALTER TABLE render RENAME COLUMN snapshot_code to _snapshot_code;
''')
def upgrade_v1_9_1_a07_008(my):
my.run_sql('''
ALTER TABLE render RENAME COLUMN snapshot to _snapshot;
''')
def upgrade_v1_9_1_a07_007(my):
my.run_sql('''
ALTER TABLE render RENAME COLUMN file_range to _file_range;
''')
def upgrade_v1_9_1_a07_006(my):
my.run_sql('''
ALTER TABLE render RENAME COLUMN session to _session;
''')
def upgrade_v1_9_1_a07_005(my):
my.run_sql('''
ALTER TABLE render RENAME COLUMN version to _version;
''')
def upgrade_v1_9_1_a07_003(my):
my.run_sql('''
ALTER TABLE render RENAME COLUMN images to _images;
''')
def upgrade_v1_9_1_a07_002(my):
my.run_sql('''
ALTER TABLE plate ADD COLUMN pipeline_code varchar(256);
''')
def upgrade_v1_9_1_a07_001(my):
my.run_sql('''
CREATE UNIQUE INDEX plate_code_idx on plate (code);
''')
#
# 1.9.1.a06
#
def upgrade_v1_9_1_a06_002(my):
my.run_sql('''
ALTER TABLE render add column context varchar(256);
''')
def upgrade_v1_9_1_a06_001(my):
my.run_sql('''
ALTER TABLE render add column code varchar(256);
''')
def upgrade_v1_9_1_a04_002(my):
my.run_sql('''
ALTER TABLE asset add column short_code varchar(256);
''')
def upgrade_v1_9_1_a04_001(my):
my.run_sql('''
ALTER TABLE shot add column short_code varchar(256);
''')
def upgrade_v1_9_1_a02_003(my):
my.run_sql('''
ALTER TABLE texture alter column code TYPE varchar(256);
''')
def upgrade_v1_9_1_a02_002(my):
my.run_sql('''
ALTER TABLE shot_texture alter column code TYPE varchar(256);
''')
def upgrade_v1_9_1_a02_001(my):
my.run_sql('''
alter table plate add column code varchar(256);
''')
def upgrade_v1_9_0_a1_005(my):
my.run_sql('''
insert into prod_setting (key, value, description, type, category) values ('render_job_type', 'tacticsample', 'Sample Job Type', 'sequence', 'Render');
''')
def upgrade_v1_9_0_a1_005(my):
my.run_sql('''
CREATE TABLE custom_property (
id serial NOT NULL,
search_type varchar(256),
name varchar(256),
description text,
login varchar(256),
PRIMARY KEY (id)
);
''')
def upgrade_v1_9_0_a1_004(my):
my.run_sql('''
INSERT INTO prod_setting ("category", "key", value, description, "type", search_type) VALUES ('Naming', 'use_name_as_asset_code', 'false', 'Use name as the asset code', 'sequence', 'prod/asset');
''')
def upgrade_v1_9_0_a1_003(my):
my.run_sql('''
update prod_setting set category = 'General' where category is NULL;
''')
def upgrade_v1_9_0_a1_002(my):
my.run_sql('''
ALTER TABLE prod_setting ADD COLUMN category varchar(256);
''')
def upgrade_v1_7_0_rc1_003(my):
my.run_sql('''
ALTER TABLE sequence ADD CONSTRAINT episode_code_fkey FOREIGN KEY (episode_code) REFERENCES episode(code) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED;
''')
def upgrade_v1_7_0_rc1_002(my):
my.run_sql('''
ALTER TABLE sequence add column episode_code varchar(256);
''')
def upgrade_v1_7_0_rc1_001(my):
my.run_sql('''
CREATE TABLE episode (
id serial NOT NULL,
code character varying(256) NOT NULL,
description text,
"timestamp" timestamp without time zone DEFAULT now(),
s_status character varying(30),
sort_order smallint,
CONSTRAINT episode_code_unique UNIQUE(code)
);
''')
def upgrade_v1_6_0_rc1_001(my):
my.run_sql('''
INSERT INTO prod_setting ("key", value, description, "type", search_type) VALUES ('notes_asset_prod_context', 'model|uv_layout|shader|texture|rig', 'notes context for Asset Pipeline', 'sequence', 'sthpw/note');
''')
def upgrade_v1_6_0_b2_001(my):
my.run_sql('''
CREATE TABLE sequence_instance (
id serial NOT NULL,
sequence_code character varying(30) NOT NULL,
asset_code character varying(100) NOT NULL,
"timestamp" timestamp without time zone DEFAULT now(),
status text,
"type" character varying(30)
);
''')
def upgrade_20080110(my):
my.run_sql('''
ALTER TABLE render ADD COLUMN s_status varchar(30);
''')
def upgrade_20080103(my):
my.run_sql('''
ALTER TABLE texture ALTER COLUMN code TYPE varchar(256);
''')
def upgrade_20071121(my):
# adding a priority column for Omnilab
# This should be handled by custom properties
my.run_sql('''
ALTER TABLE shot ADD COLUMN priority varchar(30);
''')
my.run_sql('''
INSERT INTO prod_setting (description, key, value, type) values ('Shot Priority', 'shot_priority', 'high|med|low', 'sequence');
''')
def upgrade_20071116(my):
my.run_sql('''
-- take milestone_code out from invisible elements
UPDATE prod_setting set value='priority' where key='invisible_elements' and search_type='sthpw/task';
''')
def upgrade_20071112(my):
my.run_sql('''
CREATE TABLE custom_property (
id serial NOT NULL,
search_type varchar(256),
name varchar(256),
description text,
login varchar(256),
PRIMARY KEY (id)
);
''')
def upgrade_20071007(my):
my.run_sql('''
ALTER TABLE layer ADD COLUMN s_status varchar(30);
''')
def upgrade_20071002(my):
my.run_sql('''
ALTER TABLE shot rename column pipeline to pipeline_code;
''')
def upgrade_20070922(my):
my.run_sql('''
CREATE TABLE render_policy (
id serial NOT NULL,
code character varying(30),
description text,
width int2,
height int2,
frame_by int2,
extra_settings text,
PRIMARY KEY (id)
);
''')
def upgrade_20070912(my):
my.run_sql('''
CREATE TABLE shot_audio (
id serial NOT NULL,
title character varying(30),
shot_code character varying(100),
PRIMARY KEY (id),
CONSTRAINT shot_code_fkey FOREIGN KEY (shot_code)
REFERENCES shot (code)
ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED
);
''')
def upgrade_20070903(my):
my.run_sql('''
ALTER TABLE ONLY storyboard
ADD CONSTRAINT storyboard_pkey PRIMARY KEY (id);
''')
def upgrade_20070817(my):
my.run_sql('''
ALTER TABLE script ADD PRIMARY KEY (id);
''')
def upgrade_20070719(my):
my.run_sql('''
ALTER TABLE cut_sequence add column sequence_code character varying(100);
ALTER TABLE cut_sequence ADD CONSTRAINT sequence_code_fkey FOREIGN KEY (sequence_code) REFERENCES sequence(code) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED;
''')
def upgrade_20070706(my):
my.run_sql('''
ALTER TABLE script ADD COLUMN sequence_code varchar(30);
''')
my.run_sql('''
ALTER TABLE script ADD COLUMN stage varchar(256);
''')
my.run_sql('''
ALTER TABLE script ADD COLUMN title text;
''')
my.run_sql('''
ALTER TABLE script ADD COLUMN author varchar(256);
''')
my.run_sql('''
ALTER TABLE script DROP COLUMN shot_code;
''')
def upgrade_20070628(my):
my.run_sql('''
CREATE TABLE cut_sequence
(
id serial NOT NULL,
shot_code character varying(30),
"type" character varying(100),
"timestamp" timestamp without time zone DEFAULT now(),
s_status character varying(30),
description text,
CONSTRAINT cut_sequence_pkey PRIMARY KEY (id),
CONSTRAINT shot_code_fkey FOREIGN KEY (shot_code)
REFERENCES shot (code) MATCH SIMPLE
ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED
)
''')
def upgrade_20070622(my):
my.run_sql('''
CREATE TABLE naming (
id serial PRIMARY KEY,
search_type varchar(100),
dir_naming text,
file_naming text
);
''')
def upgrade_20070621(my):
my.run_sql('''
alter table shot add column scan_status varchar(256);
''')
my.run_sql('''
alter table shot add column type varchar(256);
''')
def upgrade_20070605(my):
my.run_sql('''
ALTER TABLE submission ADD COLUMN status varchar(100);
''')
def upgrade_20070526(my):
my.run_sql('''
ALTER TABLE plate add column description text;
''')
def upgrade_20070522(my):
my.run_sql('''
ALTER TABLE shot_texture add column search_type varchar(256);
ALTER TABLE shot_texture add column search_id int4;
''')
def upgrade_20070522(my):
my.run_sql('''
CREATE TABLE shot_texture (
id serial NOT NULL,
description text,
shot_code character varying(50),
category character varying(200),
"timestamp" timestamp without time zone DEFAULT now(),
snapshot text,
s_status character varying(32),
code character varying(50),
pipeline_code character varying(256),
asset_context character varying(30)
);
''')
def upgrade_20070516(my):
my.run_sql('''
ALTER TABLE shot ADD COLUMN frame_in int2;
ALTER TABLE shot ADD COLUMN frame_out int2;
ALTER TABLE shot ADD COLUMN frame_note text;
''')
def upgrade_20070430(my):
my.run_sql('''
ALTER TABLE art_reference ADD COLUMN "timestamp" timestamp DEFAULT now();
''')
def upgrade_20070413(my):
my.run_sql('''
alter table prod_setting add constraint key_search_type_unique UNIQUE
(key, search_type);
''')
def upgrade_20070316(my):
my.run_sql('''
alter table submission add column artist varchar(256);
''')
def upgrade_20070210(my):
my.run_sql('''
alter table texture add column asset_context varchar(30);
''')
def upgrade_20070206(my):
my.run_sql('''
insert into prod_setting (key, value, description, type, search_type) values ('bin_label', 'client|review', 'Types of bins', 'sequence', 'prod/bin');
insert into prod_setting (key, value, description, type, search_type) values ('notes_preprod_context', 'client kick off|internal kick off', 'Types of reproduction notes', 'sequence', 'prod/shot');
''')
def upgrade_20070203(my):
my.run_sql('''
CREATE TABLE camera (
id serial PRIMARY KEY,
shot_code varchar(30),
description text,
"timestamp" timestamp default now(),
s_status varchar(30)
);
''')
"""
my.run_sql('''
CREATE TABLE camera (
id serial PRIMARY KEY,
shot_code varchar(30),
lab_roll varchar(30),
roll varchar(30),
height varchar(30),
distance varchar(30),
aspect_ratio varchar(30),
fps varchar(30),
filter varchar(30),
focus varchar(256),
slate varchar(30),
perf int2,
take int2,
"timestamp" timestamp default now(),
s_status varchar(30)
);
''')
"""
def upgrade_20070129(my):
my.run_sql('''
ALTER TABLE storyboard ADD CONSTRAINT shot_code_fkey FOREIGN KEY (shot_code) REFERENCES shot(code) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED;
ALTER TABLE instance DROP constraint "$1";
ALTER TABLE instance DROP constraint "$2";
ALTER TABLE instance ADD CONSTRAINT asset_code_fkey FOREIGN KEY (asset_code) REFERENCES asset(code) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED;
ALTER TABLE instance ADD CONSTRAINT shot_code_fkey FOREIGN KEY (shot_code) REFERENCES shot(code) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED;
ALTER TABLE plate ADD CONSTRAINT shot_code_fkey FOREIGN KEY (shot_code) REFERENCES shot(code) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE INITIALLY DEFERRED;
''')
def upgrade_20070126(my):
my.run_sql('''
alter table bin add column label varchar(100);
''')
def upgrade_20070123(my):
my.run_sql('''
INSERT INTO prod_setting (key,value,type,search_type, description) values ('texture/category','texture|mattepainting|concept', 'sequence', 'prod/texture', 'Various Types of 2D Assets')
''')
def upgrade_20070117(my):
my.run_sql('''
ALTER TABLE shot ADD COLUMN complexity int2;
INSERT INTO prod_setting (key, value, description, type, search_type)
VALUES ('bin_type', 'anim|tech|review|final|client', 'The different type of bins', 'sequence', 'prod/bin');
''')
def upgrade_20070114(my):
my.run_sql('''
CREATE TABLE plate (
id serial PRIMARY KEY,
shot_code varchar(30),
type varchar(30),
"timestamp" timestamp default now(),
s_status varchar(30)
);
''')
def upgrade_20070112(my):
my.run_sql('''
ALTER TABLE script DROP COLUMN episode_code;
ALTER TABLE script DROP COLUMN artist;
ALTER TABLE script ADD COLUMN code varchar(30);
ALTER TABLE script ADD COLUMN description text;
''')
def upgrade_20061222(my):
my.run_sql('''
-- add some columns to the prod_setting table
ALTER TABLE prod_setting ADD COLUMN description text;
ALTER TABLE prod_setting ADD COLUMN type varchar(30);
ALTER TABLE prod_setting ADD COLUMN search_type varchar(200);
''')
def upgrade_20061219(my):
my.run_sql('''
-- add a sort order to shot
ALTER TABLE shot ADD COLUMN sort_order int2;
''')
def upgrade_20061207(my):
my.run_sql('''
-- add a submission table for editorial purpose
CREATE TABLE submission (
id serial PRIMARY KEY,
search_type varchar(200),
search_id int4,
snapshot_code varchar(30),
context varchar(100),
version int4,
description text,
login varchar(30),
"timestamp" timestamp default now(),
s_status varchar(30)
);
CREATE TABLE bin (
id serial PRIMARY KEY,
code varchar(256),
description text,
type varchar(100),
s_status varchar(30)
);
CREATE TABLE submission_in_bin
(
id serial PRIMARY KEY,
submission_id int4 NOT NULL,
bin_id int4 NOT NULL,
CONSTRAINT submission_in_bin_bin_id_fkey FOREIGN KEY (bin_id)
REFERENCES bin (id)
ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT submission_in_bin_submission_id_fkey FOREIGN KEY (submission_id)
REFERENCES submission (id)
ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED
);
alter table submission_in_bin add constraint submission_id_bin_id_key
UNIQUE (submission_id, bin_id);
''')
def upgrade_20061123(my):
my.run_sql('''
-- add a sort order to sequences
alter table sequence add column sort_order int2;
-- drop the episode and artist constraint to storyboards
alter table storyboard drop column episode_code;
alter table storyboard drop column artist;
alter table storyboard add column code varchar(30);
alter table storyboard add column shot_code varchar(30);
alter table storyboard add column description text;
'''
)
def upgrade_20061122(my):
my.run_sql('''
-- now centralised in sthpw database
drop table timecard;
-- change to pipeline code
alter table asset rename column pipeline to pipeline_code;
''')
def upgrade_20061110(my):
my.run_sql('''
alter table asset rename column pipeline to pipeline_code;
''')
def upgrade_20061109(my):
my.run_sql('''
alter table shot add column sequence_code varchar(30);
''')
def upgrade_20061102(my):
my.run_sql('''
-- add a keywords column to the art_reference table --
alter table art_reference add column keywords text;
''')
def upgrade_20061025(my):
my.run_sql('''
-- add a must-have entry set_item to asset_type
INSERT INTO asset_type (code, description) VALUES('set_item',
'an item in a set');
''')
def upgrade_20060907(my):
my.run_sql('''
ALTER TABLE texture ADD COLUMN pipeline varchar(30);
''')
|
sadanandb/pmt
|
src/pyasm/search/upgrade/project/prod_upgrade.py
|
Python
|
epl-1.0
| 27,273
|
from baseneuron import BaseNeuron
import numpy as np
import pycuda.gpuarray as garray
from pycuda.tools import dtype_to_ctype
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
cuda_src = """
// %(type)s and %(nneu)d must be replaced using Python string formatting
#define NNEU %(nneu)d
__global__ void leaky_iaf(
int neu_num,
%(type)s dt,
int *spk,
%(type)s *V,
%(type)s *I,
%(type)s *Vt,
%(type)s *Vr,
%(type)s *R,
%(type)s *C,
%(type)s *B)
{
int bid = blockIdx.x;
int nid = bid * NNEU + threadIdx.x;
%(type)s v,i,r,c,b,vr,vt;
int spked = 0;
if( nid < neu_num ){
r = R[nid];
c = C[nid];
// update v
%(type)s bh = exp( -dt/r/c );
v = V[nid];
i = I[nid];
b = B[nid];
vr = Vr[nid];
v = v*bh + (r*(i+b)+vr)*(1.0-bh);
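        // the update above is the exact (exponential Euler) solution of the
        // leaky IAF membrane equation C*dV/dt = -(V - Vr)/R + I + B over one
        // step dt: V <- V*exp(-dt/(R*C)) + (R*(I+B) + Vr)*(1 - exp(-dt/(R*C)))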
// spike detection
vt = Vt[nid];
if( v >= vt ){
v = vr;
spked = 1;
}
V[nid] = v;
spk[nid] = spked;
}
return;
}
"""
class LeakyIAF_bias(BaseNeuron):
def __init__(self, n_dict, spk, dt, debug=False):
self.num_neurons = len(n_dict['id'])
self.dt = np.double(dt)
self.steps = 1
self.debug = debug
self.idx =n_dict['id']
self.Vr = garray.to_gpu( np.asarray( n_dict['Vr'], dtype=np.float64 ))
self.Vt = garray.to_gpu( np.asarray( n_dict['Vt'], dtype=np.float64 ))
self.C = garray.to_gpu( np.asarray( n_dict['C'], dtype=np.float64 ))
self.R = garray.to_gpu( np.asarray( n_dict['R'], dtype=np.float64 ))
self.V = garray.to_gpu( np.asarray( n_dict['V'], dtype=np.float64 ))
self.b = garray.to_gpu( np.asarray( n_dict['b'], dtype=np.float64 ))
self.spk = spk
_num_dendrite_cond = np.asarray([n_dict['num_dendrites_cond'][i] \
for i in range(self.num_neurons)], \
dtype=np.int32).flatten()
_num_dendrite = np.asarray([n_dict['num_dendrites_I'][i] \
for i in range(self.num_neurons)], \
dtype=np.int32).flatten()
self._cum_num_dendrite = garray.to_gpu(np.concatenate(( \
np.asarray([0,], dtype=np.int32), \
np.cumsum(_num_dendrite, dtype=np.int32))))
self._cum_num_dendrite_cond = garray.to_gpu(np.concatenate(( \
np.asarray([0,], dtype=np.int32), \
np.cumsum(_num_dendrite_cond, dtype=np.int32))))
self._num_dendrite = garray.to_gpu(_num_dendrite)
self._num_dendrite_cond = garray.to_gpu(_num_dendrite_cond)
self._pre = garray.to_gpu(np.asarray(n_dict['I_pre'], dtype=np.int32))
self._cond_pre = garray.to_gpu(np.asarray(n_dict['cond_pre'],
dtype=np.int32))
self._V_rev = garray.to_gpu(np.asarray(n_dict['reverse'],
dtype=np.double))
self.I = garray.zeros(self.num_neurons, np.double)
self._update_I_cond = self._get_update_I_cond_func()
self._update_I_non_cond = self._get_update_I_non_cond_func()
self.update = self.get_gpu_kernel()
@property
def neuron_class(self): return True
def eval( self, st = None):
self.update.prepared_async_call(\
self.gpu_grid,\
self.gpu_block,\
st,\
self.num_neurons,\
self.dt,\
self.spk,\
self.V.gpudata,\
self.I.gpudata,\
self.Vt.gpudata,\
self.Vr.gpudata,\
self.R.gpudata,\
self.C.gpudata,\
self.b.gpudata)
def get_gpu_kernel( self):
self.gpu_block = (128,1,1)
self.gpu_grid = ((self.num_neurons - 1) / self.gpu_block[0] + 1, 1)
#cuda_src = open( './leaky_iaf.cu','r')
mod = SourceModule( \
cuda_src % {"type": dtype_to_ctype(np.float64),\
"nneu": self.gpu_block[0] },\
options=["--ptxas-options=-v"])
func = mod.get_function("leaky_iaf")
func.prepare('idPPPPPPPP')# [ np.int32, # neu_num
# np.float64, # dt
# np.intp, # spk array
# np.intp, # V array
# np.intp, # I array
# np.intp, # Vt array
# np.intp, # Vr array
# np.intp, # R array
# np.intp, # C array
# np.intp]) # b array
return func
@property
def update_I_override(self): return True
def update_I(self, synapse_state, st=None):
self.I.fill(0)
if self._pre.size>0:
self._update_I_non_cond.prepared_async_call(self._grid_get_input,\
self._block_get_input, st, int(synapse_state), \
self._cum_num_dendrite.gpudata, self._num_dendrite.gpudata, self._pre.gpudata,
self.I.gpudata)
if self._cond_pre.size>0:
self._update_I_cond.prepared_async_call(self._grid_get_input,\
self._block_get_input, st, int(synapse_state), \
self._cum_num_dendrite_cond.gpudata, self._num_dendrite_cond.gpudata,
self._cond_pre.gpudata, self.I.gpudata, self.V.gpudata, \
self._V_rev.gpudata)
def _get_update_I_cond_func(self):
template = """
#define N 32
#define NUM_NEURONS %(num_neurons)d
__global__ void get_input(double* synapse, int* cum_num_dendrite, int* num_dendrite, int* pre, double* I_pre, double* V, double* V_rev)
{
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int bid = blockIdx.x;
int neuron;
__shared__ int num_den[32];
__shared__ int den_start[32];
__shared__ double V_in[32];
__shared__ double input[32][33];
if(tidy == 0)
{
neuron = bid * N + tidx;
if(neuron < NUM_NEURONS)
{
num_den[tidx] = num_dendrite[neuron];
V_in[tidx] = V[neuron];
}
}else if(tidy == 1)
{
neuron = bid * N + tidx;
if(neuron < NUM_NEURONS)
{
den_start[tidx] = cum_num_dendrite[neuron];
}
}
input[tidy][tidx] = 0.0;
__syncthreads();
neuron = bid * N + tidy;
if(neuron < NUM_NEURONS)
{
int n_den = num_den[tidy];
int start = den_start[tidy];
double VV = V_in[tidy];
for(int i = tidx; i < n_den; i += N)
{
input[tidy][tidx] += synapse[pre[start + i]] * (VV - V_rev[start + i]);
}
}
__syncthreads();
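    // tree reduction over the 32 partial sums in each row of `input`; the
    // 32x33 shared-memory layout pads the rows to avoid bank conflicts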
if(tidy < 8)
{
input[tidx][tidy] += input[tidx][tidy + 8];
input[tidx][tidy] += input[tidx][tidy + 16];
input[tidx][tidy] += input[tidx][tidy + 24];
}
__syncthreads();
if(tidy < 4)
{
input[tidx][tidy] += input[tidx][tidy + 4];
}
__syncthreads();
if(tidy < 2)
{
input[tidx][tidy] += input[tidx][tidy + 2];
}
__syncthreads();
if(tidy == 0)
{
input[tidx][0] += input[tidx][1];
neuron = bid*N + tidx;
if(neuron < NUM_NEURONS)
{
I_pre[neuron] -= input[tidx][0];
}
}
}
//can be improved
"""
mod = SourceModule(template % {"num_neurons": self.num_neurons}, options = ["--ptxas-options=-v"])
func = mod.get_function("get_input")
func.prepare('PPPPPPP')#[np.intp, np.intp, np.intp, np.intp, np.intp, np.intp, np.intp])
self._block_get_input = (32,32,1)
self._grid_get_input = ((self.num_neurons - 1) / 32 + 1, 1)
return func
def _get_update_I_non_cond_func(self):
template = """
#define N 32
#define NUM_NEURONS %(num_neurons)d
__global__ void get_input(double* synapse, int* cum_num_dendrite, int* num_dendrite, int* pre, double* I_pre)
{
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int bid = blockIdx.x;
int neuron;
__shared__ int num_den[32];
__shared__ int den_start[32];
__shared__ double input[32][33];
if(tidy == 0)
{
neuron = bid * N + tidx;
if(neuron < NUM_NEURONS)
{
num_den[tidx] = num_dendrite[neuron];
}
}else if(tidy == 1)
{
neuron = bid * N + tidx;
if(neuron < NUM_NEURONS)
{
den_start[tidx] = cum_num_dendrite[neuron];
}
}
input[tidy][tidx] = 0.0;
__syncthreads();
int n_den = num_den[tidy];
int start = den_start[tidy];
for(int i = tidx; i < n_den; i += N)
{
input[tidy][tidx] += synapse[pre[start] + i];
}
__syncthreads();
if(tidy < 8)
{
input[tidx][tidy] += input[tidx][tidy + 8];
input[tidx][tidy] += input[tidx][tidy + 16];
input[tidx][tidy] += input[tidx][tidy + 24];
}
__syncthreads();
if(tidy < 4)
{
input[tidx][tidy] += input[tidx][tidy + 4];
}
__syncthreads();
if(tidy < 2)
{
input[tidx][tidy] += input[tidx][tidy + 2];
}
__syncthreads();
if(tidy == 0)
{
input[tidx][0] += input[tidx][1];
if(neuron < NUM_NEURONS)
{
I_pre[neuron] += input[tidx][0];
}
}
}
//can be improved
"""
mod = SourceModule(template % {"num_neurons": self.num_neurons}, options = ["--ptxas-options=-v"])
func = mod.get_function("get_input")
func.prepare('PPPPP')#[np.intp, np.intp, np.intp, np.intp, np.intp])
return func
|
cerrno/neurokernel
|
neurokernel/LPU/neurons/LeakyIAF_bias.py
|
Python
|
bsd-3-clause
| 11,076
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db.models import Q, QuerySet
from django.http import HttpRequest
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from djangorestframework_camel_case.render import CamelCaseJSONRenderer
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.response import Response
from astrobin.api2.serializers.gear_migration_strategy_serializer import GearMigrationStrategySerializer
from astrobin.models import GearMigrationStrategy
from astrobin_apps_equipment.api.permissions.is_equipment_moderator_or_read_only import IsEquipmentModeratorOrReadOnly
from astrobin_apps_notifications.utils import build_notification_url, push_notification
from common.services import AppRedirectionService
class GearMigrationStrategyViewSet(viewsets.ModelViewSet):
serializer_class = GearMigrationStrategySerializer
renderer_classes = [BrowsableAPIRenderer, CamelCaseJSONRenderer]
permission_classes = [IsEquipmentModeratorOrReadOnly]
http_method_names = ['get', 'head', 'options', 'put']
def get_queryset(self) -> QuerySet:
queryset: QuerySet = GearMigrationStrategy.objects.all()
if self.request.query_params.get('pending-review', 'false').lower() in ('1', 'true', 'yes'):
queryset = queryset.filter(
Q(migration_flag_reviewer=None) &
Q(
Q(migration_flag_reviewer_lock__isnull=True) |
Q(migration_flag_reviewer_lock=self.request.user)
) &
~Q(migration_flag_moderator=self.request.user)
)
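            # "pending review" means: no reviewer assigned yet, not locked by a
            # different reviewer, and not proposed by the requesting moderator
            # (reviewers cannot review their own migration flags)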
return queryset
@action(detail=True, methods=['put'], url_path='lock-for-migration-review')
def lock_for_migration_review(self, request: HttpRequest, pk: int) -> Response:
if not request.user.groups.filter(name='equipment_moderators').exists():
raise PermissionDenied(request.user)
strategy: GearMigrationStrategy = self.get_object()
if strategy.gear.migration_flag_moderator_lock not in (None, request.user):
return Response(status=409)
if strategy.migration_flag_reviewer_lock not in (None, request.user):
return Response(status=409)
strategy.migration_flag_reviewer_lock = request.user
strategy.migration_flag_reviewer_lock_timestamp = timezone.now()
strategy.save()
serializer = self.get_serializer(strategy)
return Response(serializer.data)
@action(detail=True, methods=['put'], url_path='release-lock-for-migration-review')
def release_lock_for_migration_review(self, request: HttpRequest, pk: int) -> Response:
if not request.user.groups.filter(name='equipment_moderators').exists():
raise PermissionDenied(request.user)
strategy: GearMigrationStrategy = self.get_object()
if strategy.migration_flag_reviewer_lock not in (None, request.user):
raise PermissionDenied
strategy.migration_flag_reviewer_lock = None
strategy.migration_flag_reviewer_lock_timestamp = None
strategy.save()
serializer = self.get_serializer(strategy)
return Response(serializer.data)
@action(detail=True, methods=['put'])
def approve(self, request, pk):
if not request.user.groups.filter(name='equipment_moderators').exists():
raise PermissionDenied(request.user)
strategy: GearMigrationStrategy = self.get_object()
if strategy.migration_flag is None:
return Response(status=409)
if request.user == strategy.migration_flag_moderator:
raise PermissionDenied
strategy.migration_flag_reviewer = request.user
strategy.migration_flag_reviewer_decision = 'APPROVED'
strategy.migration_flag_reviewer_rejection_comment = None
strategy.migration_flag_reviewer_lock = None
strategy.migration_flag_reviewer_lock_timestamp = None
strategy.save()
strategy.gear.migration_flag_moderator_lock = None
strategy.gear.migration_flag_moderator_lock_timestamp = None
strategy.gear.save()
target = strategy.migration_content_object
push_notification(
[strategy.migration_flag_moderator],
request.user,
'equipment-item-migration-approved',
{
'user': request.user.userprofile.get_display_name(),
'user_url': build_notification_url(
settings.BASE_URL + reverse('user_page', args=(request.user.username,))
),
'migration_flag': strategy.migration_flag,
'reason': request.data.get('reason'),
'comment': request.data.get('comment'),
'legacy_item': strategy.gear,
'target_item': f'{target.brand.name if target.brand else _("(DIY)")} {target.name}' if target else None,
'target_url': build_notification_url(
AppRedirectionService.redirect(
f'/equipment'
f'/explorer'
f'/{target.item_type}/{target.pk}'
f'/{target.slug}'
)
) if target else None,
}
)
serializer = self.get_serializer(strategy)
return Response(serializer.data)
@action(detail=True, methods=['put'])
def reject(self, request, pk):
if not request.user.groups.filter(name='equipment_moderators').exists():
raise PermissionDenied(request.user)
strategy: GearMigrationStrategy = self.get_object()
if strategy.migration_flag is None:
return Response(status=409)
if request.user == strategy.migration_flag_moderator:
raise PermissionDenied
target = strategy.migration_content_object
push_notification(
[strategy.migration_flag_moderator],
request.user,
'equipment-item-migration-rejected',
{
'user': request.user.userprofile.get_display_name(),
'user_url': build_notification_url(
settings.BASE_URL + reverse('user_page', args=(request.user.username,))
),
'migration_flag': strategy.migration_flag,
'reason': request.data.get('reason'),
'comment': request.data.get('comment'),
'legacy_item': strategy.gear,
'target_item': f'{target.brand.name if target.brand else _("(DIY)")} {target.name}' if target else None,
'target_url': build_notification_url(
AppRedirectionService.redirect(
f'/equipment'
f'/explorer'
f'/{target.item_type}/{target.pk}'
f'/{target.slug}'
)
) if target else None,
'migration_tool_url': build_notification_url(
AppRedirectionService.redirect(
f'/equipment'
f'/migration-tool'
)
) if target else None,
}
)
strategy.gear.migration_flag_moderator_lock = None
strategy.gear.migration_flag_moderator_lock_timestamp = None
strategy.gear.save()
strategy.delete()
serializer = self.get_serializer(strategy)
return Response(serializer.data)
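# Illustrative sketch (not part of the original viewset): the lock checks in
# lock_for_migration_review() and release_lock_for_migration_review() above
# reduce to "proceed only when the lock is free or already held by the
# requesting user". The helper below states that rule in isolation; the
# function name is hypothetical.
def _lock_is_available_for(lock_holder, requesting_user):
    """Return True if requesting_user may take or keep a review lock."""
    return lock_holder in (None, requesting_user)
# Example: _lock_is_available_for(None, user) is True for any user, while
# _lock_is_available_for(other_user, user) is False and corresponds to the
# 409 responses returned above.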
|
astrobin/astrobin
|
astrobin/api2/views/gear_migration_strategy_view_set.py
|
Python
|
agpl-3.0
| 7,751
|
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
import argparse
import logging
import os
import sys
import alot
from alot.settings import settings
from alot.settings.errors import ConfigError
from alot.db.manager import DBManager
from alot.ui import UI
from alot.commands import *
from alot.commands import CommandParseError, COMMANDS
from alot.utils import argparse as cargparse
_SUBCOMMANDS = ['search', 'compose', 'bufferlist', 'taglist', 'pyshell']
def parser():
"""Parse command line arguments, validate them, and return them."""
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', action='version',
version=alot.__version__)
parser.add_argument('-r', '--read-only', action='store_true',
help='open db in read only mode')
parser.add_argument('-c', '--config',
action=cargparse.ValidatedStoreAction,
validator=cargparse.require_file,
help='config file')
parser.add_argument('-n', '--notmuch-config', default=os.environ.get(
'NOTMUCH_CONFIG',
os.path.expanduser('~/.notmuch-config')),
action=cargparse.ValidatedStoreAction,
validator=cargparse.require_file,
help='notmuch config')
parser.add_argument('-C', '--colour-mode',
choices=(1, 16, 256), type=int, default=256,
help='terminal colour mode [default: %(default)s].')
parser.add_argument('-p', '--mailindex-path',
action=cargparse.ValidatedStoreAction,
validator=cargparse.require_dir,
help='path to notmuch index')
parser.add_argument('-d', '--debug-level', default='info',
choices=('debug', 'info', 'warning', 'error'),
help='debug log [default: %(default)s]')
parser.add_argument('-l', '--logfile', default='/dev/null',
action=cargparse.ValidatedStoreAction,
validator=cargparse.optional_file_like,
help='logfile [default: %(default)s]')
# We will handle the subcommands in a separate run of argparse as argparse
# does not yet support optional subcommands.
parser.add_argument('command', nargs=argparse.REMAINDER,
help='possible subcommands are {}'.format(
', '.join(_SUBCOMMANDS)))
options = parser.parse_args()
if options.command:
# We have a command after the initial options so we also parse that.
# But we just use the parser that is already defined for the internal
# command that will back this subcommand.
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand')
for subcommand in _SUBCOMMANDS:
subparsers.add_parser(subcommand,
parents=[COMMANDS['global'][subcommand][1]])
command = parser.parse_args(options.command)
else:
command = None
return options, command
def main():
"""The main entry point to alot. It parses the command line and prepares
for the user interface main loop to run."""
options, command = parser()
# logging
root_logger = logging.getLogger()
for log_handler in root_logger.handlers:
root_logger.removeHandler(log_handler)
root_logger = None
numeric_loglevel = getattr(logging, options.debug_level.upper(), None)
logformat = '%(levelname)s:%(module)s:%(message)s'
logging.basicConfig(level=numeric_loglevel, filename=options.logfile,
filemode='w', format=logformat)
# locate alot config files
if options.config is None:
alotconfig = os.path.join(
os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
'alot', 'config')
if not os.path.exists(alotconfig):
alotconfig = None
else:
alotconfig = options.config
try:
settings.read_config(alotconfig)
settings.read_notmuch_config(options.notmuch_config)
except (ConfigError, OSError, IOError) as e:
sys.exit(e)
# store options given by config switches to the settings manager:
if options.colour_mode:
settings.set('colourmode', options.colour_mode)
# get ourselves a database manager
indexpath = settings.get_notmuch_setting('database', 'path')
indexpath = options.mailindex_path or indexpath
dbman = DBManager(path=indexpath, ro=options.read_only)
# determine what to do
if command is None:
try:
cmdstring = settings.get('initial_command')
except CommandParseError as err:
sys.exit(err)
elif command.subcommand in _SUBCOMMANDS:
cmdstring = ' '.join(options.command)
# set up and start interface
UI(dbman, cmdstring)
# run the exit hook
exit_hook = settings.get_hook('exit')
if exit_hook is not None:
exit_hook()
if __name__ == "__main__":
main()
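# Illustrative sketch (not part of alot): parser() above parses the global
# options first, collects everything else via argparse.REMAINDER, and then
# feeds that remainder to a second parser that only knows the subcommands.
# The standalone example below shows the same two-pass pattern; the option
# and subcommand names are made up for illustration.
def _two_pass_parse(argv):
    import argparse
    first = argparse.ArgumentParser()
    first.add_argument('-v', '--verbose', action='store_true')
    # Everything after the known global options is collected verbatim ...
    first.add_argument('command', nargs=argparse.REMAINDER)
    options = first.parse_args(argv)
    command = None
    if options.command:
        # ... and parsed again with a dedicated subcommand parser.
        second = argparse.ArgumentParser()
        subparsers = second.add_subparsers(dest='subcommand')
        subparsers.add_parser('search').add_argument('query', nargs='*')
        command = second.parse_args(options.command)
    return options, command
# Example: _two_pass_parse(['-v', 'search', 'tag:unread'])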
|
fnurl/alot
|
alot/__main__.py
|
Python
|
gpl-3.0
| 5,368
|
# Chris Bugg
# 10/1/14
# NOTE: Runs on Python 2.7.6
# UPDATE:
# 10/10/14
# -> Now runs with 8 sub-processes using
# the [a-z,A-Z,0-9] alphabet
#
# 10/12/2014 UPDATE: Ubuntu 14.04, OS X, and Windows 7 results have been committed to this project by Chris H
# Ubuntu GUI ran 1.71 million hashes per minute on Overclocked Tower 4.0GHz
# Ubuntu Terminal ran 1.81 million hashes per minute on Overclocked tower 4.0GHz
# Windows 7 ERROR!!! does not run the code correctly. Didn't even try to crack the hash
# OS X ran 371,000 hashes per second
# 10/13/14
# -> Now runs on Windows systems
# Put another lock on subprocess progress display to solve overwriting on Windows
# (Latest_Stable_Versions)
# 10/24/2013
# Replaced the hashing code with an optimized version, showing around 17% speedup.
# Seeing 1690716 hashes per second on my laptop, up from around 1430000.
# Nick Baum
# 10/27/2014
# The code will work great on any Linux OS running python (can use something better than Ubuntu)
# Tested the code on my laptop and desktop for Chris's chart.
# Laptop running OSX 10.9 i5 @ 2.53GHz 8gb ram, took 388.63 seconds with 383,016 hashes per second
# Set up desktop tower with Intel Dual Core @ 1.8GHz, 4GB RAM, running CentOS, 431.9 seconds with 273,697 hps
# 11/5/2014
# Replaced the numbered process and chunk variables with arrays of the same. Now automatically spawns the correct
# number of processes and chunks as needed for that number.
# Nick Baum
import hashlib
from time import time
from multiprocessing import Process, Pipe, Lock, cpu_count
import os
class DemoCrack():
algorithm = "sha256"
origHash = ''
alphabet = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") #added by chris h
chunks = []
key = ''
alphaChoice = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" # changed by chris h
countey = 0
number_of_processes = cpu_count()
def __init__(self):
#Code to fix the windows errors
"""
Details:
This 'catches' the sub-processes on windows
so they don't execute the constructor, cause
Windows processes are stupid.
"""
if not __name__ == '__main__':
return
os.system('cls' if os.name == 'nt' else 'clear')
self.whatWeGot()
self.getHash()
os.system('cls' if os.name == 'nt' else 'clear')
self.whatWeGot()
self.chunkIt()
start = time()
self.countey += 1
lock = Lock()
parentPipe, childPipe = Pipe()
children = []
for i in range(0, self.number_of_processes):
children.append(Process(target=self.subProcess, args=(childPipe, lock, )))
children[i].start()
for chunk in self.chunks:
parentPipe.send("6")
parentPipe.send(chunk)
count = 0
done = False
rec = 0
while not done:
if count > (self.number_of_processes - 1):
for i in range(0, self.number_of_processes):
children[i].join()
print "No Dice!"
done = True
else:
rec = parentPipe.recv()
if rec == "found":
self.countey = parentPipe.recv()
for i in range(0, self.number_of_processes):
children[i].terminate()
done = True
count += 1
elapsed = (time() - start)
print "That took: ", elapsed, " seconds."
speed = (self.number_of_processes * int(self.countey)) / elapsed
if rec == "found":
print "At about: ", speed, " hashes per second."
exit = raw_input("Hit (Enter/Return) to quit ")
def subProcess(self, pipe, lock):
lock.acquire()
loops = pipe.recv()
alphabet = pipe.recv()
lock.release()
if self.looper6(alphabet, lock) == True:
lock.acquire()
pipe.send("found")
pipe.send(self.countey)
pipe.close()
lock.release()
else:
lock.acquire()
pipe.send("not found")
pipe.close()
lock.release()
def chunkIt(self):
chunky = [self.alphabet[i::self.number_of_processes] for i in range(self.number_of_processes)]
for chunk in chunky:
self.chunks.append(chunk)
def getHash(self):
key = raw_input("What's the 6 LowerCase-Letter Key: ")
self.origHash = hashlib.new(self.algorithm, key).hexdigest()
print "The Key you entered was: ", key
print "Which has a hash of: ", self.origHash
def whatWeGot(self):
print "**********************************"
print "Here's what we've got so far: "
print
print "Key is: ", self.key
print "Hash is: ", self.origHash
print "Searching: ", self.alphaChoice
print "**********************************"
def isSolution(self, key):
possible = hashlib.new(self.algorithm, key).hexdigest()
if possible == self.origHash:
print
print "Solution found!"
print "Key is: ", key
print "Which has a hash of: ", possible
return True
else:
return False
def looper6(self, alphabet, lock):
for x in alphabet:
#Processes were overwriting, so locking this print too --Latest_Stable_Versions
lock.acquire()
print "Searching ...", x, "*****"
lock.release()
for y in self.alphabet:
for z in self.alphabet:
for a in self.alphabet:
for b in self.alphabet:
for c in self.alphabet:
self.countey += 1
key = x + y + z + a + b + c
if self.isSolution(key):
return True
return False
DemoCrack()
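# Illustrative sketch (not part of the original script): chunkIt() above splits
# the alphabet across workers with extended slicing, so each worker searches
# keys whose first character comes from its own slice. The same idiom in
# isolation (the function name is hypothetical):
def chunk_alphabet(alphabet, n_workers):
    # Worker i gets alphabet[i], alphabet[i + n_workers], alphabet[i + 2 * n_workers], ...
    return [alphabet[i::n_workers] for i in range(n_workers)]
# Example: chunk_alphabet(list("abcdef"), 2) == [['a', 'c', 'e'], ['b', 'd', 'f']]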
|
COCS4950G7/COSC4950
|
Source/demoCrack3.py
|
Python
|
gpl-3.0
| 6,305
|
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test.support':
raise ImportError('support must be imported from the test package')
# Brython: asyncio is not available
# import asyncio.events
import collections.abc
import contextlib
import errno
import faulthandler
import fnmatch
import functools
import gc
import glob
import hashlib
import importlib
import importlib.util
import locale
import logging.handlers
import nntplib
import os
import platform
import re
import shutil
import socket
import stat
import struct
import subprocess
import sys
import sysconfig
import tempfile
import _thread
import threading
import time
import types
import unittest
import urllib.error
import warnings
from .testresult import get_test_runner
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
try:
import zlib
except ImportError:
zlib = None
try:
import gzip
except ImportError:
gzip = None
try:
import bz2
except ImportError:
bz2 = None
try:
import lzma
except ImportError:
lzma = None
try:
import resource
except ImportError:
resource = None
try:
import _hashlib
except ImportError:
_hashlib = None
__all__ = [
# globals
"PIPE_MAX_SIZE", "verbose", "max_memuse", "use_resources", "failfast",
# exceptions
"Error", "TestFailed", "TestDidNotRun", "ResourceDenied",
# imports
"import_module", "import_fresh_module", "CleanImport",
# modules
"unload", "forget",
# io
"record_original_stdout", "get_original_stdout", "captured_stdout",
"captured_stdin", "captured_stderr",
# filesystem
"TESTFN", "SAVEDCWD", "unlink", "rmtree", "temp_cwd", "findfile",
"create_empty_file", "can_symlink", "fs_is_case_insensitive",
# unittest
"is_resource_enabled", "requires", "requires_freebsd_version",
"requires_linux_version", "requires_mac_ver", "requires_hashdigest",
"check_syntax_error", "check_syntax_warning",
"TransientResource", "time_out", "socket_peer_reset", "ioerror_peer_reset",
"transient_internet", "BasicTestRunner", "run_unittest", "run_doctest",
"skip_unless_symlink", "requires_gzip", "requires_bz2", "requires_lzma",
"bigmemtest", "bigaddrspacetest", "cpython_only", "get_attribute",
"requires_IEEE_754", "skip_unless_xattr", "requires_zlib",
"anticipate_failure", "load_package_tests", "detect_api_mismatch",
"check__all__", "skip_unless_bind_unix_socket", "skip_if_buggy_ucrt_strfptime",
"ignore_warnings",
# sys
"is_jython", "is_android", "check_impl_detail", "unix_shell",
"setswitchinterval",
# network
"HOST", "IPV6_ENABLED", "find_unused_port", "bind_port", "open_urlresource",
"bind_unix_socket",
# processes
'temp_umask', "reap_children",
# logging
"TestHandler",
# threads
"threading_setup", "threading_cleanup", "reap_threads", "start_threads",
# miscellaneous
"check_warnings", "check_no_resource_warning", "check_no_warnings",
"EnvironmentVarGuard",
"run_with_locale", "swap_item",
"swap_attr", "Matcher", "set_memlimit", "SuppressCrashReport", "sortdict",
"run_with_tz", "PGO", "missing_compiler_executable", "fd_count",
"ALWAYS_EQ", "LARGEST", "SMALLEST"
]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class TestDidNotRun(Error):
"""Test did not run any subtests."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect.
"""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def ignore_warnings(*, category):
"""Decorator to suppress deprecation warnings.
Use of context managers to hide warnings makes diffs
more noisy and tools like 'git blame' less useful.
"""
def decorator(test):
@functools.wraps(test)
def wrapper(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=category)
return test(self, *args, **kwargs)
return wrapper
return decorator
def import_module(name, deprecated=False, *, required_on=()):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed. If a module is required on a platform but optional for
others, set required_on to an iterable of platform prefixes which will be
compared against sys.platform.
"""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError as msg:
if sys.platform.startswith(tuple(required_on)):
raise
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported.
"""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise.
"""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def anticipate_failure(condition):
"""Decorator to mark a test that is known to be broken in some cases
Any use of this decorator should have a comment identifying the
associated tracker issue.
"""
if condition:
return unittest.expectedFailure
return lambda f: f
def load_package_tests(pkg_dir, loader, standard_tests, pattern):
"""Generic load_tests implementation for simple test packages.
Most packages can implement load_tests using this function as follows:
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
"""
if pattern is None:
pattern = "test*"
top_dir = os.path.dirname( # Lib
os.path.dirname( # test
os.path.dirname(__file__))) # support
package_tests = loader.discover(start_dir=pkg_dir,
top_level_dir=top_dir,
pattern=pattern)
standard_tests.addTests(package_tests)
return standard_tests
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Import and return a module, deliberately bypassing sys.modules.
This function imports and returns a fresh copy of the named Python module
by removing the named module from sys.modules before doing the import.
Note that unlike reload, the original module is not affected by
this operation.
*fresh* is an iterable of additional module names that are also removed
from the sys.modules cache before doing the import.
*blocked* is an iterable of module names that are replaced with None
in the module cache during the import to ensure that attempts to import
them raise ImportError.
The named module and any modules named in the *fresh* and *blocked*
parameters are saved before starting the import and then reinserted into
sys.modules when the fresh import is complete.
Module and package deprecation messages are suppressed during this import
if *deprecated* is True.
This function will raise ImportError if the named module cannot be
imported.
"""
# NOTE: test_heapq, test_json and test_warnings include extra sanity checks
# to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
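# Illustrative usage sketch (not part of the original module): import a fresh,
# pure-Python copy of heapq with its C accelerator blocked, as described in
# the docstring above. The module names are just an example.
def _example_import_fresh_heapq():
    py_heapq = import_fresh_module('heapq', blocked=['_heapq'])
    return py_heapq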
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("object %r has no attribute %r" % (obj, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
junit_xml_list = None # list of testsuite XML elements
failfast = False
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
def _force_run(path, func, *args):
try:
return func(*args)
except OSError as err:
if verbose >= 2:
print('%s: %s' % (err.__class__.__name__, err))
print('re-run %s%r' % (func.__name__, args))
os.chmod(path, stat.S_IRWXU)
return func(*args)
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
# Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in _force_run(path, os.listdir, path):
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError as exc:
print("support.rmtree(): os.lstat(%r) failed with %s" % (fullname, exc),
file=sys.__stderr__)
mode = 0
if stat.S_ISDIR(mode):
_waitfor(_rmtree_inner, fullname, waitall=True)
_force_run(fullname, os.rmdir, fullname)
else:
_force_run(fullname, os.unlink, fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(lambda p: _force_run(p, os.rmdir, p), path)
def _longpath(path):
try:
import ctypes
except ImportError:
# No ctypes means we can't expand paths.
pass
else:
buffer = ctypes.create_unicode_buffer(len(path) * 2)
length = ctypes.windll.kernel32.GetLongPathNameW(path, buffer,
len(buffer))
if length:
return buffer[:length]
return path
else:
_unlink = os.unlink
_rmdir = os.rmdir
def _rmtree(path):
try:
shutil.rmtree(path)
return
except OSError:
pass
def _rmtree_inner(path):
for name in _force_run(path, os.listdir, path):
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_inner(fullname)
_force_run(path, os.rmdir, fullname)
else:
_force_run(path, os.unlink, fullname)
_rmtree_inner(path)
os.rmdir(path)
def _longpath(path):
return path
def unlink(filename):
try:
_unlink(filename)
except (FileNotFoundError, NotADirectoryError):
pass
def rmdir(dirname):
try:
_rmdir(dirname)
except FileNotFoundError:
pass
def rmtree(path):
try:
_rmtree(path)
except FileNotFoundError:
pass
def make_legacy_pyc(source):
"""Move a PEP 3147/488 pyc file to its legacy pyc location.
:param source: The file system path to the source file. The source file
does not need to exist, however the PEP 3147/488 pyc file must exist.
:return: The file system path to the legacy pyc file.
"""
pyc_file = importlib.util.cache_from_source(source)
up_one = os.path.dirname(os.path.abspath(source))
legacy_pyc = os.path.join(up_one, source + 'c')
os.rename(pyc_file, legacy_pyc)
return legacy_pyc
def forget(modname):
"""'Forget' a module was ever imported.
This removes the module from sys.modules and deletes any PEP 3147/488 or
legacy .pyc files.
"""
unload(modname)
for dirname in sys.path:
source = os.path.join(dirname, modname + '.py')
# It doesn't matter if they exist or not, unlink all possible
# combinations of PEP 3147/488 and legacy pyc files.
unlink(source + 'c')
for opt in ('', 1, 2):
unlink(importlib.util.cache_from_source(source, optimization=opt))
# Check whether a gui is actually available
def _is_gui_available():
if hasattr(_is_gui_available, 'result'):
return _is_gui_available.result
reason = None
if sys.platform.startswith('win'):
# if Python is running as a service (such as the buildbot service),
# gui interaction may be disallowed
import ctypes
import ctypes.wintypes
UOI_FLAGS = 1
WSF_VISIBLE = 0x0001
class USEROBJECTFLAGS(ctypes.Structure):
_fields_ = [("fInherit", ctypes.wintypes.BOOL),
("fReserved", ctypes.wintypes.BOOL),
("dwFlags", ctypes.wintypes.DWORD)]
dll = ctypes.windll.user32
h = dll.GetProcessWindowStation()
if not h:
raise ctypes.WinError()
uof = USEROBJECTFLAGS()
needed = ctypes.wintypes.DWORD()
res = dll.GetUserObjectInformationW(h,
UOI_FLAGS,
ctypes.byref(uof),
ctypes.sizeof(uof),
ctypes.byref(needed))
if not res:
raise ctypes.WinError()
if not bool(uof.dwFlags & WSF_VISIBLE):
reason = "gui not available (WSF_VISIBLE flag not set)"
elif sys.platform == 'darwin':
# The Aqua Tk implementations on OS X can abort the process if
# being called in an environment where a window server connection
# cannot be made, for instance when invoked by a buildbot or ssh
# process not running under the same user id as the current console
# user. To avoid that, raise an exception if the window manager
# connection is not available.
from ctypes import cdll, c_int, pointer, Structure
from ctypes.util import find_library
app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
if app_services.CGMainDisplayID() == 0:
reason = "gui tests cannot run without OS X window manager"
else:
class ProcessSerialNumber(Structure):
_fields_ = [("highLongOfPSN", c_int),
("lowLongOfPSN", c_int)]
psn = ProcessSerialNumber()
psn_p = pointer(psn)
if ( (app_services.GetCurrentProcess(psn_p) < 0) or
(app_services.SetFrontProcess(psn_p) < 0) ):
reason = "cannot run without OS X gui process"
# check on every platform whether tkinter can actually do anything
if not reason:
try:
from tkinter import Tk
root = Tk()
root.withdraw()
root.update()
root.destroy()
except Exception as e:
err_string = str(e)
if len(err_string) > 50:
err_string = err_string[:50] + ' [...]'
reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
err_string)
_is_gui_available.reason = reason
_is_gui_available.result = not reason
return _is_gui_available.result
def is_resource_enabled(resource):
"""Test whether a resource is enabled.
Known resources are set by regrtest.py. If not running under regrtest.py,
all resources are assumed enabled unless use_resources has been set.
"""
return use_resources is None or resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available."""
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the %r resource not enabled" % resource
raise ResourceDenied(msg)
if resource == 'gui' and not _is_gui_available():
raise ResourceDenied(_is_gui_available.reason)
def _requires_unix_version(sysname, min_version):
"""Decorator raising SkipTest if the OS is `sysname` and the version is less
than `min_version`.
For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
the FreeBSD version is less than 7.2.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if platform.system() == sysname:
version_txt = platform.release().split('-', 1)[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
def requires_freebsd_version(*min_version):
"""Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is
less than `min_version`.
For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
version is less than 7.2.
"""
return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version):
"""Decorator raising SkipTest if the OS is Linux and the Linux version is
less than `min_version`.
For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux
version is less than 2.6.32.
"""
return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version):
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
version is less than min_version.
For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
is less than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
def requires_hashdigest(digestname, openssl=None):
"""Decorator raising SkipTest if a hashing algorithm is not available
The hashing algorithm could be missing or blocked by a strict crypto
policy.
If 'openssl' is True, then the decorator checks that OpenSSL provides
the algorithm. Otherwise the check falls back to built-in
implementations.
ValueError: [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS
ValueError: unsupported hash type md4
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
if openssl and _hashlib is not None:
_hashlib.new(digestname)
else:
hashlib.new(digestname)
except ValueError:
raise unittest.SkipTest(
f"hash digest '{digestname}' is not available."
)
return func(*args, **kwargs)
return wrapper
return decorator
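# Illustrative usage sketch (not part of the original module): skip a test when
# the named digest is unavailable (for example, blocked by a FIPS crypto
# policy). The test class and method names below are hypothetical.
class _ExampleHashDigestTests(unittest.TestCase):
    @requires_hashdigest('md5')
    def test_md5_hexdigest_length(self):
        self.assertEqual(len(hashlib.md5(b"abc").hexdigest()), 32)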
HOST = "localhost"
HOSTv4 = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
OSError will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
with socket.socket(family, socktype) as tempsock:
port = bind_port(tempsock)
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
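# Illustrative usage sketch (not part of the original module): a test that
# needs a listening socket binds it with bind_port() instead of hard-coding a
# port number, for the reasons given in the docstrings above. The function
# name is hypothetical.
def _example_bind_listening_socket():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        port = bind_port(sock)   # the kernel picks a free ephemeral port
        sock.listen()
        return port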
def bind_unix_socket(sock, addr):
"""Bind a unix socket, raising SkipTest if PermissionError is raised."""
assert sock.family == socket.AF_UNIX
try:
sock.bind(addr)
except PermissionError:
sock.close()
raise unittest.SkipTest('cannot bind AF_UNIX sockets')
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
def system_must_validate_cert(f):
"""Skip the test on TLS certificate validation failures."""
@functools.wraps(f)
def dec(*args, **kwargs):
try:
f(*args, **kwargs)
except OSError as e:
if "CERTIFICATE_VERIFY_FAILED" in str(e):
raise unittest.SkipTest("system does not contain "
"necessary certificates")
raise
return dec
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
# A constant likely larger than the underlying OS socket buffer size, to make
# writes blocking.
# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
# for a discussion of this number).
SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
requires_zlib = unittest.skipUnless(zlib, 'requires zlib')
requires_gzip = unittest.skipUnless(gzip, 'requires gzip')
requires_bz2 = unittest.skipUnless(bz2, 'requires bz2')
requires_lzma = unittest.skipUnless(lzma, 'requires lzma')
is_jython = sys.platform.startswith('java')
is_android = hasattr(sys, 'getandroidapilevel')
if sys.platform != 'win32':
unix_shell = '/system/bin/sh' if is_android else '/bin/sh'
else:
unix_shell = None
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# Define the URL of a dedicated HTTP server for the network tests.
# The URL must use clear-text HTTP: no redirection to encrypted HTTPS.
TEST_HTTP_URL = "http://www.pythontest.net"
# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or None if there is no such character.
FS_NONASCII = None
for character in (
# First try printable and common characters to have a readable filename.
# For each character, the encoding list are just example of encodings able
# to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
'\u00E6',
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
'\u0130',
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
'\u0141',
# U+03C6 (Greek Small Letter Phi): cp1253
'\u03C6',
# U+041A (Cyrillic Capital Letter Ka): cp1251
'\u041A',
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
'\u05D0',
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
'\u060C',
# U+062A (Arabic Letter Teh): cp720
'\u062A',
# U+0E01 (Thai Character Ko Kai): cp874
'\u0E01',
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
'\u00A0',
# U+20AC (Euro Sign)
'\u20AC',
):
try:
# If Python is set up to use the legacy 'mbcs' in Windows,
# 'replace' error mode is used, and encode() returns b'?'
# for characters missing in the ANSI codepage
if os.fsdecode(os.fsencode(character)) != character:
raise UnicodeError
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
# In Mac OS X's VFS API file names are, by definition, canonically
# decomposed Unicode, encoded using UTF-8. See QA1173:
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
import unicodedata
TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name == 'nt':
# skip win32s (0) or Windows 9x/ME (1)
if sys.getwindowsversion().platform >= 2:
# Different kinds of characters from various languages to minimize the
# probability that the whole name is encodable to MBCS (issue #9819)
TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80"
try:
TESTFN_UNENCODABLE.encode(TESTFN_ENCODING)
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). '
'Unicode filename tests may not be effective'
% (TESTFN_UNENCODABLE, TESTFN_ENCODING))
TESTFN_UNENCODABLE = None
# Mac OS X denies unencodable filenames (invalid utf-8)
elif sys.platform != 'darwin':
try:
# ascii and utf-8 cannot encode the byte 0xff
b'\xff'.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
# 0xff will be encoded using the surrogate character u+DCFF
TESTFN_UNENCODABLE = TESTFN \
+ b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape')
else:
# File system encoding (eg. ISO-8859-* encodings) can encode
# the byte 0xff. Skip some unicode filename tests.
pass
# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename (ex: the latin1 encoding can decode any byte
# sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks
# to the surrogateescape error handler (PEP 383), but not from the filesystem
# encoding in strict mode.
TESTFN_UNDECODABLE = None
for name in (
# b'\xff' is not decodable by os.fsdecode() with code page 932. Windows
# accepts it when creating a file or a directory, but may refuse to enter
# such a directory (when the bytes name is used). So test b'\xe7' first: it is
# not decodable from cp932.
b'\xe7w\xf0',
# undecodable from ASCII, UTF-8
b'\xff',
# undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
# and cp857
b'\xae\xd5'
# undecodable from UTF-8 (UNIX and Mac OS X)
b'\xed\xb2\x80', b'\xed\xb4\x80',
# undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
# cp1253, cp1254, cp1255, cp1257, cp1258
b'\x81\x98',
):
try:
name.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
TESTFN_UNDECODABLE = os.fsencode(TESTFN) + name
break
if FS_NONASCII:
TESTFN_NONASCII = TESTFN + '-' + FS_NONASCII
else:
TESTFN_NONASCII = None
# Save the initial cwd
SAVEDCWD = os.getcwd()
# Set by libregrtest/main.py so we can skip tests that are not
# useful for PGO
PGO = False
# Set by libregrtest/main.py if we are running the extended (time consuming)
# PGO task. If this is True, PGO is also True.
PGO_EXTENDED = False
@contextlib.contextmanager
def temp_dir(path=None, quiet=False):
"""Return a context manager that creates a temporary directory.
Arguments:
path: the directory to create temporarily. If omitted or None,
defaults to creating a temporary directory using tempfile.mkdtemp.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, if the path is specified and cannot be
created, only a warning is issued.
"""
dir_created = False
if path is None:
path = tempfile.mkdtemp()
dir_created = True
path = os.path.realpath(path)
else:
try:
os.mkdir(path)
dir_created = True
except OSError as exc:
if not quiet:
raise
warnings.warn(f'tests may fail, unable to create '
f'temporary directory {path!r}: {exc}',
RuntimeWarning, stacklevel=3)
if dir_created:
pid = os.getpid()
try:
yield path
finally:
# In case the process forks, let only the parent remove the
# directory. The child has a different process id. (bpo-30028)
if dir_created and pid == os.getpid():
rmtree(path)
@contextlib.contextmanager
def change_cwd(path, quiet=False):
"""Return a context manager that changes the current working directory.
Arguments:
path: the directory to use as the temporary current working directory.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, it issues only a warning and keeps the current
working directory the same.
"""
saved_dir = os.getcwd()
try:
os.chdir(path)
except OSError as exc:
if not quiet:
raise
warnings.warn(f'tests may fail, unable to change the current working '
f'directory to {path!r}: {exc}',
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
"""
Context manager that temporarily creates and changes the CWD.
The function temporarily changes the current working directory
after creating a temporary directory in the current directory with
name *name*. If *name* is None, the temporary directory is
created using tempfile.mkdtemp.
If *quiet* is False (default) and it is not possible to
create or change the CWD, an error is raised. If *quiet* is True,
only a warning is raised and the original CWD is used.
"""
with temp_dir(path=name, quiet=quiet) as temp_path:
with change_cwd(temp_path, quiet=quiet) as cwd_dir:
yield cwd_dir
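# Illustrative usage sketch (not part of the original module): run some code in
# a scratch directory and return to the original one automatically afterwards.
# The function and file names are hypothetical.
def _example_work_in_temp_cwd():
    with temp_cwd() as scratch_dir:
        with open('data.txt', 'w') as f:   # created inside the scratch dir
            f.write('hello')
        return scratch_dir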
if hasattr(os, "umask"):
@contextlib.contextmanager
def temp_umask(umask):
"""Context manager that temporarily sets the process umask."""
oldmask = os.umask(umask)
try:
yield
finally:
os.umask(oldmask)
# TEST_HOME_DIR refers to the top level directory of the "test" package
# that contains Python's regression test suite
TEST_SUPPORT_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_HOME_DIR = os.path.dirname(TEST_SUPPORT_DIR)
# TEST_DATA_DIR is used as a target download location for remote resources
TEST_DATA_DIR = os.path.join(TEST_HOME_DIR, "data")
def findfile(filename, subdir=None):
"""Try to find a file on sys.path or in the test directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path).
Setting *subdir* indicates a relative path to use to find the file
rather than looking directly in the path directories.
"""
if os.path.isabs(filename):
return filename
if subdir is not None:
filename = os.path.join(subdir, filename)
path = [TEST_HOME_DIR] + sys.path
for dn in path:
fn = os.path.join(dn, filename)
if os.path.exists(fn): return fn
return filename
def create_empty_file(filename):
"""Create an empty file. If the file already exists, truncate it."""
fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(fd)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = sorted(dict.items())
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement, errtext='', *, lineno=None, offset=None):
with testcase.assertRaisesRegex(SyntaxError, errtext) as cm:
compile(statement, '<test string>', 'exec')
err = cm.exception
testcase.assertIsNotNone(err.lineno)
if lineno is not None:
testcase.assertEqual(err.lineno, lineno)
testcase.assertIsNotNone(err.offset)
if offset is not None:
testcase.assertEqual(err.offset, offset)
def check_syntax_warning(testcase, statement, errtext='', *, lineno=1, offset=None):
# Test also that a warning is emitted only once.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', SyntaxWarning)
compile(statement, '<testcase>', 'exec')
testcase.assertEqual(len(warns), 1, warns)
warn, = warns
testcase.assertTrue(issubclass(warn.category, SyntaxWarning), warn.category)
if errtext:
testcase.assertRegex(str(warn.message), errtext)
testcase.assertEqual(warn.filename, '<testcase>')
testcase.assertIsNotNone(warn.lineno)
if lineno is not None:
testcase.assertEqual(warn.lineno, lineno)
# SyntaxWarning should be converted to SyntaxError when raised,
# since the latter contains more information and provides better
# error report.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('error', SyntaxWarning)
check_syntax_error(testcase, statement, errtext,
lineno=lineno, offset=offset)
# No warnings are leaked when a SyntaxError is raised.
testcase.assertEqual(warns, [])
def open_urlresource(url, *args, **kw):
import urllib.request, urllib.parse
check = kw.pop('check', None)
filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
fn = os.path.join(TEST_DATA_DIR, filename)
def check_valid_file(fn):
f = open(fn, *args, **kw)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
if verbose:
print('\tfetching %s ...' % url, file=get_original_stdout())
opener = urllib.request.build_opener()
if gzip:
opener.addheaders.append(('Accept-Encoding', 'gzip'))
f = opener.open(url, timeout=15)
if gzip and f.headers.get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=f)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource %r' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swap the module, we need to look up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if (re.match(msg, str(warning), re.I) and
issubclass(warning.__class__, cat)):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
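# Illustrative usage sketch (not part of the original module): assert that a
# block of code emits a DeprecationWarning whose message matches a pattern.
# The function name and warning text are hypothetical.
def _example_check_deprecation_warning():
    with check_warnings(("spam", DeprecationWarning)):
        warnings.warn("spam and eggs", DeprecationWarning)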
@contextlib.contextmanager
def check_no_warnings(testcase, message='', category=Warning, force_gc=False):
"""Context manager to check that no warnings are emitted.
This context manager enables a given warning within its scope
and checks that no warnings are emitted even with that warning
enabled.
If force_gc is True, a garbage collection is attempted before checking
for warnings. This may help to catch warnings emitted when objects
are deleted, such as ResourceWarning.
Other keyword arguments are passed to warnings.filterwarnings().
"""
with warnings.catch_warnings(record=True) as warns:
warnings.filterwarnings('always',
message=message,
category=category)
yield
if force_gc:
gc_collect()
testcase.assertEqual(warns, [])
@contextlib.contextmanager
def check_no_resource_warning(testcase):
"""Context manager to check that no ResourceWarning is emitted.
Usage:
with check_no_resource_warning(self):
f = open(...)
...
del f
You must remove the object which may emit ResourceWarning before
the end of the context manager.
"""
with check_no_warnings(testcase, category=ResourceWarning, force_gc=True):
yield
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
# In that case, we also need to delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.abc.MutableMapping):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def __iter__(self):
return iter(self._environ)
def __len__(self):
return len(self._environ)
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
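# Illustrative usage sketch (editor's addition): EnvironmentVarGuard records
# every change so it can restore os.environ on exit, e.g.
#
#     with EnvironmentVarGuard() as env:
#         env.set("LANG", "C")        # or: env["LANG"] = "C"
#         env.unset("PYTHONPATH")     # or: del env["PYTHONPATH"]
#         ...run code that reads os.environ...
#     # on exit, LANG and PYTHONPATH return to their original values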
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
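# Illustrative usage sketch (editor's addition): temporarily extend the
# import path, e.g. to import a helper module kept next to the tests
# (the directory name is hypothetical):
#
#     with DirsOnSysPath("/tmp/extra_modules"):
#         import some_helper          # found through the extra directory
#     # sys.path is restored afterwards, even if it was replaced inside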
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.items():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(OSError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
def get_socket_conn_refused_errs():
"""
Get the different socket error numbers ('errno') which can be received
when a connection is refused.
"""
errors = [errno.ECONNREFUSED]
if hasattr(errno, 'ENETUNREACH'):
# On Solaris, ENETUNREACH is returned sometimes instead of ECONNREFUSED
errors.append(errno.ENETUNREACH)
if hasattr(errno, 'EADDRNOTAVAIL'):
# bpo-31910: socket.create_connection() fails randomly
# with EADDRNOTAVAIL on Travis CI
errors.append(errno.EADDRNOTAVAIL)
if hasattr(errno, 'EHOSTUNREACH'):
# bpo-37583: The destination host cannot be reached
errors.append(errno.EHOSTUNREACH)
if not IPV6_ENABLED:
errors.append(errno.EAFNOSUPPORT)
return errors
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
# socket.create_connection() fails randomly with
# EADDRNOTAVAIL on Travis CI.
('EADDRNOTAVAIL', 99),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
(isinstance(err, urllib.error.HTTPError) and
500 <= err.code <= 599) or
(isinstance(err, urllib.error.URLError) and
(("ConnectionRefusedError" in err.reason) or
("TimeoutError" in err.reason) or
("EOFError" in err.reason))) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except nntplib.NNTPTemporaryError as err:
if verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
except OSError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], OSError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise OSError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], OSError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import io
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, io.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case with reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
@contextlib.contextmanager
def disable_gc():
have_gc = gc.isenabled()
gc.disable()
try:
yield
finally:
if have_gc:
gc.enable()
def python_is_optimized():
"""Find if Python was built with optimizations."""
cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
final_opt = ""
for opt in cflags.split():
if opt.startswith('-O'):
final_opt = opt
return final_opt not in ('', '-O0', '-Og')
_header = 'nP'
_align = '0n'
if hasattr(sys, "getobjects"):
_header = '2P' + _header
_align = '0P'
_vheader = _header + 'n'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + _align)
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + _align)
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
import _testcapi
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
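# Illustrative usage sketch (editor's addition): the decorator tries each
# locale in turn and restores the original one afterwards, e.g.
#
#     @run_with_locale('LC_NUMERIC', 'fr_FR.UTF-8', 'de_DE.UTF-8', '')
#     def test_float_formatting(self):
#         ...   # runs under the first of those locales that could be set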
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.
def run_with_tz(tz):
def decorator(func):
def inner(*args, **kwds):
try:
tzset = time.tzset
except AttributeError:
raise unittest.SkipTest("tzset required")
if 'TZ' in os.environ:
orig_tz = os.environ['TZ']
else:
orig_tz = None
os.environ['TZ'] = tz
tzset()
# now run the function, resetting the tz on exceptions
try:
return func(*args, **kwds)
finally:
if orig_tz is None:
del os.environ['TZ']
else:
os.environ['TZ'] = orig_tz
time.tzset()
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
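# Illustrative usage sketch (editor's addition): force a specific timezone
# for one test (the decorator skips the test where time.tzset() is missing):
#
#     @run_with_tz('UTC')
#     def test_epoch_is_midnight_utc(self):
#         self.assertEqual(time.localtime(0).tm_hour, time.gmtime(0).tm_hour)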
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
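# Illustrative usage sketch (editor's addition): regrtest's -M option feeds
# its value to set_memlimit(), e.g.
#
#     set_memlimit('4G')    # max_memuse becomes 4 * 1024**3 on 64-bit builds
#
# Values below 2 GiB raise ValueError as too low to be useful; requests above
# sys.maxsize are clamped, while real_max_memuse keeps the requested figure.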
class _MemoryWatchdog:
"""An object which periodically watches the process' memory consumption
and prints it out.
"""
def __init__(self):
self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
self.started = False
def start(self):
try:
f = open(self.procfile, 'r')
except OSError as e:
warnings.warn('/proc not available for stats: {}'.format(e),
RuntimeWarning)
sys.stderr.flush()
return
with f:
watchdog_script = findfile("memory_watchdog.py")
self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
stdin=f,
stderr=subprocess.DEVNULL)
self.started = True
def stop(self):
if self.started:
self.mem_watchdog.terminate()
self.mem_watchdog.wait()
def bigmemtest(size, memuse, dry_run=True):
"""Decorator for bigmem tests.
'size' is a requested size for the test (in arbitrary, test-interpreted
units.) 'memuse' is the number of bytes per unit for the test, or a good
estimate of it. For example, a test that needs two byte buffers, of 4 GiB
each, could be decorated with @bigmemtest(size=_4G, memuse=2).
The 'size' argument is normally passed to the decorated test method as an
extra argument. If 'dry_run' is true, the value passed to the test method
may be less than the requested value. If 'dry_run' is false, it means the
test doesn't support dummy runs when -M is not specified.
"""
def decorator(f):
def wrapper(self):
size = wrapper.size
memuse = wrapper.memuse
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (size * memuse / (1024 ** 3)))
if real_max_memuse and verbose:
print()
print(" ... expected peak memory use: {peak:.1f}G"
.format(peak=size * memuse / (1024 ** 3)))
watchdog = _MemoryWatchdog()
watchdog.start()
else:
watchdog = None
try:
return f(self, maxsize)
finally:
if watchdog:
watchdog.stop()
wrapper.size = size
wrapper.memuse = memuse
return wrapper
return decorator
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31:
raise unittest.SkipTest(
"not enough memory: try a 32-bit build instead")
else:
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (MAX_Py_ssize_t / (1024 ** 3)))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if resource == 'gui' and not _is_gui_available():
return unittest.skip(_is_gui_available.reason)
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def no_tracing(func):
"""Decorator to temporarily turn off tracing for the duration of a test."""
if not hasattr(sys, 'gettrace'):
return func
else:
@functools.wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
def refcount_test(test):
"""Decorator for tests which involve reference counting.
To start, the decorator does not run the test if it is not run by CPython.
After that, any trace function is unset during the test to prevent
unexpected refcounts caused by the trace function.
"""
return no_tracing(cpython_only(test))
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
newtests = []
for test in suite._tests:
if isinstance(test, unittest.TestSuite):
_filter_suite(test, pred)
newtests.append(test)
else:
if pred(test):
newtests.append(test)
suite._tests = newtests
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
runner = get_test_runner(sys.stdout,
verbosity=verbose,
capture_output=(junit_xml_list is not None))
result = runner.run(suite)
if junit_xml_list is not None:
junit_xml_list.append(result.get_xml_element())
if not result.testsRun and not result.skipped:
raise TestDidNotRun
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose: err += "; run in verbose mode for details"
raise TestFailed(err)
# By default, don't filter tests
_match_test_func = None
_match_test_patterns = None
def match_test(test):
# Function used by support.run_unittest() and regrtest --list-cases
if _match_test_func is None:
return True
else:
return _match_test_func(test.id())
def _is_full_match_test(pattern):
# If a pattern contains at least one dot, it's considered
# as a full test identifier.
# Example: 'test.test_os.FileTests.test_access'.
#
# Reject patterns which contain fnmatch patterns: '*', '?', '[...]'
# or '[!...]'. For example, reject 'test_access*'.
return ('.' in pattern) and (not re.search(r'[?*\[\]]', pattern))
def set_match_tests(patterns):
global _match_test_func, _match_test_patterns
if patterns == _match_test_patterns:
# No change: no need to recompile patterns.
return
if not patterns:
func = None
# set_match_tests(None) behaves as set_match_tests(())
patterns = ()
elif all(map(_is_full_match_test, patterns)):
# Simple case: all patterns are full test identifier.
# The test.bisect_cmd utility only uses such full test identifiers.
func = set(patterns).__contains__
else:
regex = '|'.join(map(fnmatch.translate, patterns))
# The search *is* case sensitive on purpose:
# don't use flags=re.IGNORECASE
regex_match = re.compile(regex).match
def match_test_regex(test_id):
if regex_match(test_id):
# The regex matches the whole identifier, for example
# 'test.test_os.FileTests.test_access'.
return True
else:
# Try to match parts of the test identifier.
# For example, split 'test.test_os.FileTests.test_access'
# into: 'test', 'test_os', 'FileTests' and 'test_access'.
return any(map(regex_match, test_id.split(".")))
func = match_test_regex
# Create a copy since patterns can be mutable and so modified later
_match_test_patterns = tuple(patterns)
_match_test_func = func
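# Illustrative usage sketch (editor's addition): patterns may be full test
# identifiers (matched by exact membership) or fnmatch-style patterns, e.g.
#
#     set_match_tests(['test.test_os.FileTests.test_access'])   # exact ids
#     set_match_tests(['test_access*', '*.FileTests.*'])        # patterns
#     set_match_tests(None)   # clear the filter; match_test() returns True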
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
_filter_suite(suite, match_test)
_run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
# Rather than trying to enumerate all the cases where docstrings may be
# disabled, we just check for that directly
def _check_docstrings():
"""Just used to check if docstrings are enabled"""
MISSING_C_DOCSTRINGS = (check_impl_detail() and
sys.platform != 'win32' and
not sysconfig.get_config_var('WITH_DOC_STRINGS'))
HAVE_DOCSTRINGS = (_check_docstrings.__doc__ is not None and
not MISSING_C_DOCSTRINGS)
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None, optionflags=0):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
if verbose:
print('doctest (%s) ... %d tests with zero failures' %
(module.__name__, t))
return f, t
#=======================================================================
# Support for saving and restoring the imported modules.
def modules_setup():
return sys.modules.copy(),
def modules_cleanup(oldmodules):
# Encoders/decoders are registered permanently within the internal
# codec cache. If we destroy the corresponding modules their
# globals will be set to None which will trip up the cached functions.
encodings = [(k, v) for k, v in sys.modules.items()
if k.startswith('encodings.')]
sys.modules.clear()
sys.modules.update(encodings)
# XXX: This kind of problem can affect more than just encodings. In particular
# extension modules (such as _ssl) don't cope with reloading properly.
# Really, test modules should be cleaning out the test specific modules they
# know they added (ala test_runpy) rather than relying on this function (as
# test_importhooks and test_pkg do currently).
# Implicitly imported *real* modules should be left alone (see issue 10556).
sys.modules.update(oldmodules)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# Flag used by saved_test_environment of test.libregrtest.save_env,
# to check if a test modified the environment. The flag should be set to False
# before running a new test.
#
# For example, threading_cleanup() sets the flag if the function fails
# to clean up threads.
environment_altered = False
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
return _thread._count(), threading._dangling.copy()
def threading_cleanup(*original_values):
global environment_altered
_MAX_COUNT = 100
for count in range(_MAX_COUNT):
values = _thread._count(), threading._dangling
if values == original_values:
break
if not count:
# Display a warning at the first iteration
environment_altered = True
dangling_threads = values[1]
print("Warning -- threading_cleanup() failed to cleanup "
"%s threads (count: %s, dangling: %s)"
% (values[0] - original_values[0],
values[0], len(dangling_threads)),
file=sys.stderr)
for thread in dangling_threads:
print(f"Dangling thread: {thread!r}", file=sys.stderr)
sys.stderr.flush()
# Don't hold references to threads
dangling_threads = None
values = None
time.sleep(0.01)
gc_collect()
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
"""
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
@contextlib.contextmanager
def wait_threads_exit(timeout=60.0):
"""
bpo-31234: Context manager to wait until all threads created in the with
statement exit.
Use _thread._count() to check if threads exited. Indirectly, wait until
threads exit the internal t_bootstrap() C function of the _thread module.
threading_setup() and threading_cleanup() are designed to emit a warning
if a test leaves running threads in the background. This context manager
is designed to clean up threads started by _thread.start_new_thread(),
which doesn't allow waiting for thread exit, whereas threading.Thread has a
join() method.
"""
old_count = _thread._count()
try:
yield
finally:
start_time = time.monotonic()
deadline = start_time + timeout
while True:
count = _thread._count()
if count <= old_count:
break
if time.monotonic() > deadline:
dt = time.monotonic() - start_time
msg = (f"wait_threads() failed to cleanup {count - old_count} "
f"threads after {dt:.1f} seconds "
f"(count: {count}, old count: {old_count})")
raise AssertionError(msg)
time.sleep(0.010)
gc_collect()
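# Illustrative usage sketch (editor's addition): pair the context manager
# with _thread.start_new_thread(), which offers no join():
#
#     with wait_threads_exit():
#         _thread.start_new_thread(worker, ())   # 'worker' is hypothetical
#         ...assertions while the thread is running...
#     # on exit, waits (up to the timeout) for the thread count to drop back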
def join_thread(thread, timeout=30.0):
"""Join a thread. Raise an AssertionError if the thread is still alive
after timeout seconds.
"""
thread.join(timeout)
if thread.is_alive():
msg = f"failed to join the thread in {timeout:.1f} seconds"
raise AssertionError(msg)
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
global environment_altered
# Need os.waitpid(-1, os.WNOHANG): Windows is not supported
if not (hasattr(os, 'waitpid') and hasattr(os, 'WNOHANG')):
return
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
while True:
try:
# Read the exit status of any child process which already completed
pid, status = os.waitpid(-1, os.WNOHANG)
except OSError:
break
if pid == 0:
break
print("Warning -- reap_children() reaped child process %s"
% pid, file=sys.stderr)
environment_altered = True
@contextlib.contextmanager
def start_threads(threads, unlock=None):
threads = list(threads)
started = []
try:
try:
for t in threads:
t.start()
started.append(t)
except:
if verbose:
print("Can't start %d threads, only %d threads started" %
(len(threads), len(started)))
raise
yield
finally:
try:
if unlock:
unlock()
endtime = starttime = time.monotonic()
for timeout in range(1, 16):
endtime += 60
for t in started:
t.join(max(endtime - time.monotonic(), 0.01))
started = [t for t in started if t.is_alive()]
if not started:
break
if verbose:
print('Unable to join %d threads during a period of '
'%d minutes' % (len(started), timeout))
finally:
started = [t for t in started if t.is_alive()]
if started:
faulthandler.dump_traceback(sys.stdout)
raise AssertionError('Unable to join %d threads' % len(started))
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
The old value (or None if it doesn't exist) will be assigned to the
target of the "as" clause, if there is one.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield real_val
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
if hasattr(obj, attr):
delattr(obj, attr)
@contextlib.contextmanager
def swap_item(obj, item, new_val):
"""Temporary swap out an item with a new object.
Usage:
with swap_item(obj, "item", 5):
...
This will set obj["item"] to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `item` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
The old value (or None if it doesn't exist) will be assigned to the
target of the "as" clause, if there is one.
"""
if item in obj:
real_val = obj[item]
obj[item] = new_val
try:
yield real_val
finally:
obj[item] = real_val
else:
obj[item] = new_val
try:
yield
finally:
if item in obj:
del obj[item]
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
return stderr
requires_type_collecting = unittest.skipIf(hasattr(sys, 'getcounts'),
'types are immortal if COUNT_ALLOCS is defined')
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
return subprocess._args_from_interpreter_flags()
def optim_args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
optimization settings in sys.flags."""
return subprocess._optim_args_from_interpreter_flags()
#============================================================
# Support for assertions about logging.
#============================================================
class TestHandler(logging.handlers.BufferingHandler):
def __init__(self, matcher):
# BufferingHandler takes a "capacity" argument
# so as to know when to flush. As we're overriding
# shouldFlush anyway, we can set a capacity of zero.
# You can call flush() manually to clear out the
# buffer.
logging.handlers.BufferingHandler.__init__(self, 0)
self.matcher = matcher
def shouldFlush(self):
return False
def emit(self, record):
self.format(record)
self.buffer.append(record.__dict__)
def matches(self, **kwargs):
"""
Look for a saved dict whose keys/values match the supplied arguments.
"""
result = False
for d in self.buffer:
if self.matcher.matches(d, **kwargs):
result = True
break
return result
class Matcher(object):
_partial_matches = ('msg', 'message')
def matches(self, d, **kwargs):
"""
Try to match a single dict with the supplied arguments.
Keys whose values are strings and which are in self._partial_matches
will be checked for partial (i.e. substring) matches. You can extend
this scheme to (for example) do regular expression matching, etc.
"""
result = True
for k in kwargs:
v = kwargs[k]
dv = d.get(k)
if not self.match_value(k, dv, v):
result = False
break
return result
def match_value(self, k, dv, v):
"""
Try to match a single stored value (dv) with a supplied value (v).
"""
if type(v) != type(dv):
result = False
elif type(dv) is not str or k not in self._partial_matches:
result = (v == dv)
else:
result = dv.find(v) >= 0
return result
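# Illustrative usage sketch (editor's addition): wire a TestHandler with a
# Matcher onto a logger and assert on what was logged (the logger name is
# hypothetical):
#
#     handler = TestHandler(Matcher())
#     logger = logging.getLogger("demo")
#     logger.addHandler(handler)
#     logger.warning("disk %s is full", "/dev/sda1")
#     assert handler.matches(levelname="WARNING", message="disk")
#     # 'message' is a substring match, see Matcher._partial_matches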
_can_symlink = None
def can_symlink():
global _can_symlink
if _can_symlink is not None:
return _can_symlink
symlink_path = TESTFN + "can_symlink"
try:
os.symlink(TESTFN, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
_can_symlink = can
return can
def skip_unless_symlink(test):
"""Skip decorator for tests that require functional symlink"""
ok = can_symlink()
msg = "Requires functional symlink implementation"
return test if ok else unittest.skip(msg)(test)
_buggy_ucrt = None
def skip_if_buggy_ucrt_strfptime(test):
"""
Skip decorator for tests that use buggy strptime/strftime
If the UCRT bugs are present time.localtime().tm_zone will be
an empty string, otherwise we assume the UCRT bugs are fixed
See bpo-37552 [Windows] strptime/strftime return invalid
results with UCRT version 17763.615
"""
global _buggy_ucrt
if _buggy_ucrt is None:
if (sys.platform == 'win32' and
locale.getdefaultlocale()[1] == 'cp65001' and
time.localtime().tm_zone == ''):
_buggy_ucrt = True
else:
_buggy_ucrt = False
return unittest.skip("buggy MSVC UCRT strptime/strftime")(test) if _buggy_ucrt else test
class PythonSymlink:
"""Creates a symlink for the current Python executable"""
def __init__(self, link=None):
self.link = link or os.path.abspath(TESTFN)
self._linked = []
self.real = os.path.realpath(sys.executable)
self._also_link = []
self._env = None
self._platform_specific()
def _platform_specific(self):
pass
if sys.platform == "win32":
def _platform_specific(self):
import _winapi
if os.path.lexists(self.real) and not os.path.exists(self.real):
# App symlink appears to not exist, but we want the
# real executable here anyway
self.real = _winapi.GetModuleFileName(0)
dll = _winapi.GetModuleFileName(sys.dllhandle)
src_dir = os.path.dirname(dll)
dest_dir = os.path.dirname(self.link)
self._also_link.append((
dll,
os.path.join(dest_dir, os.path.basename(dll))
))
for runtime in glob.glob(os.path.join(src_dir, "vcruntime*.dll")):
self._also_link.append((
runtime,
os.path.join(dest_dir, os.path.basename(runtime))
))
self._env = {k.upper(): os.getenv(k) for k in os.environ}
self._env["PYTHONHOME"] = os.path.dirname(self.real)
if sysconfig.is_python_build(True):
self._env["PYTHONPATH"] = os.path.dirname(os.__file__)
def __enter__(self):
os.symlink(self.real, self.link)
self._linked.append(self.link)
for real, link in self._also_link:
os.symlink(real, link)
self._linked.append(link)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
for link in self._linked:
try:
os.remove(link)
except IOError as ex:
if verbose:
print("failed to clean up {}: {}".format(link, ex))
def _call(self, python, args, env, returncode):
cmd = [python, *args]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
r = p.communicate()
if p.returncode != returncode:
if verbose:
print(repr(r[0]))
print(repr(r[1]), file=sys.stderr)
raise RuntimeError(
'unexpected return code: {0} (0x{0:08X})'.format(p.returncode))
return r
def call_real(self, *args, returncode=0):
return self._call(self.real, args, None, returncode)
def call_link(self, *args, returncode=0):
return self._call(self.link, args, self._env, returncode)
_can_xattr = None
def can_xattr():
global _can_xattr
if _can_xattr is not None:
return _can_xattr
if not hasattr(os, "setxattr"):
can = False
else:
tmp_dir = tempfile.mkdtemp()
tmp_fp, tmp_name = tempfile.mkstemp(dir=tmp_dir)
try:
with open(TESTFN, "wb") as fp:
try:
# TESTFN & tempfile may use different file systems with
# different capabilities
os.setxattr(tmp_fp, b"user.test", b"")
os.setxattr(tmp_name, b"trusted.foo", b"42")
os.setxattr(fp.fileno(), b"user.test", b"")
# Kernels < 2.6.39 don't respect setxattr flags.
kernel_version = platform.release()
m = re.match(r"2.6.(\d{1,2})", kernel_version)
can = m is None or int(m.group(1)) >= 39
except OSError:
can = False
finally:
unlink(TESTFN)
unlink(tmp_name)
rmdir(tmp_dir)
_can_xattr = can
return can
def skip_unless_xattr(test):
"""Skip decorator for tests that require functional extended attributes"""
ok = can_xattr()
msg = "no non-broken extended attribute support"
return test if ok else unittest.skip(msg)(test)
def skip_if_pgo_task(test):
"""Skip decorator for tests not run in (non-extended) PGO task"""
ok = not PGO or PGO_EXTENDED
msg = "Not run for (non-extended) PGO task"
return test if ok else unittest.skip(msg)(test)
_bind_nix_socket_error = None
def skip_unless_bind_unix_socket(test):
"""Decorator for tests requiring a functional bind() for unix sockets."""
if not hasattr(socket, 'AF_UNIX'):
return unittest.skip('No UNIX Sockets')(test)
global _bind_nix_socket_error
if _bind_nix_socket_error is None:
path = TESTFN + "can_bind_unix_socket"
with socket.socket(socket.AF_UNIX) as sock:
try:
sock.bind(path)
_bind_nix_socket_error = False
except OSError as e:
_bind_nix_socket_error = e
finally:
unlink(path)
if _bind_nix_socket_error:
msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error
return unittest.skip(msg)(test)
else:
return test
def fs_is_case_insensitive(directory):
"""Detects if the file system for the specified directory is case-insensitive."""
with tempfile.NamedTemporaryFile(dir=directory) as base:
base_path = base.name
case_path = base_path.upper()
if case_path == base_path:
case_path = base_path.lower()
try:
return os.path.samefile(base_path, case_path)
except FileNotFoundError:
return False
def detect_api_mismatch(ref_api, other_api, *, ignore=()):
"""Returns the set of items in ref_api not in other_api, except for a
defined list of items to be ignored in this check.
By default this skips private attributes beginning with '_' but
includes all magic methods, i.e. those starting and ending in '__'.
"""
missing_items = set(dir(ref_api)) - set(dir(other_api))
if ignore:
missing_items -= set(ignore)
missing_items = set(m for m in missing_items
if not m.startswith('_') or m.endswith('__'))
return missing_items
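# Illustrative usage sketch (editor's addition): compare a pure-Python module
# against its C accelerator, ignoring known gaps (names here are hypothetical):
#
#     missing = detect_api_mismatch(py_heapq, c_heapq, ignore={'_heapify_max'})
#     self.assertEqual(missing, set())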
def check__all__(test_case, module, name_of_module=None, extra=(),
blacklist=()):
"""Assert that the __all__ variable of 'module' contains all public names.
The module's public names (its API) are detected automatically based on
whether they match the public name convention and were defined in
'module'.
The 'name_of_module' argument can specify (as a string or tuple thereof)
what module(s) an API could be defined in in order to be detected as a
public API. One case for this is when 'module' imports part of its public
API from other modules, possibly a C backend (like 'csv' and its '_csv').
The 'extra' argument can be a set of names that wouldn't otherwise be
automatically detected as "public", like objects without a proper
'__module__' attribute. If provided, it will be added to the
automatically detected ones.
The 'blacklist' argument can be a set of names that must not be treated
as part of the public API even though their names indicate otherwise.
Usage:
import bar
import foo
import unittest
from test import support
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, foo)
class OtherTestCase(unittest.TestCase):
def test__all__(self):
extra = {'BAR_CONST', 'FOO_CONST'}
blacklist = {'baz'} # Undocumented name.
# bar imports part of its API from _bar.
support.check__all__(self, bar, ('bar', '_bar'),
extra=extra, blacklist=blacklist)
"""
if name_of_module is None:
name_of_module = (module.__name__, )
elif isinstance(name_of_module, str):
name_of_module = (name_of_module, )
expected = set(extra)
for name in dir(module):
if name.startswith('_') or name in blacklist:
continue
obj = getattr(module, name)
if (getattr(obj, '__module__', None) in name_of_module or
(not hasattr(obj, '__module__') and
not isinstance(obj, types.ModuleType))):
expected.add(name)
test_case.assertCountEqual(module.__all__, expected)
class SuppressCrashReport:
"""Try to prevent a crash report from popping up.
On Windows, don't display the Windows Error Reporting dialog. On UNIX,
disable the creation of coredump file.
"""
old_value = None
old_modes = None
def __enter__(self):
"""On Windows, disable Windows Error Reporting dialogs using
SetErrorMode.
On UNIX, try to save the previous core file size limit, then set
soft limit to 0.
"""
if sys.platform.startswith('win'):
# see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
# GetErrorMode is not available on Windows XP and Windows Server 2003,
# but SetErrorMode returns the previous value, so we can use that
import ctypes
self._k32 = ctypes.windll.kernel32
SEM_NOGPFAULTERRORBOX = 0x02
self.old_value = self._k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
self._k32.SetErrorMode(self.old_value | SEM_NOGPFAULTERRORBOX)
# Suppress assert dialogs in debug builds
# (see http://bugs.python.org/issue23314)
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
self.old_modes = {}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
old_mode = msvcrt.CrtSetReportMode(report_type,
msvcrt.CRTDBG_MODE_FILE)
old_file = msvcrt.CrtSetReportFile(report_type,
msvcrt.CRTDBG_FILE_STDERR)
self.old_modes[report_type] = old_mode, old_file
else:
if resource is not None:
try:
self.old_value = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE,
(0, self.old_value[1]))
except (ValueError, OSError):
pass
if sys.platform == 'darwin':
# Check if the 'Crash Reporter' on OSX was configured
# in 'Developer' mode and warn that it will get triggered
# when it is.
#
# This assumes that this context manager is used in tests
# that might trigger the next manager.
cmd = ['/usr/bin/defaults', 'read',
'com.apple.CrashReporter', 'DialogType']
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with proc:
stdout = proc.communicate()[0]
if stdout.strip() == b'developer':
print("this test triggers the Crash Reporter, "
"that is intentional", end='', flush=True)
return self
def __exit__(self, *ignore_exc):
"""Restore Windows ErrorMode or core file behavior to initial value."""
if self.old_value is None:
return
if sys.platform.startswith('win'):
self._k32.SetErrorMode(self.old_value)
if self.old_modes:
import msvcrt
for report_type, (old_mode, old_file) in self.old_modes.items():
msvcrt.CrtSetReportMode(report_type, old_mode)
msvcrt.CrtSetReportFile(report_type, old_file)
else:
if resource is not None:
try:
resource.setrlimit(resource.RLIMIT_CORE, self.old_value)
except (ValueError, OSError):
pass
def patch(test_instance, object_to_patch, attr_name, new_value):
"""Override 'object_to_patch'.'attr_name' with 'new_value'.
Also, add a cleanup procedure to 'test_instance' to restore
'object_to_patch' value for 'attr_name'.
The 'attr_name' should be a valid attribute for 'object_to_patch'.
"""
# check that 'attr_name' is a real attribute for 'object_to_patch'
# will raise AttributeError if it does not exist
getattr(object_to_patch, attr_name)
# keep a copy of the old value
attr_is_local = False
try:
old_value = object_to_patch.__dict__[attr_name]
except (AttributeError, KeyError):
old_value = getattr(object_to_patch, attr_name, None)
else:
attr_is_local = True
# restore the value when the test is done
def cleanup():
if attr_is_local:
setattr(object_to_patch, attr_name, old_value)
else:
delattr(object_to_patch, attr_name)
test_instance.addCleanup(cleanup)
# actually override the attribute
setattr(object_to_patch, attr_name, new_value)
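# Illustrative usage sketch (editor's addition): unlike swap_attr(), patch()
# registers the restore step with addCleanup(), so it runs even on failure:
#
#     def test_fake_stdout(self):                # hypothetical test method
#         fake = io.StringIO()
#         patch(self, sys, 'stdout', fake)       # restored automatically
#         print("hello")
#         self.assertEqual(fake.getvalue(), "hello\n")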
def run_in_subinterp(code):
"""
Run code in a subinterpreter. Raise unittest.SkipTest if the tracemalloc
module is enabled.
"""
# Issue #10915, #15751: PyGILState_*() functions don't work with
# sub-interpreters, the tracemalloc module uses these functions internally
try:
import tracemalloc
except ImportError:
pass
else:
if tracemalloc.is_tracing():
raise unittest.SkipTest("run_in_subinterp() cannot be used "
"if tracemalloc module is tracing "
"memory allocations")
import _testcapi
return _testcapi.run_in_subinterp(code)
def check_free_after_iterating(test, iter, cls, args=()):
class A(cls):
def __del__(self):
nonlocal done
done = True
try:
next(it)
except StopIteration:
pass
done = False
it = iter(A(*args))
# Issue 26494: Shouldn't crash
test.assertRaises(StopIteration, next, it)
# The sequence should be deallocated just after the end of iterating
gc_collect()
test.assertTrue(done)
def missing_compiler_executable(cmd_names=[]):
"""Check if the compiler components used to build the interpreter exist.
Check for the existence of the compiler executables whose names are listed
in 'cmd_names' or all the compiler executables when 'cmd_names' is empty
and return the first missing executable or None when none is found
missing.
"""
from distutils import ccompiler, sysconfig, spawn
compiler = ccompiler.new_compiler()
sysconfig.customize_compiler(compiler)
for name in compiler.executables:
if cmd_names and name not in cmd_names:
continue
cmd = getattr(compiler, name)
if cmd_names:
assert cmd is not None, \
"the '%s' executable is not configured" % name
elif not cmd:
continue
if spawn.find_executable(cmd[0]) is None:
return cmd[0]
_is_android_emulator = None
def setswitchinterval(interval):
# Setting a very low gil interval on the Android emulator causes python
# to hang (issue #26939).
minimum_interval = 1e-5
if is_android and interval < minimum_interval:
global _is_android_emulator
if _is_android_emulator is None:
_is_android_emulator = (subprocess.check_output(
['getprop', 'ro.kernel.qemu']).strip() == b'1')
if _is_android_emulator:
interval = minimum_interval
return sys.setswitchinterval(interval)
@contextlib.contextmanager
def disable_faulthandler():
# use sys.__stderr__ instead of sys.stderr, since regrtest replaces
# sys.stderr with a StringIO which has no file descriptor when a test
# is run with -W/--verbose3.
fd = sys.__stderr__.fileno()
is_enabled = faulthandler.is_enabled()
try:
faulthandler.disable()
yield
finally:
if is_enabled:
faulthandler.enable(file=fd, all_threads=True)
def fd_count():
"""Count the number of open file descriptors.
"""
if sys.platform.startswith(('linux', 'freebsd')):
try:
names = os.listdir("/proc/self/fd")
# Subtract one because listdir() internally opens a file
# descriptor to list the content of the /proc/self/fd/ directory.
return len(names) - 1
except FileNotFoundError:
pass
MAXFD = 256
if hasattr(os, 'sysconf'):
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except OSError:
pass
old_modes = None
if sys.platform == 'win32':
# bpo-25306, bpo-31009: Call CrtSetReportMode() to not kill the process
# on invalid file descriptor if Python is compiled in debug mode
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
old_modes = {}
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
old_modes[report_type] = msvcrt.CrtSetReportMode(report_type, 0)
try:
count = 0
for fd in range(MAXFD):
try:
# Prefer dup() over fstat(). fstat() can require input/output
# whereas dup() doesn't.
fd2 = os.dup(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
os.close(fd2)
count += 1
finally:
if old_modes is not None:
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
msvcrt.CrtSetReportMode(report_type, old_modes[report_type])
return count
class SaveSignals:
"""
Save and restore signal handlers.
This class is only able to save/restore signal handlers registered
by the Python signal module: see bpo-13285 for "external" signal
handlers.
"""
def __init__(self):
import signal
self.signal = signal
self.signals = signal.valid_signals()
# SIGKILL and SIGSTOP signals cannot be ignored nor caught
for signame in ('SIGKILL', 'SIGSTOP'):
try:
signum = getattr(signal, signame)
except AttributeError:
continue
self.signals.remove(signum)
self.handlers = {}
def save(self):
for signum in self.signals:
handler = self.signal.getsignal(signum)
if handler is None:
# getsignal() returns None if a signal handler was not
# registered by the Python signal module,
# and the handler is not SIG_DFL nor SIG_IGN.
#
# Ignore the signal: we cannot restore the handler.
continue
self.handlers[signum] = handler
def restore(self):
for signum, handler in self.handlers.items():
self.signal.signal(signum, handler)
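# Illustrative usage sketch (editor's addition): snapshot handlers before a
# test that installs its own, and put them back afterwards:
#
#     saved = SaveSignals()
#     saved.save()
#     try:
#         signal.signal(signal.SIGUSR1, lambda signum, frame: None)
#         ...exercise the code under test...
#     finally:
#         saved.restore()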
def with_pymalloc():
import _testcapi
return _testcapi.WITH_PYMALLOC
class FakePath:
"""Simple implementing of the path protocol.
"""
def __init__(self, path):
self.path = path
def __repr__(self):
return f'<FakePath {self.path!r}>'
def __fspath__(self):
if (isinstance(self.path, BaseException) or
isinstance(self.path, type) and
issubclass(self.path, BaseException)):
raise self.path
else:
return self.path
class _ALWAYS_EQ:
"""
Object that is equal to anything.
"""
def __eq__(self, other):
return True
def __ne__(self, other):
return False
ALWAYS_EQ = _ALWAYS_EQ()
@functools.total_ordering
class _LARGEST:
"""
Object that is greater than anything (except itself).
"""
def __eq__(self, other):
return isinstance(other, _LARGEST)
def __lt__(self, other):
return False
LARGEST = _LARGEST()
@functools.total_ordering
class _SMALLEST:
"""
Object that is less than anything (except itself).
"""
def __eq__(self, other):
return isinstance(other, _SMALLEST)
def __gt__(self, other):
return False
SMALLEST = _SMALLEST()
def maybe_get_event_loop_policy():
"""Return the global event loop policy if one is set, else return None."""
return asyncio.events._event_loop_policy
# Helpers for testing hashing.
NHASHBITS = sys.hash_info.width # number of bits in hash() result
assert NHASHBITS in (32, 64)
# Return mean and sdev of number of collisions when tossing nballs balls
# uniformly at random into nbins bins. By definition, the number of
# collisions is the number of balls minus the number of occupied bins at
# the end.
def collision_stats(nbins, nballs):
n, k = nbins, nballs
# prob a bin empty after k trials = (1 - 1/n)**k
# mean # empty is then n * (1 - 1/n)**k
# so mean # occupied is n - n * (1 - 1/n)**k
# so collisions = k - (n - n*(1 - 1/n)**k)
#
# For the variance:
# n*(n-1)*(1-2/n)**k + meanempty - meanempty**2 =
# n*(n-1)*(1-2/n)**k + meanempty * (1 - meanempty)
#
# Massive cancellation occurs, and, e.g., for a 64-bit hash code
# 1-1/2**64 rounds uselessly to 1.0. Rather than make heroic (and
# error-prone) efforts to rework the naive formulas to avoid those,
# we use the `decimal` module to get plenty of extra precision.
#
# Note: the exact values are straightforward to compute with
# rationals, but in context that's unbearably slow, requiring
# multi-million bit arithmetic.
import decimal
with decimal.localcontext() as ctx:
bits = n.bit_length() * 2 # bits in n**2
# At least that many bits will likely cancel out.
# Use that many decimal digits instead.
ctx.prec = max(bits, 30)
dn = decimal.Decimal(n)
p1empty = ((dn - 1) / dn) ** k
meanempty = n * p1empty
occupied = n - meanempty
collisions = k - occupied
var = dn*(dn-1)*((dn-2)/dn)**k + meanempty * (1 - meanempty)
return float(collisions), float(var.sqrt())
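# Illustrative usage sketch (editor's addition): a hash-distribution test can
# require the observed collision count to stay within a few standard
# deviations of the expectation ('observed' is whatever the test counted):
#
#     mean, sdev = collision_stats(2 ** 32, 10 ** 6)
#     self.assertLessEqual(observed, mean + 6 * sdev)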
class catch_unraisable_exception:
"""
Context manager catching unraisable exception using sys.unraisablehook.
Storing the exception value (cm.unraisable.exc_value) creates a reference
cycle. The reference cycle is broken explicitly when the context manager
exits.
Storing the object (cm.unraisable.object) can resurrect it if it is set to
an object which is being finalized. Exiting the context manager clears the
stored object.
Usage:
with support.catch_unraisable_exception() as cm:
# code creating an "unraisable exception"
...
# check the unraisable exception: use cm.unraisable
...
# cm.unraisable attribute no longer exists at this point
# (to break a reference cycle)
"""
def __init__(self):
self.unraisable = None
self._old_hook = None
def _hook(self, unraisable):
# Storing unraisable.object can resurrect an object which is being
# finalized. Storing unraisable.exc_value creates a reference cycle.
self.unraisable = unraisable
def __enter__(self):
self._old_hook = sys.unraisablehook
sys.unraisablehook = self._hook
return self
def __exit__(self, *exc_info):
sys.unraisablehook = self._old_hook
del self.unraisable
class catch_threading_exception:
"""
Context manager catching threading.Thread exception using
threading.excepthook.
Attributes set when an exception is caught:
* exc_type
* exc_value
* exc_traceback
* thread
See threading.excepthook() documentation for these attributes.
These attributes are deleted at the context manager exit.
Usage:
with support.catch_threading_exception() as cm:
# code spawning a thread which raises an exception
...
# check the thread exception, use cm attributes:
# exc_type, exc_value, exc_traceback, thread
...
# exc_type, exc_value, exc_traceback, thread attributes of cm no longer
# exist at this point
# (to avoid reference cycles)
"""
def __init__(self):
self.exc_type = None
self.exc_value = None
self.exc_traceback = None
self.thread = None
self._old_hook = None
def _hook(self, args):
self.exc_type = args.exc_type
self.exc_value = args.exc_value
self.exc_traceback = args.exc_traceback
self.thread = args.thread
def __enter__(self):
self._old_hook = threading.excepthook
threading.excepthook = self._hook
return self
def __exit__(self, *exc_info):
threading.excepthook = self._old_hook
del self.exc_type
del self.exc_value
del self.exc_traceback
del self.thread
| kikocorreoso/brython | www/src/Lib/test/support/__init__.py | Python | bsd-3-clause | 115,373 |
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
import frappe
import erpnext
test_dependencies = ["Employee", "Leave Type", "Leave Policy"]
class TestLeavePeriod(unittest.TestCase):
pass
def create_leave_period(from_date, to_date, company=None):
leave_period = frappe.db.get_value('Leave Period',
dict(company=company or erpnext.get_default_company(),
from_date=from_date,
to_date=to_date,
is_active=1), 'name')
if leave_period:
return frappe.get_doc("Leave Period", leave_period)
leave_period = frappe.get_doc({
"doctype": "Leave Period",
"company": company or erpnext.get_default_company(),
"from_date": from_date,
"to_date": to_date,
"is_active": 1
}).insert()
return leave_period
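# Illustrative usage sketch (editor's addition): other HR tests can reuse this
# helper to fetch or create an active Leave Period (dates and company name
# below are hypothetical):
#
#     leave_period = create_leave_period("2018-01-01", "2018-12-31",
#                                        company="_Test Company")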
| mhbu50/erpnext | erpnext/hr/doctype/leave_period/test_leave_period.py | Python | gpl-3.0 | 775 |
import numpy as np
from menpo.base import Copyable
class LinearModel(Copyable):
r"""
A Linear Model contains a matrix of vector components, each component
vector being made up of `features`.
Parameters
----------
components : ``(n_components, n_features)`` `ndarray`
The components array.
"""
def __init__(self, components):
self._components = components # getter/setter variable
@property
def n_components(self):
r"""
The number of bases of the model.
:type: `int`
"""
return self._components.shape[0]
@property
def n_features(self):
r"""
The number of elements in each linear component.
:type: `int`
"""
return self.components.shape[-1]
@property
def components(self):
r"""
The components matrix of the linear model.
:type: ``(n_available_components, n_features)`` `ndarray`
"""
return self._components
@components.setter
def components(self, value):
r"""
Updates the components of this linear model, ensuring that the shape
of the components is not changed.
Parameters
----------
value : ``(n_components, n_features)`` `ndarray`
The new components array.
Raises
------
ValueError
Trying to replace components of shape {} with some of shape {}
"""
if value.shape != self._components.shape:
raise ValueError(
"Trying to replace components of shape {} with some of "
"shape {}".format(self.components.shape, value.shape))
else:
np.copyto(self._components, value, casting='safe')
def component_vector(self, index):
r"""
A particular component of the model, in vectorized form.
Parameters
----------
index : `int`
The component that is to be returned.
Returns
-------
component_vector : ``(n_features,)`` `ndarray`
The component vector.
"""
return self.components[index]
def instance_vector(self, weights):
r"""
Creates a new vector instance of the model by weighting together the
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
The weightings for the first `n_weights` components that should be
used.
``weights[j]`` is the linear contribution of the j'th principal
component to the instance vector.
Returns
-------
vector : ``(n_features,)`` `ndarray`
The instance vector for the weighting provided.
"""
# just call the plural version and adapt
weights = np.asarray(weights) # if eg a list is provided
return self.instance_vectors(weights[None, :]).flatten()
def instance_vectors(self, weights):
"""
Creates new vectorized instances of the model using all the components
of the linear model.
Parameters
----------
weights : ``(n_vectors, n_weights)`` `ndarray` or `list` of `lists`
The weightings for all components of the linear model. All
components will be used to produce the instance.
``weights[i, j]`` is the linear contribution of the j'th
principal component to the i'th instance vector produced.
Raises
------
ValueError
If n_weights > n_available_components
Returns
-------
vectors : ``(n_vectors, n_features)`` `ndarray`
The instance vectors for the weighting provided.
"""
weights = np.asarray(weights) # if eg a list is provided
n_instances, n_weights = weights.shape
if not n_weights == self.n_components:
raise ValueError(
"Number of weightings has to match number of available "
"components = {}".format(self.n_components))
return self._instance_vectors_for_full_weights(weights)
# TODO check this is right
def _instance_vectors_for_full_weights(self, full_weights):
return np.dot(full_weights, self.components)
def project_vector(self, vector):
"""
Projects the `vector` onto the model, retrieving the optimal
linear reconstruction weights.
Parameters
----------
vector : ``(n_features,)`` `ndarray`
A vectorized novel instance.
Returns
-------
weights : ``(n_components,)`` `ndarray`
A vector of optimal linear weights.
"""
return self.project_vectors(vector[None, :]).flatten()
def project_vectors(self, vectors):
"""
Projects each of the `vectors` onto the model, retrieving
the optimal linear reconstruction weights for each instance.
Parameters
----------
vectors : ``(n_samples, n_features)`` `ndarray`
Array of vectorized novel instances.
Returns
-------
weights : ``(n_samples, n_components)`` `ndarray`
The matrix of optimal linear weights.
"""
return np.dot(vectors, self.components.T)
def reconstruct_vector(self, vector):
"""
Project a `vector` onto the linear space and rebuild from the weights
found.
Parameters
----------
vector : ``(n_features, )`` `ndarray`
A vectorized novel instance to project.
Returns
-------
reconstructed : ``(n_features,)`` `ndarray`
The reconstructed vector.
"""
return self.reconstruct_vectors(vector[None, :]).flatten()
def reconstruct_vectors(self, vectors):
"""
Projects the `vectors` onto the linear space and rebuilds vectors from
the weights found.
Parameters
----------
vectors : ``(n_vectors, n_features)`` `ndarray`
A set of vectors to project.
Returns
-------
reconstructed : ``(n_vectors, n_features)`` `ndarray`
The reconstructed vectors.
"""
return self.instance_vectors(self.project_vectors(vectors))
def project_out_vector(self, vector):
"""
Returns a version of `vector` where all the bases of the model have
been projected out.
Parameters
----------
vector : ``(n_features,)`` `ndarray`
A novel vector.
Returns
-------
projected_out : ``(n_features,)`` `ndarray`
A copy of `vector` with all bases of the model projected out.
"""
return self.project_out_vectors(vector[None, :])
def project_out_vectors(self, vectors):
"""
Returns a version of `vectors` where all the bases of the model have
been projected out.
Parameters
----------
vectors : ``(n_vectors, n_features)`` `ndarray`
A matrix of novel vectors.
Returns
-------
projected_out : ``(n_vectors, n_features)`` `ndarray`
            A copy of `vectors` with all the bases of the model projected out.
"""
weights = np.dot(vectors, self.components.T)
return vectors - np.dot(weights, self.components)
def orthonormalize_inplace(self):
r"""
Enforces that this model's components are orthonormalized,
        s.t. ``component_vector(i).dot(component_vector(j)) = dirac_delta``.
"""
Q = np.linalg.qr(self.components.T)[0].T
self.components[...] = Q
# TODO: Investigate the meaning and consequences of trying to
# orthonormalize two identical vectors
def orthonormalize_against_inplace(self, linear_model):
r"""
Enforces that the union of this model's components and another are
both mutually orthonormal.
        Both models keep their number of components unchanged; otherwise a
        ``ValueError`` is raised.
Parameters
----------
linear_model : :class:`LinearModel`
A second linear model to orthonormalize this against.
Raises
------
        ValueError
            If the number of features is smaller than the sum of the number of
            components in both linear models.
"""
n_components_sum = self.n_components + linear_model.n_components
        if self.n_features < n_components_sum:
            raise ValueError(
                "The number of features must be greater than or equal to the "
                "sum of the number of components in both linear models "
                "({} < {})".format(self.n_features, n_components_sum))
# take the QR decomposition of the model components
Q = (np.linalg.qr(np.hstack((linear_model._components.T,
self._components.T)))[0]).T
# set the orthonormalized components of the model being passed
linear_model.components = Q[:linear_model.n_components, :]
# set the orthonormalized components of this model
self.components = Q[linear_model.n_components:, :]
class MeanLinearModel(LinearModel):
r"""
A Linear Model containing a matrix of vector components, each component
vector being made up of `features`. The model additionally has a mean
component which is handled accordingly when either:
1. A component of the model is selected
2. A projection operation is performed
Parameters
----------
components : ``(n_components, n_features)`` `ndarray`
The components array.
mean_vector : ``(n_features,)`` `ndarray`
The mean vector.
"""
def __init__(self, components, mean_vector):
super(MeanLinearModel, self).__init__(components)
self.mean_vector = mean_vector
def component_vector(self, index, with_mean=True, scale=1.0):
r"""
A particular component of the model, in vectorized form.
Parameters
----------
index : `int`
The component that is to be returned
with_mean : `bool`, optional
If ``True``, the component will be blended with the mean vector
            before being returned. If not, the component is returned on its
own.
scale : `float`, optional
A scale factor that should be directly applied to the component.
Only valid in the case where ``with_mean == True``.
Returns
-------
component_vector : ``(n_features,)`` `ndarray`
The component vector.
"""
if with_mean:
return (scale * self.components[index]) + self.mean_vector
else:
return self.components[index]
def project_vectors(self, vectors):
"""
Projects each of the `vectors` onto the model, retrieving
the optimal linear reconstruction weights for each instance.
Parameters
----------
vectors : ``(n_samples, n_features)`` `ndarray`
Array of vectorized novel instances.
Returns
-------
projected : ``(n_samples, n_components)`` `ndarray`
The matrix of optimal linear weights.
"""
X = vectors - self.mean_vector
return np.dot(X, self.components.T)
def _instance_vectors_for_full_weights(self, full_weights):
x = LinearModel._instance_vectors_for_full_weights(self, full_weights)
return x + self.mean_vector
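# --- Illustrative usage sketch (assumption, not part of the original module) --
# A minimal round trip through the API documented above, assuming that
# ``LinearModel.__init__`` accepts a ``(n_components, n_features)`` components
# array (as the ``super()`` call in ``MeanLinearModel.__init__`` suggests):
#
#     import numpy as np
#     components = np.linalg.qr(np.random.randn(5, 3))[0].T  # (3, 5) orthonormal rows
#     model = MeanLinearModel(components, np.zeros(5))
#     vector = np.random.randn(5)
#     weights = model.project_vector(vector)       # (3,) optimal linear weights
#     approx = model.reconstruct_vector(vector)    # (5,) reconstruction from 3 components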
|
mozata/menpo
|
menpo/model/linear.py
|
Python
|
bsd-3-clause
| 11,640
|
import RPi.GPIO as GPIO
import time
# Initialization
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Initialize push button (pull-up enabled, so the pin reads LOW when pressed)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Initialize LED
GPIO.setup(17, GPIO.OUT)
# When Button is PRESSED
def buttonPressed():
GPIO.output(17,GPIO.HIGH)
return
# When Button is RELEASED
def buttonReleased():
GPIO.output(17, GPIO.LOW)
return
while True:
    # Read the button state; the input reads LOW while the button is pressed
    inputValue = GPIO.input(18)
    if inputValue == GPIO.LOW:
        buttonPressed()
    else:
        buttonReleased()
    time.sleep(0.5)
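# --- Alternative sketch (illustrative only, not part of the original script) --
# Instead of polling every 0.5 s, RPi.GPIO can report button edges through a
# callback. A minimal sketch, assuming the same wiring (button on BCM 18, LED
# on BCM 17):
#
#     def on_edge(channel):
#         # the input reads LOW while the button is held down (pull-up enabled)
#         GPIO.output(17, GPIO.HIGH if GPIO.input(channel) == GPIO.LOW else GPIO.LOW)
#
#     GPIO.add_event_detect(18, GPIO.BOTH, callback=on_edge, bouncetime=50)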
|
greenSyntax/AWS-RaspberryPi
|
RaspberyPi AWS/push.py
|
Python
|
mit
| 565
|
import os
import glob
all_in_ones = glob.glob("*.all_in_one")
for all_in_one in all_in_ones:
    # read the first defline of the FASTA stored inside each .all_in_one directory
    with open(os.path.join(all_in_one, all_in_one.split(".")[0] + ".fasta"), "r") as fasta:
        gene_call = fasta.readline()
    gene_call = gene_call.strip().replace(">", "")
    os.rename(all_in_one, gene_call + ".all_in_one")
|
merenlab/web
|
data/sar11-saavs/files/rename_all_in_ones.py
|
Python
|
mit
| 298
|
#!/bin/env python2.7
import sys
import os
import argparse
from collections import defaultdict
import re
def main():
args=processArgs()
assemblyValues = {}
with open(args.seqfile,'r') as fh :
priorcomp = ""
priorsub = ""
priorseq = ""
for line in fh :
line = line.strip('\r\n')
if re.search('^>',line) != None :
result = re.search('^>.*(c\d+).*(c\d+).*(seq\d+)',line)
                if result is None or result.lastindex != 3 :
print "The fasta header didn't match the pattern expected. Exiting."
sys.exit()
comp,sub,seq = result.groups()
#print "comp,sub,seq=%s, %s, %s" % (comp,sub,seq)
if comp not in assemblyValues :
assemblyValues[comp] = {}
assemblyValues[comp][sub] = {}
elif sub not in assemblyValues[comp] :
assemblyValues[comp][sub] = {}
priorcomp = comp
priorsub = sub
priorseq = seq
else :
assemblyValues[priorcomp][priorsub][priorseq] = len(line)
print assemblyValues
compcount = len(assemblyValues)
subcount = 0
seqcount = 0
seqspercomp = {}
seqspersub = {}
subspercomp = {}
for comp in assemblyValues :
thissubcount = len(assemblyValues[comp])
if thissubcount in subspercomp :
subspercomp[thissubcount] += 1
else :
subspercomp[thissubcount] = 1
compseqcount = 0
for sub in assemblyValues[comp] :
thisseqcount = len(assemblyValues[comp][sub])
if thisseqcount in seqspersub :
seqspersub[thisseqcount] += 1
else :
seqspersub[thisseqcount] = 1
subcount += 1
compseqcount += thisseqcount
for seq in assemblyValues[comp][sub] :
seqcount += 1
if compseqcount in seqspercomp :
seqspercomp[compseqcount] += 1
else :
seqspercomp[compseqcount] = 1
print "subspercomp=%s" % (subspercomp)
print "seqspercomp=%s" % (seqspercomp)
print "seqspersub=%s" % (seqspersub)
def processArgs():
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('\nError: %s\n\n' % message)
self.print_help()
sys.exit(2)
class Checkerrors(argparse.Action) :
def __call__(self,parser,namespace,value,option_string) :
if (option_string==None) :
if (os.path.isfile(value)==False) :
parser.error("The 'dir' flag needs a valid filename.")
else :
setattr(namespace,self.dest,value)
#argParser = MyParser(usage=("%s (sourceDir & filter) | filterFile" % (os.path.basename(sys.argv[0]))))
argParser = MyParser(description="""Characterizes Trinity output. Looks at how many sequences exist in each comp and subcomp.""")
argParser.add_argument('seqfile', metavar="", action=Checkerrors, help="A combined trinity sequence file.")
ap=argParser.parse_args()
return ap
#This is required because by default this is a module. Running this makes it execute main as if it is a script
if __name__ == '__main__':
main()
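# --- Usage note (illustrative, not part of the original script) --------------
# The header regex above expects two "c<digits>" tokens and one "seq<digits>"
# token per defline, so a hypothetical Trinity-style header such as
#     >c12345_c0_seq1
# parses into comp="c12345", sub="c0", seq="seq1".  Invoked as, e.g.:
#     python2.7 characterizeTrinity.py combined_trinity.fasta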
|
sanjuroj/bioscripts
|
characterizeTrinity.py
|
Python
|
mit
| 3,442
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urlparse import parse_qs
from django.test import override_settings
from django.test.client import RequestFactory
import querystringsafe_base64
from mock import patch, Mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
from bedrock.firefox import views
from bedrock.mozorg.tests import TestCase
@override_settings(STUB_ATTRIBUTION_HMAC_KEY='achievers',
STUB_ATTRIBUTION_RATE=1)
@patch.object(views, 'time', Mock(return_value=12345.678))
class TestStubAttributionCode(TestCase):
def _get_request(self, params):
rf = RequestFactory()
return rf.get('/', params,
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
HTTP_ACCEPT='application/json')
def test_not_ajax_request(self):
req = RequestFactory().get('/', {'source': 'malibu'})
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 400)
assert 'cache-control' not in resp
data = json.loads(resp.content)
self.assertEqual(data['error'], 'Resource only available via XHR')
def test_no_valid_param_names(self):
req = self._get_request({'dude': 'abides'})
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 400)
assert resp['cache-control'] == 'max-age=300'
data = json.loads(resp.content)
self.assertEqual(data['error'], 'no params')
def test_no_valid_param_data(self):
params = {'utm_source': 'br@ndt', 'utm_medium': 'ae<t>her'}
req = self._get_request(params)
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 400)
assert resp['cache-control'] == 'max-age=300'
data = json.loads(resp.content)
self.assertEqual(data['error'], 'no params')
def test_some_valid_param_data(self):
params = {'utm_source': 'brandt', 'utm_content': 'ae<t>her'}
final_params = {
'source': 'brandt',
'medium': '(direct)',
'campaign': '(not set)',
'content': '(not set)',
'timestamp': '12345',
}
req = self._get_request(params)
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 200)
assert resp['cache-control'] == 'max-age=300'
data = json.loads(resp.content)
# will it blend?
attrs = parse_qs(querystringsafe_base64.decode(data['attribution_code']))
# parse_qs returns a dict with lists for values
attrs = {k: v[0] for k, v in attrs.items()}
self.assertDictEqual(attrs, final_params)
self.assertEqual(data['attribution_sig'],
'bd6c54115eb1f331b64bec83225a667fa0e16090d7d6abb33dab6305cd858a9d')
def test_returns_valid_data(self):
params = {'utm_source': 'brandt', 'utm_medium': 'aether'}
final_params = {
'source': 'brandt',
'medium': 'aether',
'campaign': '(not set)',
'content': '(not set)',
'timestamp': '12345',
}
req = self._get_request(params)
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 200)
assert resp['cache-control'] == 'max-age=300'
data = json.loads(resp.content)
# will it blend?
attrs = parse_qs(querystringsafe_base64.decode(data['attribution_code']))
# parse_qs returns a dict with lists for values
attrs = {k: v[0] for k, v in attrs.items()}
self.assertDictEqual(attrs, final_params)
self.assertEqual(data['attribution_sig'],
'ab55c9b24e230f08d3ad50bf9a3a836ef4405cfb6919cb1df8efe208be38e16d')
def test_handles_referrer(self):
params = {'utm_source': 'brandt', 'referrer': 'https://duckduckgo.com/privacy'}
final_params = {
'source': 'brandt',
'medium': '(direct)',
'campaign': '(not set)',
'content': '(not set)',
'timestamp': '12345',
}
req = self._get_request(params)
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 200)
assert resp['cache-control'] == 'max-age=300'
data = json.loads(resp.content)
# will it blend?
attrs = parse_qs(querystringsafe_base64.decode(data['attribution_code']))
# parse_qs returns a dict with lists for values
attrs = {k: v[0] for k, v in attrs.items()}
self.assertDictEqual(attrs, final_params)
self.assertEqual(data['attribution_sig'],
'bd6c54115eb1f331b64bec83225a667fa0e16090d7d6abb33dab6305cd858a9d')
def test_handles_referrer_no_source(self):
params = {'referrer': 'https://example.com:5000/searchin', 'utm_medium': 'aether'}
final_params = {
'source': 'example.com:5000',
'medium': 'referral',
'campaign': '(not set)',
'content': '(not set)',
'timestamp': '12345',
}
req = self._get_request(params)
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 200)
assert resp['cache-control'] == 'max-age=300'
data = json.loads(resp.content)
# will it blend?
attrs = parse_qs(querystringsafe_base64.decode(data['attribution_code']))
# parse_qs returns a dict with lists for values
attrs = {k: v[0] for k, v in attrs.items()}
self.assertDictEqual(attrs, final_params)
self.assertEqual(data['attribution_sig'],
'6b3dbb178e9abc22db66530df426b17db8590e8251fc153ba443e81ca60e355e')
@override_settings(STUB_ATTRIBUTION_RATE=0.2)
def test_rate_limit(self):
params = {'utm_source': 'brandt', 'utm_medium': 'aether'}
req = self._get_request(params)
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 200)
assert resp['cache-control'] == 'max-age=300'
@override_settings(STUB_ATTRIBUTION_RATE=0)
def test_rate_limit_disabled(self):
params = {'utm_source': 'brandt', 'utm_medium': 'aether'}
req = self._get_request(params)
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 429)
assert resp['cache-control'] == 'max-age=300'
@override_settings(STUB_ATTRIBUTION_HMAC_KEY='')
def test_no_hmac_key_set(self):
params = {'utm_source': 'brandt', 'utm_medium': 'aether'}
req = self._get_request(params)
resp = views.stub_attribution_code(req)
self.assertEqual(resp.status_code, 403)
assert resp['cache-control'] == 'max-age=300'
class TestSendToDeviceView(TestCase):
def setUp(self):
patcher = patch('bedrock.firefox.views.basket.subscribe')
self.mock_subscribe = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch('bedrock.firefox.views.basket.request')
self.mock_send_sms = patcher.start()
self.addCleanup(patcher.stop)
def _request(self, data, expected_status=200, locale='en-US'):
req = RequestFactory().post('/', data)
req.locale = locale
resp = views.send_to_device_ajax(req)
eq_(resp.status_code, expected_status)
return json.loads(resp.content)
def test_phone_or_email_required(self):
resp_data = self._request({
'platform': 'android',
})
ok_(not resp_data['success'])
ok_('phone-or-email' in resp_data['errors'])
ok_(not self.mock_send_sms.called)
ok_(not self.mock_subscribe.called)
def test_send_android_sms(self):
resp_data = self._request({
'platform': 'android',
'phone-or-email': '5558675309',
})
ok_(resp_data['success'])
self.mock_send_sms.assert_called_with('post', 'subscribe_sms', data={
'mobile_number': '5558675309',
'msg_name': views.SEND_TO_DEVICE_MESSAGE_SETS['default']['sms']['android'],
'lang': 'en-US',
})
def test_send_android_sms_non_en_us(self):
resp_data = self._request({
'platform': 'android',
'phone-or-email': '015558675309',
}, locale='de')
ok_(resp_data['success'])
self.mock_send_sms.assert_called_with('post', 'subscribe_sms', data={
'mobile_number': '015558675309',
'msg_name': views.SEND_TO_DEVICE_MESSAGE_SETS['default']['sms']['android'],
'lang': 'de',
})
def test_send_android_sms_with_country(self):
resp_data = self._request({
'platform': 'android',
'phone-or-email': '5558675309',
'country': 'de',
})
ok_(resp_data['success'])
self.mock_send_sms.assert_called_with('post', 'subscribe_sms', data={
'mobile_number': '5558675309',
'msg_name': views.SEND_TO_DEVICE_MESSAGE_SETS['default']['sms']['android'],
'lang': 'en-US',
'country': 'de',
})
def test_send_android_sms_with_invalid_country(self):
resp_data = self._request({
'platform': 'android',
'phone-or-email': '5558675309',
'country': 'X2',
})
ok_(resp_data['success'])
self.mock_send_sms.assert_called_with('post', 'subscribe_sms', data={
'mobile_number': '5558675309',
'msg_name': views.SEND_TO_DEVICE_MESSAGE_SETS['default']['sms']['android'],
'lang': 'en-US',
})
resp_data = self._request({
'platform': 'android',
'phone-or-email': '5558675309',
'country': 'dude',
})
ok_(resp_data['success'])
self.mock_send_sms.assert_called_with('post', 'subscribe_sms', data={
'mobile_number': '5558675309',
'msg_name': views.SEND_TO_DEVICE_MESSAGE_SETS['default']['sms']['android'],
'lang': 'en-US',
})
def test_send_android_sms_basket_error(self):
self.mock_send_sms.side_effect = views.basket.BasketException
resp_data = self._request({
'platform': 'android',
'phone-or-email': '5558675309',
}, 400)
ok_(not resp_data['success'])
ok_('system' in resp_data['errors'])
def test_send_bad_sms_number(self):
self.mock_send_sms.side_effect = views.basket.BasketException('mobile_number is invalid')
resp_data = self._request({
'platform': 'android',
'phone-or-email': '555',
})
ok_(not resp_data['success'])
ok_('number' in resp_data['errors'])
def test_send_android_email(self):
resp_data = self._request({
'platform': 'android',
'phone-or-email': 'dude@example.com',
'source-url': 'https://nihilism.info',
})
ok_(resp_data['success'])
self.mock_subscribe.assert_called_with('dude@example.com',
views.SEND_TO_DEVICE_MESSAGE_SETS['default']['email']['android'],
source_url='https://nihilism.info',
lang='en-US')
def test_send_android_email_basket_error(self):
self.mock_subscribe.side_effect = views.basket.BasketException
resp_data = self._request({
'platform': 'android',
'phone-or-email': 'dude@example.com',
'source-url': 'https://nihilism.info',
}, 400)
ok_(not resp_data['success'])
ok_('system' in resp_data['errors'])
def test_send_android_bad_email(self):
resp_data = self._request({
'platform': 'android',
'phone-or-email': '@example.com',
'source-url': 'https://nihilism.info',
})
ok_(not resp_data['success'])
ok_('email' in resp_data['errors'])
ok_(not self.mock_subscribe.called)
# an invalid value for 'message-set' should revert to 'default' message set
def test_invalid_message_set(self):
resp_data = self._request({
'platform': 'ios',
'phone-or-email': '5558675309',
'message-set': 'the-dude-is-not-in',
})
ok_(resp_data['success'])
self.mock_send_sms.assert_called_with('post', 'subscribe_sms', data={
'mobile_number': '5558675309',
'msg_name': views.SEND_TO_DEVICE_MESSAGE_SETS['default']['sms']['ios'],
'lang': 'en-US',
})
# /firefox/android/ embedded widget (bug 1221328)
def test_android_embedded_email(self):
resp_data = self._request({
'platform': 'android',
'phone-or-email': 'dude@example.com',
'message-set': 'fx-android',
})
ok_(resp_data['success'])
self.mock_subscribe.assert_called_with('dude@example.com',
views.SEND_TO_DEVICE_MESSAGE_SETS['fx-android']['email']['android'],
source_url=None,
lang='en-US')
def test_android_embedded_sms(self):
resp_data = self._request({
'platform': 'android',
'phone-or-email': '5558675309',
'message-set': 'fx-android',
})
ok_(resp_data['success'])
self.mock_send_sms.assert_called_with('post', 'subscribe_sms', data={
'mobile_number': '5558675309',
'msg_name': views.SEND_TO_DEVICE_MESSAGE_SETS['fx-android']['sms']['android'],
'lang': 'en-US',
})
# /firefox/mobile-download/desktop
def test_fx_mobile_download_desktop_email(self):
resp_data = self._request({
'phone-or-email': 'dude@example.com',
'message-set': 'fx-mobile-download-desktop',
})
ok_(resp_data['success'])
self.mock_subscribe.assert_called_with('dude@example.com',
views.SEND_TO_DEVICE_MESSAGE_SETS['fx-mobile-download-desktop']['email']['all'],
source_url=None,
lang='en-US')
def test_fx_mobile_download_desktop_sms(self):
resp_data = self._request({
'phone-or-email': '5558675309',
'message-set': 'fx-mobile-download-desktop',
})
ok_(resp_data['success'])
self.mock_send_sms.assert_called_with('post', 'subscribe_sms', data={
'mobile_number': '5558675309',
'msg_name': views.SEND_TO_DEVICE_MESSAGE_SETS['fx-mobile-download-desktop']['sms']['all'],
'lang': 'en-US',
})
def test_sms_number_with_punctuation(self):
resp_data = self._request({
'phone-or-email': '(555) 867-5309',
'message-set': 'fx-mobile-download-desktop',
})
ok_(resp_data['success'])
self.mock_send_sms.assert_called_with('post', 'subscribe_sms', data={
'mobile_number': '5558675309',
'msg_name': views.SEND_TO_DEVICE_MESSAGE_SETS['fx-mobile-download-desktop']['sms']['all'],
'lang': 'en-US',
})
def test_sms_number_too_long(self):
resp_data = self._request({
'phone-or-email': '5558675309555867530912',
'message-set': 'fx-mobile-download-desktop',
})
ok_(not resp_data['success'])
self.mock_send_sms.assert_not_called()
ok_('number' in resp_data['errors'])
def test_sms_number_too_short(self):
resp_data = self._request({
'phone-or-email': '555',
'message-set': 'fx-mobile-download-desktop',
})
ok_(not resp_data['success'])
self.mock_send_sms.assert_not_called()
ok_('number' in resp_data['errors'])
@override_settings(DEV=False)
@patch('bedrock.firefox.views.l10n_utils.render')
class TestFirefoxNew(TestCase):
@patch.object(views, 'lang_file_is_active', lambda *x: True)
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_scene_1_template(self, render_mock):
req = RequestFactory().get('/firefox/new/')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/quantum/scene1.html')
@patch.object(views, 'lang_file_is_active', lambda *x: True)
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_scene_2_template(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/quantum/scene2.html')
@patch.object(views, 'lang_file_is_active', lambda *x: False)
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_scene_1_template_locale_inactive(self, render_mock):
req = RequestFactory().get('/firefox/new/')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/scene1.html')
@patch.object(views, 'lang_file_is_active', lambda *x: False)
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_scene_2_template_locale_inactive(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/scene2.html')
# ad-campaign experience tests (bug 1329661)
def test_break_free_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=breakfree')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/break-free/scene1.html')
def test_break_free_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=breakfree')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/break-free/scene2.html')
@patch.object(views, 'lang_file_is_active', lambda *x: True)
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_break_free_locale_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=breakfree')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/quantum/scene1.html')
@patch.object(views, 'lang_file_is_active', lambda *x: True)
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_break_free_locale_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=breakfree')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/quantum/scene2.html')
def test_way_of_the_fox_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=wayofthefox')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/way-of-the-fox/scene1.html')
def test_way_of_the_fox_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=wayofthefox')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/way-of-the-fox/scene2.html')
@patch.object(views, 'lang_file_is_active', lambda *x: True)
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_way_of_the_fox_locale_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=wayofthefox')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/quantum/scene1.html')
@patch.object(views, 'lang_file_is_active', lambda *x: True)
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_way_of_the_fox_locale_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=wayofthefox')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/quantum/scene2.html')
# moar ad campaign pages bug 1363543
def test_private_not_option_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=privatenotoption')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/private-not-option/scene1.html')
def test_private_not_option_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=privatenotoption')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/private-not-option/scene2.html')
def test_conformity_not_default_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=conformitynotdefault')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/conformity-not-default/scene1.html')
def test_conformity_not_default_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=conformitynotdefault')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/conformity-not-default/scene2.html')
def test_browse_up_to_you_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=browseuptoyou')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/browse-up-to-you/scene1.html')
def test_browse_up_to_you_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=browseuptoyou')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/browse-up-to-you/scene2.html')
def test_more_protection_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=moreprotection')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/more-protection/scene1.html')
def test_more_protection_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=moreprotection')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/more-protection/scene2.html')
def test_working_out_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=workingout')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/working-out/scene1.html')
def test_working_out_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=workingout')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/working-out/scene2.html')
def test_you_do_you_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=youdoyou')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/you-do-you/scene1.html')
def test_you_do_you_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=youdoyou')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/you-do-you/scene2.html')
def test_its_your_web_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=itsyourweb')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/its-your-web/scene1.html')
def test_its_your_web_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=itsyourweb')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/fx-lifestyle/its-your-web/scene2.html')
# sem program campaign bug 1383063
def test_secure_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=secure')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/secure/scene1.html')
def test_secure_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=secure')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/secure/scene2.html')
def test_nonprofit_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=nonprofit')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/nonprofit/scene1.html')
def test_nonprofit_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=nonprofit')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/nonprofit/scene2.html')
def test_compatible_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=compatible')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/compatible/scene1.html')
    def test_compatible_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=compatible')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/compatible/scene2.html')
def test_unsupported_browser_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=unsupported-browser')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/unsupported-browser/scene1.html')
def test_unsupported_browser_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=unsupported-browser')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/unsupported-browser/scene2.html')
# localized sem /fast page bug 1392680
def test_fast_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=fast')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/fast/scene1.html')
def test_fast_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=fast')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/fast/scene2.html')
@patch.object(views, 'lang_file_is_active', lambda *x: True)
def test_fast_locale_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=fast')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/fast/scene1.html')
@patch.object(views, 'lang_file_is_active', lambda *x: True)
def test_fast_locale_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=fast')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/sem/fast/scene2.html')
@patch.object(views, 'lang_file_is_active', lambda *x: False)
def test_fast_not_translated_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=fast')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/scene1.html')
@patch.object(views, 'lang_file_is_active', lambda *x: False)
def test_fast_not_translated_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=fast')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/scene2.html')
# browse against the machine bug 1363802, 1364988.
def test_batmfree_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=batmfree')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/free.html')
def test_batmfree_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=batmfree')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/scene2.html')
def test_batmprivate_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=batmprivate')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/private.html')
def test_batmprivate_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=batmprivate')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/scene2.html')
@patch.object(views, 'lang_file_is_active', lambda *x: True)
def test_batmprivate_locale_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=batmprivate')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/private.html')
@patch.object(views, 'lang_file_is_active', lambda *x: True)
def test_batmprivate_locale_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=batmprivate')
req.locale = 'de'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/scene2.html')
def test_batmnimble_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=batmnimble')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/nimble.html')
def test_batmnimble_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=batmnimble')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/scene2.html')
def test_batmresist_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=batmresist')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/resist.html')
def test_batmresist_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=batmresist')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/scene2.html')
# browse against the machine animation bug 1380044
def test_batmb_scene_1(self, render_mock):
req = RequestFactory().get('/firefox/new/?xv=batmprivate&v=b')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/machine.html')
def test_batmb_scene_2(self, render_mock):
req = RequestFactory().get('/firefox/new/?scene=2&xv=batmprivate&v=b')
req.locale = 'en-US'
views.new(req)
render_mock.assert_called_once_with(req, 'firefox/new/batm/scene2.html')
class TestFirefoxNewNoIndex(TestCase):
def test_scene_1_noindex(self):
# Scene 1 of /firefox/new/ should never contain a noindex tag.
req = RequestFactory().get('/firefox/new/')
req.locale = 'en-US'
response = views.new(req)
doc = pq(response.content)
robots = doc('meta[name="robots"]')
eq_(robots.length, 0)
def test_scene_2_noindex(self):
# Scene 2 of /firefox/new/ should always contain a noindex tag.
req = RequestFactory().get('/firefox/new/?scene=2')
req.locale = 'en-US'
response = views.new(req)
doc = pq(response.content)
robots = doc('meta[name="robots"]')
eq_(robots.length, 1)
ok_('noindex' in robots.attr('content'))
class TestFeedbackView(TestCase):
def test_get_template_names_default_unhappy(self):
view = views.FeedbackView()
view.request = RequestFactory().get('/')
eq_(view.get_template_names(), ['firefox/feedback/unhappy.html'])
def test_get_template_names_happy(self):
view = views.FeedbackView()
view.request = RequestFactory().get('/?score=5')
eq_(view.get_template_names(), ['firefox/feedback/happy.html'])
def test_get_template_names_unhappy(self):
view = views.FeedbackView()
view.request = RequestFactory().get('/?score=1')
eq_(view.get_template_names(), ['firefox/feedback/unhappy.html'])
def test_get_context_data_three_stars(self):
view = views.FeedbackView()
view.request = RequestFactory().get('/?score=3')
ctx = view.get_context_data()
self.assertTrue(ctx['donate_stars_url'].endswith('Heartbeat_3stars'))
def test_get_context_data_five_stars(self):
view = views.FeedbackView()
view.request = RequestFactory().get('/?score=5')
ctx = view.get_context_data()
self.assertTrue(ctx['donate_stars_url'].endswith('Heartbeat_5stars'))
def test_get_context_data_one_star(self):
"""donate_stars_url should be undefined"""
view = views.FeedbackView()
view.request = RequestFactory().get('/?score=1')
ctx = view.get_context_data()
self.assertFalse('donate_stars_url' in ctx)
@patch('bedrock.firefox.views.l10n_utils.render')
class TestFeaturesPages(TestCase):
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_bookmarks_pre_57(self, render_mock):
view = views.FeaturesBookmarksView.as_view()
req = RequestFactory().get('/en-US/firefox/features/bookmarks/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/bookmarks.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_bookmarks_post_57(self, render_mock):
view = views.FeaturesBookmarksView.as_view()
req = RequestFactory().get('/en-US/firefox/features/bookmarks/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/quantum/bookmarks.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_fast_pre_57(self, render_mock):
view = views.FeaturesFastView.as_view()
req = RequestFactory().get('/en-US/firefox/features/fast/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/fast.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_fast_post_57(self, render_mock):
view = views.FeaturesFastView.as_view()
req = RequestFactory().get('/en-US/firefox/features/fast/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/quantum/fast.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_independent_pre_57(self, render_mock):
view = views.FeaturesIndependentView.as_view()
req = RequestFactory().get('/en-US/firefox/features/independent/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/independent.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_independent_post_57(self, render_mock):
view = views.FeaturesIndependentView.as_view()
req = RequestFactory().get('/en-US/firefox/features/independent/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/quantum/independent.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_index_pre_57(self, render_mock):
req = RequestFactory().get('/firefox/features/')
views.features(req)
render_mock.assert_called_once_with(req, 'firefox/features/index.html')
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_index_post_57(self, render_mock):
req = RequestFactory().get('/firefox/features/')
views.features(req)
render_mock.assert_called_once_with(req, 'firefox/features/quantum/index.html')
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_memory_pre_57(self, render_mock):
view = views.FeaturesMemoryView.as_view()
req = RequestFactory().get('/en-US/firefox/features/memory/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/memory.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_memory_post_57(self, render_mock):
view = views.FeaturesMemoryView.as_view()
req = RequestFactory().get('/en-US/firefox/features/memory/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/quantum/memory.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_password_manager_pre_57(self, render_mock):
view = views.FeaturesPasswordManagerView.as_view()
req = RequestFactory().get('/en-US/firefox/features/password-manager/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/password-manager.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_password_manager_post_57(self, render_mock):
view = views.FeaturesPasswordManagerView.as_view()
req = RequestFactory().get('/en-US/firefox/features/password-manager/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/quantum/password-manager.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_private_browsing_pre_57(self, render_mock):
view = views.FeaturesPrivateBrowsingView.as_view()
req = RequestFactory().get('/en-US/firefox/features/private-browsing/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/private-browsing.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_private_browsing_post_57(self, render_mock):
view = views.FeaturesPrivateBrowsingView.as_view()
req = RequestFactory().get('/en-US/firefox/features/private-browsing/')
view(req)
template = render_mock.call_args[0][1]
eq_(template, ['firefox/features/quantum/private-browsing.html'])
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_send_tabs_pre_57(self, render_mock):
req = RequestFactory().get('/firefox/features/send-tabs/')
views.send_tabs(req)
render_mock.assert_called_once_with(req, 'firefox/features/send-tabs.html')
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_send_tabs_post_57(self, render_mock):
req = RequestFactory().get('/firefox/features/send-tabs/')
views.send_tabs(req)
render_mock.assert_called_once_with(req, 'firefox/features/quantum/send-tabs.html')
@patch('bedrock.firefox.views.switch', Mock(return_value=False))
def test_sync_page_template(self, render_mock):
req = RequestFactory().get('/firefox/features/sync/')
req.locale = 'en-US'
views.sync_page(req)
render_mock.assert_called_once_with(req, 'firefox/features/sync.html')
@patch('bedrock.firefox.views.switch', Mock(return_value=True))
def test_sync_page_57_template(self, render_mock):
req = RequestFactory().get('/firefox/features/sync/')
req.locale = 'en-US'
views.sync_page(req)
render_mock.assert_called_once_with(req, 'firefox/features/quantum/sync.html')
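# Illustrative note (assumption, not part of the original test module): these
# tests are normally collected by Django's test runner from the bedrock root,
# e.g.
#     ./manage.py test bedrock.firefox.tests.test_views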
|
schalkneethling/bedrock
|
bedrock/firefox/tests/test_views.py
|
Python
|
mpl-2.0
| 40,623
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from pysmt.exceptions import SolverAPINotFound
try:
import z3
except ImportError:
raise SolverAPINotFound
from functools import partial as p_
from six.moves import xrange
import pysmt.typing as types
import pysmt.operators as op
from pysmt.solvers.solver import (IncrementalTrackingSolver, UnsatCoreSolver,
Model, Converter, SolverOptions)
from pysmt.solvers.smtlib import SmtLibBasicSolver, SmtLibIgnoreMixin
from pysmt.solvers.qelim import QuantifierEliminator
from pysmt.solvers.interpolation import Interpolator
from pysmt.walkers import DagWalker
from pysmt.exceptions import (SolverReturnedUnknownResultError,
SolverNotConfiguredForUnsatCoresError,
SolverStatusError,
ConvertExpressionError,
UndefinedSymbolError, PysmtValueError)
from pysmt.decorators import clear_pending_pop, catch_conversion_error
from pysmt.logics import LRA, LIA, QF_UFLIA, QF_UFLRA, PYSMT_LOGICS
from pysmt.oracles import get_logic
from pysmt.constants import Fraction, Numeral, is_pysmt_integer, to_python_integer
# patch z3api
z3.is_ite = lambda x: z3.is_app_of(x, z3.Z3_OP_ITE)
z3.is_function = lambda x: z3.is_app_of(x, z3.Z3_OP_UNINTERPRETED)
z3.is_array_store = lambda x: z3.is_app_of(x, z3.Z3_OP_STORE)
z3.get_payload = lambda node,i : z3.Z3_get_decl_int_parameter(node.ctx.ref(),
node.decl().ast, i)
class AstRefKey:
def __init__(self, n):
self.n = n
def __hash__(self):
return self.n.hash()
def __eq__(self, other):
return self.n.eq(other.n)
def askey(n):
assert isinstance(n, z3.AstRef)
return AstRefKey(n)
class Z3Model(Model):
def __init__(self, environment, z3_model):
Model.__init__(self, environment)
self.z3_model = z3_model
self.converter = Z3Converter(environment, z3_model.ctx)
def get_value(self, formula, model_completion=True):
titem = self.converter.convert(formula)
z3_res = self.z3_model.eval(titem, model_completion=model_completion)
return self.converter.back(z3_res, model=self.z3_model)
def iterator_over(self, language):
for x in language:
yield x, self.get_value(x, model_completion=True)
def __iter__(self):
"""Overloading of iterator from Model. We iterate only on the
variables defined in the assignment.
"""
for d in self.z3_model.decls():
if d.arity() == 0:
try:
pysmt_d = self.converter.back(d())
yield pysmt_d, self.get_value(pysmt_d)
except UndefinedSymbolError:
# avoids problems with symbols generated by z3
pass
def __contains__(self, x):
"""Returns whether the model contains a value for 'x'."""
return x in (v for v, _ in self)
# EOC Z3Model
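# Illustrative sketch (assumption, not from the original file): reading values
# out of a Z3Model after a satisfiable check, using the __iter__ defined above.
#
#     from pysmt.shortcuts import Solver
#     with Solver(name="z3") as s:
#         s.add_assertion(formula)                  # `formula` is any pySMT formula
#         if s.solve():
#             for symbol, value in s.get_model():   # yields only user-defined symbols
#                 print(symbol, "=", value)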
class Z3Options(SolverOptions):
@staticmethod
def _set_option(z3solver, name, value):
try:
z3solver.set(name, value)
except z3.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" \
% (name, value))
def __call__(self, solver):
self._set_option(solver.z3, 'model', self.generate_models)
if self.unsat_cores_mode is not None:
self._set_option(solver.z3, 'unsat_core', True)
if self.random_seed is not None:
self._set_option(solver.z3, 'random_seed', self.random_seed)
for k,v in self.solver_options.items():
try:
self._set_option(solver.z3, str(k), v)
except z3.Z3Exception:
raise PysmtValueError("Error setting the option '%s=%s'" % (k,v))
# EOC Z3Options
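# Illustrative sketch (assumption, not from the original file): extra Z3
# options can be forwarded through pySMT's generic ``solver_options`` dict,
# which Z3Options.__call__ applies above via ``z3solver.set``:
#
#     from pysmt.shortcuts import Solver
#     with Solver(name="z3", solver_options={"timeout": 1000}) as s:
#         ...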
class Z3Solver(IncrementalTrackingSolver, UnsatCoreSolver,
SmtLibBasicSolver, SmtLibIgnoreMixin):
LOGICS = PYSMT_LOGICS
OptionsClass = Z3Options
def __init__(self, environment, logic, **options):
IncrementalTrackingSolver.__init__(self,
environment=environment,
logic=logic,
**options)
self.z3 = z3.SolverFor(str(logic))
self.options(self)
self.declarations = set()
self.converter = Z3Converter(environment, z3_ctx=self.z3.ctx)
self.mgr = environment.formula_manager
self._name_cnt = 0
return
@clear_pending_pop
def _reset_assertions(self):
self.z3.reset()
self.options(self)
@clear_pending_pop
def declare_variable(self, var):
raise NotImplementedError
@clear_pending_pop
def _add_assertion(self, formula, named=None):
self._assert_is_boolean(formula)
term = self.converter.convert(formula)
if self.options.unsat_cores_mode is not None:
# TODO: IF unsat_cores_mode is all, then we add this fresh variable.
# Otherwise, we should track this only if it is named.
key = self.mgr.FreshSymbol(template="_assertion_%d")
tkey = self.converter.convert(key)
self.z3.assert_and_track(term, tkey)
return (key, named, formula)
else:
self.z3.add(term)
return formula
def get_model(self):
return Z3Model(self.environment, self.z3.model())
@clear_pending_pop
def _solve(self, assumptions=None):
if assumptions is not None:
bool_ass = []
other_ass = []
for x in assumptions:
if x.is_literal():
bool_ass.append(self.converter.convert(x))
else:
other_ass.append(x)
if len(other_ass) > 0:
self.push()
self.add_assertion(self.mgr.And(other_ass))
self.pending_pop = True
res = self.z3.check(*bool_ass)
else:
res = self.z3.check()
sres = str(res)
assert sres in ['unknown', 'sat', 'unsat']
if sres == 'unknown':
raise SolverReturnedUnknownResultError
return (sres == 'sat')
def get_unsat_core(self):
"""After a call to solve() yielding UNSAT, returns the unsat core as a
set of formulae"""
return self.get_named_unsat_core().values()
def _named_assertions_map(self):
if self.options.unsat_cores_mode is not None:
return dict((t[0], (t[1],t[2])) for t in self.assertions)
return None
def get_named_unsat_core(self):
"""After a call to solve() yielding UNSAT, returns the unsat core as a
dict of names to formulae"""
if self.options.unsat_cores_mode is None:
raise SolverNotConfiguredForUnsatCoresError
if self.last_result is not False:
raise SolverStatusError("The last call to solve() was not" \
" unsatisfiable")
if self.last_command != "solve":
raise SolverStatusError("The solver status has been modified by a" \
" '%s' command after the last call to" \
" solve()" % self.last_command)
assumptions = self.z3.unsat_core()
pysmt_assumptions = set(self.converter.back(t) for t in assumptions)
res = {}
n_ass_map = self._named_assertions_map()
cnt = 0
for key in pysmt_assumptions:
if key in n_ass_map:
(name, formula) = n_ass_map[key]
if name is None:
name = "_a_%d" % cnt
cnt += 1
res[name] = formula
return res
@clear_pending_pop
def all_sat(self, important, callback):
raise NotImplementedError
@clear_pending_pop
def _push(self, levels=1):
for _ in xrange(levels):
self.z3.push()
@clear_pending_pop
def _pop(self, levels=1):
for _ in xrange(levels):
self.z3.pop()
def print_model(self, name_filter=None):
for var in self.declarations:
if name_filter is None or not var.symbol_name().startswith(name_filter):
print("%s = %s" % (var.symbol_name(), self.get_value(var)))
def get_value(self, item):
self._assert_no_function_type(item)
titem = self.converter.convert(item)
z3_res = self.z3.model().eval(titem, model_completion=True)
res = self.converter.back(z3_res, self.z3.model())
if not res.is_constant():
return res.simplify()
return res
def _exit(self):
del self.z3
BOOLREF_SET = op.BOOL_OPERATORS | op.RELATIONS
ARITHREF_SET = op.IRA_OPERATORS
BITVECREF_SET = op.BV_OPERATORS
class Z3Converter(Converter, DagWalker):
def __init__(self, environment, z3_ctx):
DagWalker.__init__(self, environment)
self.mgr = environment.formula_manager
self._get_type = environment.stc.get_type
self._back_memoization = {}
self.ctx = z3_ctx
# Forward conversion
self.set_function(p_(self.walk_nary, z3.Z3_mk_and), op.AND)
self.set_function(p_(self.walk_nary, z3.Z3_mk_or), op.OR)
self.set_function(p_(self.walk_nary, z3.Z3_mk_add), op.PLUS)
self.set_function(p_(self.walk_nary, z3.Z3_mk_mul), op.TIMES)
self.set_function(p_(self.walk_nary, z3.Z3_mk_sub), op.MINUS)
self.set_function(p_(self.walk_binary, z3.Z3_mk_implies), op.IMPLIES)
self.set_function(p_(self.walk_binary, z3.Z3_mk_le), op.LE)
self.set_function(p_(self.walk_binary, z3.Z3_mk_lt), op.LT)
self.set_function(p_(self.walk_binary, z3.Z3_mk_eq), op.EQUALS, op.IFF)
self.set_function(p_(self.walk_binary, z3.Z3_mk_power), op.POW)
self.set_function(p_(self.walk_binary, z3.Z3_mk_div), op.DIV)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvult), op.BV_ULT)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvule), op.BV_ULE)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvslt), op.BV_SLT)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvsle), op.BV_SLE)
self.set_function(p_(self.walk_binary, z3.Z3_mk_concat), op.BV_CONCAT)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvor), op.BV_OR)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvand), op.BV_AND)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvxor), op.BV_XOR)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvadd), op.BV_ADD)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvsub), op.BV_SUB)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvmul), op.BV_MUL)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvudiv), op.BV_UDIV)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvurem), op.BV_UREM)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvshl), op.BV_LSHL)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvlshr), op.BV_LSHR)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvsdiv), op.BV_SDIV)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvsrem), op.BV_SREM)
self.set_function(p_(self.walk_binary, z3.Z3_mk_bvashr), op.BV_ASHR)
self.set_function(self.walk_quantifier, *op.QUANTIFIERS)
# Back Conversion
self._back_fun = {
z3.Z3_OP_AND: lambda args, expr: self.mgr.And(args),
z3.Z3_OP_OR: lambda args, expr: self.mgr.Or(args),
z3.Z3_OP_MUL: lambda args, expr: self.mgr.Times(args),
z3.Z3_OP_ADD: lambda args, expr: self.mgr.Plus(args),
z3.Z3_OP_DIV: lambda args, expr: self.mgr.Div(args[0], args[1]),
z3.Z3_OP_IFF: lambda args, expr: self.mgr.Iff(args[0], args[1]),
z3.Z3_OP_XOR: lambda args, expr: self.mgr.Xor(args[0], args[1]),
z3.Z3_OP_FALSE: lambda args, expr: self.mgr.FALSE(),
z3.Z3_OP_TRUE: lambda args, expr: self.mgr.TRUE(),
z3.Z3_OP_GT: lambda args, expr: self.mgr.GT(args[0], args[1]),
z3.Z3_OP_GE: lambda args, expr: self.mgr.GE(args[0], args[1]),
z3.Z3_OP_LT: lambda args, expr: self.mgr.LT(args[0], args[1]),
z3.Z3_OP_LE: lambda args, expr: self.mgr.LE(args[0], args[1]),
z3.Z3_OP_SUB: lambda args, expr: self.mgr.Minus(args[0], args[1]),
z3.Z3_OP_NOT: lambda args, expr: self.mgr.Not(args[0]),
z3.Z3_OP_IMPLIES: lambda args, expr: self.mgr.Implies(args[0], args[1]),
z3.Z3_OP_ITE: lambda args, expr: self.mgr.Ite(args[0], args[1], args[2]),
z3.Z3_OP_TO_REAL: lambda args, expr: self.mgr.ToReal(args[0]),
z3.Z3_OP_BAND : lambda args, expr: self.mgr.BVAnd(args[0], args[1]),
z3.Z3_OP_BOR : lambda args, expr: self.mgr.BVOr(args[0], args[1]),
z3.Z3_OP_BXOR : lambda args, expr: self.mgr.BVXor(args[0], args[1]),
z3.Z3_OP_BNOT : lambda args, expr: self.mgr.BVNot(args[0]),
z3.Z3_OP_BNEG : lambda args, expr: self.mgr.BVNeg(args[0]),
z3.Z3_OP_CONCAT : lambda args, expr: self.mgr.BVConcat(args[0], args[1]),
z3.Z3_OP_ULT : lambda args, expr: self.mgr.BVULT(args[0], args[1]),
z3.Z3_OP_ULEQ : lambda args, expr: self.mgr.BVULE(args[0], args[1]),
z3.Z3_OP_SLT : lambda args, expr: self.mgr.BVSLT(args[0], args[1]),
z3.Z3_OP_SLEQ : lambda args, expr: self.mgr.BVSLE(args[0], args[1]),
z3.Z3_OP_UGT : lambda args, expr: self.mgr.BVUGT(args[0], args[1]),
z3.Z3_OP_UGEQ : lambda args, expr: self.mgr.BVUGE(args[0], args[1]),
z3.Z3_OP_SGT : lambda args, expr: self.mgr.BVSGT(args[0], args[1]),
z3.Z3_OP_SGEQ : lambda args, expr: self.mgr.BVSGE(args[0], args[1]),
z3.Z3_OP_BADD : lambda args, expr: self.mgr.BVAdd(args[0], args[1]),
z3.Z3_OP_BMUL : lambda args, expr: self.mgr.BVMul(args[0], args[1]),
z3.Z3_OP_BUDIV : lambda args, expr: self.mgr.BVUDiv(args[0], args[1]),
z3.Z3_OP_BSDIV : lambda args, expr: self.mgr.BVSDiv(args[0], args[1]),
z3.Z3_OP_BUREM : lambda args, expr: self.mgr.BVURem(args[0], args[1]),
z3.Z3_OP_BSREM : lambda args, expr: self.mgr.BVSRem(args[0], args[1]),
z3.Z3_OP_BSHL : lambda args, expr: self.mgr.BVLShl(args[0], args[1]),
z3.Z3_OP_BLSHR : lambda args, expr: self.mgr.BVLShr(args[0], args[1]),
z3.Z3_OP_BASHR : lambda args, expr: self.mgr.BVAShr(args[0], args[1]),
z3.Z3_OP_BSUB : lambda args, expr: self.mgr.BVSub(args[0], args[1]),
z3.Z3_OP_EXT_ROTATE_LEFT : lambda args, expr: self.mgr.BVRol(args[0], args[1].bv_unsigned_value()),
z3.Z3_OP_EXT_ROTATE_RIGHT: lambda args, expr: self.mgr.BVRor(args[0], args[1].bv_unsigned_value()),
z3.Z3_OP_POWER : lambda args, expr: self.mgr.Pow(args[0], args[1]),
z3.Z3_OP_SELECT : lambda args, expr: self.mgr.Select(args[0], args[1]),
z3.Z3_OP_STORE : lambda args, expr: self.mgr.Store(args[0], args[1], args[2]),
# Actually use both args, expr
z3.Z3_OP_SIGN_EXT: lambda args, expr: self.mgr.BVSExt(args[0], z3.get_payload(expr, 0)),
z3.Z3_OP_ZERO_EXT: lambda args, expr: self.mgr.BVZExt(args[0], z3.get_payload(expr, 0)),
z3.Z3_OP_ROTATE_LEFT: lambda args, expr: self.mgr.BVRol(args[0], z3.get_payload(expr, 0)),
z3.Z3_OP_ROTATE_RIGHT: lambda args, expr: self.mgr.BVRor(args[0], z3.get_payload(expr, 0)),
z3.Z3_OP_EXTRACT: lambda args, expr: self.mgr.BVExtract(args[0],
z3.get_payload(expr, 1),
z3.get_payload(expr, 0)),
# Complex Back Translation
z3.Z3_OP_EQ : self._back_z3_eq,
z3.Z3_OP_UMINUS : self._back_z3_uminus,
z3.Z3_OP_CONST_ARRAY : self._back_z3_const_array,
}
# Unique reference to Sorts
self.z3RealSort = z3.RealSort(self.ctx)
self.z3BoolSort = z3.BoolSort(self.ctx)
self.z3IntSort = z3.IntSort(self.ctx)
self.z3ArraySorts = {}
self._z3BitVecSorts = {}
# Unique reference to Function Declaration
self._z3_func_decl_cache = {}
return
def z3BitVecSort(self, width):
"""Return the z3 BitVecSort for the given width."""
try:
bvsort = self._z3BitVecSorts[width]
except KeyError:
bvsort = z3.BitVecSort(width)
self._z3BitVecSorts[width] = bvsort
return bvsort
@catch_conversion_error
def convert(self, formula):
z3term = self.walk(formula)
        if formula.node_type() in op.QUANTIFIERS:
return z3.QuantifierRef(z3term, self.ctx)
elif formula.node_type() in BOOLREF_SET:
return z3.BoolRef(z3term, self.ctx)
elif formula.node_type() in ARITHREF_SET:
return z3.ArithRef(z3term, self.ctx)
elif formula.node_type() in BITVECREF_SET:
return z3.BitVecRef(z3term, self.ctx)
elif formula.is_symbol() or formula.is_function_application():
if formula.is_function_application():
type_ = formula.function_name().symbol_type()
type_ = type_.return_type
else:
type_ = formula.symbol_type()
if type_.is_bool_type():
return z3.BoolRef(z3term, self.ctx)
elif type_.is_real_type() or type_.is_int_type():
return z3.ArithRef(z3term, self.ctx)
elif type_.is_array_type():
return z3.ArrayRef(z3term, self.ctx)
elif type_.is_bv_type():
return z3.BitVecRef(z3term, self.ctx)
else:
raise NotImplementedError(formula)
elif formula.node_type() in op.ARRAY_OPERATORS:
return z3.ArrayRef(z3term, self.ctx)
else:
assert formula.is_constant(), formula
type_ = formula.constant_type()
if type_.is_bool_type():
return z3.BoolRef(z3term, self.ctx)
elif type_.is_real_type() or type_.is_int_type():
return z3.ArithRef(z3term, self.ctx)
elif type_.is_array_type():
return z3.ArrayRef(z3term, self.ctx)
elif type_.is_bv_type():
return z3.BitVecRef(z3term, self.ctx)
else:
raise NotImplementedError(formula)
def back(self, expr, model=None):
"""Convert a Z3 expression back into a pySMT expression.
This is done using the Z3 API. For very big expressions, it is
sometimes faster to go through the SMT-LIB format. In those
cases, consider using the method back_via_smtlib.
"""
stack = [expr]
while len(stack) > 0:
current = stack.pop()
key = (askey(current), model)
if key not in self._back_memoization:
self._back_memoization[key] = None
stack.append(current)
for i in xrange(current.num_args()):
stack.append(current.arg(i))
elif self._back_memoization[key] is None:
args = [self._back_memoization[(askey(current.arg(i)), model)]
for i in xrange(current.num_args())]
res = self._back_single_term(current, args, model)
self._back_memoization[key] = res
else:
# we already visited the node, nothing else to do
pass
return self._back_memoization[(askey(expr), model)]
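# Rough round-trip sketch, assuming pySMT's shortcuts API and an installed z3
# backend; the symbol names "a" and "b" are illustrative only:
#
#   from pysmt.shortcuts import Symbol, And, Solver
#   with Solver(name="z3") as s:
#       f = And(Symbol("a"), Symbol("b"))
#       z3_f = s.converter.convert(f)   # pySMT -> Z3
#       g = s.converter.back(z3_f)      # Z3 -> pySMT, memoized per (term, model)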
def _back_single_term(self, expr, args, model=None):
assert z3.is_expr(expr)
if z3.is_quantifier(expr):
raise NotImplementedError(
"Quantified back conversion is currently not supported")
assert not len(args) > 2 or \
(z3.is_and(expr) or z3.is_or(expr) or
z3.is_add(expr) or z3.is_mul(expr) or
(len(args) == 3 and (z3.is_ite(expr) or z3.is_array_store(expr)))),\
"Unexpected n-ary term: %s" % expr
res = None
try:
decl = z3.Z3_get_app_decl(expr.ctx_ref(), expr.as_ast())
kind = z3.Z3_get_decl_kind(expr.ctx.ref(), decl)
# Try to get the back-conversion function for the given Kind
fun = self._back_fun[kind]
return fun(args, expr)
except KeyError as ex:
pass
if z3.is_const(expr):
# Const or Symbol
if z3.is_rational_value(expr):
n = expr.numerator_as_long()
d = expr.denominator_as_long()
f = Fraction(n, d)
return self.mgr.Real(f)
elif z3.is_int_value(expr):
n = expr.as_long()
return self.mgr.Int(n)
elif z3.is_bv_value(expr):
n = expr.as_long()
w = expr.size()
return self.mgr.BV(n, w)
elif z3.is_as_array(expr):
if model is None:
raise NotImplementedError("As-array expressions cannot be" \
" handled as they are not " \
"self-contained")
else:
interp_decl = z3.get_as_array_func(expr)
interp = model[interp_decl]
default = self.back(interp.else_value(), model=model)
assign = {}
for i in xrange(interp.num_entries()):
e = interp.entry(i)
assert e.num_args() == 1
idx = self.back(e.arg_value(0), model=model)
val = self.back(e.value(), model=model)
assign[idx] = val
arr_type = self._z3_to_type(expr.sort())
return self.mgr.Array(arr_type.index_type, default, assign)
elif z3.is_algebraic_value(expr):
# Algebraic value
return self.mgr._Algebraic(Numeral(expr))
else:
# it must be a symbol
try:
return self.mgr.get_symbol(str(expr))
except UndefinedSymbolError:
import warnings
symb_type = self._z3_to_type(expr.sort())
warnings.warn("Defining new symbol: %s" % str(res))
return self.mgr.FreshSymbol(symb_type,
template="__z3_%d")
elif z3.is_function(expr):
# This needs to be after we try to convert regular Symbols
fsymbol = self.mgr.get_symbol(expr.decl().name())
return self.mgr.Function(fsymbol, args)
# If we reach this point, we did not manage to translate the expression
raise ConvertExpressionError(message=("Unsupported expression: %s" %
str(expr)),
expression=expr)
def _back_z3_eq(self, args, expr):
if self._get_type(args[0]).is_bool_type():
return self.mgr.Iff(args[0], args[1])
return self.mgr.Equals(args[0], args[1])
def _back_z3_uminus(self, args, expr):
tp = self._get_type(args[0])
if tp.is_real_type():
minus_one = self.mgr.Real(-1)
else:
assert tp.is_int_type()
minus_one = self.mgr.Int(-1)
return self.mgr.Times(args[0], minus_one)
def _back_z3_const_array(self, args, expr):
arr_ty = self._z3_to_type(expr.sort())
return self.mgr.Array(arr_ty.index_type, args[0])
def back_via_smtlib(self, expr):
"""Back convert a Z3 Expression by translation to SMT-LIB."""
from six import StringIO
from pysmt.smtlib.parser import SmtLibZ3Parser
parser = SmtLibZ3Parser(self.env)
z3.Z3_set_ast_print_mode(expr.ctx.ref(), z3.Z3_PRINT_SMTLIB2_COMPLIANT)
s = z3.Z3_benchmark_to_smtlib_string(expr.ctx.ref(),
None, None,
None, None,
0, None,
expr.ast)
stream_in = StringIO(s)
r = parser.get_script(stream_in).get_last_formula(self.mgr)
key = (askey(expr), None)
self._back_memoization[key] = r
return r
# Fwd Conversion
def _to_ast_array(self, args):
"""Convert a list of arguments into an z3.AST vector."""
sz = len(args)
_args = (z3.Ast * sz)()
for i, arg in enumerate(args):
_args[i] = arg
return _args, sz
def walk_nary(self, func, formula, args, **kwargs):
_args, sz = self._to_ast_array(args)
z3term = func(self.ctx.ref(), sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_binary(self, func, formula, args, **kwargs):
z3term = func(self.ctx.ref(), args[0], args[1])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_not(self, formula, args, **kwargs):
z3term = z3.Z3_mk_not(self.ctx.ref(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_symbol(self, formula, **kwargs):
symbol_type = formula.symbol_type()
sname = formula.symbol_name()
z3_sname = z3.Z3_mk_string_symbol(self.ctx.ref(), sname)
if symbol_type.is_bool_type():
sort_ast = self.z3BoolSort.ast
elif symbol_type.is_real_type():
sort_ast = self.z3RealSort.ast
elif symbol_type.is_int_type():
sort_ast = self.z3IntSort.ast
elif symbol_type.is_array_type():
sort_ast = self._type_to_z3(symbol_type).ast
else:
assert symbol_type.is_bv_type()
sort_ast = self._type_to_z3(symbol_type).ast
# Create const with given sort
res = z3.Z3_mk_const(self.ctx.ref(), z3_sname, sort_ast)
z3.Z3_inc_ref(self.ctx.ref(), res)
return res
def walk_ite(self, formula, args, **kwargs):
i = args[0]
ni = self.walk_not(None, (i,))
t = args[1]
e = args[2]
if self._get_type(formula).is_bool_type():
# Rewrite as (!i \/ t) & (i \/ e)
_args, sz = self._to_ast_array((ni, t))
or1 = z3.Z3_mk_or(self.ctx.ref(), sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), or1)
_args, sz = self._to_ast_array((i, e))
or2 = z3.Z3_mk_or(self.ctx.ref(), sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), or2)
_args, sz = self._to_ast_array((or1, or2))
z3term = z3.Z3_mk_and(self.ctx.ref(), sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
z3.Z3_dec_ref(self.ctx.ref(), or1)
z3.Z3_dec_ref(self.ctx.ref(), or2)
return z3term
z3term = z3.Z3_mk_ite(self.ctx.ref(), i, t, e)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_real_constant(self, formula, **kwargs):
frac = formula.constant_value()
n,d = frac.numerator, frac.denominator
rep = str(n) + "/" + str(d)
z3term = z3.Z3_mk_numeral(self.ctx.ref(),
rep,
self.z3RealSort.ast)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_int_constant(self, formula, **kwargs):
assert is_pysmt_integer(formula.constant_value())
const = str(formula.constant_value())
z3term = z3.Z3_mk_numeral(self.ctx.ref(),
const,
self.z3IntSort.ast)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bool_constant(self, formula, **kwargs):
_t = z3.BoolVal(formula.constant_value(), ctx=self.ctx)
z3term = _t.as_ast()
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_quantifier(self, formula, args, **kwargs):
qvars = formula.quantifier_vars()
qvars, qvars_sz = self._to_ast_array([self.walk_symbol(x)\
for x in qvars])
empty_str = z3.Z3_mk_string_symbol(self.ctx.ref(), "")
z3term = z3.Z3_mk_quantifier_const_ex(self.ctx.ref(),
formula.is_forall(),
1, empty_str, empty_str,
qvars_sz, qvars,
0, None, 0, None,
args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_toreal(self, formula, args, **kwargs):
z3term = z3.Z3_mk_int2real(self.ctx.ref(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def _z3_func_decl(self, func_name):
"""Create a Z3 Function Declaration for the given function."""
try:
return self._z3_func_decl_cache[func_name]
except KeyError:
tp = func_name.symbol_type()
arity = len(tp.param_types)
z3dom = (z3.Sort * arity)()
for i, t in enumerate(tp.param_types):
z3dom[i] = self._type_to_z3(t).ast
z3ret = self._type_to_z3(tp.return_type).ast
z3name = z3.Z3_mk_string_symbol(self.ctx.ref(),
func_name.symbol_name())
z3func = z3.Z3_mk_func_decl(self.ctx.ref(), z3name,
arity, z3dom, z3ret)
self._z3_func_decl_cache[func_name] = z3func
return z3func
def walk_function(self, formula, args, **kwargs):
z3func = self._z3_func_decl(formula.function_name())
_args, sz = self._to_ast_array(args)
z3term = z3.Z3_mk_app(self.ctx.ref(), z3func, sz, _args)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_constant(self, formula, **kwargs):
value = formula.constant_value()
z3term = z3.Z3_mk_numeral(self.ctx.ref(),
str(value),
self.z3BitVecSort(formula.bv_width()).ast)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_extract(self, formula, args, **kwargs):
z3term = z3.Z3_mk_extract(self.ctx.ref(),
formula.bv_extract_end(),
formula.bv_extract_start(),
args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_not(self, formula, args, **kwargs):
z3term = z3.Z3_mk_bvnot(self.ctx.ref(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_neg(self, formula, args, **kwargs):
z3term = z3.Z3_mk_bvneg(self.ctx.ref(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_rol(self, formula, args, **kwargs):
bvsort = self.z3BitVecSort(formula.bv_width())
step = z3.Z3_mk_numeral(self.ctx.ref(),
str(formula.bv_rotation_step()),
bvsort.ast)
z3term = z3.Z3_mk_ext_rotate_left(self.ctx.ref(),
args[0], step)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_ror(self, formula, args, **kwargs):
bvsort = self.z3BitVecSort(formula.bv_width())
step = z3.Z3_mk_numeral(self.ctx.ref(),
str(formula.bv_rotation_step()),
bvsort.ast)
z3term = z3.Z3_mk_ext_rotate_right(self.ctx.ref(),
args[0], step)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_zext(self, formula, args, **kwargs):
z3term = z3.Z3_mk_zero_ext(self.ctx.ref(),
formula.bv_extend_step(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_sext (self, formula, args, **kwargs):
z3term = z3.Z3_mk_sign_ext(self.ctx.ref(),
formula.bv_extend_step(), args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_bv_comp (self, formula, args, **kwargs):
cond = z3.Z3_mk_eq(self.ctx.ref(), args[0], args[1])
z3.Z3_inc_ref(self.ctx.ref(), cond)
then_ = z3.Z3_mk_numeral(self.ctx.ref(), "1", self.z3BitVecSort(1).ast)
z3.Z3_inc_ref(self.ctx.ref(), then_)
else_ = z3.Z3_mk_numeral(self.ctx.ref(), "0", self.z3BitVecSort(1).ast)
z3.Z3_inc_ref(self.ctx.ref(), else_)
z3term = z3.Z3_mk_ite(self.ctx.ref(), cond, then_, else_)
z3.Z3_inc_ref(self.ctx.ref(), z3term)
# De-Ref since this is handled internally by Z3
z3.Z3_dec_ref(self.ctx.ref(), cond)
z3.Z3_dec_ref(self.ctx.ref(), then_)
z3.Z3_dec_ref(self.ctx.ref(), else_)
return z3term
def walk_array_select(self, formula, args, **kwargs):
z3term = z3.Z3_mk_select(self.ctx.ref(), args[0], args[1])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_array_store(self, formula, args, **kwargs):
z3term = z3.Z3_mk_store(self.ctx.ref(), args[0], args[1], args[2])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def walk_array_value(self, formula, args, **kwargs):
idx_type = formula.array_value_index_type()
arraysort = self._type_to_z3(idx_type).ast
z3term = z3.Z3_mk_const_array(self.ctx.ref(), arraysort, args[0])
z3.Z3_inc_ref(self.ctx.ref(), z3term)
for i in xrange(1, len(args), 2):
c = args[i]
z3term = self.walk_array_store(None, (z3term, c, args[i+1]))
z3.Z3_inc_ref(self.ctx.ref(), z3term)
return z3term
def _z3_to_type(self, sort):
if sort.kind() == z3.Z3_BOOL_SORT:
return types.BOOL
elif sort.kind() == z3.Z3_INT_SORT:
return types.INT
elif sort.kind() == z3.Z3_REAL_SORT:
return types.REAL
elif sort.kind() == z3.Z3_ARRAY_SORT:
return types.ArrayType(self._z3_to_type(sort.domain()),
self._z3_to_type(sort.range()))
elif sort.kind() == z3.Z3_BV_SORT:
return types.BVType(sort.size())
else:
raise NotImplementedError("Unsupported sort in conversion: %s" % sort)
def _type_to_z3(self, tp):
"""Convert a pySMT type into the corresponding Z3 sort."""
if tp.is_bool_type():
return self.z3BoolSort
elif tp.is_real_type():
return self.z3RealSort
elif tp.is_int_type():
return self.z3IntSort
elif tp.is_array_type():
key_sort = self._type_to_z3(tp.index_type)
val_sort = self._type_to_z3(tp.elem_type)
try:
return self.z3ArraySorts[(askey(key_sort),
askey(val_sort))]
except KeyError:
sort = z3.ArraySort(key_sort, val_sort)
self.z3ArraySorts[(askey(key_sort),
askey(val_sort))] = sort
return sort
else:
assert tp.is_bv_type() , "Unsupported type '%s'" % tp
return self.z3BitVecSort(tp.width)
raise NotImplementedError("Unsupported type in conversion: %s" % tp)
def __del__(self):
# Cleaning-up Z3Converter requires dec-ref'ing the terms in the cache
for t in self.memoization.values():
z3.Z3_dec_ref(self.ctx.ref(), t)
# EOC Z3Converter
class Z3QuantifierEliminator(QuantifierEliminator):
LOGICS = [LIA, LRA]
def __init__(self, environment, logic=None):
QuantifierEliminator.__init__(self)
self.environment = environment
self.logic = logic
self.converter = Z3Converter(environment, z3._get_ctx(None))
def eliminate_quantifiers(self, formula):
logic = get_logic(formula, self.environment)
if not logic <= LRA and not logic <= LIA:
raise PysmtValueError("Z3 quantifier elimination only "\
"supports LRA or LIA without combination."\
"(detected logic is: %s)" % str(logic))
simplifier = z3.Tactic('simplify')
eliminator = z3.Tactic('qe')
f = self.converter.convert(formula)
s = simplifier(f, elim_and=True,
pull_cheap_ite=True,
ite_extra_rules=True).as_expr()
res = eliminator(s, eliminate_variables_as_block=True).as_expr()
pysmt_res = None
try:
pysmt_res = self.converter.back(res)
except ConvertExpressionError:
if logic <= LRA:
raise
raise ConvertExpressionError(message=("Unable to represent" \
"expression %s in pySMT: the quantifier elimination for " \
"LIA is incomplete as it requires the modulus. You can " \
"find the Z3 expression representing the quantifier " \
"elimination as the attribute 'expression' of this " \
"exception object" % str(res)),
expression=res)
return pysmt_res
def _exit(self):
pass
class Z3Interpolator(Interpolator):
LOGICS = [QF_UFLIA, QF_UFLRA]
def __init__(self, environment, logic=None):
Interpolator.__init__(self)
self.environment = environment
self.logic = logic
self.converter = Z3Converter(environment, z3_ctx=z3._get_ctx(None))
def _check_logic(self, formulas):
for f in formulas:
logic = get_logic(f, self.environment)
ok = any(logic <= l for l in self.LOGICS)
if not ok:
raise PysmtValueError("Logic not supported by Z3 interpolation."
"(detected logic is: %s)" % str(logic))
def binary_interpolant(self, a, b):
self._check_logic([a, b])
a = self.converter.convert(a)
b = self.converter.convert(b)
try:
itp = z3.binary_interpolant(a, b)
pysmt_res = self.converter.back(itp)
except z3.ModelRef:
pysmt_res = None
return pysmt_res
def sequence_interpolant(self, formulas):
self._check_logic(formulas)
zf = [self.converter.convert(f) for f in formulas]
try:
itp = z3.sequence_interpolant(zf)
pysmt_res = [self.converter.back(f) for f in itp]
except z3.ModelRef:
pysmt_res = None
return pysmt_res
def _exit(self):
pass
|
agriggio/pysmt
|
pysmt/solvers/z3.py
|
Python
|
apache-2.0
| 40,131
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-28 08:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0002_auto_20170728_1008'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='post_author',
new_name='news_author',
),
]
|
DarkArtek/FFXIVITAFC
|
news/migrations/0003_auto_20170728_1009.py
|
Python
|
unlicense
| 431
|
from unittest import TestCase
from scrapy.http import Response, Request
from scrapy.spider import Spider
from scrapy.contrib.spidermiddleware.offsite import OffsiteMiddleware
from scrapy.utils.test import get_crawler
class TestOffsiteMiddleware(TestCase):
def setUp(self):
self.spider = self._get_spider()
crawler = get_crawler()
self.mw = OffsiteMiddleware.from_crawler(crawler)
self.mw.spider_opened(self.spider)
def _get_spider(self):
return Spider('foo', allowed_domains=['scrapytest.org', 'scrapy.org'])
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
onsite_reqs = [Request('http://scrapytest.org/1'),
Request('http://scrapy.org/1'),
Request('http://sub.scrapy.org/1'),
Request('http://offsite.tld/letmepass', dont_filter=True)]
offsite_reqs = [Request('http://scrapy2.org'),
Request('http://offsite.tld/'),
Request('http://offsite.tld/scrapytest.org'),
Request('http://offsite.tld/rogue.scrapytest.org'),
Request('http://rogue.scrapytest.org.haha.com'),
Request('http://roguescrapytest.org')]
reqs = onsite_reqs + offsite_reqs
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEquals(out, onsite_reqs)
class TestOffsiteMiddleware2(TestOffsiteMiddleware):
def _get_spider(self):
return Spider('foo', allowed_domains=None)
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
reqs = [Request('http://a.com/b.html'), Request('http://b.com/1')]
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEquals(out, reqs)
class TestOffsiteMiddleware3(TestOffsiteMiddleware2):
def _get_spider(self):
return Spider('foo')
|
joshlk/scrapy
|
scrapy/tests/test_spidermiddleware_offsite.py
|
Python
|
bsd-3-clause
| 1,982
|
# Author: Tyler Fenby <tylerfenby@gmail.com>
# URL: https://sickrage.tv
# Git: https://github.com/SiCKRAGETV/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import urllib
import datetime
from sickbeard import db
from sickbeard import logger
from sickbeard.exceptions import ex, EpisodeNotFoundException
from sickbeard.common import Quality
from sickbeard.common import WANTED, FAILED
from sickrage.helper.encoding import ss
from sickrage.show.History import History
def prepareFailedName(release):
"""Standardizes release name for failed DB"""
fixed = urllib.unquote(release)
if fixed.endswith(".nzb"):
fixed = fixed.rpartition(".")[0]
fixed = re.sub("[\.\-\+\ ]", "_", fixed)
fixed = ss(fixed)
return fixed
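# For example, a name like "Some.Release-Name.nzb" is unquoted, loses the
# ".nzb" suffix, and has its separators replaced, yielding "Some_Release_Name".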
def logFailed(release):
log_str = u""
size = -1
provider = ""
release = prepareFailedName(release)
myDB = db.DBConnection('failed.db')
sql_results = myDB.select("SELECT * FROM history WHERE release=?", [release])
if len(sql_results) == 0:
logger.log(
u"Release not found in snatch history.", logger.WARNING)
elif len(sql_results) > 1:
logger.log(u"Multiple logged snatches found for release", logger.WARNING)
sizes = len(set(x["size"] for x in sql_results))
providers = len(set(x["provider"] for x in sql_results))
if sizes == 1:
logger.log(u"However, they're all the same size. Continuing with found size.", logger.WARNING)
size = sql_results[0]["size"]
else:
logger.log(
u"They also vary in size. Deleting the logged snatches and recording this release with no size/provider",
logger.WARNING)
for result in sql_results:
deleteLoggedSnatch(result["release"], result["size"], result["provider"])
if providers == 1:
logger.log(u"They're also from the same provider. Using it as well.")
provider = sql_results[0]["provider"]
else:
size = sql_results[0]["size"]
provider = sql_results[0]["provider"]
if not hasFailed(release, size, provider):
myDB = db.DBConnection('failed.db')
myDB.action("INSERT INTO failed (release, size, provider) VALUES (?, ?, ?)", [release, size, provider])
deleteLoggedSnatch(release, size, provider)
return log_str
def logSuccess(release):
release = prepareFailedName(release)
myDB = db.DBConnection('failed.db')
myDB.action("DELETE FROM history WHERE release=?", [release])
def hasFailed(release, size, provider="%"):
"""
Returns True if a release has previously failed.
If provider is given, return True only if the release is found
with that specific provider. Otherwise, return True if the release
is found with any provider.
:param release: Release name to check for a prior failure
:param size: Size of release
:param provider: Specific provider to search (defaults to all providers)
:return: True if a release has previously failed.
"""
release = prepareFailedName(release)
myDB = db.DBConnection('failed.db')
sql_results = myDB.select(
"SELECT * FROM failed WHERE release=? AND size=? AND provider LIKE ?",
[release, size, provider])
return (len(sql_results) > 0)
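# Rough usage sketch; the release name and size below are made-up examples:
#
#   if hasFailed("Some.Release-Name.nzb", 1048576):
#       logger.log(u"Release failed before, skipping it.", logger.DEBUG)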
def revertEpisode(epObj):
"""Restore the episodes of a failed download to their original state"""
myDB = db.DBConnection('failed.db')
sql_results = myDB.select("SELECT * FROM history WHERE showid=? AND season=?",
[epObj.show.indexerid, epObj.season])
history_eps = dict([(res["episode"], res) for res in sql_results])
try:
logger.log(u"Reverting episode (%s, %s): %s" % (epObj.season, epObj.episode, epObj.name))
with epObj.lock:
if epObj.episode in history_eps:
logger.log(u"Found in history")
epObj.status = history_eps[epObj.episode]['old_status']
else:
logger.log(u"WARNING: Episode not found in history. Setting it back to WANTED",
logger.WARNING)
epObj.status = WANTED
epObj.saveToDB()
except EpisodeNotFoundException, e:
logger.log(u"Unable to create episode, please set its status manually: " + ex(e),
logger.WARNING)
def markFailed(epObj):
"""
Mark an episode as failed
:param epObj: Episode object to mark as failed
:return: empty string
"""
log_str = u""
try:
with epObj.lock:
quality = Quality.splitCompositeStatus(epObj.status)[1]
epObj.status = Quality.compositeStatus(FAILED, quality)
epObj.saveToDB()
except EpisodeNotFoundException, e:
logger.log(u"Unable to get episode, please set its status manually: " + ex(e), logger.WARNING)
return log_str
def logSnatch(searchResult):
"""
Logs a successful snatch
:param searchResult: Search result that was successful
"""
logDate = datetime.datetime.today().strftime(History.date_format)
release = prepareFailedName(searchResult.name)
providerClass = searchResult.provider
if providerClass is not None:
provider = providerClass.name
else:
provider = "unknown"
show_obj = searchResult.episodes[0].show
myDB = db.DBConnection('failed.db')
for episode in searchResult.episodes:
myDB.action(
"INSERT INTO history (date, size, release, provider, showid, season, episode, old_status)"
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
[logDate, searchResult.size, release, provider, show_obj.indexerid, episode.season, episode.episode,
episode.status])
def deleteLoggedSnatch(release, size, provider):
"""
Remove a snatch from history
:param release: release to delete
:param size: Size of release
:param provider: Provider to delete it from
"""
release = prepareFailedName(release)
myDB = db.DBConnection('failed.db')
myDB.action("DELETE FROM history WHERE release=? AND size=? AND provider=?",
[release, size, provider])
def trimHistory():
"""Trims history table to 1 month of history from today"""
myDB = db.DBConnection('failed.db')
myDB.action("DELETE FROM history WHERE date < " + str(
(datetime.datetime.today() - datetime.timedelta(days=30)).strftime(History.date_format)))
def findRelease(epObj):
"""
Find releases in history by show ID and season.
Return None for release if multiple found or no release found.
"""
release = None
provider = None
# Clear old snatches for this release if any exist
myDB = db.DBConnection('failed.db')
myDB.action("DELETE FROM history WHERE showid=" + str(epObj.show.indexerid) + " AND season=" + str(
epObj.season) + " AND episode=" + str(
epObj.episode) + " AND date < (SELECT max(date) FROM history WHERE showid=" + str(
epObj.show.indexerid) + " AND season=" + str(epObj.season) + " AND episode=" + str(epObj.episode) + ")")
# Search for release in snatch history
results = myDB.select("SELECT release, provider, date FROM history WHERE showid=? AND season=? AND episode=?",
[epObj.show.indexerid, epObj.season, epObj.episode])
for result in results:
release = str(result["release"])
provider = str(result["provider"])
date = result["date"]
# Clear any incomplete snatch records for this release if any exist
myDB.action("DELETE FROM history WHERE release=? AND date!=?", [release, date])
# Found a previously failed release
logger.log(u"Failed release found for season (%s): (%s)" % (epObj.season, result["release"]), logger.DEBUG)
return (release, provider)
# Release was not found
logger.log(u"No releases found for season (%s) of (%s)" % (epObj.season, epObj.show.indexerid), logger.DEBUG)
return (release, provider)
|
xNovax/SickRage
|
sickbeard/failed_history.py
|
Python
|
gpl-3.0
| 8,655
|
from xbeecoord import getRssi
from time import sleep
while True:
getRssi()
sleep(2)
|
updewsprado/updews-datalogger
|
rpi/LUNAS/rssi.py
|
Python
|
unlicense
| 87
|
"""
This module is responsible for shimming out the auto generated libraries found
under library.py. The intension for the extension classes is to fix up or
improve on the default COM API behaviour and auto generated Python library
file when interacting through an Interface to the Main library API.
"""
from __future__ import absolute_import
import inspect
from virtualbox import library
# Import extension modules
from .vbox import IVirtualBox # noqa: F401
from .session import ISession # noqa: F401
from .keyboard import IKeyboard # noqa: F401
from .guest_session import IGuestSession # noqa: F401
from .guest import IGuest # noqa: F401
from .host import IHost # noqa: F401
from .machine import IMachine # noqa: F401
from .progress import IProgress # noqa: F401
from .console import IConsole # noqa: F401
from .event_source import IEventSource # noqa: F401
from .mouse import IMouse # noqa: F401
from .process import IProcess # noqa: F401
from .guest_process import IGuestProcess # noqa: F401
from .appliance import IAppliance # noqa: F401
from .virtual_system_description import IVirtualSystemDescription # noqa: F401
# Replace original with extension
for k, v in [a for a in locals().items()]:
if not inspect.isclass(v):
continue
if issubclass(v, library.Interface):
setattr(library, k, v)
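# A condensed sketch of the pattern above; the friendly_name helper is
# hypothetical and only illustrates how an extension class overrides the
# generated one, so later lookups such as library.IMachine resolve to it:
#
#   class IMachine(library.IMachine):
#       def friendly_name(self):           # hypothetical convenience method
#           return self.name
#
#   setattr(library, "IMachine", IMachine)  # what the loop above does per class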
|
mjdorma/pyvbox
|
virtualbox/library_ext/__init__.py
|
Python
|
apache-2.0
| 1,345
|
#!/usr/bin/python
#This is the old version of Tactix before I decided to make the C version.
#I hope you all like it.
#I wanted to include it to make sure that even if you only have a monochrome display you can still play!
# * HOORAY FOR GAMERS!
#I may very well have modded this a bit too, just so it feels more like the full version.
import os #needed for screen clear call.
#I had to grab this off of Stack Overflow...
#WHAT?! I NEVER SAID I WAS THE BEST PROGRAMMER ON THE PLANET!
def getTerminalSize():
import os
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,'1234'))
except:
return
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
### Use get(key[, default]) instead of a try/catch
#try:
# cr = (env['LINES'], env['COLUMNS'])
#except:
# cr = (25, 80)
return int(cr[1]), int(cr[0])
###MY_CODE \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
#initialize our character size data so we can calculate how large our screen can be.
#this kind of sucks because doing screen size calculations this way means the user can't
#resize mid-gameplay without screwing it up...
class window:
edge='+'
line_x='-'
line_y='|'
def __init__(self,width,height):
self.width=width
self.height=height
self.data=[None]*(width*height)
#content padding (by default it's one because we don't want to overwrite our border.)
self.c=1
#fill border.
for l in range(len(self.data)):
if l == 0 or l == width or l == (height * width) or l == ((height * width) - (width + 1)):
self.data[l] = self.edge
#keep working here...
def draw(self, xoff, yoff, width, height, data):
limit = width * height
#copy data into the window buffer (flat-index math still a work in progress)
for l in range(min(len(data), limit)):
self.data[((self.c + xoff + l) * (self.c + yoff))] = data[l]
#outputs everything to screen.
def flush(self):
#... stuff...
pass
global character_field
character_field = getTerminalSize()
#text orientation.
#def t_orient(text,x,y,maxx,maxy):
#quadrants work as such
#
# +-----------------+
# | | | |
# | 0,0 | 1,0 | 2,0 |
# +-----------------+
# | | | |
# | 0,1 | 1,1 | 2,1 |
# +-----------------+
# | | | |
# | 0,2 | 1,2 | 2,2 |
# +-----------------+
class game:
main_menu = window(character_field[0], character_field[1])
def __init__(self):
self.main_menu = game.main_menu
#debug section.
print(game.main_menu.width)
print(game.main_menu.height)
#eof, cause Imma stup.
|
M3TIOR/Tactix
|
Tactix.py
|
Python
|
mit
| 2,700
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""code generator for GLES2 command buffers."""
import filecmp
import os
import os.path
import sys
from optparse import OptionParser
import build_cmd_buffer_lib
# Named type info object represents a named type that is used in OpenGL call
# arguments. Each named type defines a set of valid OpenGL call arguments. The
# named types are used in 'gles2_cmd_buffer_functions.txt'.
# type: The actual GL type of the named type.
# valid: The list of values that are valid for both the client and the service.
# valid_es3: The list of values that are valid in OpenGL ES 3, but not ES 2.
# invalid: Examples of invalid values for the type. At least these values
# should be tested to be invalid.
# deprecated_es3: The list of values that are valid in OpenGL ES 2, but
# deprecated in ES 3.
# is_complete: The list of valid values for the type is final and will not be
# modified during runtime.
# validator: If set to False, prevents creation of a ValueValidator. Values
# are still expected to be checked for validity and will be tested.
_NAMED_TYPE_INFO = {
'BlitFilter': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_NEAREST',
'GL_LINEAR',
],
'invalid': [
'GL_LINEAR_MIPMAP_LINEAR',
],
},
'CoverageModulationComponents': {
'type': 'GLenum',
'valid': [
'GL_RGB', 'GL_RGBA', 'GL_ALPHA', 'GL_NONE'
],
},
'FramebufferTarget': {
'type': 'GLenum',
'valid': [
'GL_FRAMEBUFFER',
],
'valid_es3': [
'GL_DRAW_FRAMEBUFFER' ,
'GL_READ_FRAMEBUFFER' ,
],
'invalid': [
'GL_RENDERBUFFER',
],
},
'RenderBufferTarget': {
'type': 'GLenum',
'valid': [
'GL_RENDERBUFFER',
],
'invalid': [
'GL_FRAMEBUFFER',
],
},
'BufferTarget': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_ARRAY_BUFFER',
'GL_ELEMENT_ARRAY_BUFFER',
],
'valid_es3': [
'GL_COPY_READ_BUFFER',
'GL_COPY_WRITE_BUFFER',
'GL_PIXEL_PACK_BUFFER',
'GL_PIXEL_UNPACK_BUFFER',
'GL_TRANSFORM_FEEDBACK_BUFFER',
'GL_UNIFORM_BUFFER',
],
'invalid': [
'GL_RENDERBUFFER',
],
},
'IndexedBufferTarget': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_TRANSFORM_FEEDBACK_BUFFER',
'GL_UNIFORM_BUFFER',
],
'invalid': [
'GL_RENDERBUFFER',
],
},
'MapBufferAccess': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_MAP_READ_BIT',
'GL_MAP_WRITE_BIT',
'GL_MAP_INVALIDATE_RANGE_BIT',
'GL_MAP_INVALIDATE_BUFFER_BIT',
'GL_MAP_FLUSH_EXPLICIT_BIT',
'GL_MAP_UNSYNCHRONIZED_BIT',
],
'invalid': [
'GL_SYNC_FLUSH_COMMANDS_BIT',
],
},
'Bufferiv': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_COLOR',
'GL_STENCIL',
],
'invalid': [
'GL_RENDERBUFFER',
],
},
'Bufferuiv': {
'type': 'GLenum',
'valid': [
'GL_COLOR',
],
'invalid': [
'GL_RENDERBUFFER',
],
},
'Bufferfv': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_COLOR',
'GL_DEPTH',
],
'invalid': [
'GL_RENDERBUFFER',
],
},
'Bufferfi': {
'type': 'GLenum',
'valid': [
'GL_DEPTH_STENCIL',
],
'invalid': [
'GL_RENDERBUFFER',
],
},
'BufferUsage': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_STREAM_DRAW',
'GL_STATIC_DRAW',
'GL_DYNAMIC_DRAW',
],
'valid_es3': [
'GL_STREAM_READ',
'GL_STREAM_COPY',
'GL_STATIC_READ',
'GL_STATIC_COPY',
'GL_DYNAMIC_READ',
'GL_DYNAMIC_COPY',
],
'invalid': [
'GL_NONE',
],
},
'CompressedTextureFormat': {
'type': 'GLenum',
'valid': [
],
'valid_es3': [
],
},
'GLState': {
'type': 'GLenum',
'valid': [
# NOTE: State and Capability entries are added later.
'GL_ACTIVE_TEXTURE',
'GL_ALIASED_LINE_WIDTH_RANGE',
'GL_ALIASED_POINT_SIZE_RANGE',
'GL_ALPHA_BITS',
'GL_ARRAY_BUFFER_BINDING',
'GL_BLUE_BITS',
'GL_COMPRESSED_TEXTURE_FORMATS',
'GL_CURRENT_PROGRAM',
'GL_DEPTH_BITS',
'GL_DEPTH_RANGE',
'GL_ELEMENT_ARRAY_BUFFER_BINDING',
'GL_FRAMEBUFFER_BINDING',
'GL_GENERATE_MIPMAP_HINT',
'GL_GREEN_BITS',
'GL_IMPLEMENTATION_COLOR_READ_FORMAT',
'GL_IMPLEMENTATION_COLOR_READ_TYPE',
'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS',
'GL_MAX_CUBE_MAP_TEXTURE_SIZE',
'GL_MAX_FRAGMENT_UNIFORM_VECTORS',
'GL_MAX_RENDERBUFFER_SIZE',
'GL_MAX_TEXTURE_IMAGE_UNITS',
'GL_MAX_TEXTURE_SIZE',
'GL_MAX_VARYING_VECTORS',
'GL_MAX_VERTEX_ATTRIBS',
'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS',
'GL_MAX_VERTEX_UNIFORM_VECTORS',
'GL_MAX_VIEWPORT_DIMS',
'GL_NUM_COMPRESSED_TEXTURE_FORMATS',
'GL_NUM_SHADER_BINARY_FORMATS',
'GL_PACK_ALIGNMENT',
'GL_RED_BITS',
'GL_RENDERBUFFER_BINDING',
'GL_SAMPLE_BUFFERS',
'GL_SAMPLE_COVERAGE_INVERT',
'GL_SAMPLE_COVERAGE_VALUE',
'GL_SAMPLES',
'GL_SCISSOR_BOX',
'GL_SHADER_BINARY_FORMATS',
'GL_SHADER_COMPILER',
'GL_SUBPIXEL_BITS',
'GL_STENCIL_BITS',
'GL_TEXTURE_BINDING_2D',
'GL_TEXTURE_BINDING_CUBE_MAP',
'GL_TEXTURE_FILTERING_HINT_CHROMIUM',
'GL_UNPACK_ALIGNMENT',
'GL_BIND_GENERATES_RESOURCE_CHROMIUM',
# we can add this because we emulate it if the driver does not support it.
'GL_VERTEX_ARRAY_BINDING_OES',
'GL_VIEWPORT',
],
'valid_es3': [
'GL_COPY_READ_BUFFER_BINDING',
'GL_COPY_WRITE_BUFFER_BINDING',
'GL_DRAW_BUFFER0',
'GL_DRAW_BUFFER1',
'GL_DRAW_BUFFER2',
'GL_DRAW_BUFFER3',
'GL_DRAW_BUFFER4',
'GL_DRAW_BUFFER5',
'GL_DRAW_BUFFER6',
'GL_DRAW_BUFFER7',
'GL_DRAW_BUFFER8',
'GL_DRAW_BUFFER9',
'GL_DRAW_BUFFER10',
'GL_DRAW_BUFFER11',
'GL_DRAW_BUFFER12',
'GL_DRAW_BUFFER13',
'GL_DRAW_BUFFER14',
'GL_DRAW_BUFFER15',
'GL_DRAW_FRAMEBUFFER_BINDING',
'GL_FRAGMENT_SHADER_DERIVATIVE_HINT',
'GL_GPU_DISJOINT_EXT',
'GL_MAJOR_VERSION',
'GL_MAX_3D_TEXTURE_SIZE',
'GL_MAX_ARRAY_TEXTURE_LAYERS',
'GL_MAX_COLOR_ATTACHMENTS',
'GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS',
'GL_MAX_COMBINED_UNIFORM_BLOCKS',
'GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS',
'GL_MAX_DRAW_BUFFERS',
'GL_MAX_ELEMENT_INDEX',
'GL_MAX_ELEMENTS_INDICES',
'GL_MAX_ELEMENTS_VERTICES',
'GL_MAX_FRAGMENT_INPUT_COMPONENTS',
'GL_MAX_FRAGMENT_UNIFORM_BLOCKS',
'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS',
'GL_MAX_PROGRAM_TEXEL_OFFSET',
'GL_MAX_SAMPLES',
'GL_MAX_SERVER_WAIT_TIMEOUT',
'GL_MAX_TEXTURE_LOD_BIAS',
'GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS',
'GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS',
'GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS',
'GL_MAX_UNIFORM_BLOCK_SIZE',
'GL_MAX_UNIFORM_BUFFER_BINDINGS',
'GL_MAX_VARYING_COMPONENTS',
'GL_MAX_VERTEX_OUTPUT_COMPONENTS',
'GL_MAX_VERTEX_UNIFORM_BLOCKS',
'GL_MAX_VERTEX_UNIFORM_COMPONENTS',
'GL_MIN_PROGRAM_TEXEL_OFFSET',
'GL_MINOR_VERSION',
'GL_NUM_EXTENSIONS',
'GL_NUM_PROGRAM_BINARY_FORMATS',
'GL_PACK_ROW_LENGTH',
'GL_PACK_SKIP_PIXELS',
'GL_PACK_SKIP_ROWS',
'GL_PIXEL_PACK_BUFFER_BINDING',
'GL_PIXEL_UNPACK_BUFFER_BINDING',
'GL_PROGRAM_BINARY_FORMATS',
'GL_READ_BUFFER',
'GL_READ_FRAMEBUFFER_BINDING',
'GL_SAMPLER_BINDING',
'GL_TIMESTAMP_EXT',
'GL_TEXTURE_BINDING_2D_ARRAY',
'GL_TEXTURE_BINDING_3D',
'GL_TRANSFORM_FEEDBACK_BINDING',
'GL_TRANSFORM_FEEDBACK_ACTIVE',
'GL_TRANSFORM_FEEDBACK_BUFFER_BINDING',
'GL_TRANSFORM_FEEDBACK_PAUSED',
'GL_UNIFORM_BUFFER_BINDING',
'GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT',
'GL_UNPACK_IMAGE_HEIGHT',
'GL_UNPACK_ROW_LENGTH',
'GL_UNPACK_SKIP_IMAGES',
'GL_UNPACK_SKIP_PIXELS',
'GL_UNPACK_SKIP_ROWS',
'GL_BLEND_EQUATION_RGB',
'GL_BLEND_EQUATION_ALPHA',
'GL_BLEND_SRC_RGB',
'GL_BLEND_SRC_ALPHA',
'GL_BLEND_DST_RGB',
'GL_BLEND_DST_ALPHA',
'GL_COLOR_WRITEMASK',
# GL_VERTEX_ARRAY_BINDING is the same as GL_VERTEX_ARRAY_BINDING_OES
# 'GL_VERTEX_ARRAY_BINDING',
],
'invalid': [
'GL_FOG_HINT',
],
},
'IndexedGLState': {
'type': 'GLenum',
'valid': [
'GL_TRANSFORM_FEEDBACK_BUFFER_BINDING',
'GL_TRANSFORM_FEEDBACK_BUFFER_SIZE',
'GL_TRANSFORM_FEEDBACK_BUFFER_START',
'GL_UNIFORM_BUFFER_BINDING',
'GL_UNIFORM_BUFFER_SIZE',
'GL_UNIFORM_BUFFER_START',
'GL_BLEND_EQUATION_RGB',
'GL_BLEND_EQUATION_ALPHA',
'GL_BLEND_SRC_RGB',
'GL_BLEND_SRC_ALPHA',
'GL_BLEND_DST_RGB',
'GL_BLEND_DST_ALPHA',
'GL_COLOR_WRITEMASK',
],
'invalid': [
'GL_FOG_HINT',
],
},
'GetTexParamTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP',
],
'valid_es3': [
'GL_TEXTURE_2D_ARRAY',
'GL_TEXTURE_3D',
],
'invalid': [
'GL_PROXY_TEXTURE_CUBE_MAP',
]
},
'ReadBuffer': {
'type': 'GLenum',
'valid': [
'GL_NONE',
'GL_BACK',
'GL_COLOR_ATTACHMENT0',
'GL_COLOR_ATTACHMENT1',
'GL_COLOR_ATTACHMENT2',
'GL_COLOR_ATTACHMENT3',
'GL_COLOR_ATTACHMENT4',
'GL_COLOR_ATTACHMENT5',
'GL_COLOR_ATTACHMENT6',
'GL_COLOR_ATTACHMENT7',
'GL_COLOR_ATTACHMENT8',
'GL_COLOR_ATTACHMENT9',
'GL_COLOR_ATTACHMENT10',
'GL_COLOR_ATTACHMENT11',
'GL_COLOR_ATTACHMENT12',
'GL_COLOR_ATTACHMENT13',
'GL_COLOR_ATTACHMENT14',
'GL_COLOR_ATTACHMENT15',
],
'invalid': [
'GL_RENDERBUFFER',
]
},
'TextureTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_X',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z',
],
'invalid': [
'GL_PROXY_TEXTURE_CUBE_MAP',
]
},
'TextureFboTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP_POSITIVE_X',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_X',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Y',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y',
'GL_TEXTURE_CUBE_MAP_POSITIVE_Z',
'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z',
],
'invalid': [
'GL_PROXY_TEXTURE_CUBE_MAP',
]
},
'Texture3DTarget': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_TEXTURE_3D',
'GL_TEXTURE_2D_ARRAY',
],
'invalid': [
'GL_TEXTURE_2D',
]
},
'TextureBindTarget': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_2D',
'GL_TEXTURE_CUBE_MAP',
],
'valid_es3': [
'GL_TEXTURE_3D',
'GL_TEXTURE_2D_ARRAY',
],
'invalid': [
'GL_TEXTURE_1D',
'GL_TEXTURE_3D',
],
},
'TransformFeedbackBindTarget': {
'type': 'GLenum',
'valid': [
'GL_TRANSFORM_FEEDBACK',
],
'invalid': [
'GL_TEXTURE_2D',
],
},
'TransformFeedbackPrimitiveMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_POINTS',
'GL_LINES',
'GL_TRIANGLES',
],
'invalid': [
'GL_LINE_LOOP',
],
},
'ShaderType': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_VERTEX_SHADER',
'GL_FRAGMENT_SHADER',
],
'invalid': [
'GL_GEOMETRY_SHADER',
],
},
'FaceType': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_FRONT',
'GL_BACK',
'GL_FRONT_AND_BACK',
],
},
'FaceMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_CW',
'GL_CCW',
],
},
'CmpFunction': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_NEVER',
'GL_LESS',
'GL_EQUAL',
'GL_LEQUAL',
'GL_GREATER',
'GL_NOTEQUAL',
'GL_GEQUAL',
'GL_ALWAYS',
],
},
'Equation': {
'type': 'GLenum',
'valid': [
'GL_FUNC_ADD',
'GL_FUNC_SUBTRACT',
'GL_FUNC_REVERSE_SUBTRACT',
],
'valid_es3': [
'GL_MIN',
'GL_MAX',
],
'invalid': [
'GL_NONE',
],
},
'SrcBlendFactor': {
'type': 'GLenum',
'valid': [
'GL_ZERO',
'GL_ONE',
'GL_SRC_COLOR',
'GL_ONE_MINUS_SRC_COLOR',
'GL_DST_COLOR',
'GL_ONE_MINUS_DST_COLOR',
'GL_SRC_ALPHA',
'GL_ONE_MINUS_SRC_ALPHA',
'GL_DST_ALPHA',
'GL_ONE_MINUS_DST_ALPHA',
'GL_CONSTANT_COLOR',
'GL_ONE_MINUS_CONSTANT_COLOR',
'GL_CONSTANT_ALPHA',
'GL_ONE_MINUS_CONSTANT_ALPHA',
'GL_SRC_ALPHA_SATURATE',
],
},
'DstBlendFactor': {
'type': 'GLenum',
'valid': [
'GL_ZERO',
'GL_ONE',
'GL_SRC_COLOR',
'GL_ONE_MINUS_SRC_COLOR',
'GL_DST_COLOR',
'GL_ONE_MINUS_DST_COLOR',
'GL_SRC_ALPHA',
'GL_ONE_MINUS_SRC_ALPHA',
'GL_DST_ALPHA',
'GL_ONE_MINUS_DST_ALPHA',
'GL_CONSTANT_COLOR',
'GL_ONE_MINUS_CONSTANT_COLOR',
'GL_CONSTANT_ALPHA',
'GL_ONE_MINUS_CONSTANT_ALPHA',
],
'valid_es3': [
'GL_SRC_ALPHA_SATURATE'
]
},
'Capability': {
'type': 'GLenum',
'valid': ["GL_%s" % cap['name'].upper()
for cap in build_cmd_buffer_lib._CAPABILITY_FLAGS
if ('es3' not in cap or cap['es3'] != True)
and 'extension_flag' not in cap],
'valid_es3': ["GL_%s" % cap['name'].upper()
for cap in build_cmd_buffer_lib._CAPABILITY_FLAGS
if ('es3' in cap and cap['es3'] == True)
and 'extension_flag' not in cap],
'invalid': [
'GL_CLIP_PLANE0',
'GL_POINT_SPRITE',
],
},
'DrawMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_POINTS',
'GL_LINE_STRIP',
'GL_LINE_LOOP',
'GL_LINES',
'GL_TRIANGLE_STRIP',
'GL_TRIANGLE_FAN',
'GL_TRIANGLES',
],
'invalid': [
'GL_QUADS',
'GL_POLYGON',
],
},
'IndexType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT',
],
'valid_es3': [
'GL_UNSIGNED_INT',
],
'invalid': [
'GL_INT',
],
},
'GetMaxIndexType': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT',
'GL_UNSIGNED_INT',
],
'invalid': [
'GL_INT',
],
},
'Attachment': {
'type': 'GLenum',
'valid': [
'GL_COLOR_ATTACHMENT0',
'GL_DEPTH_ATTACHMENT',
'GL_STENCIL_ATTACHMENT',
],
'valid_es3': [
'GL_DEPTH_STENCIL_ATTACHMENT',
],
},
'AttachmentQuery': {
'type': 'GLenum',
'valid': [
'GL_COLOR_ATTACHMENT0',
'GL_DEPTH_ATTACHMENT',
'GL_STENCIL_ATTACHMENT',
],
'valid_es3': [
'GL_DEPTH_STENCIL_ATTACHMENT',
# For backbuffer.
'GL_COLOR_EXT',
'GL_DEPTH_EXT',
'GL_STENCIL_EXT',
],
},
'BackbufferAttachment': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_COLOR_EXT',
'GL_DEPTH_EXT',
'GL_STENCIL_EXT',
],
},
'BufferParameter': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_BUFFER_SIZE',
'GL_BUFFER_USAGE',
],
'valid_es3': [
'GL_BUFFER_ACCESS_FLAGS',
'GL_BUFFER_MAPPED',
],
'invalid': [
'GL_PIXEL_PACK_BUFFER',
],
},
'BufferParameter64': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_BUFFER_SIZE',
'GL_BUFFER_MAP_LENGTH',
'GL_BUFFER_MAP_OFFSET',
],
'invalid': [
'GL_PIXEL_PACK_BUFFER',
],
},
'BufferMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_INTERLEAVED_ATTRIBS',
'GL_SEPARATE_ATTRIBS',
],
'invalid': [
'GL_PIXEL_PACK_BUFFER',
],
},
'FramebufferAttachmentParameter': {
'type': 'GLenum',
'valid': [
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE',
'GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE',
],
'valid_es3': [
'GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE',
'GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE',
'GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING',
'GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER',
],
},
'FramebufferParameter' : {
'type': 'GLenum',
'valid' : [],
},
'ProgramParameter': {
'type': 'GLenum',
'valid': [
'GL_DELETE_STATUS',
'GL_LINK_STATUS',
'GL_VALIDATE_STATUS',
'GL_INFO_LOG_LENGTH',
'GL_ATTACHED_SHADERS',
'GL_ACTIVE_ATTRIBUTES',
'GL_ACTIVE_ATTRIBUTE_MAX_LENGTH',
'GL_ACTIVE_UNIFORMS',
'GL_ACTIVE_UNIFORM_MAX_LENGTH',
],
'valid_es3': [
'GL_ACTIVE_UNIFORM_BLOCKS',
'GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH',
'GL_TRANSFORM_FEEDBACK_BUFFER_MODE',
'GL_TRANSFORM_FEEDBACK_VARYINGS',
'GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH',
],
'invalid': [
'GL_PROGRAM_BINARY_RETRIEVABLE_HINT', # not supported in Chromium.
],
},
'QueryObjectParameter': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_QUERY_RESULT_EXT',
'GL_QUERY_RESULT_AVAILABLE_EXT',
'GL_QUERY_RESULT_AVAILABLE_NO_FLUSH_CHROMIUM_EXT',
],
},
'QueryParameter': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_CURRENT_QUERY_EXT',
],
},
'QueryTarget': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_SAMPLES_PASSED_ARB',
'GL_ANY_SAMPLES_PASSED_EXT',
'GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT',
'GL_COMMANDS_ISSUED_CHROMIUM',
'GL_COMMANDS_ISSUED_TIMESTAMP_CHROMIUM',
'GL_LATENCY_QUERY_CHROMIUM',
'GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM',
'GL_COMMANDS_COMPLETED_CHROMIUM',
'GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM',
'GL_PROGRAM_COMPLETION_QUERY_CHROMIUM',
],
},
'RenderBufferParameter': {
'type': 'GLenum',
'valid': [
'GL_RENDERBUFFER_RED_SIZE',
'GL_RENDERBUFFER_GREEN_SIZE',
'GL_RENDERBUFFER_BLUE_SIZE',
'GL_RENDERBUFFER_ALPHA_SIZE',
'GL_RENDERBUFFER_DEPTH_SIZE',
'GL_RENDERBUFFER_STENCIL_SIZE',
'GL_RENDERBUFFER_WIDTH',
'GL_RENDERBUFFER_HEIGHT',
'GL_RENDERBUFFER_INTERNAL_FORMAT',
],
'valid_es3': [
'GL_RENDERBUFFER_SAMPLES',
],
},
'InternalFormatParameter': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_NUM_SAMPLE_COUNTS',
'GL_SAMPLES',
],
},
'SamplerParameter': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_MAG_FILTER',
'GL_TEXTURE_MIN_FILTER',
'GL_TEXTURE_MIN_LOD',
'GL_TEXTURE_MAX_LOD',
'GL_TEXTURE_WRAP_S',
'GL_TEXTURE_WRAP_T',
'GL_TEXTURE_WRAP_R',
'GL_TEXTURE_COMPARE_MODE',
'GL_TEXTURE_COMPARE_FUNC',
],
'invalid': [
'GL_GENERATE_MIPMAP',
],
},
'ShaderParameter': {
'type': 'GLenum',
'valid': [
'GL_SHADER_TYPE',
'GL_DELETE_STATUS',
'GL_COMPILE_STATUS',
'GL_INFO_LOG_LENGTH',
'GL_SHADER_SOURCE_LENGTH',
'GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE',
],
},
'ShaderPrecision': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_LOW_FLOAT',
'GL_MEDIUM_FLOAT',
'GL_HIGH_FLOAT',
'GL_LOW_INT',
'GL_MEDIUM_INT',
'GL_HIGH_INT',
],
},
'StringType': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_VENDOR',
'GL_RENDERER',
'GL_VERSION',
'GL_SHADING_LANGUAGE_VERSION',
'GL_EXTENSIONS',
],
},
'IndexedStringType': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_EXTENSIONS',
],
},
'TextureParameter': {
'type': 'GLenum',
'valid': [
'GL_TEXTURE_MAG_FILTER',
'GL_TEXTURE_MIN_FILTER',
'GL_TEXTURE_WRAP_S',
'GL_TEXTURE_WRAP_T',
],
'valid_es3': [
'GL_TEXTURE_BASE_LEVEL',
'GL_TEXTURE_COMPARE_FUNC',
'GL_TEXTURE_COMPARE_MODE',
'GL_TEXTURE_IMMUTABLE_FORMAT',
'GL_TEXTURE_IMMUTABLE_LEVELS',
'GL_TEXTURE_MAX_LEVEL',
'GL_TEXTURE_MAX_LOD',
'GL_TEXTURE_MIN_LOD',
'GL_TEXTURE_WRAP_R',
],
'invalid': [
'GL_GENERATE_MIPMAP',
],
},
'TextureWrapMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_CLAMP_TO_EDGE',
'GL_MIRRORED_REPEAT',
'GL_REPEAT',
],
},
'TextureMinFilterMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_NEAREST',
'GL_LINEAR',
'GL_NEAREST_MIPMAP_NEAREST',
'GL_LINEAR_MIPMAP_NEAREST',
'GL_NEAREST_MIPMAP_LINEAR',
'GL_LINEAR_MIPMAP_LINEAR',
],
},
'TextureMagFilterMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_NEAREST',
'GL_LINEAR',
],
},
'TextureCompareFunc': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_LEQUAL',
'GL_GEQUAL',
'GL_LESS',
'GL_GREATER',
'GL_EQUAL',
'GL_NOTEQUAL',
'GL_ALWAYS',
'GL_NEVER',
],
},
'TextureCompareMode': {
'type': 'GLenum',
'valid': [
'GL_NONE',
'GL_COMPARE_REF_TO_TEXTURE',
],
},
'TextureSrgbDecodeExt': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_DECODE_EXT',
'GL_SKIP_DECODE_EXT',
],
},
'TextureSwizzle': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_RED',
'GL_GREEN',
'GL_BLUE',
'GL_ALPHA',
'GL_ZERO',
'GL_ONE',
],
},
'TextureUsage': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_NONE',
'GL_FRAMEBUFFER_ATTACHMENT_ANGLE',
],
},
'VertexAttribute': {
'type': 'GLenum',
'valid': [
# some enum that the decoder actually passes through to GL needs
# to be the first listed here since it's used in unit tests.
'GL_VERTEX_ATTRIB_ARRAY_NORMALIZED',
'GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING',
'GL_VERTEX_ATTRIB_ARRAY_ENABLED',
'GL_VERTEX_ATTRIB_ARRAY_SIZE',
'GL_VERTEX_ATTRIB_ARRAY_STRIDE',
'GL_VERTEX_ATTRIB_ARRAY_TYPE',
'GL_CURRENT_VERTEX_ATTRIB',
],
'valid_es3': [
'GL_VERTEX_ATTRIB_ARRAY_INTEGER',
'GL_VERTEX_ATTRIB_ARRAY_DIVISOR',
],
},
'VertexPointer': {
'type': 'GLenum',
'valid': [
'GL_VERTEX_ATTRIB_ARRAY_POINTER',
],
},
'HintTarget': {
'type': 'GLenum',
'valid': [
'GL_GENERATE_MIPMAP_HINT',
'GL_TEXTURE_FILTERING_HINT_CHROMIUM',
],
'valid_es3': [
'GL_FRAGMENT_SHADER_DERIVATIVE_HINT',
],
'invalid': [
'GL_PERSPECTIVE_CORRECTION_HINT',
],
},
'HintMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_FASTEST',
'GL_NICEST',
'GL_DONT_CARE',
],
},
'PixelStore': {
'type': 'GLenum',
'valid': [
'GL_PACK_ALIGNMENT',
'GL_UNPACK_ALIGNMENT',
],
'valid_es3': [
'GL_PACK_ROW_LENGTH',
'GL_PACK_SKIP_PIXELS',
'GL_PACK_SKIP_ROWS',
'GL_UNPACK_ROW_LENGTH',
'GL_UNPACK_IMAGE_HEIGHT',
'GL_UNPACK_SKIP_PIXELS',
'GL_UNPACK_SKIP_ROWS',
'GL_UNPACK_SKIP_IMAGES',
],
'invalid': [
'GL_PACK_SWAP_BYTES',
'GL_UNPACK_SWAP_BYTES',
],
},
'PixelStoreAlignment': {
'type': 'GLint',
'is_complete': True,
'valid': [
'1',
'2',
'4',
'8',
],
'invalid': [
'3',
'9',
],
},
'ReadPixelFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_RGB',
'GL_RGBA',
],
'valid_es3': [
'GL_RED',
'GL_RED_INTEGER',
'GL_RG',
'GL_RG_INTEGER',
'GL_RGB_INTEGER',
'GL_RGBA_INTEGER',
],
},
'PixelType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT_5_6_5',
'GL_UNSIGNED_SHORT_4_4_4_4',
'GL_UNSIGNED_SHORT_5_5_5_1',
],
'valid_es3': [
'GL_BYTE',
'GL_UNSIGNED_SHORT',
'GL_SHORT',
'GL_UNSIGNED_INT',
'GL_INT',
'GL_HALF_FLOAT',
'GL_FLOAT',
'GL_UNSIGNED_INT_2_10_10_10_REV',
'GL_UNSIGNED_INT_10F_11F_11F_REV',
'GL_UNSIGNED_INT_5_9_9_9_REV',
'GL_UNSIGNED_INT_24_8',
'GL_FLOAT_32_UNSIGNED_INT_24_8_REV',
],
'invalid': [
'GL_UNSIGNED_BYTE_3_3_2',
],
},
'ReadPixelType': {
'type': 'GLenum',
'valid': [
'GL_UNSIGNED_BYTE',
'GL_UNSIGNED_SHORT_5_6_5',
'GL_UNSIGNED_SHORT_4_4_4_4',
'GL_UNSIGNED_SHORT_5_5_5_1',
],
'valid_es3': [
'GL_BYTE',
'GL_UNSIGNED_SHORT',
'GL_SHORT',
'GL_UNSIGNED_INT',
'GL_INT',
'GL_HALF_FLOAT',
'GL_FLOAT',
'GL_UNSIGNED_INT_2_10_10_10_REV',
],
},
'RenderBufferFormat': {
'type': 'GLenum',
'valid': [
'GL_RGBA4',
'GL_RGB565',
'GL_RGB5_A1',
'GL_DEPTH_COMPONENT16',
'GL_STENCIL_INDEX8',
],
'valid_es3': [
'GL_R8',
'GL_R8UI',
'GL_R8I',
'GL_R16UI',
'GL_R16I',
'GL_R32UI',
'GL_R32I',
'GL_RG8',
'GL_RG8UI',
'GL_RG8I',
'GL_RG16UI',
'GL_RG16I',
'GL_RG32UI',
'GL_RG32I',
'GL_RGB8',
'GL_RGBA8',
'GL_SRGB8_ALPHA8',
'GL_RGB10_A2',
'GL_RGBA8UI',
'GL_RGBA8I',
'GL_RGB10_A2UI',
'GL_RGBA16UI',
'GL_RGBA16I',
'GL_RGBA32UI',
'GL_RGBA32I',
'GL_DEPTH_COMPONENT24',
'GL_DEPTH_COMPONENT32F',
'GL_DEPTH24_STENCIL8',
'GL_DEPTH32F_STENCIL8',
],
},
'ShaderBinaryFormat': {
'type': 'GLenum',
'valid': [
],
},
'StencilOp': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_KEEP',
'GL_ZERO',
'GL_REPLACE',
'GL_INCR',
'GL_INCR_WRAP',
'GL_DECR',
'GL_DECR_WRAP',
'GL_INVERT',
],
},
'TextureFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_LUMINANCE',
'GL_LUMINANCE_ALPHA',
'GL_RGB',
'GL_RGBA',
],
'valid_es3': [
'GL_RED',
'GL_RED_INTEGER',
'GL_RG',
'GL_RG_INTEGER',
'GL_RGB_INTEGER',
'GL_RGBA_INTEGER',
'GL_DEPTH_COMPONENT',
'GL_DEPTH_STENCIL',
],
'invalid': [
'GL_BGRA',
'GL_BGR',
],
},
'TextureInternalFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_LUMINANCE',
'GL_LUMINANCE_ALPHA',
'GL_RGB',
'GL_RGBA',
],
'valid_es3': [
'GL_R8',
'GL_R8_SNORM',
'GL_R16F',
'GL_R32F',
'GL_R8UI',
'GL_R8I',
'GL_R16UI',
'GL_R16I',
'GL_R32UI',
'GL_R32I',
'GL_RG8',
'GL_RG8_SNORM',
'GL_RG16F',
'GL_RG32F',
'GL_RG8UI',
'GL_RG8I',
'GL_RG16UI',
'GL_RG16I',
'GL_RG32UI',
'GL_RG32I',
'GL_RGB8',
'GL_SRGB8',
'GL_RGB565',
'GL_RGB8_SNORM',
'GL_R11F_G11F_B10F',
'GL_RGB9_E5',
'GL_RGB16F',
'GL_RGB32F',
'GL_RGB8UI',
'GL_RGB8I',
'GL_RGB16UI',
'GL_RGB16I',
'GL_RGB32UI',
'GL_RGB32I',
'GL_RGBA8',
'GL_SRGB8_ALPHA8',
'GL_RGBA8_SNORM',
'GL_RGB5_A1',
'GL_RGBA4',
'GL_RGB10_A2',
'GL_RGBA16F',
'GL_RGBA32F',
'GL_RGBA8UI',
'GL_RGBA8I',
'GL_RGB10_A2UI',
'GL_RGBA16UI',
'GL_RGBA16I',
'GL_RGBA32UI',
'GL_RGBA32I',
# The DEPTH/STENCIL formats are not supported in CopyTexImage2D.
# We will reject them dynamically in GPU command buffer.
'GL_DEPTH_COMPONENT16',
'GL_DEPTH_COMPONENT24',
'GL_DEPTH_COMPONENT32F',
'GL_DEPTH24_STENCIL8',
'GL_DEPTH32F_STENCIL8',
],
'invalid': [
'GL_BGRA',
'GL_BGR',
],
},
'TextureUnsizedInternalFormat': {
'type': 'GLenum',
'valid': [
'GL_ALPHA',
'GL_LUMINANCE',
'GL_LUMINANCE_ALPHA',
'GL_RGB',
'GL_RGBA',
],
},
'TextureSizedColorRenderableInternalFormat': {
'type': 'GLenum',
'valid': [
'GL_R8',
'GL_R8UI',
'GL_R8I',
'GL_R16UI',
'GL_R16I',
'GL_R32UI',
'GL_R32I',
'GL_RG8',
'GL_RG8UI',
'GL_RG8I',
'GL_RG16UI',
'GL_RG16I',
'GL_RG32UI',
'GL_RG32I',
'GL_RGB8',
'GL_RGB565',
'GL_RGBA8',
'GL_SRGB8_ALPHA8',
'GL_RGB5_A1',
'GL_RGBA4',
'GL_RGB10_A2',
'GL_RGBA8UI',
'GL_RGBA8I',
'GL_RGB10_A2UI',
'GL_RGBA16UI',
'GL_RGBA16I',
'GL_RGBA32UI',
'GL_RGBA32I',
],
},
'TextureDepthRenderableInternalFormat': {
'type': 'GLenum',
'valid': [],
'valid_es3': [
'GL_DEPTH_COMPONENT16',
'GL_DEPTH_COMPONENT24',
'GL_DEPTH_COMPONENT32F',
'GL_DEPTH24_STENCIL8',
'GL_DEPTH32F_STENCIL8',
],
},
'TextureStencilRenderableInternalFormat': {
'type': 'GLenum',
'valid': [],
'valid_es3': [
'GL_STENCIL_INDEX8',
'GL_DEPTH24_STENCIL8',
'GL_DEPTH32F_STENCIL8',
],
},
'TextureSizedTextureFilterableInternalFormat': {
'type': 'GLenum',
'valid': [
'GL_R8',
'GL_R8_SNORM',
'GL_R16F',
'GL_RG8',
'GL_RG8_SNORM',
'GL_RG16F',
'GL_RGB8',
'GL_SRGB8',
'GL_RGB565',
'GL_RGB8_SNORM',
'GL_R11F_G11F_B10F',
'GL_RGB9_E5',
'GL_RGB16F',
'GL_RGBA8',
'GL_SRGB8_ALPHA8',
'GL_RGBA8_SNORM',
'GL_RGB5_A1',
'GL_RGBA4',
'GL_RGB10_A2',
'GL_RGBA16F',
'GL_RGB_YCRCB_420_CHROMIUM',
'GL_RGB_YCBCR_422_CHROMIUM',
'GL_RGB_YCBCR_420V_CHROMIUM',
'GL_RGB_YCBCR_P010_CHROMIUM',
'GL_R16_EXT',
],
},
'TextureInternalFormatStorage': {
'type': 'GLenum',
'valid': [
'GL_RGB565',
'GL_RGBA4',
'GL_RGB5_A1',
'GL_ALPHA8_EXT',
'GL_LUMINANCE8_EXT',
'GL_LUMINANCE8_ALPHA8_EXT',
'GL_RGB8_OES',
'GL_RGBA8_OES',
],
'valid_es3': [
'GL_R8',
'GL_R8_SNORM',
'GL_R16F',
'GL_R32F',
'GL_R8UI',
'GL_R8I',
'GL_R16UI',
'GL_R16I',
'GL_R32UI',
'GL_R32I',
'GL_RG8',
'GL_RG8_SNORM',
'GL_RG16F',
'GL_RG32F',
'GL_RG8UI',
'GL_RG8I',
'GL_RG16UI',
'GL_RG16I',
'GL_RG32UI',
'GL_RG32I',
'GL_RGB8',
'GL_SRGB8',
'GL_RGB8_SNORM',
'GL_R11F_G11F_B10F',
'GL_RGB9_E5',
'GL_RGB16F',
'GL_RGB32F',
'GL_RGB8UI',
'GL_RGB8I',
'GL_RGB16UI',
'GL_RGB16I',
'GL_RGB32UI',
'GL_RGB32I',
'GL_RGBA8',
'GL_SRGB8_ALPHA8',
'GL_RGBA8_SNORM',
'GL_RGB10_A2',
'GL_RGBA16F',
'GL_RGBA32F',
'GL_RGBA8UI',
'GL_RGBA8I',
'GL_RGB10_A2UI',
'GL_RGBA16UI',
'GL_RGBA16I',
'GL_RGBA32UI',
'GL_RGBA32I',
'GL_DEPTH_COMPONENT16',
'GL_DEPTH_COMPONENT24',
'GL_DEPTH_COMPONENT32F',
'GL_DEPTH24_STENCIL8',
'GL_DEPTH32F_STENCIL8',
],
'deprecated_es3': [
'GL_ALPHA8_EXT',
'GL_LUMINANCE8_EXT',
'GL_LUMINANCE8_ALPHA8_EXT',
'GL_ALPHA16F_EXT',
'GL_LUMINANCE16F_EXT',
'GL_LUMINANCE_ALPHA16F_EXT',
'GL_ALPHA32F_EXT',
'GL_LUMINANCE32F_EXT',
'GL_LUMINANCE_ALPHA32F_EXT',
],
},
'ImageInternalFormat': {
'type': 'GLenum',
'valid': [
'GL_RGB',
'GL_RGB_YCRCB_420_CHROMIUM',
'GL_RGB_YCBCR_422_CHROMIUM',
'GL_RGB_YCBCR_420V_CHROMIUM',
'GL_RGB_YCBCR_P010_CHROMIUM',
'GL_RGBA',
],
},
'UniformParameter': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_UNIFORM_SIZE',
'GL_UNIFORM_TYPE',
'GL_UNIFORM_NAME_LENGTH',
'GL_UNIFORM_BLOCK_INDEX',
'GL_UNIFORM_OFFSET',
'GL_UNIFORM_ARRAY_STRIDE',
'GL_UNIFORM_MATRIX_STRIDE',
'GL_UNIFORM_IS_ROW_MAJOR',
],
'invalid': [
'GL_UNIFORM_BLOCK_NAME_LENGTH',
],
},
'UniformBlockParameter': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_UNIFORM_BLOCK_BINDING',
'GL_UNIFORM_BLOCK_DATA_SIZE',
'GL_UNIFORM_BLOCK_NAME_LENGTH',
'GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS',
'GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES',
'GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER',
'GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER',
],
'invalid': [
'GL_NEAREST',
],
},
'VertexAttribType': {
'type': 'GLenum',
'valid': [
'GL_BYTE',
'GL_UNSIGNED_BYTE',
'GL_SHORT',
'GL_UNSIGNED_SHORT',
# 'GL_FIXED', // This is not available on Desktop GL.
'GL_FLOAT',
],
'valid_es3': [
'GL_INT',
'GL_UNSIGNED_INT',
'GL_HALF_FLOAT',
'GL_INT_2_10_10_10_REV',
'GL_UNSIGNED_INT_2_10_10_10_REV',
],
'invalid': [
'GL_DOUBLE',
],
},
'VertexAttribIType': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_BYTE',
'GL_UNSIGNED_BYTE',
'GL_SHORT',
'GL_UNSIGNED_SHORT',
'GL_INT',
'GL_UNSIGNED_INT',
],
'invalid': [
'GL_FLOAT',
'GL_DOUBLE',
],
},
'TextureBorder': {
'type': 'GLint',
'is_complete': True,
'valid': [
'0',
],
'invalid': [
'1',
],
},
'VertexAttribSize': {
'type': 'GLint',
'validator': False,
'valid': [
'1',
'2',
'3',
'4',
],
'invalid': [
'0',
'5',
],
},
'ResetStatus': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_GUILTY_CONTEXT_RESET_ARB',
'GL_INNOCENT_CONTEXT_RESET_ARB',
'GL_UNKNOWN_CONTEXT_RESET_ARB',
],
},
'SyncCondition': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_SYNC_GPU_COMMANDS_COMPLETE',
],
'invalid': [
'0',
],
},
'SyncFlags': {
'type': 'GLbitfield',
'is_complete': True,
'valid': [
'0',
],
'invalid': [
'1',
],
},
'SyncFlushFlags': {
'type': 'GLbitfield',
'valid': [
'GL_SYNC_FLUSH_COMMANDS_BIT',
'0',
],
'invalid': [
'0xFFFFFFFF',
],
},
'SyncParameter': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_SYNC_STATUS', # This needs to be the 1st; all others are cached.
'GL_OBJECT_TYPE',
'GL_SYNC_CONDITION',
'GL_SYNC_FLAGS',
],
'invalid': [
'GL_SYNC_FENCE',
],
},
'ClientBufferUsage': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_SCANOUT_CHROMIUM',
],
'invalid': [
'GL_NONE',
],
},
'WindowRectanglesMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_INCLUSIVE_EXT',
'GL_EXCLUSIVE_EXT',
],
},
'SwapBuffersFlags': {
'type': 'GLbitfield',
'is_complete': True,
'valid': [
'0',
'gpu::SwapBuffersFlags::kVSyncParams',
],
},
'SharedImageAccessMode': {
'type': 'GLenum',
'is_complete': True,
'valid': [
'GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM',
'GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM',
'GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM',
],
},
}
# A function info object specifies the type and other special data for the
# command that will be generated. A base function info object is generated by
# parsing the "gles2_cmd_buffer_functions.txt", one for each function in the
# file. These function info objects can be augmented and their values can be
# overridden by adding an object to the table below.
#
# Must match function names specified in "gles2_cmd_buffer_functions.txt".
#
# cmd_comment: A comment added to the cmd format.
# type: defines which handler will be used to generate code.
# decoder_func: defines which function to call in the decoder to execute the
# corresponding GL command. If not specified the GL command will
# be called directly.
# gl_test_func: GL function that is expected to be called when testing.
# cmd_args: The arguments to use for the command. This overrides generating
# them based on the GL function arguments.
# data_transfer_methods: Array of methods that are used for transferring the
# pointer data. Possible values: 'immediate', 'shm', 'bucket'.
# The default is 'immediate' if the command has one pointer
# argument, otherwise 'shm'. One command is generated for each
# transfer method. Affects only commands which are not of type
# 'GETn' or 'GLcharN'.
# Note: the command arguments that affect this are the final args,
# taking cmd_args override into consideration.
# impl_func: Whether or not to generate the GLES2Implementation part of this
# command.
# internal: If true, this is an internal command only, not exposed to the
# client.
# needs_size: If True a data_size field is added to the command.
# count: The number of units per element. For PUTn or PUT types.
# use_count_func: If True the actual data count needs to be computed; the count
# argument specifies the maximum count.
# unit_test: If False no service side unit test will be generated.
# client_test: If False no client side unit test will be generated.
# expectation: If False the unit test will have no expected calls.
# gen_func: Name of function that generates GL resource for corresponding
# bind function.
# states: Array of states that get set by this function corresponding to
#         the given arguments.
# no_gl: no GL function is called.
# valid_args: A dictionary of argument indices to args to use in unit tests
#             when they cannot be automatically determined.
# pepper_interface: The pepper interface that is used for this extension
# pepper_name: The name of the function as exposed to pepper.
# pepper_args: A string representing the argument list (what would appear in
# C/C++ between the parentheses for the function declaration)
# that the Pepper API expects for this function. Use this only if
# the stable Pepper API differs from the GLES2 argument list.
# invalid_test: False if no invalid test needed.
# shadowed: True = the value is shadowed so no glGetXXX call will be made.
# first_element_only: For PUT types, True if only the first element of an
#              array is used and we end up calling the corresponding
#              single-value function, e.g. TexParameteriv -> TexParameteri.
# extension: Function is an extension to GL and should not be exposed to
# pepper unless pepper_interface is defined.
# extension_flag: Function is an extension and should be enabled only when
# the corresponding feature info flag is enabled. Implies
# 'extension': True.
# not_shared: For GENn types, True if objects can't be shared between
#             contexts.
# es3: ES3 API. True if the function requires an ES3 or WebGL2 context.
# es31: ES31 API. True if the function requires a WebGL2Compute context.
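#
# For illustration, here is a minimal sketch of what an entry in the table
# below looks like. The function name and values are hypothetical (this is
# not an actual entry used by the generator); it simply combines keys
# documented above:
#
#   'FooBarEXT': {
#     'type': 'Custom',                  # handler used to generate the code
#     'decoder_func': 'DoFooBarEXT',     # decoder function to call
#     'cmd_args': 'GLuint id, GLsizei count',  # overrides the GL arg list
#     'data_transfer_methods': ['shm'],  # how pointer data is transferred
#     'unit_test': False,                # no service side unit test
#     'extension': 'EXT_foo_bar',        # extension; not exposed to pepper
#     'es3': True,                       # requires an ES3 / WebGL2 context
#   },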
_FUNCTION_INFO = {
'ActiveTexture': {
'decoder_func': 'DoActiveTexture',
'unit_test': False,
'impl_func': False,
'client_test': False,
},
'AttachShader': {'decoder_func': 'DoAttachShader'},
'BindAttribLocation': {
'type': 'GLchar',
'data_transfer_methods': ['bucket'],
'needs_size': True,
},
'BindBuffer': {
'type': 'Bind',
'decoder_func': 'DoBindBuffer',
'gen_func': 'GenBuffersARB',
},
'BindBufferBase': {
'type': 'Bind',
'decoder_func': 'DoBindBufferBase',
'gen_func': 'GenBuffersARB',
'unit_test': False,
'es3': True,
},
'BindBufferRange': {
'type': 'Bind',
'decoder_func': 'DoBindBufferRange',
'gen_func': 'GenBuffersARB',
'unit_test': False,
'valid_args': {
'3': '4',
'4': '4'
},
'es3': True,
},
'BindFramebuffer': {
'type': 'Bind',
'decoder_func': 'DoBindFramebuffer',
'gl_test_func': 'glBindFramebufferEXT',
'gen_func': 'GenFramebuffersEXT',
'trace_level': 1,
},
  'BindImageTexture': {
'cmd_args': 'GLuint unit, GLuint texture, GLint level, GLboolean layered, '
'GLint layer, GLenum access, GLenum format',
'unit_test': False,
'trace_level': 2,
'es31': True,
},
'BindRenderbuffer': {
'type': 'Bind',
'decoder_func': 'DoBindRenderbuffer',
'gl_test_func': 'glBindRenderbufferEXT',
'gen_func': 'GenRenderbuffersEXT',
},
'BindSampler': {
'type': 'Bind',
'decoder_func': 'DoBindSampler',
'es3': True,
},
'BindTexture': {
'type': 'Bind',
'decoder_func': 'DoBindTexture',
'gen_func': 'GenTextures',
# TODO: remove this once client side caching works.
'client_test': False,
'unit_test': False,
'trace_level': 2,
},
'BindTransformFeedback': {
'type': 'Bind',
'decoder_func': 'DoBindTransformFeedback',
'es3': True,
'unit_test': False,
},
'BlitFramebufferCHROMIUM': {
'decoder_func': 'DoBlitFramebufferCHROMIUM',
'unit_test': False,
'extension': 'chromium_framebuffer_multisample',
'extension_flag': 'chromium_framebuffer_multisample',
'pepper_interface': 'FramebufferBlit',
'pepper_name': 'BlitFramebufferEXT',
'defer_reads': True,
'defer_draws': True,
'trace_level': 1,
},
'BufferData': {
'type': 'Custom',
'impl_func': False,
'data_transfer_methods': ['shm'],
'size_args': {
'data': 'size', },
'client_test': False,
'trace_level': 2,
},
'BufferSubData': {
'type': 'Data',
'client_test': False,
'decoder_func': 'DoBufferSubData',
'data_transfer_methods': ['shm'],
'size_args': {
'data': 'size', },
'trace_level': 2,
},
'CheckFramebufferStatus': {
'type': 'Is',
'decoder_func': 'DoCheckFramebufferStatus',
'gl_test_func': 'glCheckFramebufferStatusEXT',
'error_value': 'GL_FRAMEBUFFER_UNSUPPORTED',
'result': ['GLenum'],
},
'Clear': {
'decoder_func': 'DoClear',
'defer_draws': True,
'trace_level': 2,
'valid_args': {
'0': 'GL_COLOR_BUFFER_BIT'
},
},
'ClearBufferiv': {
'type': 'PUT',
'use_count_func': True,
'count': 4,
'decoder_func': 'DoClearBufferiv',
'unit_test': False,
'es3': True,
'trace_level': 2,
},
'ClearBufferuiv': {
'type': 'PUT',
'use_count_func': True,
'count': 4,
'decoder_func': 'DoClearBufferuiv',
'unit_test': False,
'es3': True,
'trace_level': 2,
},
'ClearBufferfv': {
'type': 'PUT',
'use_count_func': True,
'count': 4,
'decoder_func': 'DoClearBufferfv',
'unit_test': False,
'es3': True,
'trace_level': 2,
},
'ClearBufferfi': {
'es3': True,
'decoder_func': 'DoClearBufferfi',
'unit_test': False,
'trace_level': 2,
},
'ClearColor': {
'type': 'StateSet',
'state': 'ClearColor',
},
'ClearDepthf': {
'type': 'StateSet',
'state': 'ClearDepthf',
'decoder_func': 'glClearDepth',
'gl_test_func': 'glClearDepth',
'valid_args': {
'0': '0.5f'
},
},
'ClientWaitSync': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args': 'GLuint sync, GLbitfieldSyncFlushFlags flags, '
'GLuint64 timeout, GLenum* result',
'es3': True,
'result': ['GLenum'],
'trace_level': 2,
},
'ColorMask': {
'type': 'StateSet',
'state': 'ColorMask',
'no_gl': True,
'expectation': False,
},
'ColorMaskiOES': {
'extension_flag': 'oes_draw_buffers_indexed',
'unit_test': False,
'extension': 'OES_draw_buffers_indexed',
},
'ContextVisibilityHintCHROMIUM': {
'decoder_func': 'DoContextVisibilityHintCHROMIUM',
'extension': 'CHROMIUM_context_visibility_hint',
'unit_test': False,
'client_test': False,
},
'CopyBufferSubData': {
'decoder_func': 'DoCopyBufferSubData',
'impl_func': False,
'unit_test': False,
'es3': True,
},
'CoverageModulationCHROMIUM': {
'type': 'StateSet',
'state': 'CoverageModulationCHROMIUM',
'decoder_func': 'glCoverageModulationNV',
'extension': 'CHROMIUM_framebuffer_mixed_samples',
'extension_flag': 'chromium_framebuffer_mixed_samples',
},
'CreateAndConsumeTextureCHROMIUM': {
'type': 'NoCommand',
'extension': "CHROMIUM_texture_mailbox",
'trace_level': 2,
},
'CreateAndConsumeTextureINTERNAL': {
'decoder_func': 'DoCreateAndConsumeTextureINTERNAL',
'internal': True,
'type': 'PUT',
'count': 16, # GL_MAILBOX_SIZE_CHROMIUM
'impl_func': False,
'unit_test': False,
'trace_level': 2,
},
'ClearStencil': {
'type': 'StateSet',
'state': 'ClearStencil',
},
'EnableFeatureCHROMIUM': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'decoder_func': 'DoEnableFeatureCHROMIUM',
'cmd_args': 'GLuint bucket_id, GLint* result',
'result': ['GLint'],
'extension': 'GL_CHROMIUM_enable_feature',
'pepper_interface': 'ChromiumEnableFeature',
},
'CompileShader': {'decoder_func': 'DoCompileShader', 'unit_test': False},
'CompressedTexImage2D': {
'type': 'Custom',
'data_transfer_methods': ['bucket', 'shm'],
'trace_level': 1,
},
'CompressedTexSubImage2D': {
'type': 'Custom',
'data_transfer_methods': ['bucket', 'shm'],
'trace_level': 1,
},
'CopyTexImage2D': {
'decoder_func': 'DoCopyTexImage2D',
'unit_test': False,
'defer_reads': True,
'trace_level': 1,
},
'CopyTexSubImage2D': {
'decoder_func': 'DoCopyTexSubImage2D',
'defer_reads': True,
'trace_level': 1,
},
'CompressedTexImage3D': {
'type': 'Custom',
'data_transfer_methods': ['bucket', 'shm'],
'es3': True,
'trace_level': 1,
},
'CompressedTexSubImage3D': {
'type': 'Custom',
'data_transfer_methods': ['bucket', 'shm'],
'es3': True,
'trace_level': 1,
},
'CopyTexSubImage3D': {
'decoder_func': 'DoCopyTexSubImage3D',
'unit_test': False,
'defer_reads': True,
'es3': True,
'trace_level': 1,
},
'CreateImageCHROMIUM': {
'type': 'NoCommand',
'cmd_args':
'ClientBuffer buffer, GLsizei width, GLsizei height, '
'GLenum internalformat',
'result': ['GLuint'],
'extension': "CHROMIUM_image",
'trace_level': 1,
},
'DestroyImageCHROMIUM': {
'type': 'NoCommand',
'extension': "CHROMIUM_image",
'trace_level': 1,
},
'DescheduleUntilFinishedCHROMIUM': {
'type': 'Custom',
'decoder_func': 'DoDescheduleUntilFinishedCHROMIUM',
'extension': "CHROMIUM_deschedule",
'trace_level': 1,
},
'CreateProgram': {
'type': 'Create',
'client_test': False,
},
'CreateShader': {
'type': 'Create',
'client_test': False,
},
'BlendColor': {
'type': 'StateSet',
'state': 'BlendColor',
},
'BlendEquation': {
'type': 'StateSetRGBAlpha',
'state': 'BlendEquation',
'valid_args': {
'0': 'GL_FUNC_SUBTRACT'
},
},
'BlendEquationiOES': {
'extension_flag': 'oes_draw_buffers_indexed',
'unit_test': False,
'extension': 'OES_draw_buffers_indexed',
'valid_args': {
'1': 'GL_FUNC_SUBTRACT',
'2': 'GL_FUNC_SUBTRACT'
},
},
'BlendEquationSeparate': {
'type': 'StateSet',
'state': 'BlendEquation',
'valid_args': {
'0': 'GL_FUNC_SUBTRACT'
},
},
'BlendEquationSeparateiOES': {
'extension_flag': 'oes_draw_buffers_indexed',
'unit_test': False,
'extension': 'OES_draw_buffers_indexed',
'valid_args': {
'1': 'GL_FUNC_SUBTRACT',
'2': 'GL_FUNC_SUBTRACT'
},
},
'BlendFunc': {
'type': 'StateSetRGBAlpha',
'state': 'BlendFunc',
},
'BlendFunciOES': {
'extension_flag': 'oes_draw_buffers_indexed',
'unit_test': False,
'extension': 'OES_draw_buffers_indexed',
},
'BlendFuncSeparate': {
'type': 'StateSet',
'state': 'BlendFunc',
},
'BlendFuncSeparateiOES': {
'extension_flag': 'oes_draw_buffers_indexed',
'unit_test': False,
'extension': 'OES_draw_buffers_indexed',
},
'BlendBarrierKHR': {
'gl_test_func': 'glBlendBarrierKHR',
'extension': 'KHR_blend_equation_advanced',
'extension_flag': 'blend_equation_advanced',
'client_test': False,
},
'SampleCoverage': {'decoder_func': 'DoSampleCoverage'},
'StencilFunc': {
'type': 'StateSetFrontBack',
'state': 'StencilFunc',
},
'StencilFuncSeparate': {
'type': 'StateSetFrontBackSeparate',
'state': 'StencilFunc',
},
'StencilOp': {
'type': 'StateSetFrontBack',
'state': 'StencilOp',
'valid_args': {
'1': 'GL_INCR'
},
},
'StencilOpSeparate': {
'type': 'StateSetFrontBackSeparate',
'state': 'StencilOp',
'valid_args': {
'1': 'GL_INCR'
},
},
'Hint': {
'type': 'StateSetNamedParameter',
'state': 'Hint',
},
'CullFace': {'type': 'StateSet', 'state': 'CullFace'},
'FrontFace': {'type': 'StateSet', 'state': 'FrontFace'},
'DepthFunc': {'type': 'StateSet', 'state': 'DepthFunc'},
'LineWidth': {
'type': 'StateSet',
'state': 'LineWidth',
'decoder_func': 'DoLineWidth',
'valid_args': {
'0': '2.0f'
},
},
'PolygonOffset': {
'type': 'StateSet',
'state': 'PolygonOffset',
},
'DeleteBuffers': {
'type': 'DELn',
'gl_test_func': 'glDeleteBuffersARB',
'resource_type': 'Buffer',
'resource_types': 'Buffers',
},
'DeleteFramebuffers': {
'type': 'DELn',
'gl_test_func': 'glDeleteFramebuffersEXT',
'resource_type': 'Framebuffer',
'resource_types': 'Framebuffers',
'trace_level': 2,
},
'DeleteProgram': { 'type': 'Delete' },
'DeleteRenderbuffers': {
'type': 'DELn',
'gl_test_func': 'glDeleteRenderbuffersEXT',
'resource_type': 'Renderbuffer',
'resource_types': 'Renderbuffers',
'trace_level': 2,
},
'DeleteSamplers': {
'type': 'DELn',
'resource_type': 'Sampler',
'resource_types': 'Samplers',
'es3': True,
},
'DeleteShader': { 'type': 'Delete' },
'DeleteSync': {
'type': 'Delete',
'cmd_args': 'GLuint sync',
'resource_type': 'Sync',
'es3': True,
},
'DeleteTextures': {
'type': 'DELn',
'resource_type': 'Texture',
'resource_types': 'Textures',
},
'DeleteTransformFeedbacks': {
'type': 'DELn',
'resource_type': 'TransformFeedback',
'resource_types': 'TransformFeedbacks',
'es3': True,
'unit_test': False,
},
'DepthRangef': {
'decoder_func': 'DoDepthRangef',
'gl_test_func': 'glDepthRange',
},
'DepthMask': {
'type': 'StateSet',
'state': 'DepthMask',
'no_gl': True,
'expectation': False,
},
'DetachShader': {'decoder_func': 'DoDetachShader'},
'Disable': {
'decoder_func': 'DoDisable',
'impl_func': False,
'client_test': False,
},
'DisableiOES': {
'extension_flag': 'oes_draw_buffers_indexed',
'extension': 'OES_draw_buffers_indexed',
'decoder_func': 'DoDisableiOES',
'impl_func': False,
'unit_test': False,
},
'DisableVertexAttribArray': {
'decoder_func': 'DoDisableVertexAttribArray',
'impl_func': False,
'unit_test': False,
},
'DispatchCompute': {
'cmd_args': 'GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z',
'trace_level': 2,
'es31': True,
'unit_test': False,
},
'DispatchComputeIndirect': {
'cmd_args': 'GLintptrNotNegative offset',
'trace_level': 2,
'es31': True,
'unit_test': False,
},
'DrawArrays': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count',
'defer_draws': True,
'trace_level': 2,
},
'DrawArraysIndirect': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumDrawMode mode, GLuint offset',
'trace_level': 2,
'es31': True,
'unit_test': False,
'client_test': False,
},
'DrawElements': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumDrawMode mode, GLsizei count, '
'GLenumIndexType type, GLuint index_offset',
'client_test': False,
'defer_draws': True,
'trace_level': 2,
},
'DrawElementsIndirect': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumDrawMode mode, GLenumIndexType type, GLuint offset',
'trace_level': 2,
'es31': True,
'unit_test': False,
'client_test': False,
},
'DrawRangeElements': {
'type': 'NoCommand',
'es3': True,
},
'Enable': {
'decoder_func': 'DoEnable',
'impl_func': False,
'client_test': False,
},
'EnableiOES': {
'extension_flag': 'oes_draw_buffers_indexed',
'extension': 'OES_draw_buffers_indexed',
'decoder_func': 'DoEnableiOES',
'impl_func': False,
'unit_test': False,
},
'EnableVertexAttribArray': {
'decoder_func': 'DoEnableVertexAttribArray',
'impl_func': False,
'unit_test': False,
},
'FenceSync': {
'type': 'Create',
'client_test': False,
'decoder_func': 'DoFenceSync',
'es3': True,
'trace_level': 1,
},
'Finish': {
'impl_func': False,
'client_test': False,
'decoder_func': 'DoFinish',
'defer_reads': True,
'trace_level': 1,
},
'Flush': {
'impl_func': False,
'decoder_func': 'DoFlush',
'trace_level': 1,
},
'FlushMappedBufferRange': {
'decoder_func': 'DoFlushMappedBufferRange',
'trace_level': 1,
'unit_test': False,
'es3': True,
},
'FramebufferRenderbuffer': {
'decoder_func': 'DoFramebufferRenderbuffer',
'gl_test_func': 'glFramebufferRenderbufferEXT',
'trace_level': 1,
},
'FramebufferTexture2D': {
'decoder_func': 'DoFramebufferTexture2D',
'gl_test_func': 'glFramebufferTexture2DEXT',
'unit_test': False,
'trace_level': 1,
},
'FramebufferTexture2DMultisampleEXT': {
'decoder_func': 'DoFramebufferTexture2DMultisample',
'gl_test_func': 'glFramebufferTexture2DMultisampleEXT',
'unit_test': False,
'extension': 'EXT_multisampled_render_to_texture',
'extension_flag': 'multisampled_render_to_texture',
'trace_level': 1,
},
'FramebufferTextureLayer': {
'decoder_func': 'DoFramebufferTextureLayer',
'es3': True,
'unit_test': False,
'trace_level': 1,
},
'GenerateMipmap': {
'decoder_func': 'DoGenerateMipmap',
'gl_test_func': 'glGenerateMipmapEXT',
'trace_level': 1,
},
'GenBuffers': {
'type': 'GENn',
'gl_test_func': 'glGenBuffersARB',
'resource_type': 'Buffer',
'resource_types': 'Buffers',
},
'GenFramebuffers': {
'type': 'GENn',
'gl_test_func': 'glGenFramebuffersEXT',
'resource_type': 'Framebuffer',
'resource_types': 'Framebuffers',
'not_shared': 'True',
},
'GenRenderbuffers': {
    'type': 'GENn',
    'gl_test_func': 'glGenRenderbuffersEXT',
'resource_type': 'Renderbuffer',
'resource_types': 'Renderbuffers',
},
'GenSamplers': {
'type': 'GENn',
'gl_test_func': 'glGenSamplers',
'resource_type': 'Sampler',
'resource_types': 'Samplers',
'es3': True,
},
'GenTextures': {
'type': 'GENn',
'gl_test_func': 'glGenTextures',
'resource_type': 'Texture',
'resource_types': 'Textures',
},
'GenTransformFeedbacks': {
'type': 'GENn',
'gl_test_func': 'glGenTransformFeedbacks',
'resource_type': 'TransformFeedback',
'resource_types': 'TransformFeedbacks',
'es3': True,
'not_shared': 'True',
},
'GetActiveAttrib': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, GLuint index, uint32_t name_bucket_id, '
'void* result',
'result': [
'int32_t success',
'int32_t size',
'uint32_t type',
],
},
'GetActiveUniform': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, GLuint index, uint32_t name_bucket_id, '
'void* result',
'result': [
'int32_t success',
'int32_t size',
'uint32_t type',
],
},
'GetActiveUniformBlockiv': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'result': ['SizedResult<GLint>'],
'es3': True,
},
'GetActiveUniformBlockName': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, GLuint index, uint32_t name_bucket_id, '
'void* result',
'result': ['int32_t'],
'es3': True,
},
'GetActiveUniformsiv': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, uint32_t indices_bucket_id, GLenum pname, '
'GLint* params',
'result': ['SizedResult<GLint>'],
'es3': True,
},
'GetAttachedShaders': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args': 'GLidProgram program, void* result, uint32_t result_size',
'result': ['SizedResult<GLuint>'],
},
'GetAttribLocation': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, uint32_t name_bucket_id, GLint* location',
'result': ['GLint'],
'error_return': -1,
},
'GetFragDataIndexEXT': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, uint32_t name_bucket_id, GLint* index',
'result': ['GLint'],
'error_return': -1,
'extension': 'EXT_blend_func_extended',
'extension_flag': 'ext_blend_func_extended',
},
'GetFragDataLocation': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, uint32_t name_bucket_id, GLint* location',
'result': ['GLint'],
'error_return': -1,
'es3': True,
},
'GetBooleanv': {
'type': 'GETn',
'result': ['SizedResult<GLboolean>'],
'decoder_func': 'DoGetBooleanv',
'gl_test_func': 'glGetIntegerv',
},
'GetBooleani_v': {
'type': 'GETn',
'result': ['SizedResult<GLboolean>'],
'decoder_func': 'DoGetBooleani_v',
'shadowed': True,
'client_test': False,
'unit_test': False,
'es3': True
},
'GetBufferParameteri64v': {
'type': 'GETn',
'result': ['SizedResult<GLint64>'],
'decoder_func': 'DoGetBufferParameteri64v',
'expectation': False,
'shadowed': True,
'es3': True,
},
'GetBufferParameteriv': {
'type': 'GETn',
'result': ['SizedResult<GLint>'],
'decoder_func': 'DoGetBufferParameteriv',
'expectation': False,
'shadowed': True,
},
'GetError': {
'type': 'Is',
'decoder_func': 'GetErrorState()->GetGLError',
'impl_func': False,
'result': ['GLenum'],
'client_test': False,
},
'GetFloatv': {
'type': 'GETn',
'result': ['SizedResult<GLfloat>'],
'decoder_func': 'DoGetFloatv',
'gl_test_func': 'glGetIntegerv',
},
'GetFramebufferAttachmentParameteriv': {
'type': 'GETn',
'decoder_func': 'DoGetFramebufferAttachmentParameteriv',
'gl_test_func': 'glGetFramebufferAttachmentParameterivEXT',
'result': ['SizedResult<GLint>'],
},
'GetGraphicsResetStatusKHR': {
'type': 'NoCommand',
'extension': True,
'trace_level': 1,
},
'GetInteger64v': {
'type': 'GETn',
'result': ['SizedResult<GLint64>'],
'client_test': False,
'decoder_func': 'DoGetInteger64v',
'gl_test_func': 'glGetIntegerv',
'es3': True
},
'GetIntegerv': {
'type': 'GETn',
'result': ['SizedResult<GLint>'],
'decoder_func': 'DoGetIntegerv',
'client_test': False,
},
'GetInteger64i_v': {
'type': 'GETn',
'result': ['SizedResult<GLint64>'],
'decoder_func': 'DoGetInteger64i_v',
'shadowed': True,
'client_test': False,
'unit_test': False,
'es3': True
},
'GetIntegeri_v': {
'type': 'GETn',
'result': ['SizedResult<GLint>'],
'decoder_func': 'DoGetIntegeri_v',
'shadowed': True,
'client_test': False,
'unit_test': False,
'es3': True
},
'GetInternalformativ': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'result': ['SizedResult<GLint>'],
'cmd_args':
'GLenumRenderBufferTarget target, GLenumRenderBufferFormat format, '
'GLenumInternalFormatParameter pname, GLint* params',
'es3': True,
},
'GetMaxValueInBufferCHROMIUM': {
'type': 'Is',
'decoder_func': 'DoGetMaxValueInBufferCHROMIUM',
'result': ['GLuint'],
'unit_test': False,
'client_test': False,
'extension': True,
'impl_func': False,
},
'GetProgramiv': {
'type': 'GETn',
'decoder_func': 'DoGetProgramiv',
'result': ['SizedResult<GLint>'],
'expectation': False,
},
'GetProgramInfoCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'extension': 'CHROMIUM_get_multiple',
'client_test': False,
'cmd_args': 'GLidProgram program, uint32_t bucket_id',
'result': [
'uint32_t link_status',
'uint32_t num_attribs',
'uint32_t num_uniforms',
],
},
'GetProgramInfoLog': {
'type': 'STRn',
'expectation': False,
},
'GetProgramInterfaceiv': {
'type': 'GETn',
'decoder_func': 'DoGetProgramInterfaceiv',
'result': ['SizedResult<GLint>'],
'unit_test': False,
'trace_level': 2,
'es31': True,
},
'GetProgramResourceiv': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, GLenum program_interface, GLuint index, '
'uint32_t props_bucket_id, GLint* params',
'result': ['SizedResult<GLint>'],
'unit_test': False,
'trace_level': 2,
'es31': True,
},
'GetProgramResourceIndex': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, GLenum program_interface, '
'uint32_t name_bucket_id, GLuint* index',
'result': ['GLuint'],
'error_return': 'GL_INVALID_INDEX',
'unit_test': False,
'trace_level': 2,
'es31': True,
},
'GetProgramResourceLocation': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, GLenum program_interface, '
'uint32_t name_bucket_id, GLint* location',
'result': ['GLint'],
'error_return': -1,
'unit_test': False,
'trace_level': 2,
'es31': True,
},
'GetProgramResourceName': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, GLenum program_interface, GLuint index, '
'uint32_t name_bucket_id, void* result',
'result': ['int32_t'],
'unit_test': False,
'trace_level': 2,
'es31': True,
},
'GetRenderbufferParameteriv': {
'type': 'GETn',
'decoder_func': 'DoGetRenderbufferParameteriv',
'gl_test_func': 'glGetRenderbufferParameterivEXT',
'result': ['SizedResult<GLint>'],
},
'GetSamplerParameterfv': {
'type': 'GETn',
'decoder_func': 'DoGetSamplerParameterfv',
'result': ['SizedResult<GLfloat>'],
'es3': True,
},
'GetSamplerParameteriv': {
'type': 'GETn',
'decoder_func': 'DoGetSamplerParameteriv',
'result': ['SizedResult<GLint>'],
'es3': True,
},
'GetShaderiv': {
'type': 'GETn',
'decoder_func': 'DoGetShaderiv',
'result': ['SizedResult<GLint>'],
},
'GetShaderInfoLog': {
'type': 'STRn',
'get_len_func': 'glGetShaderiv',
'get_len_enum': 'GL_INFO_LOG_LENGTH',
'unit_test': False,
},
'GetShaderPrecisionFormat': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLenumShaderType shadertype, GLenumShaderPrecision precisiontype, '
'void* result',
'result': [
'int32_t success',
'int32_t min_range',
'int32_t max_range',
'int32_t precision',
],
},
'GetShaderSource': {
'type': 'STRn',
'get_len_func': 'DoGetShaderiv',
'get_len_enum': 'GL_SHADER_SOURCE_LENGTH',
'unit_test': False,
'client_test': False,
},
'GetString': {
'type': 'Custom',
'client_test': False,
'cmd_args': 'GLenumStringType name, uint32_t bucket_id',
},
'GetStringi': {
'type': 'NoCommand',
'es3': True,
},
'GetSynciv': {
'type': 'GETn',
'cmd_args': 'GLuint sync, GLenumSyncParameter pname, void* values',
'decoder_func': 'DoGetSynciv',
'result': ['SizedResult<GLint>'],
'es3': True,
},
'GetTexParameterfv': {
'type': 'GETn',
'decoder_func': 'DoGetTexParameterfv',
'result': ['SizedResult<GLfloat>']
},
'GetTexParameteriv': {
'type': 'GETn',
'decoder_func': 'DoGetTexParameteriv',
'result': ['SizedResult<GLint>']
},
'GetTranslatedShaderSourceANGLE': {
'type': 'STRn',
'get_len_func': 'DoGetShaderiv',
'get_len_enum': 'GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE',
'unit_test': False,
'extension': True,
},
'GetUniformBlockIndex': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, uint32_t name_bucket_id, GLuint* index',
'result': ['GLuint'],
'error_return': 'GL_INVALID_INDEX',
'es3': True,
},
'GetUniformBlocksCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'extension': True,
'client_test': False,
'cmd_args': 'GLidProgram program, uint32_t bucket_id',
'result': ['uint32_t'],
'es3': True,
},
'GetUniformsES3CHROMIUM': {
'type': 'Custom',
'impl_func': False,
'extension': True,
'client_test': False,
'cmd_args': 'GLidProgram program, uint32_t bucket_id',
'result': ['uint32_t'],
'es3': True,
},
'GetTransformFeedbackVarying': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, GLuint index, uint32_t name_bucket_id, '
'void* result',
'result': [
'int32_t success',
'int32_t size',
'uint32_t type',
],
'es3': True,
},
'GetTransformFeedbackVaryingsCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'extension': True,
'client_test': False,
'cmd_args': 'GLidProgram program, uint32_t bucket_id',
'result': ['uint32_t'],
'es3': True,
},
'GetUniformfv': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'result': ['SizedResult<GLfloat>'],
},
'GetUniformiv': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'result': ['SizedResult<GLint>'],
},
'GetUniformuiv': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'result': ['SizedResult<GLuint>'],
'es3': True,
},
'GetUniformIndices': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'result': ['SizedResult<GLuint>'],
'cmd_args': 'GLidProgram program, uint32_t names_bucket_id, '
'GLuint* indices',
'es3': True,
},
'GetUniformLocation': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args':
'GLidProgram program, uint32_t name_bucket_id, GLint* location',
'result': ['GLint'],
# http://www.opengl.org/sdk/docs/man/xhtml/glGetUniformLocation.xml
'error_return': -1,
},
'GetVertexAttribfv': {
'type': 'GETn',
'result': ['SizedResult<GLfloat>'],
'impl_func': False,
'decoder_func': 'DoGetVertexAttribfv',
'expectation': False,
'client_test': False,
},
'GetVertexAttribiv': {
'type': 'GETn',
'result': ['SizedResult<GLint>'],
'impl_func': False,
'decoder_func': 'DoGetVertexAttribiv',
'expectation': False,
'client_test': False,
},
'GetVertexAttribIiv': {
'type': 'GETn',
'result': ['SizedResult<GLint>'],
'impl_func': False,
'decoder_func': 'DoGetVertexAttribIiv',
'expectation': False,
'client_test': False,
'es3': True,
},
'GetVertexAttribIuiv': {
'type': 'GETn',
'result': ['SizedResult<GLuint>'],
'impl_func': False,
'decoder_func': 'DoGetVertexAttribIuiv',
'expectation': False,
'client_test': False,
'es3': True,
},
'GetVertexAttribPointerv': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'result': ['SizedResult<GLuint>'],
'client_test': False,
},
'InvalidateFramebuffer': {
'type': 'PUTn',
'count': 1,
'decoder_func': 'DoInvalidateFramebuffer',
'unit_test': False,
'es3': True,
},
'InvalidateSubFramebuffer': {
'type': 'PUTn',
'count': 1,
'decoder_func': 'DoInvalidateSubFramebuffer',
'unit_test': False,
'es3': True,
},
'IsBuffer': {
'type': 'Is',
'decoder_func': 'DoIsBuffer',
'expectation': False,
},
'IsEnabled': {
'type': 'Is',
'decoder_func': 'DoIsEnabled',
'client_test': False,
'impl_func': False,
'expectation': False,
},
'IsEnablediOES': {
'extension_flag': 'oes_draw_buffers_indexed',
'unit_test': False,
'extension': 'OES_draw_buffers_indexed',
'type': 'Is',
'decoder_func': 'DoIsEnablediOES',
'client_test': False,
'impl_func': False,
'expectation': False,
},
'IsFramebuffer': {
'type': 'Is',
'decoder_func': 'DoIsFramebuffer',
'expectation': False,
},
'IsProgram': {
'type': 'Is',
'decoder_func': 'DoIsProgram',
'expectation': False,
},
'IsRenderbuffer': {
'type': 'Is',
'decoder_func': 'DoIsRenderbuffer',
'expectation': False,
},
'IsShader': {
'type': 'Is',
'decoder_func': 'DoIsShader',
'expectation': False,
},
'IsSampler': {
'type': 'Is',
'decoder_func': 'DoIsSampler',
'expectation': False,
'es3': True,
},
'IsSync': {
'type': 'Is',
'cmd_args': 'GLuint sync',
'decoder_func': 'DoIsSync',
'expectation': False,
'es3': True,
},
'IsTexture': {
'type': 'Is',
'decoder_func': 'DoIsTexture',
'expectation': False,
},
'IsTransformFeedback': {
'type': 'Is',
'decoder_func': 'DoIsTransformFeedback',
'expectation': False,
'es3': True,
},
'GetLastFlushIdCHROMIUM': {
'type': 'NoCommand',
'impl_func': False,
'result': ['GLuint'],
'extension': True,
},
'LinkProgram': {
'decoder_func': 'DoLinkProgram',
'impl_func': False,
'trace_level': 1,
},
'MapBufferCHROMIUM': {
'type': 'NoCommand',
'extension': "CHROMIUM_pixel_transfer_buffer_object",
'trace_level': 1,
},
'MapBufferSubDataCHROMIUM': {
'type': 'NoCommand',
'extension': 'CHROMIUM_map_sub',
'pepper_interface': 'ChromiumMapSub',
'trace_level': 1,
},
'MapTexSubImage2DCHROMIUM': {
'type': 'NoCommand',
'extension': "CHROMIUM_sub_image",
'pepper_interface': 'ChromiumMapSub',
'trace_level': 1,
},
'MapBufferRange': {
'type': 'Custom',
'data_transfer_methods': ['shm'],
'cmd_args': 'GLenumBufferTarget target, GLintptrNotNegative offset, '
'GLsizeiptr size, GLbitfieldMapBufferAccess access, '
'uint32_t data_shm_id, uint32_t data_shm_offset, '
'uint32_t result_shm_id, uint32_t result_shm_offset',
'es3': True,
'result': ['uint32_t'],
'trace_level': 1,
},
  # MemoryBarrierEXT is used in order to avoid the conflicting MemoryBarrier
  # macro on Windows.
'MemoryBarrierEXT': {
'cmd_args': 'GLbitfield barriers',
'unit_test': False,
'trace_level': 2,
'es31': True
},
'MemoryBarrierByRegion': {
'cmd_args': 'GLbitfield barriers',
'unit_test': False,
'trace_level': 2,
'es31': True
},
'MultiDrawBeginCHROMIUM': {
'decoder_func': 'DoMultiDrawBeginCHROMIUM',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
'internal': True,
'trace_level': 1,
'impl_func': False,
'unit_test': False,
},
'MultiDrawEndCHROMIUM': {
'decoder_func': 'DoMultiDrawEndCHROMIUM',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
'internal': True,
'trace_level': 1,
'impl_func': False,
'unit_test': False,
},
'MultiDrawArraysCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLenumDrawMode mode, '
'uint32_t firsts_shm_id, uint32_t firsts_shm_offset, '
'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
'GLsizei drawcount',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
'data_transfer_methods': ['shm'],
'size_args': {
'firsts': 'drawcount * sizeof(GLint)',
'counts': 'drawcount * sizeof(GLsizei)', },
'defer_draws': True,
'impl_func': False,
'client_test': False,
'internal': True,
'trace_level': 2,
},
'MultiDrawArraysInstancedCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLenumDrawMode mode, '
'uint32_t firsts_shm_id, uint32_t firsts_shm_offset, '
'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
'uint32_t instance_counts_shm_id, '
'uint32_t instance_counts_shm_offset, GLsizei drawcount',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
'data_transfer_methods': ['shm'],
'size_args': {
'firsts': 'drawcount * sizeof(GLint)',
'counts': 'drawcount * sizeof(GLsizei)',
'instance_counts': 'drawcount * sizeof(GLsizei)', },
'defer_draws': True,
'impl_func': False,
'client_test': False,
'internal': True,
'trace_level': 2,
},
'MultiDrawArraysInstancedBaseInstanceCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLenumDrawMode mode, '
'uint32_t firsts_shm_id, uint32_t firsts_shm_offset, '
'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
'uint32_t instance_counts_shm_id, '
'uint32_t instance_counts_shm_offset, '
'uint32_t baseinstances_shm_id, '
'uint32_t baseinstances_shm_offset, '
'GLsizei drawcount',
'extension': 'WEBGL_multi_draw_instanced_base_vertex_base_instance',
'extension_flag': 'webgl_multi_draw_instanced_base_vertex_base_instance',
'data_transfer_methods': ['shm'],
'size_args': {
'firsts': 'drawcount * sizeof(GLint)',
'counts': 'drawcount * sizeof(GLsizei)',
'instance_counts': 'drawcount * sizeof(GLsizei)',
'baseinstances': 'drawcount * sizeof(GLuint)',
},
'defer_draws': True,
'impl_func': False,
'client_test': False,
'internal': True,
'trace_level': 2,
},
'MultiDrawElementsCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLenumDrawMode mode, '
'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
'GLenumIndexType type, '
'uint32_t offsets_shm_id, uint32_t offsets_shm_offset, '
'GLsizei drawcount',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
'data_transfer_methods': ['shm'],
'size_args': {
'counts': 'drawcount * sizeof(GLsizei)',
'offsets': 'drawcount * sizeof(GLsizei)', },
'defer_draws': True,
'impl_func': False,
'client_test': False,
'internal': True,
'trace_level': 2,
},
'MultiDrawElementsInstancedCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLenumDrawMode mode, '
'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
'GLenumIndexType type, '
'uint32_t offsets_shm_id, uint32_t offsets_shm_offset, '
'uint32_t instance_counts_shm_id, '
'uint32_t instance_counts_shm_offset, GLsizei drawcount',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
'data_transfer_methods': ['shm'],
'size_args': {
'counts': 'drawcount * sizeof(GLsizei)',
'offsets': 'drawcount * sizeof(GLsizei)',
'instance_counts': 'drawcount * sizeof(GLsizei)', },
'defer_draws': True,
'impl_func': False,
'client_test': False,
'internal': True,
'trace_level': 2,
},
'MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLenumDrawMode mode, '
'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
'GLenumIndexType type, '
'uint32_t offsets_shm_id, uint32_t offsets_shm_offset, '
'uint32_t instance_counts_shm_id, '
'uint32_t instance_counts_shm_offset, '
'uint32_t basevertices_shm_id, '
'uint32_t basevertices_shm_offset, '
'uint32_t baseinstances_shm_id, '
'uint32_t baseinstances_shm_offset, '
'GLsizei drawcount',
'extension': 'WEBGL_multi_draw_instanced_base_vertex_base_instance',
'extension_flag': 'webgl_multi_draw_instanced_base_vertex_base_instance',
'data_transfer_methods': ['shm'],
'size_args': {
'counts': 'drawcount * sizeof(GLsizei)',
'offsets': 'drawcount * sizeof(GLsizei)',
'instance_counts': 'drawcount * sizeof(GLsizei)',
'basevertices': 'drawcount * sizeof(GLint)',
'baseinstances': 'drawcount * sizeof(GLuint)',
},
'defer_draws': True,
'impl_func': False,
'client_test': False,
'internal': True,
'trace_level': 2,
},
'MultiDrawArraysWEBGL': {
'type': 'NoCommand',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
},
'MultiDrawArraysInstancedWEBGL': {
'type': 'NoCommand',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
},
'MultiDrawArraysInstancedBaseInstanceWEBGL': {
'type': 'NoCommand',
'extension': 'WEBGL_multi_draw_instanced_base_vertex_base_instance',
'extension_flag': 'webgl_multi_draw_instanced_base_vertex_base_instance',
},
'MultiDrawElementsWEBGL': {
'type': 'NoCommand',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
},
'MultiDrawElementsInstancedWEBGL': {
'type': 'NoCommand',
'extension': 'WEBGL_multi_draw',
'extension_flag': 'webgl_multi_draw',
},
'MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL': {
'type': 'NoCommand',
'extension': 'WEBGL_multi_draw_instanced_base_vertex_base_instance',
'extension_flag': 'webgl_multi_draw_instanced_base_vertex_base_instance',
},
'PauseTransformFeedback': {
'decoder_func': 'DoPauseTransformFeedback',
'unit_test': False,
'es3': True,
},
'PixelStorei': {
'type': 'Custom',
'impl_func': False,
},
'PostSubBufferCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'client_test': False,
'extension': True,
},
'ProduceTextureDirectCHROMIUM': {
'decoder_func': 'DoProduceTextureDirectCHROMIUM',
'impl_func': False,
'type': 'PUT',
'count': 16, # GL_MAILBOX_SIZE_CHROMIUM
'unit_test': False,
'client_test': False,
'extension': "CHROMIUM_texture_mailbox",
'trace_level': 1,
},
'RenderbufferStorage': {
'decoder_func': 'DoRenderbufferStorage',
'gl_test_func': 'glRenderbufferStorageEXT',
'expectation': False,
'trace_level': 1,
},
'RenderbufferStorageMultisampleCHROMIUM': {
'cmd_comment':
'// GL_CHROMIUM_framebuffer_multisample\n',
'decoder_func': 'DoRenderbufferStorageMultisampleCHROMIUM',
'gl_test_func': 'glRenderbufferStorageMultisampleCHROMIUM',
'unit_test': False,
'extension': 'chromium_framebuffer_multisample',
'extension_flag': 'chromium_framebuffer_multisample',
'pepper_interface': 'FramebufferMultisample',
'pepper_name': 'RenderbufferStorageMultisampleEXT',
'trace_level': 1,
},
'RenderbufferStorageMultisampleAdvancedAMD': {
'cmd_comment':
'// GL_AMD_framebuffer_multisample_advanced\n',
'decoder_func': 'DoRenderbufferStorageMultisampleAdvancedAMD',
'gl_test_func': 'glRenderbufferStorageMultisampleAdvancedAMD',
'unit_test': False,
'extension': 'amd_framebuffer_multisample_advanced',
'extension_flag': 'amd_framebuffer_multisample_advanced',
'trace_level': 1,
},
'RenderbufferStorageMultisampleEXT': {
'cmd_comment':
'// GL_EXT_multisampled_render_to_texture\n',
'decoder_func': 'DoRenderbufferStorageMultisampleEXT',
'gl_test_func': 'glRenderbufferStorageMultisampleEXT',
'unit_test': False,
'extension': 'EXT_multisampled_render_to_texture',
'extension_flag': 'multisampled_render_to_texture',
'trace_level': 1,
},
'ReadBuffer': {
'es3': True,
'decoder_func': 'DoReadBuffer',
'trace_level': 1,
},
'ReadPixels': {
'cmd_comment':
'// ReadPixels has the result separated from the pixel buffer so that\n'
'// it is easier to specify the result going to some specific place\n'
'// that exactly fits the rectangle of pixels.\n',
'type': 'Custom',
'data_transfer_methods': ['shm'],
'impl_func': False,
'client_test': False,
'cmd_args':
'GLint x, GLint y, GLsizei width, GLsizei height, '
'GLenumReadPixelFormat format, GLenumReadPixelType type, '
'uint32_t pixels_shm_id, uint32_t pixels_shm_offset, '
'uint32_t result_shm_id, uint32_t result_shm_offset, '
'GLboolean async',
'result': [
'uint32_t success',
      # The args below exclude the out-of-bounds area.
'int32_t row_length',
'int32_t num_rows',
],
'defer_reads': True,
'trace_level': 1,
},
'ReleaseShaderCompiler': {
'decoder_func': 'DoReleaseShaderCompiler',
'unit_test': False,
},
'ResumeTransformFeedback': {
'decoder_func': 'DoResumeTransformFeedback',
'unit_test': False,
'es3': True,
},
'SamplerParameterf': {
'valid_args': {
'2': 'GL_NEAREST'
},
'decoder_func': 'DoSamplerParameterf',
'es3': True,
},
'SamplerParameterfv': {
'type': 'PUT',
'data_value': 'GL_NEAREST',
'count': 1,
'gl_test_func': 'glSamplerParameterf',
'decoder_func': 'DoSamplerParameterfv',
'first_element_only': True,
'es3': True,
},
'SamplerParameteri': {
'valid_args': {
'2': 'GL_NEAREST'
},
'decoder_func': 'DoSamplerParameteri',
'es3': True,
},
'SamplerParameteriv': {
'type': 'PUT',
'data_value': 'GL_NEAREST',
'count': 1,
'gl_test_func': 'glSamplerParameteri',
'decoder_func': 'DoSamplerParameteriv',
'first_element_only': True,
'es3': True,
},
'ShaderBinary': {
'type': 'Custom',
'client_test': False,
},
'ShaderSource': {
'type': 'PUTSTR',
'decoder_func': 'DoShaderSource',
'expectation': False,
'data_transfer_methods': ['bucket'],
'cmd_args':
'GLuint shader, const char** str',
'pepper_args':
'GLuint shader, GLsizei count, const char** str, const GLint* length',
},
'StencilMask': {
'type': 'StateSetFrontBack',
'state': 'StencilMask',
'no_gl': True,
'expectation': False,
},
'StencilMaskSeparate': {
'type': 'StateSetFrontBackSeparate',
'state': 'StencilMask',
'no_gl': True,
'expectation': False,
},
'SwapBuffers': {
'impl_func': False,
'decoder_func': 'DoSwapBuffers',
'client_test': False,
'expectation': False,
'extension': True,
'trace_level': 1,
'trace_queueing_flow': True,
},
'SwapBuffersWithBoundsCHROMIUM': {
'type': 'PUTn',
'count': 4,
'decoder_func': 'DoSwapBuffersWithBoundsCHROMIUM',
'impl_func': False,
'client_test': False,
'unit_test': False,
'extension': True,
},
'TexImage2D': {
'type': 'Custom',
'impl_func': False,
'data_transfer_methods': ['shm'],
'client_test': False,
'trace_level': 2,
},
'TexImage3D': {
'type': 'Custom',
'impl_func': False,
'data_transfer_methods': ['shm'],
'client_test': False,
'es3': True,
'trace_level': 2,
},
'TexParameterf': {
'decoder_func': 'DoTexParameterf',
'valid_args': {
'2': 'GL_NEAREST'
},
},
'TexParameteri': {
'decoder_func': 'DoTexParameteri',
'valid_args': {
'2': 'GL_NEAREST'
},
},
'TexParameterfv': {
'type': 'PUT',
'data_value': 'GL_NEAREST',
'count': 1,
'decoder_func': 'DoTexParameterfv',
'gl_test_func': 'glTexParameterf',
'first_element_only': True,
},
'TexParameteriv': {
'type': 'PUT',
'data_value': 'GL_NEAREST',
'count': 1,
'decoder_func': 'DoTexParameteriv',
'gl_test_func': 'glTexParameteri',
'first_element_only': True,
},
'TexStorage3D': {
'es3': True,
'unit_test': False,
'decoder_func': 'DoTexStorage3D',
'trace_level': 2,
},
'TexSubImage2D': {
'type': 'Custom',
'impl_func': False,
'data_transfer_methods': ['shm'],
'client_test': False,
'trace_level': 2,
'cmd_args': 'GLenumTextureTarget target, GLint level, '
'GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, '
'GLenumTextureFormat format, GLenumPixelType type, '
'const void* pixels, GLboolean internal'
},
'TexSubImage3D': {
'type': 'Custom',
'impl_func': False,
'data_transfer_methods': ['shm'],
'client_test': False,
'trace_level': 2,
'cmd_args': 'GLenumTextureTarget target, GLint level, '
'GLint xoffset, GLint yoffset, GLint zoffset, '
'GLsizei width, GLsizei height, GLsizei depth, '
'GLenumTextureFormat format, GLenumPixelType type, '
'const void* pixels, GLboolean internal',
'es3': True,
},
'TransformFeedbackVaryings': {
'type': 'PUTSTR',
'data_transfer_methods': ['bucket'],
'decoder_func': 'DoTransformFeedbackVaryings',
'cmd_args':
'GLuint program, const char** varyings, GLenum buffermode',
'expectation': False,
'es3': True,
},
'Uniform1f': {'type': 'PUTXn', 'count': 1},
'Uniform1fv': {
'type': 'PUTn',
'count': 1,
'decoder_func': 'DoUniform1fv',
},
'Uniform1i': {'decoder_func': 'DoUniform1i', 'unit_test': False},
'Uniform1iv': {
'type': 'PUTn',
'count': 1,
'decoder_func': 'DoUniform1iv',
'unit_test': False,
},
'Uniform1ui': {
'type': 'PUTXn',
'count': 1,
'unit_test': False,
'es3': True,
},
'Uniform1uiv': {
'type': 'PUTn',
'count': 1,
'decoder_func': 'DoUniform1uiv',
'unit_test': False,
'es3': True,
},
'Uniform2i': {'type': 'PUTXn', 'count': 2},
'Uniform2f': {'type': 'PUTXn', 'count': 2},
'Uniform2fv': {
'type': 'PUTn',
'count': 2,
'decoder_func': 'DoUniform2fv',
},
'Uniform2iv': {
'type': 'PUTn',
'count': 2,
'decoder_func': 'DoUniform2iv',
},
'Uniform2ui': {
'type': 'PUTXn',
'count': 2,
'unit_test': False,
'es3': True,
},
'Uniform2uiv': {
'type': 'PUTn',
'count': 2,
'decoder_func': 'DoUniform2uiv',
'unit_test': False,
'es3': True,
},
'Uniform3i': {'type': 'PUTXn', 'count': 3},
'Uniform3f': {'type': 'PUTXn', 'count': 3},
'Uniform3fv': {
'type': 'PUTn',
'count': 3,
'decoder_func': 'DoUniform3fv',
},
'Uniform3iv': {
'type': 'PUTn',
'count': 3,
'decoder_func': 'DoUniform3iv',
},
'Uniform3ui': {
'type': 'PUTXn',
'count': 3,
'unit_test': False,
'es3': True,
},
'Uniform3uiv': {
'type': 'PUTn',
'count': 3,
'decoder_func': 'DoUniform3uiv',
'unit_test': False,
'es3': True,
},
'Uniform4i': {'type': 'PUTXn', 'count': 4},
'Uniform4f': {'type': 'PUTXn', 'count': 4},
'Uniform4fv': {
'type': 'PUTn',
'count': 4,
'decoder_func': 'DoUniform4fv',
},
'Uniform4iv': {
'type': 'PUTn',
'count': 4,
'decoder_func': 'DoUniform4iv',
},
'Uniform4ui': {
'type': 'PUTXn',
'count': 4,
'unit_test': False,
'es3': True,
},
'Uniform4uiv': {
'type': 'PUTn',
'count': 4,
'decoder_func': 'DoUniform4uiv',
'unit_test': False,
'es3': True,
},
'UniformMatrix2fv': {
'type': 'PUTn',
'count': 4,
'decoder_func': 'DoUniformMatrix2fv',
'unit_test': False,
},
'UniformMatrix2x3fv': {
'type': 'PUTn',
'count': 6,
'decoder_func': 'DoUniformMatrix2x3fv',
'es3': True,
},
'UniformMatrix2x4fv': {
'type': 'PUTn',
'count': 8,
'decoder_func': 'DoUniformMatrix2x4fv',
'es3': True,
},
'UniformMatrix3fv': {
'type': 'PUTn',
'count': 9,
'decoder_func': 'DoUniformMatrix3fv',
'unit_test': False,
},
'UniformMatrix3x2fv': {
'type': 'PUTn',
'count': 6,
'decoder_func': 'DoUniformMatrix3x2fv',
'es3': True,
},
'UniformMatrix3x4fv': {
'type': 'PUTn',
'count': 12,
'decoder_func': 'DoUniformMatrix3x4fv',
'es3': True,
},
'UniformMatrix4fv': {
'type': 'PUTn',
'count': 16,
'decoder_func': 'DoUniformMatrix4fv',
'unit_test': False,
},
'UniformMatrix4x2fv': {
'type': 'PUTn',
'count': 8,
'decoder_func': 'DoUniformMatrix4x2fv',
'es3': True,
},
'UniformMatrix4x3fv': {
'type': 'PUTn',
'count': 12,
'decoder_func': 'DoUniformMatrix4x3fv',
'es3': True,
},
'UniformBlockBinding': {
'type': 'Custom',
'impl_func': False,
'es3': True,
},
'UnmapBufferCHROMIUM': {
'type': 'NoCommand',
'extension': "CHROMIUM_pixel_transfer_buffer_object",
'trace_level': 1,
},
'UnmapBufferSubDataCHROMIUM': {
'type': 'NoCommand',
'extension': 'CHROMIUM_map_sub',
'pepper_interface': 'ChromiumMapSub',
'trace_level': 1,
},
'UnmapBuffer': {
'type': 'Custom',
'es3': True,
'trace_level': 1,
},
'UnmapTexSubImage2DCHROMIUM': {
'type': 'NoCommand',
'extension': "CHROMIUM_sub_image",
'pepper_interface': 'ChromiumMapSub',
'trace_level': 1,
},
'UseProgram': {
'type': 'Bind',
'decoder_func': 'DoUseProgram',
},
'ValidateProgram': {'decoder_func': 'DoValidateProgram'},
'VertexAttrib1f': {'decoder_func': 'DoVertexAttrib1f'},
'VertexAttrib1fv': {
'type': 'PUT',
'count': 1,
'decoder_func': 'DoVertexAttrib1fv',
},
'VertexAttrib2f': {'decoder_func': 'DoVertexAttrib2f'},
'VertexAttrib2fv': {
'type': 'PUT',
'count': 2,
'decoder_func': 'DoVertexAttrib2fv',
},
'VertexAttrib3f': {'decoder_func': 'DoVertexAttrib3f'},
'VertexAttrib3fv': {
'type': 'PUT',
'count': 3,
'decoder_func': 'DoVertexAttrib3fv',
},
'VertexAttrib4f': {'decoder_func': 'DoVertexAttrib4f'},
'VertexAttrib4fv': {
'type': 'PUT',
'count': 4,
'decoder_func': 'DoVertexAttrib4fv',
},
'VertexAttribI4i': {
'es3': True,
'decoder_func': 'DoVertexAttribI4i',
},
'VertexAttribI4iv': {
'type': 'PUT',
'count': 4,
'es3': True,
'decoder_func': 'DoVertexAttribI4iv',
},
'VertexAttribI4ui': {
'es3': True,
'decoder_func': 'DoVertexAttribI4ui',
},
'VertexAttribI4uiv': {
'type': 'PUT',
'count': 4,
'es3': True,
'decoder_func': 'DoVertexAttribI4uiv',
},
'VertexAttribIPointer': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLuint indx, GLintVertexAttribSize size, '
'GLenumVertexAttribIType type, GLsizei stride, '
'GLuint offset',
'client_test': False,
'es3': True,
},
'VertexAttribPointer': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLuint indx, GLintVertexAttribSize size, '
'GLenumVertexAttribType type, GLboolean normalized, '
'GLsizei stride, GLuint offset',
'client_test': False,
},
'WaitSync': {
'type': 'Custom',
'cmd_args': 'GLuint sync, GLbitfieldSyncFlushFlags flags, '
'GLuint64 timeout',
'impl_func': False,
'client_test': False,
'es3': True,
'trace_level': 1,
},
'Scissor': {
'type': 'StateSet',
'state': 'Scissor',
'decoder_func': 'DoScissor',
},
'Viewport': {
'impl_func': False,
'decoder_func': 'DoViewport',
},
'ResizeCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'client_test': False,
'cmd_args': 'GLint width, GLint height, GLfloat scale_factor, GLboolean '
'alpha, GLuint shm_id, GLuint shm_offset, GLsizei '
'color_space_size',
'extension': True,
'trace_level': 1,
},
'GetRequestableExtensionsCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'uint32_t bucket_id',
'extension': True,
},
'RequestExtensionCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'client_test': False,
'cmd_args': 'uint32_t bucket_id',
'extension': 'CHROMIUM_request_extension',
},
'CopyTextureCHROMIUM': {
'decoder_func': 'DoCopyTextureCHROMIUM',
'unit_test': False,
'extension': "CHROMIUM_copy_texture",
'trace_level': 2,
},
'CopySubTextureCHROMIUM': {
'decoder_func': 'DoCopySubTextureCHROMIUM',
'unit_test': False,
'extension': "CHROMIUM_copy_texture",
'trace_level': 2,
},
'TexStorage2DEXT': {
'unit_test': False,
'extension': 'EXT_texture_storage',
'extension_flag': 'ext_texture_storage',
'decoder_func': 'DoTexStorage2DEXT',
'trace_level': 2,
},
'DrawArraysInstancedANGLE': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count, '
'GLsizei primcount',
'extension': 'ANGLE_instanced_arrays',
'pepper_interface': 'InstancedArrays',
'defer_draws': True,
'trace_level': 2,
},
'DrawArraysInstancedBaseInstanceANGLE': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count, '
'GLsizei primcount, GLuint baseinstance',
'extension': 'ANGLE_base_vertex_base_instance',
'defer_draws': True,
'trace_level': 2,
},
'DrawBuffersEXT': {
'type': 'PUTn',
'decoder_func': 'DoDrawBuffersEXT',
'count': 1,
'unit_test': False,
    # Could use 'extension_flag': 'ext_draw_buffers', but this is currently
    # expected to work without it.
'extension': 'EXT_draw_buffers',
'pepper_interface': 'DrawBuffers',
'trace_level': 2,
},
'DrawElementsInstancedANGLE': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumDrawMode mode, GLsizei count, '
'GLenumIndexType type, GLuint index_offset, GLsizei primcount',
'extension': 'ANGLE_instanced_arrays',
'client_test': False,
'pepper_interface': 'InstancedArrays',
'defer_draws': True,
'trace_level': 2,
},
'DrawElementsInstancedBaseVertexBaseInstanceANGLE': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumDrawMode mode, GLsizei count, '
'GLenumIndexType type, GLuint index_offset, GLsizei primcount, '
'GLint basevertex, GLuint baseinstance',
'extension': 'ANGLE_base_vertex_base_instance',
'client_test': False,
'defer_draws': True,
'trace_level': 2,
},
'VertexAttribDivisorANGLE': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLuint index, GLuint divisor',
'extension': 'ANGLE_instanced_arrays',
'pepper_interface': 'InstancedArrays',
},
'GenQueriesEXT': {
'type': 'GENn',
'gl_test_func': 'glGenQueriesARB',
'resource_type': 'Query',
'resource_types': 'Queries',
'unit_test': False,
'pepper_interface': 'Query',
'not_shared': 'True',
'extension': "occlusion_query_EXT",
},
'DeleteQueriesEXT': {
'type': 'DELn',
'gl_test_func': 'glDeleteQueriesARB',
'resource_type': 'Query',
'resource_types': 'Queries',
'unit_test': False,
'pepper_interface': 'Query',
'extension': "occlusion_query_EXT",
},
'IsQueryEXT': {
'type': 'NoCommand',
'pepper_interface': 'Query',
'extension': "occlusion_query_EXT",
},
'BeginQueryEXT': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumQueryTarget target, GLidQuery id, void* sync_data',
'data_transfer_methods': ['shm'],
'gl_test_func': 'glBeginQuery',
'pepper_interface': 'Query',
'extension': "occlusion_query_EXT",
},
'BeginTransformFeedback': {
'decoder_func': 'DoBeginTransformFeedback',
'unit_test': False,
'es3': True,
},
'EndQueryEXT': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLenumQueryTarget target, GLuint submit_count',
    'gl_test_func': 'glEndQuery',
'client_test': False,
'pepper_interface': 'Query',
'extension': "occlusion_query_EXT",
},
'EndTransformFeedback': {
'decoder_func': 'DoEndTransformFeedback',
'unit_test': False,
'es3': True,
},
'FlushDriverCachesCHROMIUM': {
'decoder_func': 'DoFlushDriverCachesCHROMIUM',
'unit_test': False,
'extension': True,
'trace_level': 1,
},
'GetQueryivEXT': {
'type': 'NoCommand',
'gl_test_func': 'glGetQueryiv',
'pepper_interface': 'Query',
'extension': "occlusion_query_EXT",
},
  'QueryCounterEXT': {
'type': 'Custom',
'impl_func': False,
'cmd_args': 'GLidQuery id, GLenumQueryTarget target, '
'void* sync_data, GLuint submit_count',
'data_transfer_methods': ['shm'],
'gl_test_func': 'glQueryCounter',
'extension': "disjoint_timer_query_EXT",
},
'GetQueryObjectivEXT': {
'type': 'NoCommand',
'gl_test_func': 'glGetQueryObjectiv',
'extension': "disjoint_timer_query_EXT",
},
'GetQueryObjectuivEXT': {
'type': 'NoCommand',
'gl_test_func': 'glGetQueryObjectuiv',
'pepper_interface': 'Query',
'extension': "occlusion_query_EXT",
},
'GetQueryObjecti64vEXT': {
'type': 'NoCommand',
'gl_test_func': 'glGetQueryObjecti64v',
'extension': "disjoint_timer_query_EXT",
},
'GetQueryObjectui64vEXT': {
'type': 'NoCommand',
'gl_test_func': 'glGetQueryObjectui64v',
'extension': "disjoint_timer_query_EXT",
},
'SetDisjointValueSyncCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'data_transfer_methods': ['shm'],
'client_test': False,
'cmd_args': 'void* sync_data',
'extension': True,
},
'BindFragDataLocationEXT': {
'type': 'GLchar',
'data_transfer_methods': ['bucket'],
'needs_size': True,
'gl_test_func': 'DoBindFragDataLocationEXT',
'extension': 'EXT_blend_func_extended',
'extension_flag': 'ext_blend_func_extended',
},
'BindFragDataLocationIndexedEXT': {
'type': 'GLchar',
'data_transfer_methods': ['bucket'],
'needs_size': True,
'gl_test_func': 'DoBindFragDataLocationIndexedEXT',
'extension': 'EXT_blend_func_extended',
'extension_flag': 'ext_blend_func_extended',
},
'BindUniformLocationCHROMIUM': {
'type': 'GLchar',
'extension': 'CHROMIUM_bind_uniform_location',
'data_transfer_methods': ['bucket'],
'needs_size': True,
'gl_test_func': 'DoBindUniformLocationCHROMIUM',
},
'InsertEventMarkerEXT': {
'type': 'GLcharN',
'decoder_func': 'DoInsertEventMarkerEXT',
'expectation': False,
'extension': 'EXT_debug_marker',
},
'PushGroupMarkerEXT': {
'type': 'GLcharN',
'decoder_func': 'DoPushGroupMarkerEXT',
'expectation': False,
'extension': 'EXT_debug_marker',
},
'PopGroupMarkerEXT': {
'decoder_func': 'DoPopGroupMarkerEXT',
'expectation': False,
'extension': 'EXT_debug_marker',
'impl_func': False,
},
'GenVertexArraysOES': {
'type': 'GENn',
'extension': 'OES_vertex_array_object',
'gl_test_func': 'glGenVertexArraysOES',
'resource_type': 'VertexArray',
'resource_types': 'VertexArrays',
'unit_test': False,
'pepper_interface': 'VertexArrayObject',
'not_shared': 'True',
},
'BindVertexArrayOES': {
'type': 'Bind',
'extension': 'OES_vertex_array_object',
'gl_test_func': 'glBindVertexArrayOES',
'decoder_func': 'DoBindVertexArrayOES',
'gen_func': 'GenVertexArraysOES',
'unit_test': False,
'client_test': False,
'pepper_interface': 'VertexArrayObject',
},
'DeleteVertexArraysOES': {
'type': 'DELn',
'extension': 'OES_vertex_array_object',
'gl_test_func': 'glDeleteVertexArraysOES',
'resource_type': 'VertexArray',
'resource_types': 'VertexArrays',
'unit_test': False,
'pepper_interface': 'VertexArrayObject',
},
'IsVertexArrayOES': {
'type': 'Is',
'extension': 'OES_vertex_array_object',
'gl_test_func': 'glIsVertexArrayOES',
'decoder_func': 'DoIsVertexArrayOES',
'unit_test': False,
'pepper_interface': 'VertexArrayObject',
},
'BindTexImage2DCHROMIUM': {
'decoder_func': 'DoBindTexImage2DCHROMIUM',
'unit_test': False,
'extension': "CHROMIUM_image",
},
'BindTexImage2DWithInternalformatCHROMIUM': {
'decoder_func': 'DoBindTexImage2DWithInternalformatCHROMIUM',
'unit_test': False,
'extension': "CHROMIUM_image",
},
'ReleaseTexImage2DCHROMIUM': {
'decoder_func': 'DoReleaseTexImage2DCHROMIUM',
'unit_test': False,
'extension': "CHROMIUM_image",
},
'ShallowFinishCHROMIUM': {
'type': 'NoCommand',
'extension': 'CHROMIUM_ordering_barrier',
},
'OrderingBarrierCHROMIUM': {
'type': 'NoCommand',
'extension': 'CHROMIUM_ordering_barrier',
},
'TraceBeginCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'client_test': False,
'cmd_args': 'GLuint category_bucket_id, GLuint name_bucket_id',
'extension': 'CHROMIUM_trace_marker',
},
'TraceEndCHROMIUM': {
'impl_func': False,
'client_test': False,
'decoder_func': 'DoTraceEndCHROMIUM',
'unit_test': False,
'extension': 'CHROMIUM_trace_marker',
},
'SetActiveURLCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'client_test': False,
'cmd_args': 'GLuint url_bucket_id',
'extension': True,
'chromium': True,
},
'DiscardFramebufferEXT': {
'type': 'PUTn',
'count': 1,
'decoder_func': 'DoDiscardFramebufferEXT',
'unit_test': False,
'extension': 'EXT_discard_framebuffer',
'extension_flag': 'ext_discard_framebuffer',
'trace_level': 2,
},
'LoseContextCHROMIUM': {
'decoder_func': 'DoLoseContextCHROMIUM',
'unit_test': False,
'extension': 'CHROMIUM_lose_context',
'trace_level': 1,
},
'DiscardBackbufferCHROMIUM': {
'type': 'Custom',
'extension': True,
'trace_level': 2,
},
'ScheduleOverlayPlaneCHROMIUM': {
'type': 'Custom',
'client_test': False,
'extension': 'CHROMIUM_schedule_overlay_plane',
},
'ScheduleCALayerSharedStateCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'client_test': False,
'cmd_args': 'GLfloat opacity, GLboolean is_clipped, '
'GLint sorting_context_id, '
'GLuint shm_id, GLuint shm_offset',
'extension': 'CHROMIUM_schedule_ca_layer',
},
'ScheduleCALayerCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'client_test': False,
'cmd_args': 'GLuint contents_texture_id, GLuint background_color, '
'GLuint edge_aa_mask, GLuint filter, GLuint shm_id, '
'GLuint shm_offset',
'extension': 'CHROMIUM_schedule_ca_layer',
},
'ScheduleCALayerInUseQueryCHROMIUM': {
'type': 'PUTn',
'count': 1,
'decoder_func': 'DoScheduleCALayerInUseQueryCHROMIUM',
'cmd_args': 'GLsizei count, const GLuint* textures',
'extension': 'CHROMIUM_schedule_ca_layer',
'unit_test': False,
},
'ScheduleDCLayerCHROMIUM': {
'cmd_args': 'GLuint texture_0, GLuint texture_1, GLint z_order, '
'GLint content_x, GLint content_y, GLint content_width, '
'GLint content_height, GLint quad_x, GLint quad_y, '
'GLint quad_width, GLint quad_height, '
'GLfloat transform_c1r1, GLfloat transform_c2r1, '
'GLfloat transform_c1r2, GLfloat transform_c2r2, '
'GLfloat transform_tx, GLfloat transform_ty, '
'GLboolean is_clipped, GLint clip_x, GLint clip_y, '
'GLint clip_width, GLint clip_height, '
'GLuint protected_video_type',
'decoder_func': 'DoScheduleDCLayerCHROMIUM',
'extension': 'CHROMIUM_schedule_dc_layer',
'unit_test': False,
},
'CommitOverlayPlanesCHROMIUM': {
'impl_func': False,
'decoder_func': 'DoCommitOverlayPlanes',
'unit_test': False,
'client_test': False,
'extension': 'CHROMIUM_commit_overlay_planes',
},
'SetDrawRectangleCHROMIUM': {
'decoder_func': 'DoSetDrawRectangleCHROMIUM',
'unit_test': False,
'extension': 'CHROMIUM_set_draw_rectangle',
},
'SetEnableDCLayersCHROMIUM': {
'decoder_func': 'DoSetEnableDCLayersCHROMIUM',
'unit_test': False,
'extension': 'CHROMIUM_dc_layers',
},
'InitializeDiscardableTextureCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLuint texture_id, uint32_t shm_id, '
'uint32_t shm_offset',
'impl_func': False,
'client_test': False,
'extension': True,
},
'UnlockDiscardableTextureCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLuint texture_id',
'impl_func': False,
'client_test': False,
'extension': True,
},
'LockDiscardableTextureCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLuint texture_id',
'impl_func': False,
'client_test': False,
'extension': True,
},
'TexStorage2DImageCHROMIUM': {
'decoder_func': 'DoTexStorage2DImageCHROMIUM',
'unit_test': False,
'extension': 'CHROMIUM_texture_storage_image',
'extension_flag': 'chromium_texture_storage_image',
},
'SetColorSpaceMetadataCHROMIUM': {
'type': 'Custom',
'impl_func': False,
'client_test': False,
'cmd_args': 'GLuint texture_id, GLuint shm_id, GLuint shm_offset, '
'GLsizei color_space_size',
'extension': 'CHROMIUM_color_space_metadata',
},
'WindowRectanglesEXT': {
'type': 'PUTn',
'count': 4,
'decoder_func': 'DoWindowRectanglesEXT',
'unit_test': False,
'extension': 'EXT_window_rectangles',
'extension_flag': 'ext_window_rectangles',
'es3': True,
},
'CreateGpuFenceCHROMIUM': {
'type': 'NoCommand',
'impl_func': False,
'cmd_args': 'void',
'result': ['GLuint'],
'extension': 'CHROMIUM_gpu_fence',
},
'CreateGpuFenceINTERNAL': {
'type': 'Custom',
'cmd_args': 'GLuint gpu_fence_id',
'extension': 'CHROMIUM_gpu_fence',
'extension_flag': 'chromium_gpu_fence',
'internal': True,
},
'CreateClientGpuFenceCHROMIUM': {
'type': 'NoCommand',
'impl_func': False,
'cmd_args': 'ClientGpuFence source',
'result': ['GLuint'],
'extension': 'CHROMIUM_gpu_fence',
'extension_flag': 'chromium_gpu_fence',
},
'WaitGpuFenceCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLuint gpu_fence_id',
'extension': 'CHROMIUM_gpu_fence',
'extension_flag': 'chromium_gpu_fence',
},
'DestroyGpuFenceCHROMIUM': {
'type': 'Custom',
'cmd_args': 'GLuint gpu_fence_id',
'extension': 'CHROMIUM_gpu_fence',
'extension_flag': 'chromium_gpu_fence',
},
'InvalidateReadbackBufferShadowDataCHROMIUM': {
'type': 'NoCommand',
'impl_func': False,
'es3': True,
'extension': 'CHROMIUM_nonblocking_readback',
},
'SetReadbackBufferShadowAllocationINTERNAL': {
'decoder_func': 'DoSetReadbackBufferShadowAllocationINTERNAL',
'client_test': False,
'unit_test': False,
'impl_func': True,
'internal': True,
'es3': True,
},
'FramebufferParameteri': {
'decoder_func': 'DoFramebufferParameteri',
'unit_test': False,
'extension': 'MESA_framebuffer_flip_y',
'extension_flag': 'mesa_framebuffer_flip_y',
},
'FramebufferTextureMultiviewOVR': {
'decoder_func': 'DoFramebufferTextureMultiviewOVR',
'unit_test': False,
'extension': 'OVR_multiview2',
'extension_flag': 'ovr_multiview2',
'trace_level': 1,
'es3': True
},
'MaxShaderCompilerThreadsKHR': {
'cmd_args': 'GLuint count',
'unit_test': False,
'client_test': False,
    'extension': 'KHR_parallel_shader_compile',
'extension_flag': 'khr_parallel_shader_compile',
},
'CreateAndTexStorage2DSharedImageCHROMIUM': {
'type': 'NoCommand',
'extension': "CHROMIUM_shared_image",
'trace_level': 2,
},
'CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM': {
'type': 'NoCommand',
'extension': "CHROMIUM_shared_image",
'trace_level': 2,
},
'CreateAndTexStorage2DSharedImageINTERNAL': {
'decoder_func': 'DoCreateAndTexStorage2DSharedImageINTERNAL',
'internal': True,
'type': 'PUT',
'count': 16, # GL_MAILBOX_SIZE_CHROMIUM
'impl_func': False,
'unit_test': False,
'trace_level': 2,
},
'BeginSharedImageAccessDirectCHROMIUM': {
'decoder_func': 'DoBeginSharedImageAccessDirectCHROMIUM',
'extension': 'CHROMIUM_shared_image',
'unit_test': False,
'client_test': False,
'cmd_args': 'GLuint texture, GLenumSharedImageAccessMode mode',
},
'EndSharedImageAccessDirectCHROMIUM': {
'decoder_func': 'DoEndSharedImageAccessDirectCHROMIUM',
'extension': 'CHROMIUM_shared_image',
'unit_test': False,
},
'BeginBatchReadAccessSharedImageCHROMIUM': {
'decoder_func': 'DoBeginBatchReadAccessSharedImageCHROMIUM',
'extension': 'CHROMIUM_shared_image',
'unit_test': False,
},
'EndBatchReadAccessSharedImageCHROMIUM': {
'decoder_func': 'DoEndBatchReadAccessSharedImageCHROMIUM',
'extension': 'CHROMIUM_shared_image',
'unit_test': False,
}
}
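# A minimal sketch (not part of the generator itself) of how the table above
# can be inspected, e.g. to list the commands gated behind a runtime extension
# flag. Only the keys already used in the entries above ('extension',
# 'extension_flag') are assumed here; the loop itself is illustrative only.
#
#     for name, info in sorted(_FUNCTION_INFO.items()):
#         if 'extension_flag' in info:
#             print("%s is gated by %s" % (name, info['extension_flag']))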
def main(argv):
"""This is the main function."""
parser = OptionParser()
parser.add_option(
"--output-dir",
help="Output directory for generated files. Defaults to chromium root "
"directory.")
parser.add_option(
"-v", "--verbose", action="store_true", help="Verbose logging output.")
parser.add_option(
"-c", "--check", action="store_true",
help="Check if output files match generated files in chromium root "
"directory. Use this in PRESUBMIT scripts with --output-dir.")
(options, _) = parser.parse_args(args=argv)
  # Add in states and capabilities to GLState
gl_state_valid = _NAMED_TYPE_INFO['GLState']['valid']
gl_state_valid_es3 = _NAMED_TYPE_INFO['GLState']['valid_es3']
for state_name in sorted(build_cmd_buffer_lib._STATE_INFO):
state = build_cmd_buffer_lib._STATE_INFO[state_name]
if 'extension_flag' in state:
continue
if 'enum' in state:
if not state['enum'] in gl_state_valid:
gl_state_valid.append(state['enum'])
else:
for item in state['states']:
if 'extension_flag' in item:
continue
if 'es3' in item:
assert item['es3']
if not item['enum'] in gl_state_valid_es3:
gl_state_valid_es3.append(item['enum'])
else:
if not item['enum'] in gl_state_valid:
gl_state_valid.append(item['enum'])
for capability in build_cmd_buffer_lib._CAPABILITY_FLAGS:
if 'extension_flag' in capability:
continue
valid_value = "GL_%s" % capability['name'].upper()
if not valid_value in gl_state_valid:
gl_state_valid.append(valid_value)
# This script lives under src/gpu/command_buffer.
script_dir = os.path.dirname(os.path.abspath(__file__))
assert script_dir.endswith(os.path.normpath("src/gpu/command_buffer"))
# os.path.join doesn't do the right thing with relative paths.
chromium_root_dir = os.path.abspath(script_dir + "/../..")
# Support generating files under gen/ and for PRESUBMIT.
if options.output_dir:
output_dir = options.output_dir
else:
output_dir = chromium_root_dir
os.chdir(output_dir)
build_cmd_buffer_lib.InitializePrefix("GLES2")
gen = build_cmd_buffer_lib.GLGenerator(
options.verbose, "2014", _FUNCTION_INFO, _NAMED_TYPE_INFO,
chromium_root_dir)
gen.ParseGLH("gpu/command_buffer/gles2_cmd_buffer_functions.txt")
gen.WritePepperGLES2Interface("ppapi/api/ppb_opengles2.idl", False)
gen.WritePepperGLES2Interface("ppapi/api/dev/ppb_opengles2ext_dev.idl", True)
gen.WriteGLES2ToPPAPIBridge("ppapi/lib/gl/gles2/gles2.c")
gen.WritePepperGLES2Implementation(
"ppapi/shared_impl/ppb_opengles2_shared.cc")
gen.WriteCommandIds("gpu/command_buffer/common/gles2_cmd_ids_autogen.h")
gen.WriteFormat("gpu/command_buffer/common/gles2_cmd_format_autogen.h")
gen.WriteFormatTest(
"gpu/command_buffer/common/gles2_cmd_format_test_autogen.h")
gen.WriteGLES2InterfaceHeader(
"gpu/command_buffer/client/gles2_interface_autogen.h")
gen.WriteGLES2InterfaceStub(
"gpu/command_buffer/client/gles2_interface_stub_autogen.h")
gen.WriteGLES2InterfaceStubImpl(
"gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h")
gen.WriteGLES2ImplementationHeader(
"gpu/command_buffer/client/gles2_implementation_autogen.h")
gen.WriteGLES2Implementation(
"gpu/command_buffer/client/gles2_implementation_impl_autogen.h")
gen.WriteGLES2ImplementationUnitTests(
"gpu/command_buffer/client/gles2_implementation_unittest_autogen.h")
gen.WriteGLES2TraceImplementationHeader(
"gpu/command_buffer/client/gles2_trace_implementation_autogen.h")
gen.WriteGLES2TraceImplementation(
"gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h")
gen.WriteGLES2CLibImplementation(
"gpu/command_buffer/client/gles2_c_lib_autogen.h")
gen.WriteCmdHelperHeader(
"gpu/command_buffer/client/gles2_cmd_helper_autogen.h")
gen.WriteServiceImplementation(
"gpu/command_buffer/service/gles2_cmd_decoder_autogen.h")
gen.WritePassthroughServiceImplementation(
"gpu/command_buffer/service/" +
"gles2_cmd_decoder_passthrough_handlers_autogen.cc")
gen.WriteServiceContextStateHeader(
"gpu/command_buffer/service/context_state_autogen.h")
gen.WriteServiceContextStateImpl(
"gpu/command_buffer/service/context_state_impl_autogen.h")
gen.WriteServiceContextStateTestHelpers(
"gpu/command_buffer/service/context_state_test_helpers_autogen.h")
gen.WriteClientContextStateHeader(
"gpu/command_buffer/client/client_context_state_autogen.h")
gen.WriteClientContextStateImpl(
"gpu/command_buffer/client/client_context_state_impl_autogen.h")
gen.WriteServiceUnitTests(
"gpu/command_buffer/service/gles2_cmd_decoder_unittest_%d_autogen.h")
gen.WriteServiceUnitTestsForExtensions(
"gpu/command_buffer/service/"
"gles2_cmd_decoder_unittest_extensions_autogen.h")
gen.WriteServiceUtilsHeader(
"gpu/command_buffer/service/gles2_cmd_validation_autogen.h")
gen.WriteServiceUtilsImplementation(
"gpu/command_buffer/service/"
"gles2_cmd_validation_implementation_autogen.h")
gen.WriteCommonUtilsHeader(
"gpu/command_buffer/common/gles2_cmd_utils_autogen.h")
gen.WriteCommonUtilsImpl(
"gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h")
gen.WriteGLES2Header("gpu/GLES2/gl2chromium_autogen.h")
build_cmd_buffer_lib.Format(gen.generated_cpp_filenames, output_dir,
chromium_root_dir)
if gen.errors > 0:
print "build_gles2_cmd_buffer.py: Failed with %d errors" % gen.errors
return 1
check_failed_filenames = []
if options.check:
for filename in gen.generated_cpp_filenames:
if not filecmp.cmp(os.path.join(output_dir, filename),
os.path.join(chromium_root_dir, filename)):
check_failed_filenames.append(filename)
if len(check_failed_filenames) > 0:
    print('Please run gpu/command_buffer/build_gles2_cmd_buffer.py')
    print('Failed check on autogenerated command buffer files:')
for filename in check_failed_filenames:
      print(filename)
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
chromium/chromium
|
gpu/command_buffer/build_gles2_cmd_buffer.py
|
Python
|
bsd-3-clause
| 116,487
|
from __future__ import print_function
import os.path
from binding import LLVM_VERSION
above_33 = ("MC",)  # subpackages that are only importable when LLVM is newer than 3.3
last_mtime = 0
def _init(root=__name__, file=__file__):
"""
    This is reimported in every subpackage and must be run in each
    subpackage's __init__.py.
"""
global last_mtime
base = os.path.dirname(file)
for fname in sorted(os.listdir(base)):
is_python_script = fname.endswith('.py') or fname.endswith('.pyc')
is_init_script = fname.startswith('__init__')
is_directory = os.path.isdir(os.path.join(base, fname))
is_python_module = is_directory and not fname.startswith('__')
if (is_python_module or is_python_script) and not is_init_script:
#print(fname)
if fname in above_33 and LLVM_VERSION <= (3, 3):
print("skip %s because llvm version is not above 3.3" % fname)
continue
modname = os.path.basename(fname).rsplit('.', 1)[0]
#importlib.import_module('.' + modname, __name__)
__import__('.'.join([root, modname]))
mtime = os.path.getmtime(os.path.join(base, fname))
last_mtime = max(last_mtime, mtime)
_init()
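# Illustrative sketch of the pattern described in the docstring above: each
# subpackage's __init__.py re-imports this helper and runs it with its own
# name and path (the subpackage name below is hypothetical):
#
#     # <this package>/somepkg/__init__.py
#     from .. import _init
#     _init(root=__name__, file=__file__)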
|
llvmpy/llvmpy
|
llvmpy/src/__init__.py
|
Python
|
bsd-3-clause
| 1,204
|
# Copyright (C) 2004-2006 Python Software Foundation
# Authors: Baxter, Wouters and Warsaw
# Contact: email-sig@python.org
"""FeedParser - An email feed parser.
The feed parser implements an interface for incrementally parsing an email
message, line by line. This has advantages for certain applications, such as
those reading email messages off a socket.
FeedParser.feed() is the primary interface for pushing new data into the
parser. It returns when there's nothing more it can do with the available
data. When you have no more data to push into the parser, call .close().
This completes the parsing and returns the root message object.
The other advantage of this parser is that it will never raise a parsing
exception. Instead, when it finds something unexpected, it adds a 'defect' to
the current message. Defects are just instances that live on the message
object's .defects attribute.
"""
__all__ = ['FeedParser', 'BytesFeedParser']
import re
from email import errors
from email._policybase import compat32
from collections import deque
from io import StringIO
NLCRE = re.compile(r'\r\n|\r|\n')
NLCRE_bol = re.compile(r'(\r\n|\r|\n)')
NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z')
NLCRE_crack = re.compile(r'(\r\n|\r|\n)')
# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
# except controls, SP, and ":".
headerRE = re.compile(r'^(From |[\041-\071\073-\176]*:|[\t ])')
EMPTYSTRING = ''
NL = '\n'
NeedMoreData = object()
class BufferedSubFile(object):
"""A file-ish object that can have new data loaded into it.
You can also push and pop line-matching predicates onto a stack. When the
current predicate matches the current line, a false EOF response
(i.e. empty string) is returned instead. This lets the parser adhere to a
simple abstraction -- it parses until EOF closes the current message.
"""
def __init__(self):
# Text stream of the last partial line pushed into this object.
# See issue 22233 for why this is a text stream and not a list.
self._partial = StringIO(newline='')
# A deque of full, pushed lines
self._lines = deque()
# The stack of false-EOF checking predicates.
self._eofstack = []
# A flag indicating whether the file has been closed or not.
self._closed = False
def push_eof_matcher(self, pred):
self._eofstack.append(pred)
def pop_eof_matcher(self):
return self._eofstack.pop()
def close(self):
# Don't forget any trailing partial line.
self._partial.seek(0)
self.pushlines(self._partial.readlines())
self._partial.seek(0)
self._partial.truncate()
self._closed = True
def readline(self):
if not self._lines:
if self._closed:
return ''
return NeedMoreData
# Pop the line off the stack and see if it matches the current
# false-EOF predicate.
line = self._lines.popleft()
# RFC 2046, section 5.1.2 requires us to recognize outer level
# boundaries at any level of inner nesting. Do this, but be sure it's
# in the order of most to least nested.
for ateof in reversed(self._eofstack):
if ateof(line):
# We're at the false EOF. But push the last line back first.
self._lines.appendleft(line)
return ''
return line
def unreadline(self, line):
# Let the consumer push a line back into the buffer.
assert line is not NeedMoreData
self._lines.appendleft(line)
def push(self, data):
"""Push some new data into this object."""
self._partial.write(data)
if '\n' not in data and '\r' not in data:
# No new complete lines, wait for more.
return
# Crack into lines, preserving the linesep characters.
self._partial.seek(0)
parts = self._partial.readlines()
self._partial.seek(0)
self._partial.truncate()
# If the last element of the list does not end in a newline, then treat
# it as a partial line. We only check for '\n' here because a line
# ending with '\r' might be a line that was split in the middle of a
# '\r\n' sequence (see bugs 1555570 and 1721862).
if not parts[-1].endswith('\n'):
self._partial.write(parts.pop())
self.pushlines(parts)
def pushlines(self, lines):
self._lines.extend(lines)
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if line == '':
raise StopIteration
return line
class FeedParser:
"""A feed-style parser of email."""
def __init__(self, _factory=None, *, policy=compat32):
"""_factory is called with no arguments to create a new message obj
The policy keyword specifies a policy object that controls a number of
aspects of the parser's operation. The default policy maintains
backward compatibility.
"""
self.policy = policy
self._old_style_factory = False
if _factory is None:
if policy.message_factory is None:
from email.message import Message
self._factory = Message
else:
self._factory = policy.message_factory
else:
self._factory = _factory
try:
_factory(policy=self.policy)
except TypeError:
# Assume this is an old-style factory
self._old_style_factory = True
self._input = BufferedSubFile()
self._msgstack = []
self._parse = self._parsegen().__next__
self._cur = None
self._last = None
self._headersonly = False
# Non-public interface for supporting Parser's headersonly flag
def _set_headersonly(self):
self._headersonly = True
def feed(self, data):
"""Push more data into the parser."""
self._input.push(data)
self._call_parse()
def _call_parse(self):
try:
self._parse()
except StopIteration:
pass
def close(self):
"""Parse all remaining data and return the root message object."""
self._input.close()
self._call_parse()
root = self._pop_message()
assert not self._msgstack
# Look for final set of defects
if root.get_content_maintype() == 'multipart' \
and not root.is_multipart():
defect = errors.MultipartInvariantViolationDefect()
self.policy.handle_defect(root, defect)
return root
def _new_message(self):
if self._old_style_factory:
msg = self._factory()
else:
msg = self._factory(policy=self.policy)
if self._cur and self._cur.get_content_type() == 'multipart/digest':
msg.set_default_type('message/rfc822')
if self._msgstack:
self._msgstack[-1].attach(msg)
self._msgstack.append(msg)
self._cur = msg
self._last = msg
def _pop_message(self):
retval = self._msgstack.pop()
if self._msgstack:
self._cur = self._msgstack[-1]
else:
self._cur = None
return retval
def _parsegen(self):
# Create a new message and start by parsing headers.
self._new_message()
headers = []
# Collect the headers, searching for a line that doesn't match the RFC
# 2822 header or continuation pattern (including an empty line).
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
if not headerRE.match(line):
# If we saw the RFC defined header/body separator
# (i.e. newline), just throw it away. Otherwise the line is
# part of the body so push it back.
if not NLCRE.match(line):
defect = errors.MissingHeaderBodySeparatorDefect()
self.policy.handle_defect(self._cur, defect)
self._input.unreadline(line)
break
headers.append(line)
# Done with the headers, so parse them and figure out what we're
# supposed to see in the body of the message.
self._parse_headers(headers)
# Headers-only parsing is a backwards compatibility hack, which was
# necessary in the older parser, which could raise errors. All
# remaining lines in the input are thrown into the message body.
if self._headersonly:
lines = []
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == '':
break
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
if self._cur.get_content_type() == 'message/delivery-status':
# message/delivery-status contains blocks of headers separated by
# a blank line. We'll represent each header block as a separate
# nested message object, but the processing is a bit different
# than standard message/* types because there is no body for the
# nested messages. A blank line separates the subparts.
while True:
self._input.push_eof_matcher(NLCRE.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
msg = self._pop_message()
# We need to pop the EOF matcher in order to tell if we're at
# the end of the current file, not the end of the last block
# of message headers.
self._input.pop_eof_matcher()
# The input stream must be sitting at the newline or at the
# EOF. We want to see if we're at the end of this subpart, so
# first consume the blank line, then test the next line to see
# if we're at this subpart's EOF.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
if line == '':
break
# Not at EOF so this is a line we're going to need.
self._input.unreadline(line)
return
if self._cur.get_content_maintype() == 'message':
# The message claims to be a message/* type, then what follows is
# another RFC 2822 message.
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
self._pop_message()
return
if self._cur.get_content_maintype() == 'multipart':
boundary = self._cur.get_boundary()
if boundary is None:
# The message /claims/ to be a multipart but it has not
# defined a boundary. That's a problem which we'll handle by
# reading everything until the EOF and marking the message as
# defective.
defect = errors.NoBoundaryInMultipartDefect()
self.policy.handle_defect(self._cur, defect)
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
            # Make sure a valid content-transfer-encoding was specified per RFC 2045, section 6.4.
if (self._cur.get('content-transfer-encoding', '8bit').lower()
not in ('7bit', '8bit', 'binary')):
defect = errors.InvalidMultipartContentTransferEncodingDefect()
self.policy.handle_defect(self._cur, defect)
# Create a line match predicate which matches the inter-part
# boundary as well as the end-of-multipart boundary. Don't push
# this onto the input stream until we've scanned past the
# preamble.
separator = '--' + boundary
boundaryre = re.compile(
'(?P<sep>' + re.escape(separator) +
r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
capturing_preamble = True
preamble = []
linesep = False
close_boundary_seen = False
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == '':
break
mo = boundaryre.match(line)
if mo:
# If we're looking at the end boundary, we're done with
# this multipart. If there was a newline at the end of
# the closing boundary, then we need to initialize the
# epilogue with the empty string (see below).
if mo.group('end'):
close_boundary_seen = True
linesep = mo.group('linesep')
break
# We saw an inter-part boundary. Were we in the preamble?
if capturing_preamble:
if preamble:
# According to RFC 2046, the last newline belongs
# to the boundary.
lastline = preamble[-1]
eolmo = NLCRE_eol.search(lastline)
if eolmo:
preamble[-1] = lastline[:-len(eolmo.group(0))]
self._cur.preamble = EMPTYSTRING.join(preamble)
capturing_preamble = False
self._input.unreadline(line)
continue
# We saw a boundary separating two parts. Consume any
# multiple boundary lines that may be following. Our
# interpretation of RFC 2046 BNF grammar does not produce
# body parts within such double boundaries.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
mo = boundaryre.match(line)
if not mo:
self._input.unreadline(line)
break
# Recurse to parse this subpart; the input stream points
# at the subpart's first line.
self._input.push_eof_matcher(boundaryre.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
# Because of RFC 2046, the newline preceding the boundary
# separator actually belongs to the boundary, not the
# previous subpart's payload (or epilogue if the previous
# part is a multipart).
if self._last.get_content_maintype() == 'multipart':
epilogue = self._last.epilogue
if epilogue == '':
self._last.epilogue = None
elif epilogue is not None:
mo = NLCRE_eol.search(epilogue)
if mo:
end = len(mo.group(0))
self._last.epilogue = epilogue[:-end]
else:
payload = self._last._payload
if isinstance(payload, str):
mo = NLCRE_eol.search(payload)
if mo:
payload = payload[:-len(mo.group(0))]
self._last._payload = payload
self._input.pop_eof_matcher()
self._pop_message()
# Set the multipart up for newline cleansing, which will
# happen if we're in a nested multipart.
self._last = self._cur
else:
# I think we must be in the preamble
assert capturing_preamble
preamble.append(line)
# We've seen either the EOF or the end boundary. If we're still
# capturing the preamble, we never saw the start boundary. Note
# that as a defect and store the captured text as the payload.
if capturing_preamble:
defect = errors.StartBoundaryNotFoundDefect()
self.policy.handle_defect(self._cur, defect)
self._cur.set_payload(EMPTYSTRING.join(preamble))
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# If we're not processing the preamble, then we might have seen
# EOF without seeing that end boundary...that is also a defect.
if not close_boundary_seen:
defect = errors.CloseBoundaryNotFoundDefect()
self.policy.handle_defect(self._cur, defect)
return
# Everything from here to the EOF is epilogue. If the end boundary
# ended in a newline, we'll need to make sure the epilogue isn't
# None
if linesep:
epilogue = ['']
else:
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
epilogue.append(line)
# Any CRLF at the front of the epilogue is not technically part of
# the epilogue. Also, watch out for an empty string epilogue,
# which means a single newline.
if epilogue:
firstline = epilogue[0]
bolmo = NLCRE_bol.match(firstline)
if bolmo:
epilogue[0] = firstline[len(bolmo.group(0)):]
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# Otherwise, it's some non-multipart type, so the entire rest of the
# file contents becomes the payload.
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
def _parse_headers(self, lines):
# Passed a list of lines that make up the headers for the current msg
lastheader = ''
lastvalue = []
for lineno, line in enumerate(lines):
# Check for continuation
if line[0] in ' \t':
if not lastheader:
# The first line of the headers was a continuation. This
# is illegal, so let's note the defect, store the illegal
# line, and ignore it for purposes of headers.
defect = errors.FirstHeaderLineIsContinuationDefect(line)
self.policy.handle_defect(self._cur, defect)
continue
lastvalue.append(line)
continue
if lastheader:
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
lastheader, lastvalue = '', []
# Check for envelope header, i.e. unix-from
if line.startswith('From '):
if lineno == 0:
# Strip off the trailing newline
mo = NLCRE_eol.search(line)
if mo:
line = line[:-len(mo.group(0))]
self._cur.set_unixfrom(line)
continue
elif lineno == len(lines) - 1:
# Something looking like a unix-from at the end - it's
# probably the first line of the body, so push back the
# line and stop.
self._input.unreadline(line)
return
else:
# Weirdly placed unix-from line. Note this as a defect
# and ignore it.
defect = errors.MisplacedEnvelopeHeaderDefect(line)
self._cur.defects.append(defect)
continue
# Split the line on the colon separating field name from value.
# There will always be a colon, because if there wasn't the part of
# the parser that calls us would have started parsing the body.
i = line.find(':')
# If the colon is on the start of the line the header is clearly
# malformed, but we might be able to salvage the rest of the
# message. Track the error but keep going.
if i == 0:
defect = errors.InvalidHeaderDefect("Missing header name.")
self._cur.defects.append(defect)
continue
assert i>0, "_parse_headers fed line with no : and no leading WS"
lastheader = line[:i]
lastvalue = [line]
# Done with all the lines, so handle the last header.
if lastheader:
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
class BytesFeedParser(FeedParser):
"""Like FeedParser, but feed accepts bytes."""
def feed(self, data):
super().feed(data.decode('ascii', 'surrogateescape'))
|
prefetchnta/questlab
|
bin/x64bin/python/36/Lib/email/feedparser.py
|
Python
|
lgpl-2.1
| 23,311
|
"""Various support methods for stresses"""
import numpy as np
import functools
import time
class Timer(object):
"""Simple timer used to measure real and cpu time of stresses."""
def __init__(self):
self.spent = "undefined", "undefined"
def start(self):
self.start_time = np.array([time.time(), time.clock()])
def stop(self):
self.spent = np.array([time.time(), time.clock()]) - self.start_time
def __str__(self):
return "Time spent: {0}s real time; {1}s CPU time".format(self.spent[0],
self.spent[1])
def timed(func):
"""Decorator to mark a test as timed, provides timer argument"""
@functools.wraps(func)
def wrapper(self):
timer = Timer()
func(self, timer)
print "{0}(): {1}".format(func.__name__, timer)
return wrapper
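# Illustrative usage sketch (the stress class and workload are made up): the
# decorated method receives a Timer instance and is expected to call start()
# and stop() around the measured section; the decorator prints the result.
#
#     class StressExample(object):
#         @timed
#         def stress_sum(self, timer):
#             timer.start()
#             total = sum(range(10 ** 6))
#             timer.stop()
#
#     StressExample().stress_sum()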
|
strohel/PyBayes
|
pybayes/stresses/support.py
|
Python
|
gpl-2.0
| 839
|
import os
import abc
import numpy as np
__all__ = ['BaseLowLevelWCS', 'validate_physical_types']
class BaseLowLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the low-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def pixel_n_dim(self):
"""
The number of axes in the pixel coordinate system.
"""
@property
@abc.abstractmethod
def world_n_dim(self):
"""
The number of axes in the world coordinate system.
"""
@property
@abc.abstractmethod
def world_axis_physical_types(self):
"""
An iterable of strings describing the physical type for each world axis.
These should be names from the VO UCD1+ controlled Vocabulary
(http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
arbitrary string. Alternatively, if the physical type is
unknown/undefined, an element can be `None`.
"""
@property
@abc.abstractmethod
def world_axis_units(self):
"""
        An iterable of strings giving the units of the world coordinates for
        each axis.
The strings should follow the `IVOA VOUnit standard
<http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
specification document, units that do not follow this standard are still
allowed, but just not recommended).
"""
@abc.abstractmethod
def pixel_to_world_values(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as
input, and pixel coordinates should be zero-based. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are
assumed to be 0 at the center of the first pixel in each dimension. If a
pixel is in a region where the WCS is not defined, NaN can be returned.
The coordinates should be specified in the ``(x, y)`` order, where for
an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def array_index_to_world_values(self, *index_arrays):
"""
Convert array indices to world coordinates.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
the indices should be given in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def world_to_pixel_values(self, *world_arrays):
"""
Convert world coordinates to pixel coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as
input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel
coordinates are assumed to be 0 at the center of the first pixel in each
dimension. If a world coordinate does not have a matching pixel
coordinate, NaN can be returned. The coordinates should be returned in
the ``(x, y)`` order, where for an image, ``x`` is the horizontal
coordinate and ``y`` is the vertical coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def world_to_array_index_values(self, *world_arrays):
"""
Convert world coordinates to array indices.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
the indices should be returned in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
returned as rounded integers.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@property
@abc.abstractmethod
def world_axis_object_components(self):
"""
A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
on constructing high-level objects for the world coordinates.
Each element of the list is a tuple with three items:
* The first is a name for the world object this world array
corresponds to, which *must* match the string names used in
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
appear twice because two world arrays might correspond to a single
world object (e.g. a celestial coordinate might have both “ra” and
“dec” arrays, which correspond to a single sky coordinate object).
* The second element is either a string keyword argument name or a
positional index for the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.
* The third argument is a string giving the name of the property
to access on the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in
order to get numerical values. Alternatively, this argument can be a
          callable Python object that takes a high-level coordinate object and
returns the numerical values suitable for passing to the low-level
WCS transformation methods.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
@property
@abc.abstractmethod
def world_axis_object_classes(self):
"""
A dictionary giving information on constructing high-level objects for
the world coordinates.
Each key of the dictionary is a string key from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
tuple with three elements or four elements:
* The first element of the tuple must be a class or a string specifying
the fully-qualified name of a class, which will specify the actual
Python object to be created.
        * The second element should be a tuple specifying the positional
arguments required to initialize the class. If
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
          world coordinates should be passed as a positional argument, this
tuple should include `None` placeholders for the world coordinates.
* The third tuple element must be a dictionary with the keyword
arguments required to initialize the class.
* Optionally, for advanced use cases, the fourth element (if present)
should be a callable Python object that gets called instead of the
class and gets passed the positional and keyword arguments. It should
return an object of the type of the first element in the tuple.
Note that we don't require the classes to be Astropy classes since there
is no guarantee that Astropy will have all the classes to represent all
kinds of world coordinates. Furthermore, we recommend that the output be
kept as human-readable as possible.
The classes used here should have the ability to do conversions by
passing an instance as the first argument to the same class with
different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
a requirement for the implementation of the high-level interface.
The second and third tuple elements for each value of this dictionary
can in turn contain either instances of classes, or if necessary can
contain serialized versions that should take the same form as the main
classes described above (a tuple with three elements with the fully
qualified name of the class, then the positional arguments and the
keyword arguments). For low-level API objects implemented in Python, we
recommend simply returning the actual objects (not the serialized form)
for optimal performance. Implementations should either always or never
use serialized classes to represent Python objects, and should indicate
which of these they follow using the
`~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
        <https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
# The following three properties have default fallback implementations, so
# they are not abstract.
@property
def array_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
order (the convention for arrays in Python).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
"""
return None
@property
def pixel_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
order (where for an image, ``x`` is the horizontal coordinate and ``y``
is the vertical coordinate).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
If you are interested in getting a shape that is comparable to that of
a Numpy array, you should use
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
"""
return None
@property
def pixel_bounds(self):
"""
The bounds (in pixel coordinates) inside which the WCS is defined,
as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
``(min, max)`` tuples.
The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
order. WCS solutions are sometimes only guaranteed to be accurate
within a certain range of pixel values, for example when defining a
WCS that includes fitted distortions. This is an optional property,
and it should return `None` if a shape is not known or relevant.
"""
return None
@property
def pixel_axis_names(self):
"""
An iterable of strings describing the name for each pixel axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized.
"""
return [''] * self.pixel_n_dim
@property
def world_axis_names(self):
"""
An iterable of strings describing the name for each world axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized. For standardized axis types, see
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.
"""
return [''] * self.world_n_dim
@property
def axis_correlation_matrix(self):
"""
Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that
indicates using booleans whether a given world coordinate depends on a
given pixel coordinate.
This defaults to a matrix where all elements are `True` in the absence
of any further information. For completely independent axes, the
diagonal would be `True` and all other entries `False`.
"""
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
@property
def serialized_classes(self):
"""
Indicates whether Python objects are given in serialized form or as
actual Python objects.
"""
return False
def _as_mpl_axes(self):
"""
Compatibility hook for Matplotlib and WCSAxes. With this method, one can
do::
from astropy.wcs import WCS
import matplotlib.pyplot as plt
wcs = WCS('filename.fits')
fig = plt.figure()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
...
and this will generate a plot with the correct WCS coordinates on the
axes.
"""
from astropy.visualization.wcsaxes import WCSAxes
return WCSAxes, {'wcs': self}
UCDS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'ucds.txt')
with open(UCDS_FILE) as f:
    VALID_UCDS = {x.strip() for x in f.read().splitlines()[1:]}
def validate_physical_types(physical_types):
"""
Validate a list of physical types against the UCD1+ standard
"""
for physical_type in physical_types:
if (physical_type is not None and
physical_type not in VALID_UCDS and
not physical_type.startswith('custom:')):
raise ValueError(f"Invalid physical type: {physical_type}")
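# Illustrative usage sketch covering the three accepted cases handled in the
# loop above (a UCD1+ word, a "custom:" type, and None); the last call is
# expected to raise:
#
#     validate_physical_types(['pos.eq.ra', 'custom:my-quantity', None])
#     validate_physical_types(['not-a-ucd'])  # ValueError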
|
stargaser/astropy
|
astropy/wcs/wcsapi/low_level_api.py
|
Python
|
bsd-3-clause
| 14,978
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2018-2021 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
Functions for parsers.
"""
import argparse
import os.path as op
import sys
import rapidtide.filter as tide_filt
import rapidtide.io as tide_io
class IndicateSpecifiedAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
setattr(namespace, self.dest + "_nondefault", True)
def setifnotset(thedict, thekey, theval):
    if (thekey + "_nondefault") not in thedict:
print("overriding " + thekey)
thedict[thekey] = theval
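# Illustrative sketch of how these two helpers work together (the option below
# is hypothetical): an option registered with IndicateSpecifiedAction also
# records a "<dest>_nondefault" attribute when the user supplies it, and
# setifnotset() only overrides values whose "<dest>_nondefault" marker is
# absent.
#
#     parser.add_argument("--tau", dest="tau", type=float, default=3.0,
#                         action=IndicateSpecifiedAction)
#     args = vars(parser.parse_args())
#     setifnotset(args, "tau", 1.5)  # no-op if the user passed --tau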
def is_valid_file(parser, arg):
"""
Check if argument is existing file.
"""
if arg is not None:
thefilename, colspec = tide_io.parsefilespec(arg)
else:
thefilename = None
    if thefilename is not None and not op.isfile(thefilename):
parser.error("The file {0} does not exist!".format(thefilename))
return arg
def invert_float(parser, arg):
"""
    Check if argument is a float or "auto"; if it is a float, return its reciprocal.
"""
arg = is_float(parser, arg)
if arg != "auto":
arg = 1.0 / arg
return arg
def is_float(parser, arg):
"""
Check if argument is float or auto.
"""
if arg != "auto":
try:
arg = float(arg)
        except ValueError:
parser.error('Value {0} is not a float or "auto"'.format(arg))
return arg
def is_int(parser, arg):
"""
Check if argument is int or auto.
"""
if arg != "auto":
try:
arg = int(arg)
        except ValueError:
parser.error('Value {0} is not an int or "auto"'.format(arg))
return arg
def is_range(parser, arg):
"""
Check if argument is min/max pair.
"""
if arg is not None and len(arg) != 2:
parser.error("Argument must be min/max pair.")
elif arg is not None and float(arg[0]) > float(arg[1]):
parser.error("Argument min must be lower than max.")
return arg
DEFAULT_FILTER_ORDER = 6
DEFAULT_PAD_SECONDS = 30.0
DEFAULT_PERMUTATIONMETHOD = "shuffle"
DEFAULT_NORMTYPE = "stddev"
DEFAULT_FILTERBAND = "lfo"
DEFAULT_FILTERTYPE = "trapezoidal"
DEFAULT_PADVAL = 0
DEFAULT_WINDOWFUNC = "hamming"
def addreqinputniftifile(parser, varname, addedtext=""):
parser.add_argument(
varname,
type=lambda x: is_valid_file(parser, x),
help="Input NIFTI file name. " + addedtext,
)
def addreqoutputniftifile(parser, varname, addedtext=""):
parser.add_argument(
varname,
type=str,
help="Output NIFTI file name. " + addedtext,
)
def addreqinputtextfile(parser, varname, onecol=False):
if onecol:
colspecline = (
"Use [:COLUMN] to select which column to use, where COLUMN is an "
"integer or a column name (if input file is BIDS)."
)
else:
colspecline = (
"Use [:COLSPEC] to select which column(s) to use, where COLSPEC is an "
"integer, a column separated list of ranges, or a comma "
"separated set of column names (if input file is BIDS). Default is to use all columns"
)
parser.add_argument(
varname,
type=lambda x: is_valid_file(parser, x),
help="Text file containing one or more timeseries columns. " + colspecline,
)
def addreqinputtextfiles(parser, varname, numreq="Two", nargs="*", onecol=False):
if onecol:
colspecline = (
"Use [:COLUMN] to select which column to use, where COLUMN is an "
"integer or a column name (if input file is BIDS)."
)
else:
colspecline = (
"Use [:COLSPEC] to select which column(s) to use, where COLSPEC is an "
"integer, a column separated list of ranges, or a comma "
"separated set of column names (if input file is BIDS). Default is to use all columns."
)
parser.add_argument(
varname,
nargs=nargs,
type=lambda x: is_valid_file(parser, x),
help=numreq + " text files containing one or more timeseries columns. " + colspecline,
)
def addreqoutputtextfile(parser, varname, rootname=False):
if rootname:
helpline = "Root name for the output files"
else:
helpline = "Name of the output text file."
parser.add_argument(
varname,
type=str,
help=helpline,
)
def addnormalizationopts(parser, normtarget="timecourse", defaultmethod=DEFAULT_NORMTYPE):
norm_opts = parser.add_argument_group("Normalization options")
norm_opts.add_argument(
"--normmethod",
dest="normmethod",
action="store",
type=str,
choices=["None", "percent", "variance", "stddev", "z", "p2p", "mad"],
help=(
f"Demean and normalize {normtarget} "
"using one of the following methods: "
'"None" - demean only; '
'"percent" - divide by mean; '
'"variance" - divide by variance; '
'"stddev" or "z" - divide by standard deviation; '
'"p2p" - divide by range; '
'"mad" - divide by median absolute deviation. '
f'Default is "{defaultmethod}".'
),
default=defaultmethod,
)
def addfilteropts(
parser, filtertarget="timecourses", defaultmethod=DEFAULT_FILTERBAND, details=False
):
filt_opts = parser.add_argument_group("Filtering options")
filt_opts.add_argument(
"--filterband",
dest="filterband",
action="store",
type=str,
choices=["None", "vlf", "lfo", "resp", "cardiac", "lfo_legacy"],
help=(
f'Filter {filtertarget} to specific band. Use "None" to disable filtering. '
f'Default is "{defaultmethod}".'
),
default=defaultmethod,
)
filt_opts.add_argument(
"--filterfreqs",
dest="passvec",
action="store",
nargs=2,
type=float,
metavar=("LOWERPASS", "UPPERPASS"),
help=(
"Filter " + filtertarget + " to retain LOWERPASS to "
"UPPERPASS. If --filterstopfreqs is not also specified, "
"LOWERSTOP and UPPERSTOP will be calculated "
"automatically. "
),
default=None,
)
filt_opts.add_argument(
"--filterstopfreqs",
dest="stopvec",
action="store",
nargs=2,
type=float,
metavar=("LOWERSTOP", "UPPERSTOP"),
help=(
"Filter " + filtertarget + " to with stop frequencies LOWERSTOP and UPPERSTOP. "
"LOWERSTOP must be <= LOWERPASS, UPPERSTOP must be >= UPPERPASS. "
"Using this argument requires the use of --filterfreqs."
),
default=None,
)
if details:
filt_opts.add_argument(
"--filtertype",
dest="filtertype",
action="store",
type=str,
choices=["trapezoidal", "brickwall", "butterworth"],
help=(
f"Filter {filtertarget} "
"using a trapezoidal FFT, brickwall FFT, or "
"butterworth bandpass filter. "
f'Default is "{DEFAULT_FILTERTYPE}".'
),
default=DEFAULT_FILTERTYPE,
)
filt_opts.add_argument(
"--butterorder",
dest="filtorder",
action="store",
type=int,
metavar="ORDER",
help=(
"Set order of butterworth filter (if used). " f"Default is {DEFAULT_FILTER_ORDER}."
),
default=DEFAULT_FILTER_ORDER,
)
filt_opts.add_argument(
"--padseconds",
dest="padseconds",
action="store",
type=float,
metavar="SECONDS",
help=(
"The number of seconds of padding to add to each end of a "
"filtered timecourse "
f"to reduce end effects. Default is {DEFAULT_PAD_SECONDS}."
),
default=DEFAULT_PAD_SECONDS,
)
def postprocessfilteropts(args, debug=False):
# configure the filter
# set the trapezoidal flag, if using
try:
thetype = args.filtertype
except AttributeError:
args.filtertype = "trapezoidal"
try:
theorder = args.filtorder
except AttributeError:
args.filtorder = DEFAULT_FILTER_ORDER
try:
thepadseconds = args.padseconds
except AttributeError:
args.padseconds = DEFAULT_PAD_SECONDS
    # if passvec, or passvec and stopvec, are set, we are going to set up an arbpass filter
args.arbvec = None
if debug:
print("before preprocessing")
print("\targs.arbvec:", args.arbvec)
print("\targs.passvec:", args.passvec)
print("\targs.stopvec:", args.stopvec)
print("\targs.filterband:", args.filterband)
if args.stopvec is not None:
if args.passvec is not None:
args.arbvec = [args.passvec[0], args.passvec[1], args.stopvec[0], args.stopvec[1]]
else:
raise ValueError("--filterfreqs must be used if --filterstopfreqs is specified")
else:
if args.passvec is not None:
args.arbvec = [
args.passvec[0],
args.passvec[1],
args.passvec[0] * 0.95,
args.passvec[1] * 1.05,
]
if args.arbvec is not None:
# NOTE - this vector is LOWERPASS, UPPERPASS, LOWERSTOP, UPPERSTOP
# setfreqs expects LOWERSTOP, LOWERPASS, UPPERPASS, UPPERSTOP
theprefilter = tide_filt.NoncausalFilter(
"arb",
transferfunc=args.filtertype,
)
theprefilter.setfreqs(args.arbvec[2], args.arbvec[0], args.arbvec[1], args.arbvec[3])
else:
theprefilter = tide_filt.NoncausalFilter(
args.filterband,
transferfunc=args.filtertype,
padtime=args.padseconds,
)
# set the butterworth order
theprefilter.setbutterorder(args.filtorder)
if debug:
print("before preprocessing")
print("\targs.arbvec:", args.arbvec)
print("\targs.passvec:", args.passvec)
print("\targs.stopvec:", args.stopvec)
print("\targs.filterband:", args.filterband)
(
args.lowerstop,
args.lowerpass,
args.upperpass,
args.upperstop,
) = theprefilter.getfreqs()
if debug:
print("after getfreqs")
print("\targs.arbvec:", args.arbvec)
return args, theprefilter
def addwindowopts(parser, windowtype=DEFAULT_WINDOWFUNC):
wfunc = parser.add_argument_group("Windowing options")
wfunc.add_argument(
"--windowfunc",
dest="windowfunc",
action="store",
type=str,
choices=["hamming", "hann", "blackmanharris", "None"],
help=(
"Window function to use prior to correlation. "
"Options are hamming, hann, "
f"blackmanharris, and None. Default is {windowtype}"
),
default=windowtype,
)
wfunc.add_argument(
"--nowindow",
dest="windowfunc",
action="store_const",
const="None",
help="Disable precorrelation windowing.",
default=windowtype,
)
wfunc.add_argument(
"--zeropadding",
dest="zeropadding",
action="store",
type=int,
metavar="PADVAL",
help=(
"Pad input functions to correlation with PADVAL zeros on each side. "
"A PADVAL of 0 does circular correlations, positive values reduce edge artifacts. "
f"Set PADVAL < 0 to set automatically. Default is {DEFAULT_PADVAL}."
),
default=DEFAULT_PADVAL,
)
def addplotopts(parser, multiline=True):
plotopts = parser.add_argument_group("General plot appearance options")
plotopts.add_argument(
"--title",
dest="thetitle",
metavar="TITLE",
type=str,
action="store",
help="Use TITLE as the overall title of the graph.",
default="",
)
plotopts.add_argument(
"--xlabel",
dest="xlabel",
metavar="LABEL",
type=str,
action="store",
help="Label for the plot x axis.",
default="",
)
plotopts.add_argument(
"--ylabel",
dest="ylabel",
metavar="LABEL",
type=str,
action="store",
help="Label for the plot y axis.",
default="",
)
if multiline:
plotopts.add_argument(
"--legends",
dest="legends",
metavar="LEGEND[,LEGEND[,LEGEND...]]",
type=str,
action="store",
help="Comma separated list of legends for each timecourse.",
default=None,
)
else:
plotopts.add_argument(
"--legend",
dest="legends",
metavar="LEGEND",
type=str,
action="store",
help="Legends for the timecourse.",
default=None,
)
plotopts.add_argument(
"--legendloc",
dest="legendloc",
metavar="LOC",
type=int,
action="store",
help=(
"Integer from 0 to 10 inclusive specifying legend location. Legal values are: "
"0: best, 1: upper right, 2: upper left, 3: lower left, 4: lower right, "
"5: right, 6: center left, 7: center right, 8: lower center, 9: upper center, "
"10: center. Default is 2."
),
default=2,
)
if multiline:
plotopts.add_argument(
"--colors",
dest="colors",
metavar="COLOR[,COLOR[,COLOR...]]",
type=str,
action="store",
help="Comma separated list of colors for each timecourse.",
default=None,
)
else:
plotopts.add_argument(
"--color",
dest="colors",
metavar="COLOR",
type=str,
action="store",
help="Color of the timecourse plot.",
default=None,
)
plotopts.add_argument(
"--nolegend",
dest="dolegend",
action="store_false",
help="Turn off legend label.",
default=True,
)
plotopts.add_argument(
"--noxax",
dest="showxax",
action="store_false",
help="Do not show x axis.",
default=True,
)
plotopts.add_argument(
"--noyax",
dest="showyax",
action="store_false",
help="Do not show y axis.",
default=True,
)
if multiline:
plotopts.add_argument(
"--linewidth",
dest="linewidths",
metavar="LINEWIDTH[,LINEWIDTH[,LINEWIDTH...]]",
type=str,
help="A comma separated list of linewidths (in points) for plots. Default is 1.",
default=None,
)
else:
plotopts.add_argument(
"--linewidth",
dest="linewidths",
metavar="LINEWIDTH",
type=str,
help="Linewidth (in points) for plot. Default is 1.",
default=None,
)
plotopts.add_argument(
"--tofile",
dest="outputfile",
metavar="FILENAME",
type=str,
action="store",
help="Write figure to file FILENAME instead of displaying on the screen.",
default=None,
)
plotopts.add_argument(
"--fontscalefac",
dest="fontscalefac",
metavar="FAC",
type=float,
action="store",
help="Scaling factor for annotation fonts (default is 1.0).",
default=1.0,
)
plotopts.add_argument(
"--saveres",
dest="saveres",
metavar="DPI",
type=int,
action="store",
help="Write figure to file at DPI dots per inch (default is 1000).",
default=1000,
)
def addpermutationopts(parser, numreps=10000):
permutationmethod = parser.add_mutually_exclusive_group()
permutationmethod.add_argument(
"--permutationmethod",
dest="permutationmethod",
action="store",
type=str,
choices=["shuffle", "phaserandom"],
help=(
"Permutation method for significance testing. "
f'Default is "{DEFAULT_PERMUTATIONMETHOD}".'
),
default=DEFAULT_PERMUTATIONMETHOD,
)
parser.add_argument(
"--numnull",
dest="numestreps",
action="store",
type=int,
metavar="NREPS",
help=(
"Estimate significance threshold by running "
f"NREPS null correlations (default is {numreps}, "
"set to 0 to disable). "
),
default=numreps,
)
parser.add_argument(
"--skipsighistfit",
dest="dosighistfit",
action="store_false",
help=("Do not fit significance histogram with a Johnson SB function."),
default=True,
)
def addsearchrangeopts(parser, details=False, defaultmin=-30.0, defaultmax=30.0):
parser.add_argument(
"--searchrange",
dest="lag_extrema",
action=IndicateSpecifiedAction,
nargs=2,
type=float,
metavar=("LAGMIN", "LAGMAX"),
help=(
"Limit fit to a range of lags from LAGMIN to "
"LAGMAX. Default is -30.0 to 30.0 seconds. "
),
default=(defaultmin, defaultmax),
)
if details:
parser.add_argument(
"--fixdelay",
dest="fixeddelayvalue",
action="store",
type=float,
metavar="DELAYTIME",
help=("Don't fit the delay time - set it to " "DELAYTIME seconds for all voxels. "),
default=None,
)
def postprocesssearchrangeopts(args):
# Additional argument parsing not handled by argparse
# first handle fixed delay
    if not hasattr(args, "fixeddelayvalue"):
        args.fixeddelayvalue = None
if args.fixeddelayvalue is not None:
args.fixdelay = True
args.lag_extrema = (args.fixeddelayvalue - 10.0, args.fixeddelayvalue + 10.0)
else:
args.fixdelay = False
# now set the extrema
    if hasattr(args, "lag_extrema_nondefault"):
        args.lagmin_nondefault = True
        args.lagmax_nondefault = True
args.lagmin = args.lag_extrema[0]
args.lagmax = args.lag_extrema[1]
return args
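# Illustrative note (added sketch, not part of the original module): with the
# "details" options enabled, passing a hypothetical "--fixdelay 5.0" makes
# postprocesssearchrangeopts() set args.fixdelay = True and recenter the lag
# search window on the fixed delay, e.g.
#     args.lag_extrema == (-5.0, 15.0)          # DELAYTIME -/+ 10.0 seconds
#     args.lagmin, args.lagmax == -5.0, 15.0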
def addtimerangeopts(parser):
parser.add_argument(
"--timerange",
dest="timerange",
action="store",
nargs=2,
type=int,
metavar=("START", "END"),
help=(
"Limit analysis to data between timepoints "
"START and END in the input file. If END is set to -1, "
"analysis will go to the last timepoint. Negative values "
"of START will be set to 0. Default is to use all timepoints."
),
default=(-1, -1),
)
def postprocesstimerangeopts(args):
args.startpoint = int(args.timerange[0])
if args.timerange[1] == -1:
args.endpoint = 10000000000
else:
args.endpoint = int(args.timerange[1])
return args
def addsimilarityopts(parser):
parser.add_argument(
"--mutualinfosmoothingtime",
dest="smoothingtime",
action="store",
type=float,
metavar="TAU",
help=(
"Time constant of a temporal smoothing function to apply to the "
"mutual information function. "
"Default is 3.0 seconds. TAU <=0.0 disables smoothing."
),
default=3.0,
)
def setargs(thegetparserfunc, inputargs=None):
"""
Compile arguments for rapidtide workflow.
"""
if inputargs is None:
# get arguments from the command line
# LGR.info("processing command line arguments")
try:
args = thegetparserfunc().parse_args()
argstowrite = sys.argv
except SystemExit:
thegetparserfunc().print_help()
raise
else:
# get arguments from the passed list
# LGR.info("processing passed argument list:")
# LGR.info(inputargs)
try:
args = thegetparserfunc().parse_args(inputargs)
argstowrite = inputargs
except SystemExit:
thegetparserfunc().print_help()
raise
return args, argstowrite
def generic_init(theparser, themain, inputargs=None):
"""
Compile arguments either from the command line, or from an argument list.
"""
if inputargs is None:
print("processing command line arguments")
# write out the command used
try:
args = theparser().parse_args()
argstowrite = sys.argv
except SystemExit:
theparser().print_help()
raise
else:
print("processing passed argument list:")
try:
args = theparser().parse_args(inputargs)
argstowrite = inputargs
except SystemExit:
theparser().print_help()
raise
    # save the command line used to invoke the program
args.commandline = " ".join(argstowrite)
themain(args)
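# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). The parser factory and main callable below are hypothetical and
# only show how generic_init() wires a parser function to a workflow entry
# point, assuming IndicateSpecifiedAction stores its values like a standard
# "store" action.
if __name__ == "__main__":
    import argparse

    def _demo_parser():
        # Stand-in for a rapidtide workflow parser factory.
        demo = argparse.ArgumentParser(description="generic_init demo")
        addsearchrangeopts(demo, details=True)
        addtimerangeopts(demo)
        return demo

    def _demo_main(args):
        args = postprocesssearchrangeopts(args)
        args = postprocesstimerangeopts(args)
        print("lag range:", args.lagmin, args.lagmax)
        print("time range:", args.startpoint, args.endpoint)

    generic_init(_demo_parser, _demo_main, inputargs=["--searchrange", "-15", "15"])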
|
bbfrederick/rapidtide
|
rapidtide/workflows/parser_funcs.py
|
Python
|
apache-2.0
| 21,939
|
from __future__ import absolute_import
from .. import log; log = log[__name__]
from .file import (
DoesNotExist, Key, Directory, File,
MemFile, TemporaryFile, root_open)
__all__ = [
'DoesNotExist',
'Key',
'Directory',
'File',
'MemFile',
'TemporaryFile',
'root_open',
]
|
ndawe/rootpy
|
rootpy/io/__init__.py
|
Python
|
bsd-3-clause
| 307
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import volumeops
class VMwareVolumeOpsTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVolumeOpsTestCase, self).setUp()
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
self._session = driver.VMwareAPISession()
self._context = context.RequestContext('fake_user', 'fake_project')
self._volumeops = volumeops.VMwareVolumeOps(self._session)
self._image_id = image_fake.get_valid_image_id()
self._instance_values = {
'name': 'fake_name',
'uuid': 'fake_uuid',
'vcpus': 1,
'memory_mb': 512,
'image_ref': self._image_id,
'root_gb': 10,
'node': 'respool-1001(MyResPoolName)',
'expected_attrs': ['system_metadata'],
}
self._instance = fake_instance.fake_instance_obj(self._context,
**self._instance_values)
def _test_detach_disk_from_vm(self, destroy_disk=False):
def fake_call_method(module, method, *args, **kwargs):
vmdk_detach_config_spec = kwargs.get('spec')
virtual_device_config = vmdk_detach_config_spec.deviceChange[0]
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy',
virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config,
'fileOperation'))
return 'fake_configure_task'
with contextlib.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
fake_device = vmwareapi_fake.DataObject()
fake_device.backing = vmwareapi_fake.DataObject()
fake_device.backing.fileName = 'fake_path'
fake_device.key = 'fake_key'
self._volumeops.detach_disk_from_vm('fake_vm_ref', self._instance,
fake_device, destroy_disk)
_wait_for_task.assert_has_calls([
mock.call('fake_configure_task')])
def test_detach_with_destroy_disk_from_vm(self):
self._test_detach_disk_from_vm(destroy_disk=True)
def test_detach_without_destroy_disk_from_vm(self):
self._test_detach_disk_from_vm(destroy_disk=False)
def _fake_call_get_dynamic_property(self, uuid, result):
def fake_call_method(vim, method, vm_ref, type, prop):
expected_prop = 'config.extraConfig["volume-%s"]' % uuid
self.assertEqual('VirtualMachine', type)
self.assertEqual(expected_prop, prop)
return result
return fake_call_method
def test_get_volume_uuid(self):
vm_ref = mock.Mock()
uuid = '1234'
opt_val = vmwareapi_fake.OptionValue('volume-%s' % uuid, 'volume-val')
fake_call = self._fake_call_get_dynamic_property(uuid, opt_val)
with mock.patch.object(self._session, "_call_method", fake_call):
val = self._volumeops._get_volume_uuid(vm_ref, uuid)
self.assertEqual('volume-val', val)
def test_get_volume_uuid_not_found(self):
vm_ref = mock.Mock()
uuid = '1234'
fake_call = self._fake_call_get_dynamic_property(uuid, None)
with mock.patch.object(self._session, "_call_method", fake_call):
val = self._volumeops._get_volume_uuid(vm_ref, uuid)
self.assertIsNone(val)
def test_attach_volume_vmdk_invalid(self):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE)
vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE,
constants.DISK_TYPE_PREALLOCATED, 1024,
'fake-device')
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref'),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info)
) as (get_vm_ref, get_volume_ref, get_vmdk_info):
self.assertRaises(exception.Invalid,
self._volumeops._attach_volume_vmdk, connection_info,
instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
self.assertTrue(get_vmdk_info.called)
def test_detach_volume_vmdk_invalid(self):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE)
vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE,
constants.DISK_TYPE_PREALLOCATED, 1024,
'fake-device')
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref',
return_value=mock.sentinel.vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(self._volumeops,
'_get_vmdk_backed_disk_device'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info)
) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device,
get_vmdk_info):
self.assertRaises(exception.Invalid,
self._volumeops._detach_volume_vmdk, connection_info,
instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
self.assertTrue(get_vmdk_info.called)
def _test_attach_volume_vmdk(self, adapter_type=None):
connection_info = {'driver_volume_type': constants.DISK_FORMAT_VMDK,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
vm_ref = 'fake-vm-ref'
volume_device = mock.MagicMock()
volume_device.backing.fileName = 'fake-path'
default_adapter_type = constants.DEFAULT_ADAPTER_TYPE
disk_type = constants.DEFAULT_DISK_TYPE
disk_uuid = 'e97f357b-331e-4ad1-b726-89be048fb811'
backing = mock.Mock(uuid=disk_uuid)
device = mock.Mock(backing=backing)
vmdk_info = vm_util.VmdkInfo('fake-path', default_adapter_type,
disk_type, 1024,
device)
adapter_type = adapter_type or default_adapter_type
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(self._volumeops, 'attach_disk_to_vm'),
mock.patch.object(self._volumeops, '_update_volume_details')
) as (get_vm_ref, get_volume_ref, get_vmdk_info, attach_disk_to_vm,
update_volume_details):
self._volumeops.attach_volume(connection_info, self._instance,
adapter_type)
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
self.assertTrue(get_vmdk_info.called)
attach_disk_to_vm.assert_called_once_with(
vm_ref, self._instance, adapter_type,
constants.DISK_TYPE_PREALLOCATED, vmdk_path='fake-path')
update_volume_details.assert_called_once_with(
vm_ref, connection_info['data']['volume_id'], disk_uuid)
def _test_attach_volume_iscsi(self, adapter_type=None):
connection_info = {'driver_volume_type': 'iscsi',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
vm_ref = 'fake-vm-ref'
default_adapter_type = constants.DEFAULT_ADAPTER_TYPE
adapter_type = adapter_type or default_adapter_type
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_iscsi_discover_target',
return_value=(mock.sentinel.device_name,
mock.sentinel.uuid)),
mock.patch.object(vm_util, 'get_scsi_adapter_type',
return_value=adapter_type),
mock.patch.object(self._volumeops, 'attach_disk_to_vm')
) as (get_vm_ref, iscsi_discover_target, get_scsi_adapter_type,
attach_disk_to_vm):
self._volumeops.attach_volume(connection_info, self._instance,
adapter_type)
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
iscsi_discover_target.assert_called_once_with(
connection_info['data'])
if adapter_type is None:
self.assertTrue(get_scsi_adapter_type.called)
attach_disk_to_vm.assert_called_once_with(vm_ref,
self._instance, adapter_type, 'rdmp',
device_name=mock.sentinel.device_name)
def test_attach_volume_vmdk(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_IDE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_vmdk(adapter_type)
def test_attach_volume_iscsi(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_iscsi(adapter_type)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume')
def test_consolidate_vmdk_volume_with_no_relocate(
self, relocate_vmdk_volume, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
device = mock.Mock(backing=backing)
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
self._volumeops._consolidate_vmdk_volume(self._instance, vm_ref,
device, volume_ref)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
self.assertFalse(relocate_vmdk_volume.called)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_host_of_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_res_pool_of_host')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm')
def test_consolidate_vmdk_volume_with_relocate(
self, attach_disk_to_vm, detach_disk_from_vm, get_res_pool_of_host,
get_host_of_vm, relocate_vmdk_volume, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
new_file_name = mock.sentinel.new_file_name
datastore = mock.sentinel.datastore
new_backing = mock.Mock(fileName=new_file_name, datastore=datastore)
device = mock.Mock(backing=new_backing)
host = mock.sentinel.host
get_host_of_vm.return_value = host
rp = mock.sentinel.rp
get_res_pool_of_host.return_value = rp
instance = self._instance
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
adapter_type = constants.ADAPTER_TYPE_BUSLOGIC
disk_type = constants.DISK_TYPE_EAGER_ZEROED_THICK
self._volumeops._consolidate_vmdk_volume(instance, vm_ref, device,
volume_ref, adapter_type,
disk_type)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
relocate_vmdk_volume.assert_called_once_with(
volume_ref, rp, datastore, host)
detach_disk_from_vm.assert_called_once_with(
volume_ref, instance, original_device, destroy_disk=True)
attach_disk_to_vm.assert_called_once_with(
volume_ref, instance, adapter_type, disk_type,
vmdk_path=new_file_name)
|
akash1808/nova_test_latest
|
nova/tests/unit/virt/vmwareapi/test_volumeops.py
|
Python
|
apache-2.0
| 15,579
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
from boututils import shell, launch, plotdata
from boutdata import collect
import numpy as np
from sys import argv
from math import sqrt, log, pi
from matplotlib import pyplot, ticker, rc
rc('text', usetex=True)
rc('font',**{'family':'serif','serif':['Computer Modern']})
if len(argv)==1:
end_index = -1
data_path = "data"
elif len(argv)==2:
try:
end_index = int(argv[1])
data_path = "data"
except ValueError:
end_index = -1
data_path = str(argv[1])
elif len(argv)==3:
end_index = int(argv[1])
data_path = str(argv[2])
else:
print("Arguments: '[end_index] [data_path]' or 'gamma [data_path]'")
    exit(1)
# Collect the data
Te = collect("T_electron", path=data_path, xind=2, info=True, yguards=True)
Ti = collect("T_ion", path=data_path, xind=2, info=True, yguards=True)
if end_index<0:
end_index = len(Te[:,0,0,0])
Te_left = []
Ti_left = []
for i in range(end_index):
Te_left.append(old_div((Te[i,0,2,0]+Te[i,0,3,0]),2))
Ti_left.append(old_div((Ti[i,0,2,0]+Ti[i,0,3,0]),2))
# Make plot
if len(argv)>2:
pyplot.semilogx(Te_left[:end_index],'r',Ti_left[:end_index],'b')
pyplot.title("Te (red) and Ti (blue) at the (left) boundary")
pyplot.axes().xaxis.set_major_formatter(ticker.FormatStrFormatter("%g"))
pyplot.axes().grid(color='grey', which='both')
else:
pyplot.semilogx(Te_left[:],'r',Ti_left[:],'b')
pyplot.title("Te (red) and Ti (blue) at the (left) boundary")
pyplot.axes().xaxis.set_major_formatter(ticker.FormatStrFormatter(r"$%g$"))
pyplot.axes().grid(color='grey', which='both')
pyplot.show()
|
kevinpetersavage/BOUT-dev
|
examples/non-local_1d/analyseboundary-Ts.py
|
Python
|
gpl-3.0
| 1,736
|
'''
A bunch of support functions used for GD optimisation. They depend on the
parallel implementation framework, but may change for other optimisers.
'''
import glob
import numpy
from os.path import splitext
from local_MapReduce import load, save
'''
Initialisation for local statistics
'''
def embeddings_set_grads(folder):
'''
Sets the grads and other local statistics often needed for optimisation locally for
each node. This is currently only implemented locally, but could easily be adapted
to the MapReduce framework to be done on remote nodes in parallel. There's no real
need to do this in parallel though, as the computations taking place are not that
time consuming.
'''
input_files = sorted(glob.glob(folder + '/*.grad_latest.npy'))
for file_name in input_files:
grads = load(file_name)
#print 'grads'
#print grads
# Save grad new as the latest grad evaluated
new_file = splitext(splitext(file_name)[0])[0] + '.grad_now.npy'
save(new_file, grads)
# Save the direction as the negative grad
new_file = splitext(splitext(file_name)[0])[0] + '.grad_d.npy'
save(new_file, -1 * grads)
'''
Getters for local statistics
'''
def embeddings_get_grads_current_grad(folder):
'''
Get the sum over the inputs of the inner product of grad_now with itself
'''
current_grad = 0
grad_now_files = sorted(glob.glob(folder + '/*.grad_now.npy'))
for grad_now_file in grad_now_files:
grad_now = load(grad_now_file)
current_grad += numpy.sum(numpy.abs(grad_now))
return current_grad
def embeddings_get_grads_max_gradnow(folder):
'''
Get the max abs element of the direction over all input files
'''
max_gradnow = 0
grad_now_files = sorted(glob.glob(folder + '/*.grad_now.npy'))
for grad_now_file in grad_now_files:
grad_now = load(grad_now_file)
max_gradnow = max(max_gradnow, numpy.max(numpy.abs(grad_now)))
return max_gradnow
'''
Setters for local statistics
'''
def embeddings_set_grads_update_d(folder, gamma):
    '''
    Update the direction for each input to be -(grad_now + gamma * old direction),
    i.e. the negative current gradient plus a momentum-style term. (The
    commented-out line below keeps the variant with the opposite sign on the
    old direction.)
    '''
grad_now_files = sorted(glob.glob(folder + '/*.grad_now.npy'))
grad_d_files = sorted(glob.glob(folder + '/*.grad_d.npy'))
for grad_now_file, grad_d_file in zip(grad_now_files, grad_d_files):
grad_now = load(grad_now_file)
grad_d = load(grad_d_file)
save(grad_d_file, - (grad_now + gamma * grad_d))
#save(grad_d_file, - (grad_now - gamma * grad_d))
def embeddings_set_grads_update_X(folder, step_size):
'''
Update the value of the local embeddings and variances themselves to be X + alpha * direction
'''
grad_d_files = sorted(glob.glob(folder + '/*.grad_d.npy'))
X_mu_files = sorted(glob.glob(folder + '/*.embedding.npy'))
X_S_files = sorted(glob.glob(folder + '/*.variance.npy'))
for grad_d_file, X_mu_file, X_S_file in zip(grad_d_files, X_mu_files, X_S_files):
grad_d = load(grad_d_file)
grad_d_X_mu = grad_d[0]
grad_d_X_S = grad_d[1]
X_mu = load(X_mu_file)
X_S = load(X_S_file)
#print 'X_mu'
#print X_mu
#print 'X_S'
#print X_S
save(X_mu_file, X_mu + step_size * grad_d_X_mu)
save(X_S_file, X_S + step_size * grad_d_X_S)
def embeddings_set_grads_update_grad_now(folder):
'''
Set grad_now to be grad_latest (a temp grad that keeps changing every evaluation)
'''
input_files = sorted(glob.glob(folder + '/*.grad_latest.npy'))
for file_name in input_files:
grads = load(file_name)
# Save grad old as latest grad new
new_file = splitext(splitext(file_name)[0])[0] + '.grad_now.npy'
save(new_file, grads)
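# Illustrative driver sketch (added for clarity; not part of the original
# module). A hypothetical gradient-descent step over a folder of per-node
# '*.grad_latest.npy' / '*.embedding.npy' / '*.variance.npy' files might look
# like this, with 'folder', 'gamma' and 'step_size' supplied by the optimiser:
#
#     embeddings_set_grads(folder)                    # initialise d = -grad
#     while embeddings_get_grads_max_gradnow(folder) > 1e-6:
#         ...                                         # re-evaluate grad_latest on the nodes
#         embeddings_set_grads_update_grad_now(folder)
#         embeddings_set_grads_update_d(folder, gamma)
#         embeddings_set_grads_update_X(folder, step_size)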
|
TianpeiLuke/GParML
|
gd_local_MapReduce.py
|
Python
|
bsd-3-clause
| 3,855
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.agent.linux import ipset_manager
from neutron.tests import base
TEST_SET_ID = 'fake_sgid'
ETHERTYPE = 'IPv4'
TEST_SET_NAME = ipset_manager.IpsetManager.get_name(TEST_SET_ID, ETHERTYPE)
TEST_SET_NAME_NEW = TEST_SET_NAME + ipset_manager.SWAP_SUFFIX
FAKE_IPS = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4',
'10.0.0.5', '10.0.0.6']
class BaseIpsetManagerTest(base.BaseTestCase):
def setUp(self):
super(BaseIpsetManagerTest, self).setUp()
self.ipset = ipset_manager.IpsetManager()
self.execute = mock.patch.object(self.ipset, "execute").start()
self.expected_calls = []
self.expect_create()
self.force_sorted_get_set_ips()
def force_sorted_get_set_ips(self):
"""Force sorted responses by self.ipset._get_new/deleted_set_ips.
_get_new/deleted_set_ips use internally sets and return randomly
ordered responses. This method ensures sorted responses from them
in order to guarantee call order in self.ipset.set_members.
"""
original_get_new_set_ips = self.ipset._get_new_set_ips
original_get_deleted_set_ips = self.ipset._get_deleted_set_ips
def sorted_get_new_set_ips(set_name, expected_ips):
unsorted = original_get_new_set_ips(set_name, expected_ips)
return sorted(unsorted)
def sorted_get_deleted_set_ips(set_name, expected_ips):
unsorted = original_get_deleted_set_ips(set_name, expected_ips)
return sorted(unsorted)
mock.patch.object(self.ipset, '_get_new_set_ips',
side_effect=sorted_get_new_set_ips).start()
mock.patch.object(self.ipset, '_get_deleted_set_ips',
side_effect=sorted_get_deleted_set_ips).start()
def verify_mock_calls(self):
self.execute.assert_has_calls(self.expected_calls, any_order=False)
def expect_set(self, addresses):
temp_input = ['create %s hash:net family inet' % TEST_SET_NAME_NEW]
temp_input.extend('add %s %s' % (TEST_SET_NAME_NEW, ip)
for ip in self.ipset._sanitize_addresses(addresses))
input = '\n'.join(temp_input)
self.expected_calls.extend([
mock.call(['ipset', 'restore', '-exist'],
process_input=input,
run_as_root=True),
mock.call(['ipset', 'swap', TEST_SET_NAME_NEW, TEST_SET_NAME],
process_input=None,
run_as_root=True),
mock.call(['ipset', 'destroy', TEST_SET_NAME_NEW],
process_input=None,
run_as_root=True)])
def expect_add(self, addresses):
self.expected_calls.extend(
mock.call(['ipset', 'add', '-exist', TEST_SET_NAME, ip],
process_input=None,
run_as_root=True)
for ip in self.ipset._sanitize_addresses(addresses))
def expect_del(self, addresses):
self.expected_calls.extend(
mock.call(['ipset', 'del', TEST_SET_NAME, ip],
process_input=None,
run_as_root=True)
for ip in self.ipset._sanitize_addresses(addresses))
def expect_create(self):
self.expected_calls.append(
mock.call(['ipset', 'create', '-exist', TEST_SET_NAME,
'hash:net', 'family', 'inet'],
process_input=None,
run_as_root=True))
def expect_destroy(self):
self.expected_calls.append(
mock.call(['ipset', 'destroy', TEST_SET_NAME],
process_input=None,
run_as_root=True))
def add_first_ip(self):
self.expect_set([FAKE_IPS[0]])
self.ipset.set_members(TEST_SET_ID, ETHERTYPE, [FAKE_IPS[0]])
def add_all_ips(self):
self.expect_set(FAKE_IPS)
self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS)
class IpsetManagerTestCase(BaseIpsetManagerTest):
def test_set_exists(self):
self.add_first_ip()
self.assertTrue(self.ipset.set_exists(TEST_SET_ID, ETHERTYPE))
def test_set_members_with_first_add_member(self):
self.add_first_ip()
self.verify_mock_calls()
def test_set_members_adding_less_than_5(self):
self.add_first_ip()
self.expect_add(FAKE_IPS[1:5])
self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:5])
self.verify_mock_calls()
def test_set_members_deleting_less_than_5(self):
self.add_all_ips()
self.expect_del(FAKE_IPS[3:])
self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:3])
self.verify_mock_calls()
def test_set_members_adding_more_than_5(self):
self.add_first_ip()
self.expect_set(FAKE_IPS)
self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS)
self.verify_mock_calls()
def test_set_members_adding_all_zero_ipv4(self):
self.expect_set(['0.0.0.0/0'])
self.ipset.set_members(TEST_SET_ID, ETHERTYPE, ['0.0.0.0/0'])
self.verify_mock_calls()
def test_set_members_adding_all_zero_ipv6(self):
self.expect_set(['::/0'])
self.ipset.set_members(TEST_SET_ID, ETHERTYPE, ['::/0'])
self.verify_mock_calls()
def test_destroy(self):
self.add_first_ip()
self.expect_destroy()
self.ipset.destroy(TEST_SET_ID, ETHERTYPE)
self.verify_mock_calls()
|
paninetworks/neutron
|
neutron/tests/unit/agent/linux/test_ipset_manager.py
|
Python
|
apache-2.0
| 6,084
|
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
import json
import random
import datetime
from flask import Flask, request, url_for, redirect
app = Flask(__name__)
###############################
#
# Store in DATA dictionaries
# representing users.
#
DATA = {
}
##################################
#
# Store in EXPIRED_DATA, expired
# addresses pointing to their
# previous URLs
#
EXPIRED_DATA = {
}
#####################################
#
# Main method. Authorized users
# come here directly, with a secret
# which is their identifier. This
# should be stored in a Redis or
# SQL database.
#
@app.route('/lab/<session_id>/')
def index(session_id):
data = DATA.get(session_id, None)
if data is None:
back_url = EXPIRED_DATA.get(session_id, None)
if back_url is None:
return "Session identifier not found"
else:
return redirect(back_url)
data['last_poll'] = datetime.datetime.now()
return """<html>
<head>
<meta http-equiv="refresh" content="10">
</head>
<body>
Hi %(username)s. You still have %(seconds)s seconds
</body>
    </html>
""" % dict(username=data['username'], seconds=(data['max_date'] - datetime.datetime.now()).seconds)
def get_json():
# Retrieve the submitted JSON
if request.data:
data = request.data
else:
keys = request.form.keys() or ['']
data = keys[0]
return json.loads(data)
#############################################################
#
# WebLab-Deusto API:
#
# First, this method creates new sessions. We store the
# sessions on memory in this dummy example.
#
@app.route("/foo/weblab/sessions/", methods=['POST'])
def start_experiment():
# Parse it: it is a JSON file containing two fields:
request_data = get_json()
client_initial_data = json.loads(request_data['client_initial_data'])
server_initial_data = json.loads(request_data['server_initial_data'])
print server_initial_data
# Parse the initial date + assigned time to know the maximum time
start_date_str = server_initial_data['priority.queue.slot.start']
start_date_str, microseconds = start_date_str.split('.')
start_date = datetime.datetime.strptime(start_date_str, "%Y-%m-%d %H:%M:%S") + datetime.timedelta(microseconds = int(microseconds))
max_date = start_date + datetime.timedelta(seconds = float(server_initial_data['priority.queue.slot.length']))
# Create a global session
session_id = str(random.randint(0, 10e8)) # Not especially secure 0:-)
DATA[session_id] = {
'username' : server_initial_data['request.username'],
'max_date' : max_date,
'last_poll' : datetime.datetime.now(),
'back' : request_data['back']
}
link = url_for('index', session_id=session_id, _external = True)
print "Assigned session_id: %s" % session_id
print "See:",link
return json.dumps({ 'url' : link, 'session_id' : session_id })
#############################################################
#
# WebLab-Deusto API:
#
# This method provides the current status of a particular
# user.
#
@app.route('/foo/weblab/sessions/<session_id>/status')
def status(session_id):
data = DATA.get(session_id, None)
if data is not None:
print "Did not poll in", datetime.datetime.now() - data['last_poll'], "seconds"
print "User %s still has %s seconds" % (data['username'], (data['max_date'] - datetime.datetime.now()).seconds)
if (datetime.datetime.now() - data['last_poll']).seconds > 30:
print "Kick out the user, please"
return json.dumps({'should_finish' : -1})
print "Ask in 10 seconds..."
#
# If the user is considered expired here, we can return -1 instead of 10.
# The WebLab-Deusto scheduler will mark it as finished and will reassign
# other user.
#
return json.dumps({'should_finish' : 10})
#############################################################
#
# WebLab-Deusto API:
#
# This method is called to kick one user out. This may happen
# when an administrator defines so, or when the assigned time
# is over.
#
@app.route('/foo/weblab/sessions/<session_id>', methods=['POST'])
def dispose_experiment(session_id):
request_data = get_json()
if 'action' in request_data and request_data['action'] == 'delete':
if session_id in DATA:
data = DATA.pop(session_id, None)
if data is not None:
EXPIRED_DATA[session_id] = data['back']
return 'deleted'
return 'not found'
return 'unknown op'
if __name__ == "__main__":
app.run(debug=True, host = '0.0.0.0')
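# Illustrative request sketch (added for clarity; not part of the original
# sample). A WebLab-Deusto server would start a session by POSTing JSON like
# the following to /foo/weblab/sessions/ ; the concrete values are hypothetical:
#
#     {
#       "client_initial_data": "{}",
#       "server_initial_data": "{\"priority.queue.slot.start\": \"2013-01-01 00:00:00.000000\", \"priority.queue.slot.length\": \"300\", \"request.username\": \"demo\"}",
#       "back": "http://weblab.example.org/lab/back"
#     }
#
# It would then poll /foo/weblab/sessions/<session_id>/status every few
# seconds until told to finish, and finally POST {"action": "delete"} to
# /foo/weblab/sessions/<session_id>.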
|
zstars/weblabdeusto
|
experiments/unmanaged/http/python/flask/sample.py
|
Python
|
bsd-2-clause
| 4,706
|
"""Tests for the Bond integration."""
|
jawilson/home-assistant
|
tests/components/bond/__init__.py
|
Python
|
apache-2.0
| 38
|
import FWCore.ParameterSet.Config as cms
#---------------------------------------------------------------------------------------------------
# M A I N
#---------------------------------------------------------------------------------------------------
# create the process
process = cms.Process('FILEFI')
# say how many events to process (-1 means no limit)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
#>> input source
process.source = cms.Source(
"PoolSource",
#fileNames = cms.untracked.vstring('root://xrootd.unl.edu//store/mc/RunIIFall15DR76/TT_TuneCUETP8M1_13TeV-amcatnlo-pythia8/AODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/30000/029641E2-37A2-E511-9AB4-A0369F7F8E80.root')
fileNames = cms.untracked.vstring('file:/tmp/029641E2-37A2-E511-9AB4-A0369F7F8E80.root')
)
process.source.inputCommands = cms.untracked.vstring(
"keep *",
"drop *_MEtoEDMConverter_*_*",
"drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT"
)
#>> configurations
# determine the global tag to use
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.GlobalTag.globaltag = 'MCRUN2_74_V9'
# define meta data for this production
process.configurationMetadata = cms.untracked.PSet(
name = cms.untracked.string('BambuProd'),
version = cms.untracked.string('Mit_043'),
annotation = cms.untracked.string('AODSIM')
)
#>> standard sequences
# load some standard sequences we will need
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.EventContent.EventContent_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi')
process.load('TrackingTools.TransientTrack.TransientTrackBuilder_cfi')
# define sequence for ProductNotFound
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
wantSummary = cms.untracked.bool(False)
)
# Import/Load the filler so all is already available for config changes
from MitProd.TreeFiller.MitTreeFiller_cfi import MitTreeFiller
process.load('MitProd.TreeFiller.MitTreeFiller_cfi')
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# R E C O S E Q U E N C E
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
## Load stablePart producers
from MitEdm.Producers.conversionElectronsStable_cfi import electronsStable
process.load('MitEdm.Producers.conversionElectronsStable_cfi')
# Load Mit Mvf Conversion producer
# MultiVertexFitter is currently broken
#from MitProd.TreeFiller.conversionProducer_cff import conversionProducer, addConversionFiller
#process.load('MitProd.TreeFiller.conversionProducer_cff')
#addConversionFiller(MitTreeFiller)
# Electron likelihood-based id
from RecoEgamma.ElectronIdentification.ElectronMVAValueMapProducer_cfi import electronMVAValueMapProducer
process.load('RecoEgamma.ElectronIdentification.ElectronMVAValueMapProducer_cfi')
MitTreeFiller.Electrons.eIDLikelihoodName = 'electronMVAValueMapProducer:ElectronMVAEstimatorRun2Spring15Trig25nsV1Values'
# Load basic particle flow collections
# Used for rho calculation
from CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi import goodOfflinePrimaryVertices
from CommonTools.ParticleFlow.pfParticleSelection_cff import pfParticleSelectionSequence, pfPileUp, pfNoPileUp, pfPileUpIso, pfNoPileUpIso
from CommonTools.ParticleFlow.pfPhotons_cff import pfPhotonSequence
from CommonTools.ParticleFlow.pfElectrons_cff import pfElectronSequence
from CommonTools.ParticleFlow.pfMuons_cff import pfMuonSequence
from CommonTools.ParticleFlow.TopProjectors.pfNoMuon_cfi import pfNoMuon
from CommonTools.ParticleFlow.TopProjectors.pfNoElectron_cfi import pfNoElectron
process.load('CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi')
process.load('CommonTools.ParticleFlow.pfParticleSelection_cff')
process.load('CommonTools.ParticleFlow.pfPhotons_cff')
process.load('CommonTools.ParticleFlow.pfElectrons_cff')
process.load('CommonTools.ParticleFlow.pfMuons_cff')
process.load('CommonTools.ParticleFlow.TopProjectors.pfNoMuon_cfi')
process.load('CommonTools.ParticleFlow.TopProjectors.pfNoElectron_cfi')
# Loading PFProducer to get the ptrs
from RecoParticleFlow.PFProducer.pfLinker_cff import particleFlowPtrs
process.load('RecoParticleFlow.PFProducer.pfLinker_cff')
# Load PUPPI
from MitProd.TreeFiller.PuppiSetup_cff import puppiSequence
process.load('MitProd.TreeFiller.PuppiSetup_cff')
# recluster fat jets, btag subjets
from MitProd.TreeFiller.utils.makeFatJets import initFatJets,makeFatJets
pfbrecoSequence = initFatJets(process, isData = False)
ak8chsSequence = makeFatJets(process, isData = False, algoLabel = 'AK', jetRadius = 0.8)
ak8puppiSequence = makeFatJets(process, isData = False, algoLabel = 'AK', jetRadius = 0.8, pfCandidates = 'puppiNoLepPlusLep')
ca15chsSequence = makeFatJets(process, isData = False, algoLabel = 'CA', jetRadius = 1.5)
ca15puppiSequence = makeFatJets(process, isData = False, algoLabel = 'CA', jetRadius = 1.5, pfCandidates = 'puppiNoLepPlusLep')
# unload unwanted PAT stuff
delattr(process, 'pfNoTauPFBRECOPFlow')
delattr(process, 'loadRecoTauTagMVAsFromPrepDBPFlow')
pfPileUp.PFCandidates = 'particleFlowPtrs'
pfNoPileUp.bottomCollection = 'particleFlowPtrs'
pfPileUpIso.PFCandidates = 'particleFlowPtrs'
pfNoPileUpIso.bottomCollection='particleFlowPtrs'
pfPileUp.Enable = True
pfPileUp.Vertices = 'goodOfflinePrimaryVertices'
pfPileUp.checkClosestZVertex = cms.bool(False)
# PUPPI jets
from RecoJets.JetProducers.ak4PFJetsPuppi_cfi import ak4PFJetsPuppi
process.load('RecoJets.JetProducers.ak4PFJetsPuppi_cfi')
ak4PFJetsPuppi.src = cms.InputTag('puppiNoLepPlusLep')
ak4PFJetsPuppi.doAreaFastjet = True
# Load FastJet L1 corrections
#from MitProd.TreeFiller.FastJetCorrection_cff import l1FastJetSequence
#process.load('MitProd.TreeFiller.FastJetCorrection_cff')
# Setup jet corrections
process.load('JetMETCorrections.Configuration.JetCorrectionServices_cff')
# Load btagging
from MitProd.TreeFiller.utils.setupBTag import setupBTag
ak4PFBTagSequence = setupBTag(process, 'ak4PFJets', 'AKt4PF')
ak4PFCHSBTagSequence = setupBTag(process, 'ak4PFJetsCHS', 'AKt4PFCHS')
ak4PFPuppiBTagSequence = setupBTag(process, 'ak4PFJetsPuppi', 'AKt4PFPuppi')
# Load HPS tau reconstruction (tau in AOD is older than the latest reco in release)
from RecoTauTag.Configuration.RecoPFTauTag_cff import PFTau
process.load('RecoTauTag.Configuration.RecoPFTauTag_cff')
#> Setup the met filters
from MitProd.TreeFiller.metFilters_cff import metFilters
process.load('MitProd.TreeFiller.metFilters_cff')
#> The bambu reco sequence
recoSequence = cms.Sequence(
electronsStable *
electronMVAValueMapProducer *
# conversionProducer *
goodOfflinePrimaryVertices *
particleFlowPtrs *
pfParticleSelectionSequence *
pfPhotonSequence *
pfMuonSequence *
pfNoMuon *
pfElectronSequence *
pfNoElectron *
PFTau *
puppiSequence *
ak4PFJetsPuppi *
# l1FastJetSequence *
ak4PFBTagSequence *
ak4PFCHSBTagSequence *
ak4PFPuppiBTagSequence *
pfbrecoSequence*
ak8chsSequence*
ak8puppiSequence*
ca15chsSequence*
ca15puppiSequence*
metFilters
)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# G E N S E Q U E N C E
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Import/Load genjets
from RecoJets.Configuration.GenJetParticles_cff import genJetParticles
process.load('RecoJets.Configuration.GenJetParticles_cff')
from RecoJets.Configuration.RecoGenJets_cff import ak4GenJets, ak8GenJets
process.load('RecoJets.Configuration.RecoGenJets_cff')
genSequence = cms.Sequence(
genJetParticles *
ak4GenJets *
ak8GenJets
)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# B A M B U S E Q U E N C E
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# remember the bambu sequence has been imported and loaded already in the beginning
# configure the filler
MitTreeFiller.TreeWriter.fileName = 'bambu-output-file-tmp'
MitTreeFiller.PileupInfo.active = True
MitTreeFiller.MCParticles.active = True
MitTreeFiller.MCEventInfo.active = True
MitTreeFiller.MCVertexes.active = True
# define fill bambu filler sequence
bambuFillerSequence = cms.Sequence(
MitTreeFiller
)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
#
# C M S S W P A T H
#
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
process.path = cms.Path(
recoSequence *
genSequence *
bambuFillerSequence
)
|
cpausmit/Kraken
|
filefi/043/mc.py
|
Python
|
mit
| 9,190
|
from django import template
from dojango.conf import settings # using the app-specific settings
from dojango.util import json_encode as util_json_encode
from dojango.util.config import Config
register = template.Library()
class DojangoParamsNode(template.Node):
'''We set the DOJANGO context with this node!'''
def __init__(self, profile=settings.DOJO_PROFILE, version=settings.DOJO_VERSION):
self.profile = profile
self.version = version
def render(self, context):
config = Config(self.profile, self.version)
if not config.config:
raise template.TemplateSyntaxError, "Could not find the profile '%s' in the DOJANGO_DOJO_PROFILES settings" % (self.profile)
if not config.dojo_base_url:
raise template.TemplateSyntaxError, "The version %s is not supported by the dojango profile '%s'" % (self.version, self.profile)
context['DOJANGO'] = config.get_context_dict()
return ''
@register.tag
def set_dojango_context(parser, token):
'''Sets the DOJANGO context constant in the context.
It is also possible to set the used profile/version with it, e.g.:
{% set_dojango_context "google" "1.1.1" %}'''
tlist = token.split_contents()
# the profile was passed
if len(tlist) == 2:
return DojangoParamsNode(tlist[1][1:-1])
if len(tlist) == 3:
return DojangoParamsNode(tlist[1][1:-1], tlist[2][1:-1])
return DojangoParamsNode()
# TODO: Implement template-tag for layout components to register e.g. data-dojo-type="dijit.layout.TabContainer"
# {% dojo_type "dijit.layout.TabContainer" %}
# This template tag informs the collector about new modules
|
ricard33/dojango
|
dojango/templatetags/dojango_base.py
|
Python
|
bsd-3-clause
| 1,688
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys; sys.path.insert(0, '.')
import dual_net
import strategies
import sgf_wrapper
import numpy as np
import pdb
import random
import features
import symmetries
def initialize_game(sgf_file, load_file, move=1):
with open(sgf_file) as f:
sgf_contents = f.read()
iterator = sgf_wrapper.replay_sgf(sgf_contents)
for i in range(move):
position_w_context = next(iterator)
player = strategies.MCTSPlayerMixin(dual_net.DualNetwork(load_file))
player.initialize_game(position_w_context.position)
return player
def analyze_symmetries(sgf_file, load_file):
with open(sgf_file) as f:
sgf_contents = f.read()
iterator = sgf_wrapper.replay_sgf(sgf_contents)
net = dual_net.DualNetwork(load_file)
for i, pwc in enumerate(iterator):
if i < 200:
continue
feats = features.extract_features(pwc.position)
variants = [symmetries.apply_symmetry_feat(s, feats) for s in symmetries.SYMMETRIES]
values = net.sess.run(
net.inference_output['value_output'],
feed_dict={net.inference_input['pos_tensor']: variants})
mean = np.mean(values)
stdev = np.std(values)
all_vals = sorted(zip(values, symmetries.SYMMETRIES))
print("{:3d} {:.3f} +/- {:.3f} min {:.3f} {} max {:.3f} {}".format(
i, mean, stdev, *all_vals[0], *all_vals[-1]))
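# Illustrative invocation sketch (added for clarity; not part of the original
# script). Paths are hypothetical placeholders:
#
#     player = initialize_game('game.sgf', 'saved_models/000123-model', move=50)
#     analyze_symmetries('game.sgf', 'saved_models/000123-model')
#
# analyze_symmetries prints, per position, the mean and standard deviation of
# the value head over the eight board symmetries, plus the min and max symmetry.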
|
mlperf/training_results_v0.5
|
v0.5.0/intel/intel_minigo_submission_public_tensorflow/code/minigo/tensorflow/minigo/oneoffs/inspect_game.py
|
Python
|
apache-2.0
| 1,967
|
from __future__ import unicode_literals
import json
import logging
import requests
from requests.exceptions import HTTPError
from requests_hawk import HawkAuth
# When releasing a new version to PyPI please also file a bug to request
# that it is uploaded to http://pypi.pub.build.mozilla.org/pub/ too.
# See bug 1191498 for an example of this.
__version__ = '1.8.0'
logger = logging.getLogger(__name__)
class ValidatorMixin(object):
def validate(self, required_properties={}):
"""
Implement job object validation rules. If a rule fails to validate
raise TreeherderClientError
Classes using this mixin should implement a required_properties
        dict. The keys in this dict are the required keys in the structure
contained in self.data. Nested keys can be specified with the '.'
operator. Each key in required_properties should have a dict value
like so:
{
'len':optional, some int, max allowed len of property value
'type':optional, some data type, required type of property
value
'cb': some function reference, called with
list of keys, list of values, required_properties key
}
Example:
self.required_properties = {
'revision_hash':{
'len':50, 'cb':self.validate_existence
},
'project':{
'cb':self.validate_existence
},
'job':{
'type':dict, 'cb':self.validate_existence
},
'job.job_guid':{
'len':50, 'cb':self.validate_existence
}
}
"""
required_properties = required_properties or self.required_properties
for prop in required_properties:
cb = required_properties[prop]['cb']
cb(prop.split('.'), required_properties[prop], prop)
def validate_existence(self, keys, values, property_key):
"""
This required_properties callback method confirms the following.
- The keys provided are found in required_properties
- The type of the values match the specified type
- The values are defined and less than the required len
if a len is specified
If any of these assertions fail TreeherderClientError is raised
"""
# missing keys
missing_keys = []
property_errors = ''
# get value
v = None
for index, k in enumerate(keys):
if index > 0:
try:
v = v[k]
except KeyError:
missing_keys.append(k)
else:
try:
v = self.data[k]
except KeyError:
missing_keys.append(k)
if missing_keys:
property_errors += ('\tThe required Property, {0}, is '
'missing\n'.format('.'.join(missing_keys)))
if not v:
property_errors += '\tValue not defined for {0}\n'.format(
property_key)
elif ('type' in values) and (not isinstance(v, values['type'])):
property_errors += ('\tThe value type, {0}, should be '
'{1}\n'.format(type(v), values['type']))
max_limit = values.get('len', None)
if v and max_limit and (len(v) > max_limit):
property_errors += ('\tValue length exceeds maximum {0} char '
'limit: {1}\n'.format(str(max_limit), str(v)))
if property_errors:
msg = ('{0} structure validation errors detected for property:{1}'
'\n{2}\n{3}\n'.format(
self.__class__.__name__, property_key, property_errors,
json.dumps(self.data)))
raise TreeherderClientError(msg, [])
class TreeherderData(object):
def __init__(self, data={}):
self.data = {}
if data:
self.data = data
else:
self.init_data()
def to_json(self):
return json.dumps(self.data)
class TreeherderJob(TreeherderData, ValidatorMixin):
PARSE_STATUSES = {'pending', 'parsed', 'error'}
def __init__(self, data={}):
super(TreeherderJob, self).__init__(data)
# Provide minimal json structure validation
self.required_properties = {
'revision_hash': {'len': 50, 'cb': self.validate_existence},
'project': {'cb': self.validate_existence},
'job': {'type': dict, 'cb': self.validate_existence},
'job.job_guid': {'len': 50, 'cb': self.validate_existence}
}
def add_revision_hash(self, revision_hash):
self.data['revision_hash'] = revision_hash
def add_coalesced_guid(self, guids):
if guids:
self.data['coalesced'].extend(guids)
def add_project(self, project):
self.data['project'] = project
def add_job_guid(self, guid):
self.data['job']['job_guid'] = guid
def add_job_name(self, name):
self.data['job']['name'] = name
def add_job_symbol(self, symbol):
self.data['job']['job_symbol'] = symbol
def add_group_name(self, name):
self.data['job']['group_name'] = name
def add_group_symbol(self, symbol):
self.data['job']['group_symbol'] = symbol
def add_description(self, desc):
self.data['job']['desc'] = desc
def add_product_name(self, name):
self.data['job']['product_name'] = name
def add_state(self, state):
self.data['job']['state'] = state
def add_result(self, result):
self.data['job']['result'] = result
def add_reason(self, reason):
self.data['job']['reason'] = reason
def add_who(self, who):
self.data['job']['who'] = who
def add_submit_timestamp(self, tstamp):
self.data['job']['submit_timestamp'] = tstamp
def add_start_timestamp(self, tstamp):
self.data['job']['start_timestamp'] = tstamp
def add_end_timestamp(self, tstamp):
self.data['job']['end_timestamp'] = tstamp
def add_machine(self, machine):
self.data['job']['machine'] = machine
def add_build_info(self, os_name, platform, arch):
self.data['job']['build_platform']['os_name'] = os_name
self.data['job']['build_platform']['platform'] = platform
self.data['job']['build_platform']['architecture'] = arch
def add_machine_info(self, os_name, platform, arch):
self.data['job']['machine_platform']['os_name'] = os_name
self.data['job']['machine_platform']['platform'] = platform
self.data['job']['machine_platform']['architecture'] = arch
def add_option_collection(self, option_collection):
if option_collection:
self.data['job']['option_collection'].update(option_collection)
def add_tier(self, tier):
self.data['job']['tier'] = tier
def add_log_reference(self, name, url, parse_status='pending'):
"""
parse_status - one of 'pending', 'parsed' or 'error'
"""
if parse_status not in self.PARSE_STATUSES:
msg = "{0}: Invalid parse_status '{1}': must be one of: {2}".format(
self.__class__.__name__,
parse_status,
', '.join(self.PARSE_STATUSES)
)
raise TreeherderClientError(msg, [])
if name and url:
self.data['job']['log_references'].append(
{'url': url, 'name': name, 'parse_status': parse_status}
)
def add_artifact(self, name, artifact_type, blob):
if blob:
self.data['job']['artifacts'].append({
'name': name,
'type': artifact_type,
'blob': blob,
'job_guid': self.data['job']['job_guid']
})
def init_data(self):
self.data = {
'revision_hash': '',
'project': '',
'job': {
# Stored in project_jobs_1.job.job_guid
'job_guid': '',
# Stored in treeherder_reference_1.job_type.name
'name': '',
# Stored in treeherder_reference_1.job_type.name
'desc': '',
                # Stored symbol representing the job in the UI
# Stored in treeherder_reference_1.job_type.symbol
'job_symbol': '',
# human readable group name (can be null)
# Stored in treeherder_reference_1.job_group.name
'group_name': '',
# Stored symbol representing the job group (can be null)
# Stored in treeherder_reference_1.job_group.symbol
'group_symbol': '',
# Stored in treeherder_reference_1.product
'product_name': '',
# Stored in project_jobs_1.job.state
'state': '',
# Stored in project_jobs_1.job.result
'result': '',
# Stored in project_jobs_1.job.reason
'reason': '',
# Stored in project_jobs_1.job.who
'who': '',
# Stored in project_jobs_1.job.submit_timestamp
'submit_timestamp': '',
# Stored in project_jobs_1.job.start_timestamp
'start_timestamp': '',
# Stored in project_jobs_1.job.end_timestamp
'end_timestamp': '',
# Stored in treeherder_reference_1.machine.name
'machine': '',
# Stored in
# treeherder_reference_1.build_platform.os_name,
# treeherder_reference_1.build_platform.platform,
# treeherder_reference_1.build_platform.architecture,
'build_platform': {
'os_name': '', 'platform': '', 'architecture': ''},
# Stored in:
# treeherder_reference_1.machine_platform.os_name,
# treeherder_reference_1.machine_platform.platform,
# treeherder_reference_1.machine_platform.architecture,
'machine_platform': {
'os_name': '', 'platform': '', 'architecture': ''},
# Stored in treeherder_reference_1.option_collection and
# treeherder_reference_1.option
# Ex: 'debug | pgo | asan | opt': True
'option_collection': {},
# Stored in project_jobs_1.job_log_url
# Example:
# log_references: [
# { url: 'http://ftp.mozilla.org/mozilla.org/firefox.gz',
# name: 'unittest' },
'log_references': [],
# Stored in
# project_jobs_1.job_artifact.name
# project_jobs_1.job_artifact.type
# project_jobs_1.job_artifact.blob
'artifacts': []
},
            # List of job_guids that were coalesced to this job
# Stored in project_jobs_1.job.coalesced_job_guid
# Where the value of coalesced_job_guid is set to job_guid
# for the list of job_guids provided in coalesced
'coalesced': []
}
class TreeherderRevision(TreeherderData, ValidatorMixin):
"""
Supports building a revision structure that is contained in
TreeherderResultSet.
"""
def __init__(self, data={}):
super(TreeherderRevision, self).__init__(data)
# Provide minimal json structure validation
self.required_properties = {
'revision': {'len': 50, 'cb': self.validate_existence},
'repository': {'cb': self.validate_existence},
}
def init_data(self):
self.data = {
# Stored in project_jobs_1.revision.author
'author': '',
# Stored in project_jobs_1.revision.comments
'comment': '',
# Stored in treeherder_reference_1.repository.name
'repository': '',
# Stored in project_jobs_1.revision.revision
'revision': '',
}
def add_author(self, author):
self.data['author'] = author
def add_comment(self, comment):
self.data['comment'] = comment
def add_repository(self, repository):
self.data['repository'] = repository
def add_revision(self, revision):
self.data['revision'] = revision
class TreeherderResultSet(TreeherderData, ValidatorMixin):
"""
Supports building a treeherder result set
"""
def __init__(self, data={}):
super(TreeherderResultSet, self).__init__(data)
self.required_properties = {
'revision_hash': {'len': 50, 'cb': self.validate_existence},
'revisions': {'type': list, 'cb': self.validate_existence},
'author': {'len': 150, 'cb': self.validate_existence}
}
def init_data(self):
self.data = {
# Stored in project_jobs_1.result_set.push_timestamp
'push_timestamp': None,
# Stored in project_jobs_1.result_set.revision_hash
'revision_hash': '',
# Stored in project_jobs_1.result_set.author
'author': '',
# Stored in project_jobs_1.revision, new row per revision
'revisions': [],
# TODO: add type column to resultset in treeherder-service
'type': '',
}
def add_push_timestamp(self, push_timestamp):
self.data['push_timestamp'] = push_timestamp
def add_revision_hash(self, revision_hash):
self.data['revision_hash'] = revision_hash
def add_author(self, author):
self.data['author'] = author
def add_revisions(self, revisions):
if revisions:
self.data['revisions'] = revisions
def add_revision(self, revision):
if revision:
revision.validate()
self.data['revisions'].append(revision.data)
def add_type(self, resultset_type):
self.data['type'] = resultset_type
def get_revision(self, data={}):
return TreeherderRevision(data)
class TreeherderArtifact(TreeherderData, ValidatorMixin):
"""
Supports building a treeherder job artifact
"""
def __init__(self, data={}):
super(TreeherderArtifact, self).__init__(data)
# Provide minimal json structure validation
self.required_properties = {
'blob': {'cb': self.validate_existence},
'type': {'cb': self.validate_existence},
'name': {'cb': self.validate_existence},
'job_guid': {'cb': self.validate_existence}
}
def init_data(self):
self.data = {
# Stored in project_jobs_1.artifact.blob
'blob': '',
# Stored in project_jobs_1.artifact.type
'type': '',
# Stored in project_jobs_1.artifact.name
'name': '',
# Stored in project_jobs_1.artifact.job_guid
'job_guid': None
}
def add_blob(self, blob):
self.data['blob'] = blob
def add_type(self, type):
self.data['type'] = type
def add_name(self, name):
self.data['name'] = name
def add_job_guid(self, job_guid):
self.data['job_guid'] = job_guid
class TreeherderCollection(object):
"""
Base class for treeherder data collections
"""
def __init__(self, endpoint_base, data=[]):
self.data = []
self.endpoint_base = endpoint_base
if data:
self.data = data
def get_collection_data(self):
"""
Build data structure containing the data attribute only for
each item in the collection
"""
data_struct = []
for datum_instance in self.data:
data_struct.append(datum_instance.data)
return data_struct
def to_json(self):
"""
Convert list of data objects to json
"""
return json.dumps(self.get_collection_data())
def add(self, datum_instance):
"""
Add a data structure class instance to data list
"""
self.data.append(datum_instance)
def validate(self):
"""
validate the data structure class
"""
for d in self.data:
d.validate()
def get_chunks(self, chunk_size):
"""
Return a generator of new collections broken into chunks of size ``chunk_size``.
Each chunk will be a ``TreeherderCollection`` of the same
type as the original with a max of ``chunk_size`` count of
``TreeherderData`` objects.
Each collection must then be POSTed individually.
"""
for i in range(0, len(self.data), chunk_size):
# we must copy not only the data chunk,
# but also the endpoint_base or any other field of the
# collection. In the case of a TreeherderJobCollection,
# this is determined in the constructor.
chunk = self.__class__(self.data[i:i + chunk_size])
chunk.endpoint_base = self.endpoint_base
yield chunk
class TreeherderJobCollection(TreeherderCollection):
"""
Collection of job objects
"""
def __init__(self, data=[]):
super(TreeherderJobCollection, self).__init__('jobs', data)
def get_job(self, data={}):
return TreeherderJob(data)
class TreeherderResultSetCollection(TreeherderCollection):
"""
Collection of result set objects
"""
def __init__(self, data=[]):
super(TreeherderResultSetCollection, self).__init__('resultset', data)
def get_resultset(self, data={}):
return TreeherderResultSet(data)
class TreeherderArtifactCollection(TreeherderCollection):
"""
Collection of job artifacts
"""
def __init__(self, data=[]):
super(TreeherderArtifactCollection, self).__init__('artifact', data)
def get_artifact(self, data={}):
return TreeherderArtifact(data)
class TreeherderClient(object):
"""
Treeherder client class
"""
PROTOCOLS = {'http', 'https'} # supported protocols
API_VERSION = '1.0'
REQUEST_HEADERS = {
'Accept': 'application/json; version={}'.format(API_VERSION),
'User-Agent': 'treeherder-pyclient/{}'.format(__version__),
}
UPDATE_ENDPOINT = 'job-log-url/{}/update_parse_status'
RESULTSET_ENDPOINT = 'resultset'
JOBS_ENDPOINT = 'jobs'
JOB_LOG_URL_ENDPOINT = 'job-log-url'
ARTIFACTS_ENDPOINT = 'artifact'
OPTION_COLLECTION_HASH_ENDPOINT = 'optioncollectionhash'
REPOSITORY_ENDPOINT = 'repository'
JOBGROUP_ENDPOINT = 'jobgroup'
JOBTYPE_ENDPOINT = 'jobtype'
PRODUCT_ENDPOINT = 'product'
MACHINE_ENDPOINT = 'machine'
MACHINE_PLATFORM_ENDPOINT = 'machineplatform'
FAILURE_CLASSIFICATION_ENDPOINT = 'failureclassification'
BUILD_PLATFORM_ENDPOINT = 'buildplatform'
MAX_COUNT = 2000
def __init__(
self, protocol='https', host='treeherder.mozilla.org',
timeout=120, auth=None, client_id=None, secret=None):
"""
:param protocol: protocol to use (http or https)
:param host: treeherder host to post to
:param timeout: maximum time it can take for a request to complete
:param auth: an instance of HawkAuth/TreeherderAuth holding the auth credentials (deprecated)
:param client_id: the Treeherder API credentials client ID
:param secret: the Treeherder API credentials secret
"""
self.host = host
if protocol not in self.PROTOCOLS:
raise AssertionError('Protocol "%s" not supported; please use one '
'of %s' % (protocol,
', '.join(self.PROTOCOLS)))
self.protocol = protocol
self.timeout = timeout
if auth:
logger.warning('The `auth` param is deprecated. Pass the new Hawk credentials '
'as `client_id` and `secret` instead (see bug 1212936).')
self.auth = auth
elif client_id and secret:
self.auth = HawkAuth(credentials={
'id': client_id,
'key': secret,
'algorithm': 'sha256'
})
else:
self.auth = None
def _get_project_uri(self, project, endpoint):
return '{0}://{1}/api/project/{2}/{3}/'.format(
self.protocol, self.host, project, endpoint
)
def _get_uri(self, endpoint):
uri = '{0}://{1}/api/{2}'.format(
self.protocol, self.host, endpoint)
return uri
def _get_json_list(self, endpoint, timeout, project=None, **params):
if "count" in params and (params["count"] is None or params["count"] > self.MAX_COUNT):
total = None if params["count"] is None else params["count"]
count = self.MAX_COUNT
offset = 0
data = []
while True:
params["count"] = count
params["offset"] = offset
new_data = self._get_json(endpoint, timeout, project=project, **params)["results"]
data += new_data
if len(new_data) < self.MAX_COUNT:
return data
offset += count
if total is not None:
count = min(total-offset, self.MAX_COUNT)
else:
return self._get_json(endpoint, timeout, project=project, **params)["results"]
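# Note on the pagination above: when ``count`` is None (meaning "everything")
# or larger than MAX_COUNT, the method pages through the endpoint MAX_COUNT
# items at a time, advancing ``offset`` until a page comes back short, and
# concatenates the ``results`` lists; otherwise it falls through to a single
# request and returns that page's ``results`` directly.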
def _get_json(self, endpoint, timeout, project=None, **params):
if timeout is None:
timeout = self.timeout
if project is None:
uri = self._get_uri(endpoint)
else:
uri = self._get_project_uri(project, endpoint)
resp = requests.get(uri, timeout=timeout, params=params,
headers=self.REQUEST_HEADERS)
try:
resp.raise_for_status()
except HTTPError as e:
response = e.response
logger.error("Error submitting data to %s" % response.request.url)
logger.error("Request headers: %s" % response.request.headers)
logger.error("Response headers: %s" % response.headers)
logger.error("Response body: %s" % response.content)
raise
return resp.json()
def _post_json(self, project, endpoint, data,
timeout, auth):
if timeout is None:
timeout = self.timeout
auth = auth or self.auth
uri = self._get_project_uri(project, endpoint)
resp = requests.post(uri, json=data,
headers=self.REQUEST_HEADERS,
timeout=timeout, auth=auth)
try:
resp.raise_for_status()
except HTTPError as e:
response = e.response
logger.error("Error submitting data to %s" % response.request.url)
logger.error("Request headers: %s" % response.request.headers)
logger.error("Request body: %s" % response.request.body)
logger.error("Response headers: %s" % response.headers)
logger.error("Response body: %s" % response.content)
raise
def get_option_collection_hash(self):
"""
Gets option collection hash, a mapping of hash values to build properties
Returns a dictionary with the following structure:
{
hashkey1: [ { key: value }, { key: value }, ... ],
hashkey2: [ { key: value }, { key: value }, ... ],
...
}
"""
resp = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT, None)
ret = {}
for result in resp:
ret[result['option_collection_hash']] = result['options']
return ret
def get_repositories(self):
"""
Gets a list of valid treeherder repositories.
Returns a list with the following structure:
[
{name: repository-name, dvcs_type: dvcs-type, ...},
...
]
"""
return self._get_json(self.REPOSITORY_ENDPOINT, None)
def get_products(self):
"""
Get a list of treeherder products.
Returns a list of dictionaries with the following properties:
{
id: <id>,
name: <name>,
description: <description>,
active_status: <active_status>
}
"""
return self._get_json(self.PRODUCT_ENDPOINT, None)
def get_job_groups(self):
"""
Gets a list of job groups stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>,
symbol: <symbol>,
name: <name>
...
}
"""
return self._get_json(self.JOBGROUP_ENDPOINT, None)
def get_failure_classifications(self):
"""
Gets a list of failure classification types stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>,
name: <name>,
description: <description>,
active_status: <active_status>
}
"""
return self._get_json(self.FAILURE_CLASSIFICATION_ENDPOINT, None)
def get_build_platforms(self):
"""
Gets a list of build platforms stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>,
os_name: <os_name>,
platform: <platform>,
architecture: <architecture>
active_status: <active_status>
}
"""
return self._get_json(self.BUILD_PLATFORM_ENDPOINT, None)
def get_job_types(self):
"""
Gets a list of job types stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>
job_group: <job_group_id>
symbol: <symbol>
name: <name>
...
}
"""
return self._get_json(self.JOBTYPE_ENDPOINT, None)
def get_machines(self):
"""
Gets a list of machines stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>,
name: <name>,
first_timestamp: <first_timestamp>,
last_timestamp: <last_timestamp>,
active_status: <active_status>
}
"""
return self._get_json(self.MACHINE_ENDPOINT, None)
def get_machine_platforms(self):
"""
Gets a list of machine platforms stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>
os_name: <os_name>
platform: <platform>,
architecture: <architecture>,
active_status: <active_status>
}
"""
return self._get_json(self.MACHINE_PLATFORM_ENDPOINT, None)
def get_resultsets(self, project, **params):
"""
Gets resultsets from project, filtered by parameters
By default this method will just return the latest 10 result sets (if they exist)
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.RESULTSET_ENDPOINT, None, project, **params)
def get_jobs(self, project, **params):
"""
Gets jobs from project, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.JOBS_ENDPOINT, None, project, **params)
def get_job_log_url(self, project, **params):
"""
Gets job log url, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json(self.JOB_LOG_URL_ENDPOINT, None, project,
**params)
def get_artifacts(self, project, **params):
"""
Gets artifact list from project, filtered by parameters
:param project: project (repository name) to query for
:param params: keyword arguments to filter results
"""
response = self._get_json(self.ARTIFACTS_ENDPOINT, None, project, **params)
return response
def post_collection(self, project, collection_inst, timeout=None, auth=None):
"""
Sends a treeherder collection to the server
:param project: project to submit data for
:param collection_inst: a TreeherderCollection instance
:param timeout: custom timeout in seconds (defaults to class timeout)
:param auth: an instance of HawkAuth/TreeherderAuth (deprecated)
"""
if auth:
logger.warning('The `auth` param is deprecated. Pass the new Hawk credentials '
'to the TreeherderClient constructor instead (see bug 1212936).')
else:
auth = self.auth
if not isinstance(collection_inst, TreeherderCollection):
msg = '{0} should be an instance of TreeherderCollection'.format(
type(collection_inst))
raise TreeherderClientError(msg, [])
if not collection_inst.endpoint_base:
msg = "{0}: collection endpoint_base property not defined".format(
self.__class__.__name__)
raise TreeherderClientError(msg, [])
if not collection_inst.data:
msg = "{0}: collection data property not defined".format(
self.__class__.__name__)
raise TreeherderClientError(msg, [])
collection_inst.validate()
self._post_json(project, collection_inst.endpoint_base,
collection_inst.get_collection_data(),
timeout, auth)
def update_parse_status(self, project, job_log_url_id,
parse_status, timeout=None, auth=None):
"""
Updates the parsing status of a treeherder job
:param project: project to submit data for
:param job_log_url_id: id of the job log url record to update
:param parse_status: string representing the parse status of a treeherder job
:param timeout: custom timeout in seconds (defaults to class timeout)
:param auth: an instance of HawkAuth/TreeherderAuth (deprecated)
"""
if auth:
logger.warning('The `auth` param is deprecated. Pass the new Hawk credentials '
'to the TreeherderClient constructor instead (see bug 1212936).')
else:
auth = self.auth
self._post_json(project, self.UPDATE_ENDPOINT.format(job_log_url_id),
{'parse_status': parse_status},
timeout, auth)
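# --- Illustrative sketch, not part of the upstream client library ---
# Minimal end-to-end usage of the client above, assuming valid Hawk credentials;
# the credentials, project name, revision hash and author are placeholders, and a
# real submission would also attach revision entries via ``add_revision()`` before
# validation.
def _example_submit_resultset():
    client = TreeherderClient(client_id='my-client-id', secret='my-secret')
    collection = TreeherderResultSetCollection()
    trs = collection.get_resultset()
    trs.add_push_timestamp(1378293517)
    trs.add_revision_hash('45f8637cb9f78f19cb8463ff174e81756805d8cf')
    trs.add_author('foo@bar.com')
    collection.add(trs)
    # validation and the actual POST both happen inside post_collection()
    client.post_collection('mozilla-central', collection)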
class TreeherderClientError(Exception):
def __init__(self, msg, Errors):
Exception.__init__(self, msg)
self.Errors = Errors
| gbrmachado/treeherder | treeherder/client/thclient/client.py | Python | mpl-2.0 | 31,703 |
"""
Test some SBModule and SBSection APIs.
"""
import os, time
import re
import unittest2
import lldb
from lldbtest import *
from lldbutil import symbol_type_to_str
class ModuleAndSectionAPIsTestCase(TestBase):
mydir = os.path.join("python_api", "module_section")
@python_api_test
def test_module_and_section(self):
"""Test module and section APIs."""
self.buildDefault()
self.module_and_section()
@python_api_test
def test_module_and_section_boundary_condition(self):
"""Test module and section APIs by passing None when it expects a Python string."""
self.buildDefault()
self.module_and_section_boundary_condition()
@python_api_test
def test_module_compile_unit_iter(self):
"""Test module's compile unit iterator APIs."""
self.buildDefault()
self.module_compile_unit_iter()
def module_and_section(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.assertTrue(target.GetNumModules() > 0)
# Hide stdout if not running with '-t' option.
if not self.TraceOn():
self.HideStdout()
print "Number of modules for the target: %d" % target.GetNumModules()
for module in target.module_iter():
print module
# Get the executable module at index 0.
exe_module = target.GetModuleAtIndex(0)
print "Exe module: %s" % str(exe_module)
print "Number of sections: %d" % exe_module.GetNumSections()
INDENT = ' ' * 4
INDENT2 = INDENT * 2
for sec in exe_module.section_iter():
print sec
print INDENT + "Number of subsections: %d" % sec.GetNumSubSections()
if sec.GetNumSubSections() == 0:
for sym in exe_module.symbol_in_section_iter(sec):
print INDENT + str(sym)
print INDENT + "symbol type: %s" % symbol_type_to_str(sym.GetType())
else:
for subsec in sec:
print INDENT + str(subsec)
# Now print the symbols belonging to the subsection....
for sym in exe_module.symbol_in_section_iter(subsec):
print INDENT2 + str(sym)
print INDENT2 + "symbol type: %s" % symbol_type_to_str(sym.GetType())
def module_and_section_boundary_condition(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.assertTrue(target.GetNumModules() > 0)
# Hide stdout if not running with '-t' option.
if not self.TraceOn():
self.HideStdout()
print "Number of modules for the target: %d" % target.GetNumModules()
for module in target.module_iter():
print module
# Get the executable module at index 0.
exe_module = target.GetModuleAtIndex(0)
print "Exe module: %s" % str(exe_module)
print "Number of sections: %d" % exe_module.GetNumSections()
# Boundary condition testings. Should not crash lldb!
exe_module.FindFirstType(None)
exe_module.FindTypes(None)
exe_module.FindGlobalVariables(target, None, 1)
exe_module.FindFunctions(None, 0)
exe_module.FindSection(None)
# Get the section at index 1.
if exe_module.GetNumSections() > 1:
sec1 = exe_module.GetSectionAtIndex(1)
print sec1
else:
sec1 = None
if sec1:
sec1.FindSubSection(None)
def module_compile_unit_iter(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.assertTrue(target.GetNumModules() > 0)
# Hide stdout if not running with '-t' option.
if not self.TraceOn():
self.HideStdout()
print "Number of modules for the target: %d" % target.GetNumModules()
for module in target.module_iter():
print module
# Get the executable module at index 0.
exe_module = target.GetModuleAtIndex(0)
print "Exe module: %s" % str(exe_module)
print "Number of compile units: %d" % exe_module.GetNumCompileUnits()
INDENT = ' ' * 4
INDENT2 = INDENT * 2
for cu in exe_module.compile_unit_iter():
print cu
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| s20121035/rk3288_android5.1_repo | external/lldb/test/python_api/module_section/TestModuleAndSection.py | Python | gpl-3.0 | 4,688 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Package containing core luigi functionality.
"""
from luigi import task
from luigi.task import Task, Config, ExternalTask, WrapperTask, namespace
from luigi import target
from luigi.target import Target
from luigi import file # wtf @ naming
from luigi.file import File, LocalTarget
from luigi import rpc
from luigi.rpc import RemoteScheduler, RPCError
from luigi import parameter
from luigi.parameter import (
Parameter,
DateParameter, MonthParameter, YearParameter, DateHourParameter, DateMinuteParameter, DateSecondParameter,
DateIntervalParameter, TimeDeltaParameter,
IntParameter, FloatParameter, BooleanParameter, BoolParameter,
TaskParameter, EnumParameter, DictParameter, ListParameter, TupleParameter
)
from luigi import configuration
from luigi import interface
from luigi.interface import run, build
from luigi import event
from luigi.event import Event
from .tools import range # just makes the tool classes available from command line
__all__ = [
'task', 'Task', 'Config', 'ExternalTask', 'WrapperTask', 'namespace',
'target', 'Target', 'File', 'LocalTarget', 'rpc', 'RemoteScheduler',
'RPCError', 'parameter', 'Parameter', 'DateParameter', 'MonthParameter',
'YearParameter', 'DateHourParameter', 'DateMinuteParameter', 'DateSecondParameter', 'range',
'DateIntervalParameter', 'TimeDeltaParameter', 'IntParameter',
'FloatParameter', 'BooleanParameter', 'BoolParameter', 'TaskParameter',
'ListParameter', 'TupleParameter', 'EnumParameter', 'DictParameter',
'configuration', 'interface', 'file', 'run', 'build', 'event', 'Event'
]
| javrasya/luigi | luigi/__init__.py | Python | apache-2.0 | 2,212 |
# Hengtou (恒投) trading client: file-based interface
# 1. Supports reading and writing csv/dbf files
# 2. Uses TDX (通达信) as the market data source
# 华富资产 李来佳 28888502
import os
import sys
import copy
import csv
import dbf
import traceback
import pandas as pd
from typing import Any, Dict, List
from datetime import datetime, timedelta
from time import sleep
from functools import lru_cache
from collections import OrderedDict
from multiprocessing.dummy import Pool
from threading import Thread
from pytdx.hq import TdxHq_API
from pytdx.config.hosts import hq_hosts
from pytdx.params import TDXParams
from vnpy.event import EventEngine
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.constant import (
Exchange,
Product,
Direction,
OrderType,
Status,
Offset,
Interval
)
from vnpy.trader.gateway import BaseGateway, LocalOrderManager
from vnpy.trader.object import (
BarData,
CancelRequest,
OrderRequest,
SubscribeRequest,
TickData,
ContractData,
OrderData,
TradeData,
PositionData,
AccountData,
HistoryRequest
)
from vnpy.trader.utility import get_folder_path, print_dict, extract_vt_symbol, get_stock_exchange, append_data
from vnpy.data.tdx.tdx_common import get_stock_type_sz, get_stock_type_sh
# TDX stock market-data helpers
from vnpy.data.tdx.tdx_common import get_cache_config, get_tdx_market_code
# symbol <=> Chinese name
symbol_name_map: Dict[str, str] = {}
# symbol <=> exchange
symbol_exchange_map: Dict[str, Exchange] = {}
# Timestamp alignment: an 8-hour (UTC+8) offset expressed in nanoseconds
TIME_GAP = 8 * 60 * 60 * 1000000000
INTERVAL_VT2TQ = {
Interval.MINUTE: 60,
Interval.HOUR: 60 * 60,
Interval.DAILY: 60 * 60 * 24,
}
# Function <-> file-name prefix mapping
PB_FILE_NAMES = {
'send_order': 'XHPT_WT', # generic interface: order placement
'cancel_order': 'XHPT_CD', # generic interface: order cancellation
'update_orders': 'XHPT_WTCX', # generic interface: order status query
'update_trades': 'XHPT_CJCX', # generic interface: trade query
'positions': 'CC_STOCK_', # position details
'orders': 'WT_STOCK_', # today's order details
'trades': 'CJ_STOCK_', # today's trade details
'accounts': 'ZJ_STOCK_' # account funds
}
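# Note: as the query/order methods further below show, each prefix is combined with
# the trading date (YYYYMMDD) and the configured file type, e.g. in dbf mode the
# order-status query file for 2022-08-30 would be 'XHPT_WTCX20220830.dbf' and the
# position export would be 'CC_STOCK_20220830.dbf' (the date here is only an example).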
SEND_ORDER_FIELDS = OrderedDict({
"CPBH": "C32", # 产品代码/基金代码 <-- 输入参数 -->
"ZCDYBH": "C16", # 单元编号/组合编号
"ZHBH": "C16", # 组合编号
"GDDM": "C20", # 股东代码
"JYSC": "C3", # 交易市场
"ZQDM": "C16", # 证券代码
"WTFX": "C4", # 委托方向
"WTJGLX": "C1", # 委托价格类型
"WTJG": "N11.4", # 委托价格
"WTSL": "N12", # 委托数量
"WBZDYXH": "N9", # 第三方系统自定义号
"WTXH": "N8", # 委托序号 <-- 输出参数 -->
"WTSBDM": "N8", # 委托失败代码
"SBYY": "C254", # 失败原因
"CLBZ": "C1", # 处理标志 <-- 内部自用字段 -->
"BYZD": "C2", # 备用字段
"WTJE": "N16.2", # 委托金额 <-- 扩充参数 -->
"TSBS": "C64", # 特殊标识
"YWBS": "C2", # 业务标识
})
# Cancel-order file field layout (csv/dbf)
CANCEL_ORDER_FIELDS = OrderedDict({
"WTXH": "N8", # 委托序号
"JYSC": "C3", # 交易市场
"ZQDM": "C16", # 证券代码
"CDCGBZ": "C1", # 撤单成功标志
"SBYY": "C254", # 失败原因
"CLBZ": "C1", # 处理标志
"BYZD": "C2", # 备用字段
"BYZD2": "C16", # 备用字段2
})
# Generic interface: order status query (XHPT_WTCX) field layout
UPDATE_ORDER_FIELDS = OrderedDict({
"WTRQ": "N8", # 委托日期
"WTSJ": "N6", # 委托时间
"WTXH": "N8", # 委托序号
"WBZDYXH": "N9", # 第三方系统自定义号
"CPBH": "C32", # 产品(账户)编号
"ZCDYBH": "C16", # 资产单元编号
"ZHBH": "C16", # 组合编号
"GDDM": "C20", # 股东代码
"JYSC": "C3", # 交易市场
"ZQDM": "C16", # 证券代码
"WTFX": "C4", # 委托方向
"WTJGLX": "C1", # 委托价格类型
"WTJG": "N11.4", # 委托价格
"WTSL": "N12", # 委托数量
"YMDJJE": "N16.2", # 预买冻结金额
"YMSRJE": "N16.2", # 预卖收入金额
"WTZT": "C1", # 委托状态
"WTCCSL": "N12", # 委托撤成数量
"FDYY": "C254", # 废单原因
"JYSSBBH": "C64", # 交易所申报编号
"CLBZ": "C1", # 处理标志
"BYZD": "C2", # 备用字段
"WTJE": "N16.2", # 委托金额
"TSBS": "C64", # 特殊标识
})
# Generic interface: trade query (XHPT_CJCX) field layout
UPDATE_TRADE_FIELDS = OrderedDict({
"CJRQ": "N8", # 成交日期
"CJBH": "C64", # 成交序号
"WTXH": "N8", # 委托序号
"WBZDYXH": "N9", # 第三方系统自定义号
"CPBH": "C32", # 产品(账户)编号
"ZCDYBH": "C16", # 资产单元编号
"ZHBH": "C16", # 组合编号
"GDDM": "C20", # 股东代码
"JYSC": "C3", # 交易市场
"ZQDM": "C16", # 证券代码
"WTFX": "C4", # 委托方向
"CJSL": "N16", # 成交数量
"CJJG": "N11.4", # 成交价格
"CJJE": "N16.2", # 成交金额
"ZFY": "N16.2", # 总费用
"CJSJ": "N6", # 成交时间
"CLBZ": "C1", # 处理标志
"BYZD": "C2", # 备用字段
"TSBS": "C64", # 特殊标识
"JYSCJBH": "C64", # 成交编号
})
# PB exchange id <=> Exchange
EXCHANGE_PB2VT: Dict[str, Exchange] = {
"1": Exchange.SSE,
"2": Exchange.SZSE,
"3": Exchange.SHFE,
"4": Exchange.CZCE,
"7": Exchange.CFFEX,
"9": Exchange.DCE,
"k": Exchange.INE
}
EXCHANGE_VT2PB: Dict[Exchange, str] = {v: k for k, v in EXCHANGE_PB2VT.items()}
EXCHANGE_NAME2VT: Dict[str, Exchange] = {
"上交所A": Exchange.SSE,
"深交所A": Exchange.SZSE
}
# Direction string <=> (Direction, Offset)
DIRECTION_STOCK_PB2VT: Dict[str, Any] = {
"1": (Direction.LONG, Offset.NONE), # buy
"2": (Direction.SHORT, Offset.NONE), # sell
"V": (Direction.LONG, Offset.OPEN), # long, open
"X": (Direction.SHORT, Offset.OPEN), # short, open
"Y": (Direction.LONG, Offset.CLOSE), # long, close
"W": (Direction.SHORT, Offset.CLOSE) # short, close
}
DIRECTION_STOCK_VT2PB: Dict[Any, str] = {v: k for k, v in DIRECTION_STOCK_PB2VT.items()}
DIRECTION_STOCK_NAME2VT: Dict[str, Any] = {
"卖出": Direction.SHORT,
"买入": Direction.LONG,
"债券买入": Direction.LONG,
"债券卖出": Direction.SHORT,
"申购": Direction.LONG
}
DIRECTION_ORDER_PB2VT: Dict[str, Any] = {
"1": Direction.LONG,
"2": Direction.SHORT,
"3": Direction.LONG,
"4": Direction.SHORT
}
# Position direction <=> Direction
POSITION_DIRECTION_PB2VT = {
"1": Direction.LONG,
"2": Direction.SHORT,
}
# Order price types
ORDERTYPE_PB2VT: Dict[str, OrderType] = {
"0": OrderType.LIMIT, # limit order
"a": OrderType.MARKET, # best-five immediate, cancel remainder (SSE market order)
"b": OrderType.MARKET, # best-five immediate, remainder converts to limit (SSE market order)
"A": OrderType.MARKET, # best-five immediate, cancel remainder (SZSE market order)
"C": OrderType.MARKET, # immediate-or-cancel (SZSE market order)
"D": OrderType.MARKET, # counterparty best price (SZSE market order, SSE STAR Market market order)
"E": OrderType.MARKET, # own-side best price (SZSE market order, SSE STAR Market market order)
}
def format_dict(d, dict_define):
"""根据dict格式定义进行value转换"""
for k in dict_define.keys():
# 原值
v = d.get(k, '')
# 目标转换格式
v_format = dict_define.get(k, None)
if v_format is None:
continue
if 'C' in v_format:
str_len = int(v_format.replace('C', ''))
new_v = '{}{}'.format(' ' * (str_len - len(v)), v)
d.update({k: new_v})
continue
elif "N" in v_format:
v_format = v_format.replace('N', '')
if '.' in v_format:
int_len, float_len = v_format.split('.')
int_len = int(int_len)
float_len = int(float_len)
str_v = str(v)
new_v = '{}{}'.format(' ' * (int_len - len(str_v)), str_v)
else:
int_len = int(v_format)
str_v = str(v)
new_v = '{}{}'.format(' ' * (int_len - len(str_v)), str_v)
d.update({k: new_v})
return d
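# --- Illustrative sketch, not part of the original gateway code ---
# Example of the fixed-width padding performed by format_dict(): character fields
# ('C<n>') and numeric fields ('N<int>.<frac>' or 'N<n>') are left-padded with
# spaces to the declared width. The field names come from the layouts above; the
# values are made up.
def _format_dict_example():
    row = {'ZQDM': '600000', 'WTJG': 13.79, 'WTSL': 1000}
    layout = {'ZQDM': 'C16', 'WTJG': 'N11.4', 'WTSL': 'N12'}
    padded = format_dict(row, layout)
    # padded['ZQDM'] == '          600000'  (width 16)
    # padded['WTJG'] == '      13.79'       (width 11)
    # padded['WTSL'] == '        1000'      (width 12)
    return padded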
def get_pb_order_type(exchange, order_type):
"""获取pb的委托类型"""
# 限价单
if order_type == OrderType.LIMIT:
return "0"
# 市价单
if exchange == Exchange.SSE:
return "a"
if exchange == Exchange.SZSE:
return "C"
return "0"
ORDERTYPE_NAME2VT: Dict[str, OrderType] = {
"五档即成剩撤": OrderType.MARKET,
"五档即成剩转": OrderType.MARKET,
"即成剩撤": OrderType.MARKET,
"对手方最优": OrderType.MARKET,
"本方最优": OrderType.MARKET,
"限价单": OrderType.LIMIT,
}
STATUS_NAME2VT: Dict[str, Status] = {
"未报": Status.SUBMITTING,
"待报": Status.SUBMITTING,
"正报": Status.SUBMITTING,
"已报": Status.NOTTRADED,
"废单": Status.REJECTED,
"部成": Status.PARTTRADED,
"已成": Status.ALLTRADED,
"部撤": Status.CANCELLED,
"已撤": Status.CANCELLED,
"待撤": Status.CANCELLING,
"未审批": Status.UNKNOWN,
"审批拒绝": Status.UNKNOWN,
"未审批即撤销": Status.UNKNOWN,
}
STATUS_PB2VT: Dict[str, Status] = {
"1": Status.SUBMITTING,
"2": Status.SUBMITTING,
"3": Status.SUBMITTING,
"4": Status.NOTTRADED,
"5": Status.REJECTED,
"6": Status.PARTTRADED,
"7": Status.ALLTRADED,
"8": Status.CANCELLED,
"9": Status.CANCELLED,
"a": Status.CANCELLING,
"b": Status.UNKNOWN,
"c": Status.UNKNOWN,
"d": Status.UNKNOWN,
}
STOCK_CONFIG_FILE = 'tdx_stock_config.pkb2'
class PbGateway(BaseGateway):
default_setting: Dict[str, Any] = {
"资金账号": "",
"数据目录": "",
"产品编号": "",
"单元编号": "",
"股东代码_沪": "",
"股东代码_深": "",
"文件格式": "dbf",
"导出子目录": "数据导出",
"pb版本": "2018"
}
# Exchanges supported by this gateway
exchanges: List[Exchange] = list(EXCHANGE_VT2PB.keys())
def __init__(self, event_engine: EventEngine, gateway_name='PB'):
""""""
super().__init__(event_engine, gateway_name=gateway_name)
self.connect_time = datetime.now().strftime("%H%M")
self.order_manager = LocalOrderManager(self, self.connect_time, 4)
self.md_api = PbMdApi(self)
self.td_api = PbTdApi(self)
self.tq_api = None
self.tdx_connected = False # connection state of the TDX market-data API
self.file_type = 'dbf'
self.pb_version = '2018'
def connect(self, setting: dict) -> None:
""""""
userid = setting["资金账号"]
csv_folder = setting["数据目录"]
product_id = setting["产品编号"]
unit_id = setting["单元编号"]
holder_ids = {
Exchange.SSE: setting["股东代码_沪"],
Exchange.SZSE: setting["股东代码_深"]
}
self.file_type = setting.get('文件格式', 'dbf')
self.pb_version = setting.get('pb版本', '2018')
# 2019 version: the export directory automatically gains a '数据导出' (data export) subfolder
# 2018 version: the export directory has no automatically added subfolder
export_sub_folder = setting.get('导出子目录', '数据导出')
if len(export_sub_folder) > 0:
# 2019 edition
export_folder = os.path.abspath(os.path.join(csv_folder, export_sub_folder))
else:
# 2018 edition
export_folder = csv_folder
self.md_api.connect()
self.td_api.connect(user_id=userid,
order_folder=csv_folder,
account_folder=export_folder,
product_id=product_id,
unit_id=unit_id,
holder_ids=holder_ids)
# self.tq_api = TqMdApi(self)
# self.tq_api.connect()
self.init_query()
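# Illustrative example of the ``setting`` dict expected by connect(); every value
# is a placeholder (account, paths and codes are made up) and the keys mirror
# default_setting above:
#     {
#         "资金账号": "10001234",
#         "数据目录": "C:/PB/orders",
#         "产品编号": "PROD01",
#         "单元编号": "1",
#         "股东代码_沪": "A123456789",
#         "股东代码_深": "0123456789",
#         "文件格式": "dbf",
#         "导出子目录": "数据导出",
#         "pb版本": "2019"
#     }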
def close(self) -> None:
""""""
self.md_api.close()
self.td_api.close()
def subscribe(self, req: SubscribeRequest) -> None:
""""""
if self.tq_api and self.tq_api.is_connected:
self.tq_api.subscribe(req)
else:
self.md_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
k = f'{req.vt_symbol}_{req.direction.value}_{req.offset.value}'
if len(self.rejected_orders.get(k, [])) > 5:
self.write_error(f'该合约相同请求已经被拒单五次,不能再发单:{print_dict(req.__dict__)}')
return ""
return self.td_api.send_order(req)
def cancel_order(self, req: CancelRequest) -> None:
""""""
self.td_api.cancel_order(req)
def query_account(self) -> None:
""""""
self.td_api.query_account()
def query_position(self) -> None:
""""""
self.td_api.query_position()
def query_orders(self) -> None:
self.td_api.query_orders()
def query_trades(self) -> None:
self.td_api.query_trades()
def process_timer_event(self, event) -> None:
""""""
self.count += 1
if self.count < 5:
return
self.count = 0
func = self.query_functions.pop(0)
func()
self.query_functions.append(func)
def init_query(self) -> None:
""""""
self.count = 0
self.query_functions = [self.query_account, self.query_position]
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
class PbMdApi(object):
def __init__(self, gateway: PbGateway):
""""""
super().__init__()
self.gateway: PbGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.connect_status: bool = False
self.login_status: bool = False
self.req_interval = 0.5 # interval between requests: 500 ms
self.req_id = 0 # request sequence number
self.connection_status = False # connection state
self.symbol_exchange_dict = {} # tdx symbol => vn exchange
self.symbol_market_dict = {} # tdx symbol => tdx market id
self.symbol_vn_dict = {} # tdx symbol => vtSymbol
self.symbol_tick_dict = {} # tdx symbol => most recent tick
self.registed_symbol_set = set()
self.config = get_cache_config(STOCK_CONFIG_FILE)
self.symbol_dict = self.config.get('symbol_dict', {})
self.cache_time = self.config.get('cache_time', datetime.now() - timedelta(days=7))
self.commission_dict = {}
self.contract_dict = {}
# self.queue = Queue() # 请求队列
self.pool = None # 线程池
# self.req_thread = None # 定时器线程
# copy.copy(hq_hosts)
self.ip_list = [{'ip': "180.153.18.170", 'port': 7709},
{'ip': "180.153.18.171", 'port': 7709},
{'ip': "180.153.18.172", 'port': 80},
{'ip': "202.108.253.130", 'port': 7709},
{'ip': "202.108.253.131", 'port': 7709},
{'ip': "202.108.253.139", 'port': 80},
{'ip': "60.191.117.167", 'port': 7709},
{'ip': "115.238.56.198", 'port': 7709},
{'ip': "218.75.126.9", 'port': 7709},
{'ip': "115.238.90.165", 'port': 7709},
{'ip': "124.160.88.183", 'port': 7709},
{'ip': "60.12.136.250", 'port': 7709},
{'ip': "218.108.98.244", 'port': 7709},
# {'ip': "218.108.47.69", 'port': 7709},
{'ip': "114.80.63.12", 'port': 7709},
{'ip': "114.80.63.35", 'port': 7709},
{'ip': "180.153.39.51", 'port': 7709},
# {'ip': '14.215.128.18', 'port': 7709},
# {'ip': '59.173.18.140', 'port': 7709}
]
self.best_ip = {'ip': None, 'port': None}
self.api_dict = {} # API 的连接会话对象字典
self.last_tick_dt = {} # 记录该会话对象的最后一个tick时间
self.security_count = 50000
# 股票code name列表
self.stock_codelist = None
def ping(self, ip, port=7709):
"""
ping行情服务器
:param ip:
:param port:
:param type_:
:return:
"""
apix = TdxHq_API()
__time1 = datetime.now()
try:
with apix.connect(ip, port):
if apix.get_security_count(TDXParams.MARKET_SZ) > 9000: # 0:深市 股票数量 = 9260
_timestamp = datetime.now() - __time1
self.gateway.write_log('服务器{}:{},耗时:{}'.format(ip, port, _timestamp))
return _timestamp
else:
self.gateway.write_log(u'该服务器IP {}无响应'.format(ip))
return timedelta(9, 9, 0)
except:
self.gateway.write_error(u'tdx ping服务器,异常的响应{}'.format(ip))
return timedelta(9, 9, 0)
def select_best_ip(self):
"""
选择行情服务器
:return:
"""
self.gateway.write_log(u'选择通达信股票行情服务器')
data_future = [self.ping(x.get('ip'), x.get('port')) for x in self.ip_list]
best_future_ip = self.ip_list[data_future.index(min(data_future))]
self.gateway.write_log(u'选取 {}:{}'.format(
best_future_ip['ip'], best_future_ip['port']))
return best_future_ip
def connect(self, n=3):
"""
连接通达讯行情服务器
:param n:
:return:
"""
if self.connection_status:
# iterate over the live API sessions; if any of them still has a client, we are already connected
for api in self.api_dict.values():
if api is not None and getattr(api, "client", None) is not None:
self.gateway.write_log(u'当前已经连接,不需要重新连接')
return
self.gateway.write_log(u'开始通达信行情服务器')
if len(self.symbol_dict) == 0:
self.gateway.write_error(f'本地没有股票信息的缓存配置文件')
else:
self.cov_contracts()
# 选取最佳服务器
if self.best_ip['ip'] is None and self.best_ip['port'] is None:
self.best_ip = self.select_best_ip()
# 创建n个api连接对象实例
for i in range(n):
try:
api = TdxHq_API(heartbeat=True, auto_retry=True, raise_exception=True)
api.connect(self.best_ip['ip'], self.best_ip['port'])
# 尝试获取市场合约统计
c = api.get_security_count(TDXParams.MARKET_SZ)
if c is None or c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
self.gateway.write_error(err_msg)
else:
self.gateway.write_log(u'创建第{}个tdx连接'.format(i + 1))
self.api_dict[i] = api
self.last_tick_dt[i] = datetime.now()
self.connection_status = True
self.security_count = c
# if len(symbol_name_map) == 0:
# self.get_stock_list()
except Exception as ex:
self.gateway.write_error(u'连接服务器tdx[{}]异常:{},{}'.format(i, str(ex), traceback.format_exc()))
return
# 创建连接池,每个连接都调用run方法
self.pool = Pool(n)
self.pool.map_async(self.run, range(n))
# update the connection flag on the gateway
self.gateway.tdx_connected = True
def reconnect(self, i):
"""
重连
:param i:
:return:
"""
try:
self.best_ip = self.select_best_ip()
api = TdxHq_API(heartbeat=True, auto_retry=True)
api.connect(self.best_ip['ip'], self.best_ip['port'])
# 尝试获取市场合约统计
c = api.get_security_count(TDXParams.MARKET_SZ)
if c is None or c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
self.gateway.write_error(err_msg)
else:
self.gateway.write_log(u'重新创建第{}个tdx连接'.format(i + 1))
self.api_dict[i] = api
sleep(1)
except Exception as ex:
self.gateway.write_error(u'重新连接服务器tdx[{}]异常:{},{}'.format(i, str(ex), traceback.format_exc()))
return
def close(self):
"""退出API"""
self.connection_status = False
# update the connection flag on the gateway
self.gateway.tdx_connected = False
if self.pool is not None:
self.pool.close()
self.pool.join()
def subscribe(self, req):
"""订阅合约"""
# Design note: if subscribe() is called before the connection/login is ready,
# the request is cached first and will be subscribed automatically once login completes
vn_symbol = str(req.symbol)
if '.' in vn_symbol:
vn_symbol = vn_symbol.split('.')[0]
self.gateway.write_log(u'通达信行情订阅 {}'.format(str(vn_symbol)))
tdx_symbol = vn_symbol # [0:-2] + 'L9'
tdx_symbol = tdx_symbol.upper()
self.gateway.write_log(u'{}=>{}'.format(vn_symbol, tdx_symbol))
self.symbol_vn_dict[tdx_symbol] = vn_symbol
if tdx_symbol not in self.registed_symbol_set:
self.registed_symbol_set.add(tdx_symbol)
# 查询股票信息
self.qry_instrument(vn_symbol)
self.check_status()
def check_status(self):
# self.gateway.write_log(u'检查tdx接口状态')
if len(self.registed_symbol_set) == 0:
return True
# 若还没有启动连接,就启动连接
over_time = [((datetime.now() - dt).total_seconds() > 60) for dt in self.last_tick_dt.values()]
if not self.connection_status or len(self.api_dict) == 0 or any(over_time):
self.gateway.write_log(u'tdx还没有启动连接,就启动连接')
self.close()
self.pool = None
self.api_dict = {}
pool_count = getattr(self.gateway, 'tdx_pool_count', 3)
self.connect(pool_count)
# self.gateway.write_log(u'tdx接口状态正常')
def qry_instrument(self, symbol):
"""
查询/更新股票信息
:return:
"""
if not self.connection_status:
return
api = self.api_dict.get(0)
if api is None:
self.gateway.write_log(u'取不到api连接,更新合约信息失败')
return
# TODO: 取得股票的中文名
market_code = get_tdx_market_code(symbol)
api.to_df(api.get_finance_info(market_code, symbol))
# 如果有预定的订阅合约,提前订阅
# if len(all_contacts) > 0:
# cur_folder = os.path.dirname(__file__)
# export_file = os.path.join(cur_folder,'contracts.csv')
# if not os.path.exists(export_file):
# df = pd.DataFrame(all_contacts)
# df.to_csv(export_file)
def cov_contracts(self):
"""转换本地缓存=》合约信息推送"""
for symbol_marketid, info in self.symbol_dict.items():
symbol, market_id = symbol_marketid.split('_')
exchange = info.get('exchange', '')
if len(exchange) == 0:
continue
vn_exchange_str = get_stock_exchange(symbol)
if exchange != vn_exchange_str:
continue
exchange = Exchange(exchange)
if info['stock_type'] == 'stock_cn':
product = Product.EQUITY
elif info['stock_type'] in ['bond_cn', 'cb_cn']:
product = Product.BOND
elif info['stock_type'] == 'index_cn':
product = Product.INDEX
elif info['stock_type'] == 'etf_cn':
product = Product.ETF
else:
product = Product.EQUITY
volume_tick = info['volunit']
if symbol.startswith('688'):
volume_tick = 200
contract = ContractData(
gateway_name=self.gateway_name,
symbol=symbol,
exchange=exchange,
name=info['name'],
product=product,
pricetick=round(0.1 ** info['decimal_point'], info['decimal_point']),
size=1,
min_volume=volume_tick,
margin_rate=1
)
if product != Product.INDEX:
# 缓存 合约 =》 中文名
symbol_name_map.update({contract.symbol: contract.name})
# 缓存代码和交易所的印射关系
symbol_exchange_map[contract.symbol] = contract.exchange
self.contract_dict.update({contract.symbol: contract})
self.contract_dict.update({contract.vt_symbol: contract})
# 推送
self.gateway.on_contract(contract)
def get_stock_list(self):
"""股票所有的code&name列表"""
api = self.api_dict.get(0)
if api is None:
self.gateway.write_log(u'取不到api连接,更新合约信息失败')
return None
self.gateway.write_log(f'查询所有的股票信息')
data = pd.concat(
[pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(sse='sz' if j == 0 else 'sh').set_index(
['code', 'sse'], drop=False) for i in range(int(api.get_security_count(j) / 1000) + 1)], axis=0) for j
in range(2)], axis=0)
sz = data.query('sse=="sz"')
sh = data.query('sse=="sh"')
sz = sz.assign(sec=sz.code.apply(get_stock_type_sz))
sh = sh.assign(sec=sh.code.apply(get_stock_type_sh))
temp_df = pd.concat([sz, sh]).query('sec in ["stock_cn","etf_cn","bond_cn","cb_cn"]').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
hq_codelist = temp_df.loc[:, ['code', 'name']].set_index(['code'], drop=False)
for i in range(0, len(temp_df)):
row = temp_df.iloc[i]
if row['sec'] == 'etf_cn':
product = Product.ETF
elif row['sec'] in ['bond_cn', 'cb_cn']:
product = Product.BOND
else:
product = Product.EQUITY
volume_tick = 100 if product != Product.BOND else 10
if row['code'].startswith('688'):
volume_tick = 200
contract = ContractData(
gateway_name=self.gateway_name,
symbol=row['code'],
exchange=Exchange.SSE if row['sse'] == 'sh' else Exchange.SZSE,
name=row['name'],
product=product,
pricetick=round(0.1 ** row['decimal_point'], row['decimal_point']),
size=1,
min_volume=volume_tick,
margin_rate=1
)
# 缓存 合约 =》 中文名
symbol_name_map.update({contract.symbol: contract.name})
# 缓存代码和交易所的印射关系
symbol_exchange_map[contract.symbol] = contract.exchange
self.contract_dict.update({contract.symbol: contract})
self.contract_dict.update({contract.vt_symbol: contract})
# 推送
self.gateway.on_contract(contract)
return hq_codelist
def run(self, i):
"""
版本1:Pool内得线程,持续运行,每个线程从queue中获取一个请求并处理
版本2:Pool内线程,从订阅合约集合中,取出符合自己下标 mode n = 0的合约,并发送请求
:param i:
:return:
"""
# 版本2:
try:
api_count = len(self.api_dict)
last_dt = datetime.now()
self.gateway.write_log(u'开始运行tdx[{}],{}'.format(i, last_dt))
while self.connection_status:
symbols = set()
for idx, tdx_symbol in enumerate(list(self.registed_symbol_set)):
# self.gateway.write_log(u'tdx[{}], api_count:{}, idx:{}, tdx_symbol:{}'.format(i, api_count, idx, tdx_symbol))
if idx % api_count == i:
try:
symbols.add(tdx_symbol)
self.processReq(tdx_symbol, i)
except BrokenPipeError as bex:
self.gateway.write_error(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex), i))
self.reconnect(i)
sleep(5)
break
except Exception as ex:
self.gateway.write_error(
u'tdx[{}] exception:{},{}'.format(i, str(ex), traceback.format_exc()))
# api = self.api_dict.get(i,None)
# if api is None or getattr(api,'client') is None:
self.gateway.write_error(u'重试重连tdx[{}]'.format(i))
print(u'重试重连tdx[{}]'.format(i), file=sys.stderr)
self.reconnect(i)
# self.gateway.write_log(u'tdx[{}] sleep'.format(i))
sleep(self.req_interval)
dt = datetime.now()
if last_dt.minute != dt.minute:
self.gateway.write_log('tdx[{}] check point. {}, process symbols:{}'.format(i, dt, symbols))
last_dt = dt
except Exception as ex:
self.gateway.write_error(u'tdx[{}] pool.run exception:{},{}'.format(i, str(ex), traceback.format_exc()))
self.gateway.write_error(u'tdx[{}] {}退出'.format(i, datetime.now()))
def processReq(self, req, i):
"""
Process a market-data (tick) request for one symbol
:param req:
:param i:
:return:
"""
symbol = req
if '.' in symbol:
symbol, exchange = symbol.split('.')
if exchange == 'SZSE':
market_code = 0
else:
market_code = 1
else:
market_code = get_tdx_market_code(symbol)
exchange = get_stock_exchange(symbol)
exchange = Exchange(exchange)
api = self.api_dict.get(i, None)
if api is None:
self.gateway.write_log(u'tdx[{}] Api is None'.format(i))
raise Exception(u'tdx[{}] Api is None'.format(i))
symbol_config = self.symbol_dict.get('{}_{}'.format(symbol, market_code), {})
decimal_point = symbol_config.get('decimal_point', 2)
# self.gateway.write_log(u'tdx[{}] get_instrument_quote:({},{})'.format(i,self.symbol_market_dict.get(symbol),symbol))
rt_list = api.get_security_quotes([(market_code, symbol)])
if rt_list is None or len(rt_list) == 0:
self.gateway.write_log(u'tdx[{}]: rt_list为空'.format(i))
return
# else:
# self.gateway.write_log(u'tdx[{}]: rt_list数据:{}'.format(i, rt_list))
if i in self.last_tick_dt:
self.last_tick_dt[i] = datetime.now()
# <class 'list'>: [OrderedDict([
# ('market', 0),
# ('code', '000001'),
# ('active1', 1385),
# ('price', 13.79),
# ('last_close', 13.69),
# ('open', 13.65), ('high', 13.81), ('low', 13.56),
# ('reversed_bytes0', 10449822), ('reversed_bytes1', -1379),
# ('vol', 193996), ('cur_vol', 96),
# ('amount', 264540864.0),
# ('s_vol', 101450),
# ('b_vol', 92546),
# ('reversed_bytes2', 0), ('reversed_bytes3', 17185),
# ('bid1', 13.79), ('ask1', 13.8), ('bid_vol1', 877), ('ask_vol1', 196),
# ('bid2', 13.78), ('ask2', 13.81), ('bid_vol2', 2586), ('ask_vol2', 1115),
# ('bid3', 13.77), ('ask3', 13.82), ('bid_vol3', 1562), ('ask_vol3', 807),
# ('bid4', 13.76), ('ask4', 13.83), ('bid_vol4', 211), ('ask_vol4', 711),
# ('bid5', 13.75), ('ask5', 13.84), ('bid_vol5', 1931), ('ask_vol5', 1084),
# ('reversed_bytes4', (385,)), ('reversed_bytes5', 1), ('reversed_bytes6', -41), ('reversed_bytes7', -29), ('reversed_bytes8', 1), ('reversed_bytes9', 0.88),
# ('active2', 1385)])]
dt = datetime.now()
for d in list(rt_list):
# 忽略成交量为0的无效单合约tick数据
if d.get('cur_vol', 0) <= 0:
# self.gateway.write_log(u'忽略成交量为0的无效单合约tick数据:')
continue
code = d.get('code', None)
if symbol != code and code is not None:
self.gateway.write_log(u'忽略合约{} {} 不一致的tick数据:{}'.format(symbol, d.get('code'), rt_list))
continue
tick = TickData(
gateway_name=self.gateway_name,
symbol=symbol,
exchange=exchange,
datetime=dt,
date=dt.strftime('%Y-%m-%d'),
time=dt.strftime('%H:%M:%S')
)
if decimal_point > 2:
tick.pre_close = round(d.get('last_close') / (10 ** (decimal_point - 2)), decimal_point)
tick.high_price = round(d.get('high') / (10 ** (decimal_point - 2)), decimal_point)
tick.open_price = round(d.get('open') / (10 ** (decimal_point - 2)), decimal_point)
tick.low_price = round(d.get('low') / (10 ** (decimal_point - 2)), decimal_point)
tick.last_price = round(d.get('price') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_price_1 = round(d.get('bid1') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_1 = d.get('bid_vol1')
tick.ask_price_1 = round(d.get('ask1') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_1 = d.get('ask_vol1')
if d.get('bid5'):
tick.bid_price_2 = round(d.get('bid2') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_2 = d.get('bid_vol2')
tick.ask_price_2 = round(d.get('ask2') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_2 = d.get('ask_vol2')
tick.bid_price_3 = round(d.get('bid3') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_3 = d.get('bid_vol3')
tick.ask_price_3 = round(d.get('ask3') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_3 = d.get('ask_vol3')
tick.bid_price_4 = round(d.get('bid4') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_4 = d.get('bid_vol4')
tick.ask_price_4 = round(d.get('ask4') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_4 = d.get('ask_vol4')
tick.bid_price_5 = round(d.get('bid5') / (10 ** (decimal_point - 2)), decimal_point)
tick.bid_volume_5 = d.get('bid_vol5')
tick.ask_price_5 = round(d.get('ask5') / (10 ** (decimal_point - 2)), decimal_point)
tick.ask_volume_5 = d.get('ask_vol5')
else:
tick.pre_close = d.get('last_close')
tick.high_price = d.get('high')
tick.open_price = d.get('open')
tick.low_price = d.get('low')
tick.last_price = d.get('price')
tick.bid_price_1 = d.get('bid1')
tick.bid_volume_1 = d.get('bid_vol1')
tick.ask_price_1 = d.get('ask1')
tick.ask_volume_1 = d.get('ask_vol1')
if d.get('bid5'):
tick.bid_price_2 = d.get('bid2')
tick.bid_volume_2 = d.get('bid_vol2')
tick.ask_price_2 = d.get('ask2')
tick.ask_volume_2 = d.get('ask_vol2')
tick.bid_price_3 = d.get('bid3')
tick.bid_volume_3 = d.get('bid_vol3')
tick.ask_price_3 = d.get('ask3')
tick.ask_volume_3 = d.get('ask_vol3')
tick.bid_price_4 = d.get('bid4')
tick.bid_volume_4 = d.get('bid_vol4')
tick.ask_price_4 = d.get('ask4')
tick.ask_volume_4 = d.get('ask_vol4')
tick.bid_price_5 = d.get('bid5')
tick.bid_volume_5 = d.get('bid_vol5')
tick.ask_price_5 = d.get('ask5')
tick.ask_volume_5 = d.get('ask_vol5')
tick.volume = d.get('vol', 0)
tick.open_interest = d.get('amount', 0)
# 修正毫秒
last_tick = self.symbol_tick_dict.get(symbol, None)
if (last_tick is not None) and tick.datetime.replace(microsecond=0) == last_tick.datetime:
# 与上一个tick的时间(去除毫秒后)相同,修改为500毫秒
tick.datetime = tick.datetime.replace(microsecond=500)
tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
else:
tick.datetime = tick.datetime.replace(microsecond=0)
tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
tick.date = tick.datetime.strftime('%Y-%m-%d')
tick.trading_day = tick.datetime.strftime('%Y-%m-%d')
# 指数没有涨停和跌停,就用昨日收盘价正负10%
tick.limit_up = tick.pre_close * 1.1
tick.limit_down = tick.pre_close * 0.9
# 排除非交易时间得tick
if tick.datetime.hour not in [9, 10, 11, 13, 14, 15]:
return
elif tick.datetime.hour == 9 and tick.datetime.minute <= 25:
return
elif tick.datetime.hour == 15 and tick.datetime.minute >= 0:
return
self.symbol_tick_dict[symbol] = tick
self.gateway.on_tick(tick)
class PbTdApi(object):
def __init__(self, gateway: PbGateway):
""""""
super().__init__()
self._active = False
self.gateway: PbGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.userid: str = "" # 资金账号
self.product_id: str = "" # 产品编号(在pb客户端看到)
self.unit_id: str = "1" # 单元编号(在pb客户端设置),缺省是1
self.holder_ids = {}
self.order_folder = "" # 埋单csv文件所在目录
self.account_folder = "" # 账号导出csv所在目录
# 缓存了当前交易日
self.trading_day = datetime.now().strftime('%Y-%m-%d')
self.trading_date = self.trading_day.replace('-', '')
self.connect_status: bool = False
self.login_status: bool = False
# 所有交易
self.trades = {} # tradeid: trade
# 本gateway以外的委托
self.orders = {} # sys_orderid: order
# 未获取本地更新检查的orderid清单
self.unchecked_orderids = []
# 警告
self.warning_dict = {}
def close(self):
pass
def connect(self, user_id, order_folder, account_folder, product_id, unit_id="1", holder_ids={}):
"""连接"""
self.userid = user_id
self.order_folder = order_folder
self.product_id = product_id
self.unit_id = unit_id
self.holder_ids = holder_ids
if os.path.exists(self.order_folder):
self.connect_status = True
self.account_folder = account_folder
if os.path.exists(self.account_folder):
self.login_status = True
# query trades only once
self.query_trades()
# query all orders only once, globally
self.query_orders()
# on the first connection, cancel all outstanding orders first
self.cancel_all()
if self.gateway.file_type == 'dbf':
self.gateway.query_functions.append(self.query_update_trades_dbf)
self.gateway.query_functions.append(self.query_update_orders_dbf)
def get_data(self, file_path, field_names=None):
"""获取文件内容"""
if not os.path.exists(file_path):
return None
results = []
try:
with open(file=file_path, mode='r', encoding='gbk', ) as f:
reader = csv.DictReader(f=f, fieldnames=field_names, delimiter=",")
for row in reader:
results.append(row)
except Exception as ex:
self.gateway.write_error(f'读取csv文件数据异常:{str(ex)}')
return results
def query_account(self):
if self.gateway.file_type == 'dbf':
self.query_account_dbf()
else:
self.query_account_csv()
def query_account_dbf(self):
"""获取资金账号信息"""
# dbf 文件名
account_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('accounts'),
self.trading_date)))
copy_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}_{}.dbf'.format(
PB_FILE_NAMES.get('accounts'),
self.trading_date,
datetime.now().strftime('%H%M'))))
try:
if not os.path.exists(account_dbf):
return
if os.path.exists(copy_dbf):
os.remove(copy_dbf)
# =》转移至于新文件
os.rename(account_dbf, copy_dbf)
if not os.path.exists(copy_dbf):
return
# dbf => 资金帐号信息
self.gateway.write_log(f'扫描资金帐号信息:{copy_dbf}')
table = dbf.Table(copy_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
# ["资金账户"]
if str(data.zjzh).strip() != self.userid:
continue
account = AccountData(
gateway_name=self.gateway_name,
accountid=self.userid,
balance=float(data.dyjz), # ["单元净值"]
frozen=float(data.dyjz) - float(data.kyye), # data["可用余额"]
currency="人民币",
trading_day=self.trading_day
)
self.gateway.on_account(account)
table.close()
self.warning_dict.pop('query_account', None)
if os.path.exists(copy_dbf):
os.remove(copy_dbf)
except Exception as ex:
err_msg = f'dbf扫描资金帐号异常:{str(ex)}'
tra_msg = traceback.format_exc()
err_info = self.warning_dict.get('query_account', {})
err_count = err_info.get('err_count', 1)
if err_count > 10:
self.gateway.write_error(err_msg)
self.gateway.write_error(tra_msg)
else:
err_count += 1
err_info.update({'err_count': err_count, 'err_msg': err_msg, 'tra_msg': tra_msg})
self.warning_dict.update({'query_account': err_info})
def query_account_csv(self):
"""获取资金账号信息"""
if self.gateway.pb_version == '2018':
# 账号的文件
accounts_csv = os.path.abspath(os.path.join(self.account_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('accounts'),
self.trading_date)))
else:
# 账号的文件
accounts_csv = os.path.abspath(os.path.join(self.account_folder,
self.trading_date,
'{}{}.csv'.format(
PB_FILE_NAMES.get('accounts'),
self.trading_date)))
# csv => 所有账号资金清单
account_list = self.get_data(accounts_csv)
if not account_list:
return
for data in account_list:
if data["资金账户"] != self.userid:
continue
account = AccountData(
gateway_name=self.gateway_name,
accountid=self.userid,
balance=float(data["单元净值"]),
frozen=float(data["单元净值"]) - float(data["可用余额"]),
currency="人民币",
trading_day=self.trading_day
)
self.gateway.on_account(account)
def query_position(self):
"""获取持仓信息"""
if self.gateway.file_type == 'dbf':
self.query_position_dbf()
else:
self.query_position_csv()
def query_position_dbf(self):
"""从dbf文件获取持仓信息"""
# fields:['zqgs', 'zjzh', 'zhlx', 'zqdm', 'zqmc', 'zqlb', 'zxjg', 'cbjg', 'cpbh', 'cpmc', 'dybh', 'dymc', 'ccsl', 'dqcb', 'kysl', 'jjsz', 'qjsz', 'zqlx'
# , 'jysc', 'jybz', 'dryk', 'ljyk', 'fdyk', 'fyl', 'ykl', 'tzlx', 'gddm', 'mrsl', 'mcsl', 'mrje', 'mcje', 'zdf', 'bbj', 'qjcb', 'gtcb', 'gtyk', 'zgb']
# dbf 文件名
position_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('positions'),
self.trading_date)))
copy_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}_{}.dbf'.format(
PB_FILE_NAMES.get('positions'),
self.trading_date,
datetime.now().strftime('%H%M'))))
try:
if not os.path.exists(position_dbf):
return
if os.path.exists(copy_dbf):
os.remove(copy_dbf)
os.rename(position_dbf, copy_dbf)
if not os.path.exists(copy_dbf):
return
# dbf => 股票持仓信息
self.gateway.write_log(f'扫描股票持仓信息:{copy_dbf}')
table = dbf.Table(copy_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
if str(data.zjzh).strip() != self.userid:
continue
symbol = str(data.zqdm).strip() # ["证券代码"]
# symbol => Exchange
exchange = symbol_exchange_map.get(symbol, None)
if not exchange:
exchange_str = get_stock_exchange(code=symbol)
if len(exchange_str) > 0:
exchange = Exchange(exchange_str)
symbol_exchange_map.update({symbol: exchange})
name = symbol_name_map.get(symbol, None)
if not name:
name = data.zqmc # ["证券名称"]
symbol_name_map.update({symbol: name})
position = PositionData(
gateway_name=self.gateway_name,
accountid=self.userid,
symbol=symbol, # ["证券代码"],
exchange=exchange,
direction=Direction.NET,
name=name,
volume=int(data.ccsl), # ["持仓数量"]
yd_volume=int(data.kysl), # ["可用数量"]
price=float(data.cbjg), # ["成本价"]
cur_price=float(data.zxjg), # ["最新价"]
pnl=float(data.fdyk), # ["浮动盈亏"]
holder_id=str(data.gddm).strip() # ["股东"]
)
self.gateway.on_position(position)
table.close()
self.warning_dict.pop('query_position', None)
if os.path.exists(copy_dbf):
os.remove(copy_dbf)
except Exception as ex:
err_msg = f'dbf扫描股票持仓异常:{str(ex)}'
tra_msg = traceback.format_exc()
err_info = self.warning_dict.get('query_position', {})
err_count = err_info.get('err_count', 1)
if err_count > 10:
self.gateway.write_error(err_msg)
self.gateway.write_error(tra_msg)
else:
err_count += 1
err_info.update({'err_count': err_count, 'err_msg': err_msg, 'tra_msg': tra_msg})
self.warning_dict.update({'query_position': err_info})
def query_position_csv(self):
"""从csv获取持仓信息"""
if self.gateway.pb_version == '2018':
# 持仓的文件
positions_csv = os.path.abspath(os.path.join(self.account_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('positions'),
self.trading_date)))
else:
# 持仓的文件
positions_csv = os.path.abspath(os.path.join(self.account_folder,
self.trading_date,
'{}{}.csv'.format(
PB_FILE_NAMES.get('positions'),
self.trading_date)))
# csv => 所有持仓清单
position_list = self.get_data(positions_csv)
if not position_list:
return
for data in position_list:
if data["资金账户"] != self.userid:
continue
symbol = data["证券代码"]
# symbol => Exchange
exchange = symbol_exchange_map.get(symbol, None)
if not exchange:
exchange_str = get_stock_exchange(code=symbol)
if len(exchange_str) > 0:
exchange = Exchange(exchange_str)
symbol_exchange_map.update({symbol: exchange})
name = symbol_name_map.get(symbol, None)
if not name:
name = data["证券名称"]
symbol_name_map.update({symbol: name})
position = PositionData(
gateway_name=self.gateway_name,
accountid=self.userid,
symbol=data["证券代码"],
exchange=exchange,
direction=Direction.NET,
name=name,
volume=int(data["持仓数量"]),
yd_volume=int(data["可用数量"]),
price=float(data["成本价"]),
cur_price=float(data["最新价"]),
pnl=float(data["浮动盈亏"]),
holder_id=data["股东"]
)
self.gateway.on_position(position)
def query_orders(self):
if self.gateway.file_type == 'dbf':
self.query_orders_dbf()
else:
self.query_orders_csv()
def query_orders_dbf(self):
"""dbf文件获取所有委托"""
# fields:['zqgs', 'zjzh', 'zhlx', 'cpbh', 'cpmc', 'dybh', 'dymc', 'wtph', 'wtxh', 'zqdm', 'zqmc', 'wtfx', 'jglx', 'wtjg', 'wtsl', 'wtzt', 'cjsl', 'wtje'
# , 'cjjj', 'cdsl', 'jysc', 'fdyy', 'wtly', 'wtrq', 'wtsj', 'jybz']
orders_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('orders'),
self.trading_date)))
try:
if not os.path.exists(orders_dbf):
return
# dbf => 股票委托信息
self.gateway.write_log(f'扫描股票委托信息:{orders_dbf}')
table = dbf.Table(orders_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
if str(data.zjzh).strip() != self.userid: # ["资金账户"]
continue
sys_orderid = str(data.wtxh).strip() # ["委托序号"]
# 检查是否存在本地order_manager缓存中
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
order_date = str(data.wtrq).strip() # ["委托日期"]
order_time = str(data.wtsj).strip() # ["委托时间"]
order_status = STATUS_NAME2VT.get(str(data.wtzt).strip()) # ["委托状态"]
# 检查是否存在本地orders缓存中(系统级别的委托单)
sys_order = self.orders.get(sys_orderid, None)
if order is not None:
continue
# 委托单不存在本地映射库,说明是其他地方下的单子,不是通过本接口下单
if sys_order is None:
# 不处理以下状态
if order_status in [Status.SUBMITTING, Status.REJECTED, Status.CANCELLED, Status.CANCELLING]:
continue
order_dt = datetime.strptime(f'{order_date} {order_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_STOCK_NAME2VT.get(str(data.wtfx).strip()) # ["委托方向"]
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
sys_order = OrderData(
gateway_name=self.gateway_name,
symbol=str(data.zqdm).strip(), # ["证券代码"]
exchange=EXCHANGE_NAME2VT.get(str(data.jysc).strip()), # ["交易市场"]
orderid=sys_orderid,
sys_orderid=sys_orderid,
accountid=self.userid,
type=ORDERTYPE_NAME2VT.get(str(data.jglx).strip(), OrderType.LIMIT), # ["价格类型"]
direction=direction,
offset=offset,
price=float(data.wtjg), # ["委托价格"]
volume=float(data.wtsl), # ["委托数量"]
traded=float(data.cjsl), # ["成交数量"]
status=order_status,
datetime=order_dt,
time=order_dt.strftime('%H:%M:%S')
)
# 直接发出订单更新事件
self.gateway.write_log(f'账号订单查询,新增:{sys_order.__dict__}')
self.orders.update({sys_order.sys_orderid: sys_order})
self.gateway.on_order(sys_order)
continue
# 存在账号缓存,判断状态是否更新
else:
# 暂不处理,交给XHPT_WTCX模块处理
if sys_order.status != order_status or sys_order.traded != float(data.cjsl): # ["成交数量"]
sys_order.traded = float(data.cjsl) # ["成交数量"]
sys_order.status = order_status
self.orders.update({sys_order.sys_orderid: sys_order})
self.gateway.write_log(f'账号订单查询,更新:{sys_order.__dict__}')
self.gateway.on_order(sys_order)
continue
table.close()
self.warning_dict.pop('query_orders', None)
except Exception as ex:
err_msg = f'dbf扫描股票委托异常:{str(ex)}'
tra_msg = traceback.format_exc()
err_info = self.warning_dict.get('query_orders', {})
err_count = err_info.get('err_count', 1)
if err_count > 10:
self.gateway.write_error(err_msg)
self.gateway.write_error(tra_msg)
else:
err_count += 1
err_info.update({'err_count': err_count, 'err_msg': err_msg, 'tra_msg': tra_msg})
self.warning_dict.update({'query_orders': err_info})
def query_orders_csv(self):
"""获取所有委托"""
# 所有委托的文件
if self.gateway.pb_version == '2018':
orders_csv = os.path.abspath(os.path.join(self.account_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('orders'),
self.trading_date)))
else:
orders_csv = os.path.abspath(os.path.join(self.account_folder,
self.trading_date,
'{}{}.csv'.format(
PB_FILE_NAMES.get('orders'),
self.trading_date)))
# csv => 所有委托记录
order_list = self.get_data(orders_csv)
if not order_list:
return
for data in order_list:
if data["资金账户"] != self.userid:
continue
sys_orderid = str(data["委托序号"])
# 检查是否存在本地order_manager缓存中
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
order_date = data["委托日期"]
order_time = data["委托时间"]
order_status = STATUS_NAME2VT.get(data["委托状态"])
# 检查是否存在本地orders缓存中(系统级别的委托单)
sys_order = self.orders.get(sys_orderid, None)
if order is not None:
continue
# 委托单不存在本地映射库,说明是其他地方下的单子,不是通过本接口下单
if sys_order is None:
# 不处理以下状态
if order_status in [Status.SUBMITTING, Status.REJECTED, Status.CANCELLED, Status.CANCELLING]:
continue
order_dt = datetime.strptime(f'{order_date} {order_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_STOCK_NAME2VT.get(data["委托方向"])
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
sys_order = OrderData(
gateway_name=self.gateway_name,
symbol=data["证券代码"],
exchange=EXCHANGE_NAME2VT.get(data["交易市场"]),
orderid=sys_orderid,
sys_orderid=sys_orderid,
accountid=self.userid,
type=ORDERTYPE_NAME2VT.get(data["价格类型"], OrderType.LIMIT),
direction=direction,
offset=offset,
price=float(data["委托价格"]),
volume=float(data["委托数量"]),
traded=float(data["成交数量"]),
status=order_status,
datetime=order_dt,
time=order_dt.strftime('%H:%M:%S')
)
# 直接发出订单更新事件
self.gateway.write_log(f'账号订单查询,新增:{sys_order.__dict__}')
self.orders.update({sys_order.sys_orderid: sys_order})
self.gateway.on_order(sys_order)
continue
# 存在账号缓存,判断状态是否更新
else:
# 暂不处理,交给XHPT_WTCX模块处理
if sys_order.status != order_status or sys_order.traded != float(data["成交数量"]):
sys_order.traded = float(data["成交数量"])
sys_order.status = order_status
self.orders.update({sys_order.sys_orderid: sys_order})
self.gateway.write_log(f'账号订单查询,更新:{sys_order.__dict__}')
self.gateway.on_order(sys_order)
continue
def query_update_orders_dbf(self):
"""扫描批量下单的委托查询(dbf文件格式)"""
# XHPT_WTCX委托的dbf文件
orders_dbf = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('update_orders'),
self.trading_date)))
try:
if not os.path.exists(orders_dbf):
return
# dbf => 所有成交记录
self.gateway.write_log(f'扫描所有委托查询:{orders_dbf}')
table = dbf.Table(orders_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
# 第三方系统自定义号
local_orderid = str(data.wbzdyxh)
if len(local_orderid) == 0:
self.gateway.write_log(f'获取不到本地委托号:{print_dict(data.__dict__)}')
continue
# 如果不足8位,自动补充0
if len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
# 委托状态=>
order_status = STATUS_PB2VT.get(str(data.wtzt))
# 恒生平台返回的委托序号
sys_orderid = str(data.wtxh)
if len(sys_orderid) == 0:
self.gateway.write_log(f'获取不到恒生平台的委托序号:{print_dict(data.__dict__)}')
continue
# 通过本地委托编号,检查是否存在本地订单列表中
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid)
if order is None:
self.gateway.write_log(f'本地委托编号{local_orderid}不在本地订单中')
direction = DIRECTION_STOCK_NAME2VT.get(str(data.wtfx).strip())
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
if order_status == Status.ALLTRADED:
traded = data.wtsl
else:
traded = 0
order_dt = datetime.strptime(f'{data.wtrq} {data.wtsj}', "%Y%m%d %H%M%S")
exchange = EXCHANGE_PB2VT.get(str(data.jysc).strip())
new_order = OrderData(
gateway_name=self.gateway_name,
symbol=str(data.zqdm).strip(),
exchange=exchange,
orderid=local_orderid,
sys_orderid=sys_orderid,
accountid=self.userid,
type=ORDERTYPE_PB2VT.get(str(data.wtjglx).strip(), OrderType.LIMIT),
direction=direction,
offset=offset,
price=float(data.wtjg),
volume=float(data.wtsl),
traded=traded,
status=order_status,
datetime=order_dt,
time=order_dt.strftime('%H:%M:%S')
)
self.gateway.write_log(f'补充委托记录:{print_dict(new_order.__dict__)}')
self.gateway.order_manager.on_order(new_order)
continue
if order.sys_orderid != sys_orderid:
pre_sys_orderid = order.sys_orderid
order.sys_orderid = sys_orderid
self.gateway.order_manager.update_orderid_map(local_orderid=local_orderid, sys_orderid=sys_orderid)
self.gateway.write_log(
f'绑定local_orderid:{local_orderid}, <=> 系统委托号:{pre_sys_orderid}=>{sys_orderid}')
if local_orderid in self.unchecked_orderids:
self.unchecked_orderids.remove(local_orderid)
# 如果委托状态是已经撤单,拒单,已成交,就不处理
if order.status in [Status.CANCELLED, Status.REJECTED, Status.ALLTRADED]:
continue
if order.status != order_status:
self.gateway.write_log(f'{local_orderid} 状态:{order.status.value} => {order_status.value}')
order.status = order_status
if order.status == Status.CANCELLED:
order.cancel_time = datetime.now().strftime('%H:%M:%S')
if order.status == Status.ALLTRADED and order.traded != order.volume:
self.gateway.write_log(f'dbf批量下单,委托单全成交,成交数:{order.traded}=>{order.volume}')
order.traded = order.volume
self.gateway.write_log(f'dbf批量下单,委托单更新:{order.__dict__}')
self.gateway.order_manager.on_order(order)
continue
table.close()
self.warning_dict.pop('query_update_order', None)
except Exception as ex:
err_msg = f'dbf查询委托库异常:{str(ex)}'
tra_msg = traceback.format_exc()
err_info = self.warning_dict.get('query_update_order', {})
err_count = err_info.get('err_count', 1)
if err_count > 10:
self.gateway.write_error(err_msg)
self.gateway.write_error(tra_msg)
else:
err_count += 1
err_info.update({'err_count': err_count, 'err_msg': err_msg, 'tra_msg': tra_msg})
self.warning_dict.update({'query_update_order': err_info})
def query_update_orders_csv(self):
"""扫描批量下单的委托查询(csv文件格式)"""
# XHPT_WTCX委托的CSV文件
orders_csv = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('update_orders'),
self.trading_date)))
# csv => 所有委托记录
order_list = self.get_data(orders_csv, field_names=UPDATE_ORDER_FIELDS.keys())
if not order_list:
return
for data in order_list:
# 第三方系统自定义号
local_orderid = str(data["WBZDYXH"]).lstrip()
if len(local_orderid) == 0:
continue
if len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid)
if order is None:
continue
# 恒生平台返回的委托序号
sys_orderid = str(data['WTXH']).lstrip()
if len(sys_orderid) == 0:
continue
if order.sys_orderid != sys_orderid:
pre_sys_orderid = order.sys_orderid
order.sys_orderid = sys_orderid
self.gateway.order_manager.update_orderid_map(local_orderid=local_orderid, sys_orderid=sys_orderid)
self.gateway.write_log(f'绑定local_orderid:{local_orderid}, <=> 系统委托号:{pre_sys_orderid}=>{sys_orderid}')
if local_orderid in self.unchecked_orderids:
self.unchecked_orderids.remove(local_orderid)
# 如果委托状态是已经撤单,拒单,已成交,就不处理
if order.status in [Status.CANCELLED, Status.REJECTED, Status.ALLTRADED]:
continue
order_status = STATUS_PB2VT.get(data["WTZT"])
if order.status != order_status:
self.gateway.write_log(f'{local_orderid} 状态:{order.status.value} => {order_status.value}')
order.status = order_status
if order.status == Status.CANCELLED:
order.cancel_time = datetime.now().strftime('%H:%M:%S')
if order.status == Status.ALLTRADED and order.traded != order.volume:
self.gateway.write_log(f'csv批量下单,委托单全成交,成交数:{order.traded}=>{order.volume}')
order.traded = order.volume
self.gateway.write_log(f'csv批量下单,委托更新:{order.__dict__}')
self.gateway.order_manager.on_order(order)
continue
def query_trades(self):
if self.gateway.file_type == 'dbf':
self.query_trades_dbf()
else:
self.query_trades_csv()
def query_trades_dbf(self):
"""dbf文件获取所有成交"""
# fields:['zqgs', 'zjzh', 'zhlx', 'cpbh', 'cpmc', 'dybh', 'dymc', 'cjxh', 'wtph', 'wtxh', 'zqdm', 'zqmc', 'wtfx', 'zqlb', 'ywfl', 'cjrq', 'cjsj', 'cjsl'
# , 'cjjg', 'zfy', 'cjje', 'jysc', 'jybz', 'wtly', 'rybh', 'rymc']
trades_dbf = os.path.abspath(os.path.join(self.account_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('trades'),
self.trading_date)))
try:
if not os.path.exists(trades_dbf):
return
# dbf => 股票成交信息
self.gateway.write_log(f'扫描股票成交信息:{trades_dbf}')
table = dbf.Table(trades_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
if str(data.zjzh).strip() != self.userid: # ["资金账户"]
continue
sys_orderid = str(data.wtxh) # ["委托序号"]
sys_tradeid = str(data.cjxh) # ["成交序号"]
# 检查是否存在本地trades缓存中
trade = self.trades.get(sys_tradeid, None)
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
                # 如果交易不在本地映射关系中
if trade is None and order is None:
trade_date = str(data.cjrq).strip() # ["成交日期"]
trade_time = str(data.cjsj).strip() # ["成交时间"]
trade_dt = datetime.strptime(f'{trade_date} {trade_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_STOCK_NAME2VT.get(str(data.wtfx).strip()) # ["委托方向"]
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
trade = TradeData(
gateway_name=self.gateway_name,
symbol=str(data.zqdm).strip(), # ["证券代码"]
exchange=EXCHANGE_NAME2VT.get(str(data.jysc).strip()), # ["交易市场"]
orderid=sys_tradeid,
tradeid=sys_tradeid,
sys_orderid=sys_orderid,
accountid=self.userid,
direction=direction,
offset=offset,
price=float(data.cjjg), # ["成交价格"]
volume=float(data.cjsl), # ["成交数量"]
datetime=trade_dt,
time=trade_dt.strftime('%H:%M:%S'),
trade_amount=float(data.cjje), # ["成交金额"]
commission=float(data.zfy) # ["总费用"]
)
self.trades[sys_tradeid] = trade
self.gateway.on_trade(copy.copy(trade))
continue
table.close()
self.warning_dict.pop('query_trades', None)
except Exception as ex:
err_msg = f'dbf扫描股票成交异常:{str(ex)}'
tra_msg = traceback.format_exc()
err_info = self.warning_dict.get('query_trades', {})
err_count = err_info.get('err_count', 1)
if err_count > 10:
self.gateway.write_error(err_msg)
self.gateway.write_error(tra_msg)
else:
err_count += 1
err_info.update({'err_count': err_count, 'err_msg': err_msg, 'tra_msg': tra_msg})
self.warning_dict.update({'query_trades': err_info})
def query_trades_csv(self):
"""获取所有成交"""
# 所有成交的文件
if self.gateway.pb_version == '2018':
trades_csv = os.path.abspath(os.path.join(self.account_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('trades'),
self.trading_date)))
else:
trades_csv = os.path.abspath(os.path.join(self.account_folder,
self.trading_date,
'{}{}.csv'.format(
PB_FILE_NAMES.get('trades'),
self.trading_date)))
# csv => 所有成交记录
trade_list = self.get_data(trades_csv)
if not trade_list:
return
for data in trade_list:
if data["资金账户"] != self.userid:
continue
sys_orderid = str(data["委托序号"])
sys_tradeid = str(data["成交序号"])
# 检查是否存在本地trades缓存中
trade = self.trades.get(sys_tradeid, None)
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
            # 如果交易不在本地映射关系中
if trade is None and order is None:
trade_date = data["成交日期"]
trade_time = data["成交时间"]
trade_dt = datetime.strptime(f'{trade_date} {trade_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_STOCK_NAME2VT.get(data["委托方向"])
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
trade = TradeData(
gateway_name=self.gateway_name,
symbol=data["证券代码"],
exchange=EXCHANGE_NAME2VT.get(data["交易市场"]),
orderid=sys_tradeid,
tradeid=sys_tradeid,
sys_orderid=sys_orderid,
accountid=self.userid,
direction=direction,
offset=offset,
price=float(data["成交价格"]),
volume=float(data["成交数量"]),
datetime=trade_dt,
time=trade_dt.strftime('%H:%M:%S'),
trade_amount=float(data["成交金额"]),
commission=float(data["总费用"])
)
self.trades[sys_tradeid] = trade
self.gateway.on_trade(copy.copy(trade))
continue
def query_update_trades_dbf(self):
"""获取接口的dbf成交更新"""
# 所有成交的dbf文件
trades_dbf = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('update_trades'),
self.trading_date)))
try:
if not os.path.exists(trades_dbf):
return
# dbf => 所有成交记录
self.gateway.write_log(f'扫描所有成交记录:{trades_dbf}')
table = dbf.Table(trades_dbf, codepage='cp936')
table.open(dbf.READ_ONLY)
for data in table:
# 本地委托号
local_orderid = str(data.wbzdyxh).strip()
if 0 < len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
# 系统委托号
sys_orderid = str(data.wtxh).strip()
# 系统交易号
sys_tradeid = str(data.cjbh).strip()
# 检查是否存在本地trades缓存中
trade = self.trades.get(sys_tradeid, None)
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
                # 如果交易不在本地映射关系中
if trade is None and order:
trade_date = str(data.cjrq).strip()
trade_time = str(data.cjsj).strip()
trade_dt = datetime.strptime(f'{trade_date} {trade_time}', "%Y%m%d %H%M%S")
direction = DIRECTION_ORDER_PB2VT.get(str(data.wtfx).strip())
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
trade = TradeData(
gateway_name=self.gateway_name,
symbol=str(data.zqdm).strip(),
exchange=EXCHANGE_PB2VT.get(str(data.jysc).strip()),
orderid=local_orderid,
tradeid=sys_tradeid,
sys_orderid=sys_orderid,
accountid=self.userid,
direction=direction,
offset=offset,
price=float(data.cjjg),
volume=int(data.cjsl),
datetime=trade_dt,
time=trade_dt.strftime('%H:%M:%S'),
trade_amount=float(data.cjje),
commission=float(data.zfy),
holder_id=str(data.gddm).strip()
)
# 保存交易记录
self.trades[sys_tradeid] = trade
# 更新订单的成交数量
if order.volume >= order.traded + trade.volume:
pre_traded = order.traded
order.traded += trade.volume
self.gateway.write_log(
f'{local_orderid}/{sys_orderid} 成交数量:{pre_traded} =>{order.traded} ,目标:{order.volume}')
# 发送成交更新
self.gateway.on_trade(copy.copy(trade))
continue
table.close()
self.warning_dict.pop('query_update_trade', None)
except Exception as ex:
err_msg = f'dbf查询成交库异常:{str(ex)}'
tra_msg = traceback.format_exc()
err_info = self.warning_dict.get('query_update_trade', {})
err_count = err_info.get('err_count', 1)
if err_count > 10:
self.gateway.write_error(err_msg)
self.gateway.write_error(tra_msg)
else:
err_count += 1
err_info.update({'err_count': err_count, 'err_msg': err_msg, 'tra_msg': tra_msg})
self.warning_dict.update({'query_update_trade': err_info})
def query_update_trades_csv(self):
"""获取接口的csv成交更新"""
# 所有成交的csv文件
trades_csv = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('update_trades'),
self.trading_date)))
# csv => 所有成交记录
trade_list = self.get_data(trades_csv, field_names=UPDATE_TRADE_FIELDS.keys())
if not trade_list:
return
for data in trade_list:
local_orderid = str(data["WBZDYXH"]).lstrip()
if len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
sys_orderid = str(data["WTXH"]).lstrip()
sys_tradeid = str(data["CJBH"]).lstrip()
# 检查是否存在本地trades缓存中
trade = self.trades.get(sys_tradeid, None)
order = self.gateway.order_manager.get_order_with_sys_orderid(sys_orderid)
            # 如果交易不在本地映射关系中
if trade is None and order:
trade_date = str(data["CJRQ"]).lstrip()
trade_time = str(data["CJSJ"]).lstrip()
trade_dt = datetime.strptime(f'{trade_date} {trade_time}', "%Y%m%d %H%M%S")
                direction = DIRECTION_ORDER_PB2VT.get(str(data["WTFX"]).lstrip())
offset = Offset.NONE
if direction is None:
direction = Direction.NET
elif direction == Direction.LONG:
offset = Offset.OPEN
elif direction == Direction.SHORT:
offset = Offset.CLOSE
trade = TradeData(
gateway_name=self.gateway_name,
symbol=str(data["ZQDM"]).lstrip(),
exchange=EXCHANGE_PB2VT.get(str(data["JYSC"]).lstrip()),
orderid=local_orderid,
tradeid=sys_tradeid,
sys_orderid=sys_orderid,
accountid=self.userid,
direction=direction,
offset=offset,
price=float(str(data["CJJG"]).lstrip()),
volume=float(str(data["CJSL"]).lstrip()),
datetime=trade_dt,
time=trade_dt.strftime('%H:%M:%S'),
trade_amount=float(str(data["CJJE"]).lstrip()),
commission=float(str(data["ZFY"]).lstrip()),
holder_id=str(data['GDDM']).lstrip()
)
# 保存交易记录
self.trades[sys_tradeid] = trade
# 更新订单的成交数量
if order.volume >= order.traded + trade.volume:
pre_traded = order.traded
order.traded += trade.volume
self.gateway.write_log(
f'{local_orderid}/{sys_orderid} 成交数量:{pre_traded} =>{order.traded} ,目标:{order.volume}')
# 发送成交更新
self.gateway.on_trade(copy.copy(trade))
continue
def check_send_order_dbf(self):
"""检查更新委托文件dbf"""
dbf_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(PB_FILE_NAMES.get('send_order'), self.trading_date)))
try:
if not os.path.exists(dbf_file):
return
table = dbf.Table(dbf_file, codepage='cp936')
table.open(dbf.READ_ONLY)
for record in table:
local_orderid = str(record.wbzdyxh)
if len(local_orderid) < 8:
local_orderid = local_orderid.rjust(8, '0')
if local_orderid not in self.unchecked_orderids:
continue
# 从本地order_manager中获取order
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid)
# 判断order取不到,或者order状态不是SUBMITTING
if order is None or order.status != Status.SUBMITTING:
continue
# 检查是否具有系统委托编号
if order.sys_orderid == "":
sys_orderid = str(getattr(record, 'wtxh', ''))
if len(sys_orderid) == 0:
continue
# 委托失败标志
if sys_orderid == "0":
err_msg = record.sbyy
if isinstance(err_msg, bytes):
err_msg = err_msg.decode('gbk')
if len(err_msg) == 0 or record.wtsbdm == 0:
self.gateway.write_log(f'收到失败,又没有失败原因')
continue
err_id = str(getattr(record, 'wtsbdm', '')).strip()
order.status = Status.REJECTED
self.gateway.write_log(f'dbf批量下单,委托被拒:{order.__dict__}')
self.gateway.order_manager.on_order(order)
self.gateway.write_error(msg=f'{order.direction.value},{order.vt_symbol},{err_msg}',
error={"ErrorID": err_id, "ErrorMsg": "委托失败"})
if sys_orderid not in ['0','None']:
self.gateway.order_manager.update_orderid_map(local_orderid=local_orderid,
sys_orderid=sys_orderid)
order.sys_orderid = sys_orderid
order.status = Status.NOTTRADED
self.gateway.write_log(f'绑定本地local_orderid:{local_orderid} <=>sys_orderid:{sys_orderid}')
self.gateway.write_log(f'dbf批量下单,委托接受:{order.__dict__}')
self.gateway.order_manager.on_order(order)
self.gateway.write_log(f'委托成功')
# 移除检查的id
self.gateway.write_log(f'本地委托单更新检查完毕,移除{local_orderid}')
self.unchecked_orderids.remove(local_orderid)
table.close()
self.warning_dict.pop('query_send_order', None)
except Exception as ex:
err_msg = f'dbf查询系统委托号异常:{str(ex)}'
tra_msg = traceback.format_exc()
err_info = self.warning_dict.get('query_send_order', {})
err_count = err_info.get('err_count', 1)
if err_count > 10:
self.gateway.write_error(err_msg)
self.gateway.write_error(tra_msg)
else:
err_count += 1
err_info.update({'err_count': err_count, 'err_msg': err_msg, 'tra_msg': tra_msg})
self.warning_dict.update({'query_send_order': err_info})
def check_send_order_csv(self):
"""检查更新委托文件csv"""
# 当日send_order的文件
send_order_csv = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(
PB_FILE_NAMES.get('send_order'),
self.trading_date)))
# csv => 所有send_order记录
order_list = self.get_data(send_order_csv, field_names=SEND_ORDER_FIELDS.keys())
# 逐一处理
for data in order_list:
local_orderid = data.get('WBZDYXH', "").lstrip(' ')
            if local_orderid == "":
continue
if local_orderid not in self.unchecked_orderids:
continue
# 从本地order_manager中获取order
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid)
# 判断order取不到,或者order状态不是SUBMITTING
if order is None or order.status != Status.SUBMITTING:
continue
# 检查是否具有系统委托编号
if order.sys_orderid == "":
sys_orderid = data.get('WTXH', '').lstrip(' ')
if len(sys_orderid) == 0:
continue
err_msg = data.get('SBYY', '').lstrip(' ')
# 委托失败标志
if sys_orderid == "0":
if len(err_msg) == 0:
                        self.gateway.write_log(f'收到失败标志,又没有失败原因:{print_dict(data)}')
continue
err_id = data.get('WTSBDM', '').lstrip(' ')
order.status = Status.REJECTED
self.gateway.write_log(f'csv批量下单,委托被拒:{order.__dict__}')
self.gateway.order_manager.on_order(order)
self.gateway.write_error(msg=err_msg, error={"ErrorID": err_id, "ErrorMsg": "委托失败"})
if sys_orderid != '0':
self.gateway.order_manager.update_orderid_map(local_orderid=local_orderid, sys_orderid=sys_orderid)
order.sys_orderid = sys_orderid
order.status = Status.NOTTRADED
self.gateway.write_log(f'csv批量下单,委托被接受:{order.__dict__}')
self.gateway.order_manager.on_order(order)
self.gateway.write_log(f'委托成功')
# 移除检查的id
self.gateway.write_log(f'本地委托单更新检查完毕,移除{local_orderid}')
self.unchecked_orderids.remove(local_orderid)
def send_order(self, req: OrderRequest):
"""委托发单"""
self.gateway.write_log(f'委托发单:{req.__dict__}')
if self.gateway.file_type == 'dbf':
return self.send_order_dbf(req)
else:
return self.send_order_csv(req)
def send_order_dbf(self, req: OrderRequest):
"""通过dbf文件进行发单"""
# 发生委托,才添加批量埋单接口的委托、成交检查
if self.query_update_trades_dbf not in self.gateway.query_functions:
self.gateway.query_functions.append(self.query_update_trades_dbf)
if self.query_update_orders_dbf not in self.gateway.query_functions:
self.gateway.query_functions.append(self.query_update_orders_dbf)
# 创建本地orderid(str格式, HHMM+00序列号)
local_orderid = self.gateway.order_manager.new_local_orderid()
# req => order
order = req.create_order_data(orderid=local_orderid, gateway_name=self.gateway_name)
dbf_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(PB_FILE_NAMES.get('send_order'), self.trading_date)))
# 股票买卖,强制offset = Offset.NONE
order.offset = Offset.NONE
contract = self.gateway.md_api.contract_dict.get(f'{order.symbol}.{order.exchange.value}')
direction = DIRECTION_STOCK_VT2PB.get((order.direction, order.offset)) # 委托方向
if contract:
if contract.product == Product.BOND:
if direction == '1':
direction = '3'
else:
direction = '4'
data = (
self.product_id, # "CPBH": "C32", # 产品代码/基金代码 <-- 输入参数 -->
self.unit_id, # "ZCDYBH": "C16", # 单元编号/组合编号
self.unit_id, # "ZHBH": "C16", # 组合编号
self.holder_ids.get(order.exchange), # "GDDM": "C20", # 股东代码
EXCHANGE_VT2PB.get(order.exchange), # "JYSC": "C3", # 交易市场
order.symbol, # "ZQDM": "C16", # 证券代码
direction, # "WTFX": "C4", # 委托方向
get_pb_order_type(order.exchange, order.type), # "WTJGLX": "C1", # 委托价格类型
round(order.price, 2), # "WTJG": "N11.4", # 委托价格
int(order.volume), # "WTSL": "N12", # 委托数量
local_orderid, # "WBZDYXH": "N9", # 第三方系统自定义号( 如果字符串不是数字,会报错,如果前面有0,自动去掉)
None, # "WTXH": "N8", # 委托序号 <-- 输出参数 -->
None, # "WTSBDM": "N8", # 委托失败代码
"", # "SBYY": "C254", # 失败原因
"", # "CLBZ": "C1", # 处理标志 <-- 内部自用字段 -->
"", # "BYZD": "C2", # 备用字段
0, # "WTJE": "N16.2", # 委托金额 <-- 扩充参数 -->
"", # "TSBS": "C64", # 特殊标识
"" # "YWBS": "C2", # 业务标识
)
try:
# 打开dbf文件=》table
table = dbf.Table(dbf_file)
# 读取、写入模式
table.open(dbf.READ_WRITE)
# 写入数据
table.append(data)
# 关闭dbf文件
table.close()
self.warning_dict.pop('send_order', None)
except Exception as ex:
err_msg = f'dbf添加发单记录异常:{str(ex)}'
tra_msg = traceback.format_exc()
err_info = self.warning_dict.get('send_order', {})
err_count = err_info.get('err_count', 1)
if err_count > 10:
self.gateway.write_error(err_msg)
self.gateway.write_error(tra_msg)
else:
err_count += 1
err_info.update({'err_count': err_count, 'err_msg': err_msg, 'tra_msg': tra_msg})
self.warning_dict.update({'send_order': err_info})
return ""
# 设置状态为提交中
order.status = Status.SUBMITTING
# 添加待检查列表
self.unchecked_orderids.append(local_orderid)
# 登记并发送on_order事件
self.gateway.write_log(f'send_order,提交dbf委托:{order.__dict__}')
self.gateway.order_manager.on_order(order)
# 添加定时检查任务
if self.check_send_order_dbf not in self.gateway.query_functions:
self.gateway.write_log(f'添加扫描系统委托号任务到任务队列中')
self.gateway.query_functions.append(self.check_send_order_dbf)
return order.vt_orderid
def send_order_csv(self, req: OrderRequest):
"""csv文件格式委托"""
# 发生委托,才添加批量埋单接口的委托、成交检查
if self.query_update_trades_csv not in self.gateway.query_functions:
self.gateway.query_functions.append(self.query_update_trades_csv)
if self.query_update_orders_csv not in self.gateway.query_functions:
self.gateway.query_functions.append(self.query_update_orders_csv)
# 创建本地orderid
local_orderid = self.gateway.order_manager.new_local_orderid()
# req => order
order = req.create_order_data(orderid=local_orderid, gateway_name=self.gateway_name)
csv_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(PB_FILE_NAMES.get('send_order'), self.trading_date)))
# 股票买卖,强制offset = Offset.NONE
order.offset = Offset.NONE
contract = self.gateway.md_api.contract_dict.get(f'{order.symbol}.{order.exchange.value}')
direction = DIRECTION_STOCK_VT2PB.get((order.direction, order.offset)) # 委托方向
if contract:
if contract.product == Product.BOND:
if direction == '1':
direction = '3'
else:
direction = '4'
data = {
"CPBH": self.product_id, # 产品代码/基金代码 <-- 输入参数 -->
"ZCDYBH": self.unit_id, # 单元编号/组合编号
"ZHBH": self.unit_id, # 组合编号
"GDDM": self.holder_ids.get(order.exchange), # 股东代码
"JYSC": EXCHANGE_VT2PB.get(order.exchange), # 交易市场
"ZQDM": order.symbol, # 证券代码
"WTFX": direction,
"WTJGLX": get_pb_order_type(order.exchange, order.type), # 委托价格类型
"WTJG": round(order.price, 4), # 委托价格
"WTSL": int(order.volume), # 委托数量
"WBZDYXH": local_orderid # 第三方系统自定义号
}
# 更新所有字段得长度
order_data = format_dict(data, SEND_ORDER_FIELDS)
append_data(file_name=csv_file,
dict_data=order_data,
field_names=list(SEND_ORDER_FIELDS.keys()),
auto_header=False,
encoding='gbk')
# 设置状态为提交中
order.status = Status.SUBMITTING
# 添加待检查列表
self.unchecked_orderids.append(local_orderid)
# 登记并发送on_order事件
self.gateway.write_log(f'send_order,提交csv下单:{order.__dict__}')
self.gateway.order_manager.on_order(order)
# 添加定时检查任务
if self.check_send_order_csv not in self.gateway.query_functions:
self.gateway.write_log(f'添加定时检查到任务队列中')
self.gateway.query_functions.append(self.check_send_order_csv)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
if self.gateway.file_type == 'dbf':
return self.cancel_order_dbf(req)
else:
return self.cancel_order_csv(req)
def cancel_order_dbf(self, req: CancelRequest):
"""
dbf文件撤单
:param req:
:return:
"""
self.gateway.write_log(f'dbf委托撤单:{req.__dict__}')
try:
# 获取订单
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid=req.orderid)
# 订单不存在
if order is None:
self.gateway.write_error(f'订单{req.orderid}不存在, 撤单失败')
return False
# 或者已经全部成交,已经被拒单,已经撤单
if order.status in [Status.ALLTRADED, Status.REJECTED, Status.CANCELLING,
Status.CANCELLED]:
self.gateway.write_error(f'订单{req.orderid}存在, 状态为:{order.status}, 不能再撤单')
return False
sys_orderid = self.gateway.order_manager.get_sys_orderid(req.orderid)
if sys_orderid is None or len(sys_orderid) == 0 or sys_orderid == 'None':
self.gateway.write_error(f'订单{req.orderid}=》系统委托id:{sys_orderid}不存在,撤单失败')
return False
data = (
int(sys_orderid), # 委托序号
None, # "JYSC": "C3", # 交易市场
None, # "ZQDM": "C16", # 证券代码
None, # "CDCGBZ": "C1", # 撤单成功标志
None, # "SBYY": "C254", # 失败原因
None, # "CLBZ": "C1", # 处理标志
None, # "BYZD": "C2", # 备用字段
None # "BYZD2": "C16", # 备用字段2
)
dbf_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(PB_FILE_NAMES.get('cancel_order'),
self.trading_date)))
# 打开dbf文件=》table
table = dbf.Table(dbf_file)
# 读取、写入模式
table.open(dbf.READ_WRITE)
# 写入数据
table.append(data)
# 关闭dbf文件
table.close()
self.warning_dict.pop('cancel_order', None)
return True
except Exception as ex:
err_msg = f'dbf委托撤单异常:{str(ex)}'
tra_msg = traceback.format_exc()
err_info = self.warning_dict.get('cancel_order', {})
err_count = err_info.get('err_count', 1)
if err_count > 10:
self.gateway.write_error(err_msg)
self.gateway.write_error(tra_msg)
else:
err_count += 1
err_info.update({'err_count': err_count, 'err_msg': err_msg, 'tra_msg': tra_msg})
self.warning_dict.update({'cancel_order': err_info})
return False
def cancel_order_csv(self, req: CancelRequest):
"""csv文件撤单"""
self.gateway.write_log(f'处理撤单请求{req.__dict__}')
# 获取订单
order = self.gateway.order_manager.get_order_with_local_orderid(local_orderid=req.orderid)
# 订单不存在
if order is None:
self.gateway.write_log(f'订单{req.orderid}不存在, 撤单失败')
return False
# 或者已经全部成交,已经被拒单,已经撤单
if order.status in [Status.ALLTRADED, Status.REJECTED, Status.CANCELLING,
Status.CANCELLED]:
self.gateway.write_log(f'订单{req.orderid}存在, 状态为:{order.status}, 不能再撤单')
return False
sys_orderid = self.gateway.order_manager.get_sys_orderid(req.orderid)
if len(sys_orderid) == 0:
self.gateway.write_log(f'订单{req.orderid}=》系统委托id不存在,撤单失败')
return False
data = {
"WTXH": sys_orderid, # 委托序号
}
# 更新所有字段得长度
cancel_data = format_dict(data, CANCEL_ORDER_FIELDS)
csv_file = os.path.abspath(os.path.join(self.order_folder,
'{}{}.csv'.format(PB_FILE_NAMES.get('cancel_order'),
self.trading_date)))
append_data(file_name=csv_file,
dict_data=cancel_data,
field_names=list(CANCEL_ORDER_FIELDS.keys()),
auto_header=False,
encoding='gbk')
return True
def cancel_all(self):
if self.gateway.file_type == 'dbf':
return self.cancel_all_dbf()
else:
return self.cancel_all_csv()
def cancel_all_dbf(self):
"""dbf文件全策略单d"""
# XHPT_WTCX委托的dbf文件
orders_dbf = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(
PB_FILE_NAMES.get('update_orders'),
self.trading_date)))
cancel_dbf = os.path.abspath(os.path.join(self.order_folder,
'{}{}.dbf'.format(PB_FILE_NAMES.get('cancel_order'),
self.trading_date)))
# dbf => 所有委托记录
try:
# dbf => 所有成交记录
self.gateway.write_log(f'全撤单,扫描所有委托查询记录:{orders_dbf}')
orders_table = dbf.Table(orders_dbf, codepage='cp936')
orders_table.open(dbf.READ_ONLY)
cancel_table = dbf.Table(cancel_dbf, codepage='cp936')
cancel_table.open(dbf.READ_WRITE)
for data in orders_table:
# 委托状态=>
order_status = STATUS_PB2VT.get(str(data.wtzt))
# 恒生平台返回的委托序号
sys_orderid = str(data.wtxh)
if order_status in [Status.NOTTRADED] and len(sys_orderid) > 0:
self.gateway.write_log(f'撤单:{data.wtxh}')
cancel_data = (int(sys_orderid), None, None, None, None, None, None, None)
cancel_table.append(cancel_data)
orders_table.close()
cancel_table.close()
except Exception as ex:
self.gateway.write_error(f'dbf全委托撤单异常:{str(ex)}')
self.gateway.write_error(traceback.format_exc())
return False
def cancel_all_csv(self):
pass
class TqMdApi():
"""天勤行情API"""
def __init__(self, gateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.api = None
self.is_connected = False
self.subscribe_array = []
# 行情对象列表
self.quote_objs = []
# 数据更新线程
self.update_thread = None
# 所有的合约
self.all_instruments = []
self.ticks = {}
def connect(self, setting={}):
""""""
if self.api and self.is_connected:
self.gateway.write_log(f'天勤行情已经接入,无需重新连接')
return
try:
from tqsdk import TqApi
self.api = TqApi(_stock=True, url="wss://api.shinnytech.com/t/nfmd/front/mobile")
except Exception as e:
            self.gateway.write_log('天勤股票行情API接入异常:{}'.format(str(e)))
self.gateway.write_log(traceback.format_exc())
if self.api:
self.is_connected = True
self.gateway.write_log(f'天勤股票行情API已连接')
self.update_thread = Thread(target=self.update)
self.update_thread.start()
def generate_tick_from_quote(self, vt_symbol, quote) -> TickData:
"""
生成TickData
"""
# 清洗 nan
quote = {k: 0 if v != v else v for k, v in quote.items()}
symbol, exchange = extract_vt_symbol(vt_symbol)
return TickData(
symbol=symbol,
exchange=exchange,
datetime=datetime.strptime(quote["datetime"], "%Y-%m-%d %H:%M:%S.%f"),
name=symbol,
volume=quote["volume"],
open_interest=quote["open_interest"],
last_price=quote["last_price"],
limit_up=quote["upper_limit"],
limit_down=quote["lower_limit"],
open_price=quote["open"],
high_price=quote["highest"],
low_price=quote["lowest"],
pre_close=quote["pre_close"],
bid_price_1=quote["bid_price1"],
bid_price_2=quote["bid_price2"],
bid_price_3=quote["bid_price3"],
bid_price_4=quote["bid_price4"],
bid_price_5=quote["bid_price5"],
ask_price_1=quote["ask_price1"],
ask_price_2=quote["ask_price2"],
ask_price_3=quote["ask_price3"],
ask_price_4=quote["ask_price4"],
ask_price_5=quote["ask_price5"],
bid_volume_1=quote["bid_volume1"],
bid_volume_2=quote["bid_volume2"],
bid_volume_3=quote["bid_volume3"],
bid_volume_4=quote["bid_volume4"],
bid_volume_5=quote["bid_volume5"],
ask_volume_1=quote["ask_volume1"],
ask_volume_2=quote["ask_volume2"],
ask_volume_3=quote["ask_volume3"],
ask_volume_4=quote["ask_volume4"],
ask_volume_5=quote["ask_volume5"],
gateway_name=self.gateway_name
)
def update(self) -> None:
"""
更新行情/委托/账户/持仓
"""
while self.api.wait_update():
# 更新行情信息
for vt_symbol, quote in self.quote_objs:
if self.api.is_changing(quote):
tick = self.generate_tick_from_quote(vt_symbol, quote)
                    if tick:
                        self.gateway.on_tick(tick)
                        self.gateway.on_custom_tick(tick)
def subscribe(self, req: SubscribeRequest) -> None:
"""
订阅行情
"""
if req.vt_symbol not in self.subscribe_array:
symbol, exchange = extract_vt_symbol(req.vt_symbol)
try:
quote = self.api.get_quote(f'{exchange.value}.{symbol}')
self.quote_objs.append((req.vt_symbol, quote))
self.subscribe_array.append(req.vt_symbol)
except Exception as ex:
self.gateway.write_log('订阅天勤行情异常:{}'.format(str(ex)))
def query_history(self, req: HistoryRequest) -> List[BarData]:
"""
获取历史数据
"""
symbol = req.symbol
exchange = req.exchange
interval = req.interval
start = req.start
end = req.end
# 天勤需要的数据
tq_symbol = f'{exchange.value}.{symbol}'
tq_interval = INTERVAL_VT2TQ.get(interval)
end += timedelta(1)
total_days = end - start
# 一次最多只能下载 8964 根Bar
min_length = min(8964, total_days.days * 500)
df = self.api.get_kline_serial(tq_symbol, tq_interval, min_length).sort_values(
by=["datetime"]
)
# 时间戳对齐
df["datetime"] = pd.to_datetime(df["datetime"] + TIME_GAP)
# 过滤开始结束时间
df = df[(df["datetime"] >= start - timedelta(days=1)) & (df["datetime"] < end)]
data: List[BarData] = []
if df is not None:
for ix, row in df.iterrows():
bar = BarData(
symbol=symbol,
exchange=exchange,
interval=interval,
datetime=row["datetime"].to_pydatetime(),
open_price=row["open"],
high_price=row["high"],
low_price=row["low"],
close_price=row["close"],
volume=row["volume"],
open_interest=row.get("close_oi", 0),
gateway_name=self.gateway_name,
)
data.append(bar)
return data
def close(self) -> None:
""""""
try:
if self.api and self.api.wait_update():
self.api.close()
self.is_connected = False
if self.update_thread:
self.update_thread.join()
except Exception as e:
self.gateway.write_log('退出天勤行情api异常:{}'.format(str(e)))
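

# --- Illustrative sketch added by the editor; not part of the original pb_gateway.py ---
# A minimal, hypothetical example of wiring the TqMdApi above to a gateway-like object.
# DummyGateway and the commented subscribe call are assumptions for demonstration only;
# in real use the PB gateway object supplies these callbacks itself.
def _tq_md_api_demo():  # pragma: no cover
    class DummyGateway:
        gateway_name = 'PB_DEMO'

        def write_log(self, msg):
            print(msg)

        def on_tick(self, tick):
            print(tick.symbol, tick.last_price)

        def on_custom_tick(self, tick):
            pass

    md_api = TqMdApi(DummyGateway())
    md_api.connect()
    # A subscription would be issued through the gateway's SubscribeRequest, e.g.:
    # md_api.subscribe(SubscribeRequest(symbol='600000', exchange=Exchange.SSE))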
|
msincenselee/vnpy
|
vnpy/gateway/pb/pb_gateway.py
|
Python
|
mit
| 112,435
|
"""
Copyright 2018-present Engie SA.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import base64
import xml.etree.ElementTree as ET
import requests
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import gimme_aws_creds.common as commondef
from . import errors
class AwsResolver(object):
"""
    The AwsResolver class performs a POST request against the AWS sign-in page
    to fetch friendly names/aliases for accounts and IAM roles
"""
def __init__(self, verify_ssl_certs=True):
"""
:param verify_ssl_certs: Enable/disable SSL verification
"""
self._verify_ssl_certs = verify_ssl_certs
if verify_ssl_certs is False:
requests.packages.urllib3.disable_warnings()
# Allow up to 5 retries on requests to AWS in case we have network issues
self._http_client = requests.Session()
retries = Retry(total=5, backoff_factor=1,
method_whitelist=['POST'])
self._http_client.mount('https://', HTTPAdapter(max_retries=retries))
def get_signinpage(self, saml_token, saml_target_url):
""" Post SAML token to aws sign in page and get back html result"""
payload = {
'SAMLResponse': saml_token,
'RelayState': ''
}
response = self._http_client.post(
saml_target_url,
data=payload,
verify=self._verify_ssl_certs
)
return response.text
    def _enumerate_saml_roles(self, assertion, saml_target_url):
        """ Use the SAML assertion to fetch the AWS sign-in page, parse it and return the available role sets """
        signin_page = self.get_signinpage(assertion, saml_target_url)
role_pairs = []
root = ET.fromstring(base64.b64decode(assertion))
for saml2_attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):
if saml2_attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role':
for saml2_attribute_value in saml2_attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):
role_pairs.append(saml2_attribute_value.text)
# build a temp hash table
table = {}
for role_pair in role_pairs:
idp, role = None, None
for field in role_pair.split(','):
if 'saml-provider' in field:
idp = field
elif 'role' in field:
role = field
if not idp or not role:
raise errors.GimmeAWSCredsError('Parsing error on {}'.format(role_pair))
else:
table[role] = idp
# init parser
soup = BeautifulSoup(signin_page, 'html.parser')
# find all roles
roles = soup.find_all("div", attrs={"class": "saml-role"})
# Normalize pieces of string;
result = []
# Return role if no Roles are present
if not roles:
role = next(iter(table))
idp = table[role]
result.append(commondef.RoleSet(idp=idp, role=role, friendly_account_name='SingleAccountName', friendly_role_name='SingleRole'))
return result
for role_item in roles:
idp, role, friendly_account_name, friendly_role_name = None, None, None, None
role = role_item.label['for']
idp = table[role]
friendly_account_name = role_item.parent.parent.find("div").find("div").get_text()
friendly_role_name = role_item.label.get_text()
result.append(commondef.RoleSet(idp=idp, role=role, friendly_account_name=friendly_account_name, friendly_role_name=friendly_role_name))
return result
@staticmethod
def _display_role(roles):
""" gets a list of available roles and
asks the user to select the role they want to assume
"""
# Gather the roles available to the user.
role_strs = []
last_account = None
for i, role in enumerate(roles):
if not role:
continue
current_account = role.friendly_account_name
if not current_account == last_account:
role_strs.append(current_account)
last_account = current_account
role_strs.append(' [ {} ]: {}'.format(i, role.friendly_role_name))
return role_strs
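

# --- Illustrative sketch added by the editor; not part of the original aws.py ---
# A hypothetical example of driving the resolver above end to end. The assertion and
# target URL values are placeholders, not real credentials or endpoints, and the
# private _enumerate_saml_roles method is called directly only for demonstration.
def _aws_resolver_demo():  # pragma: no cover
    resolver = AwsResolver(verify_ssl_certs=True)
    saml_assertion = '<base64-encoded SAML assertion>'
    saml_target_url = 'https://signin.aws.amazon.com/saml'
    roles = resolver._enumerate_saml_roles(saml_assertion, saml_target_url)
    for line in AwsResolver._display_role(roles):
        print(line)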
|
epierce/gimme-aws-creds
|
gimme_aws_creds/aws.py
|
Python
|
apache-2.0
| 4,984
|
# vim:set fileencoding=utf-8
import unittest
from unittest.mock import patch, sentinel, DEFAULT
from blivet.actionlist import ActionList
from blivet.deviceaction import ActionDestroyFormat
from blivet.devices import DiskDevice
from blivet.devices import DiskFile
from blivet.devices import LVMLogicalVolumeDevice
from blivet.devices import LVMVolumeGroupDevice
from blivet.devices import PartitionDevice
from blivet.devicetree import DeviceTree
from blivet.formats import get_format
from blivet.size import Size
from blivet.util import sparsetmpfile
class UnsupportedDiskLabelTestCase(unittest.TestCase):
def setUp(self):
disk1 = DiskDevice("testdisk", size=Size("300 GiB"), exists=True,
fmt=get_format("disklabel", exists=True))
disk1.format._supported = False
with self.assertLogs("blivet", level="INFO") as cm:
partition1 = PartitionDevice("testpart1", size=Size("150 GiB"), exists=True,
parents=[disk1], fmt=get_format("ext4", exists=True))
self.assertTrue("disklabel is unsupported" in "\n".join(cm.output))
with self.assertLogs("blivet", level="INFO") as cm:
partition2 = PartitionDevice("testpart2", size=Size("100 GiB"), exists=True,
parents=[disk1], fmt=get_format("lvmpv", exists=True))
self.assertTrue("disklabel is unsupported" in "\n".join(cm.output))
        # To be supported, all of a device's ancestors must be supported.
disk2 = DiskDevice("testdisk2", size=Size("300 GiB"), exists=True,
fmt=get_format("lvmpv", exists=True))
vg = LVMVolumeGroupDevice("testvg", exists=True, parents=[partition2, disk2])
lv = LVMLogicalVolumeDevice("testlv", exists=True, size=Size("64 GiB"),
parents=[vg], fmt=get_format("ext4", exists=True))
with sparsetmpfile("addparttest", Size("50 MiB")) as disk_file:
disk3 = DiskFile(disk_file)
disk3.format = get_format("disklabel", device=disk3.path, exists=False)
self.disk1 = disk1
self.disk2 = disk2
self.disk3 = disk3
self.partition1 = partition1
self.partition2 = partition2
self.vg = vg
self.lv = lv
def test_unsupported_disklabel(self):
""" Test behavior of partitions on unsupported disklabels. """
# Verify basic properties of the disk and disklabel.
self.assertTrue(self.disk1.partitioned)
self.assertFalse(self.disk1.format.supported)
self.assertTrue(self.disk3.partitioned)
self.assertTrue(self.disk3.format.supported) # normal disklabel is supported
# Verify some basic properties of the partitions.
self.assertFalse(self.partition1.disk.format.supported)
self.assertFalse(self.partition2.disk.format.supported)
self.assertEqual(self.partition1.disk, self.disk1)
self.assertEqual(self.partition2.disk, self.disk1)
self.assertIsNone(self.partition1.parted_partition)
self.assertIsNone(self.partition2.parted_partition)
self.assertFalse(self.partition1.is_magic)
self.assertFalse(self.partition2.is_magic)
# Verify that probe returns without changing anything.
partition1_type = sentinel.partition1_type
self.partition1._part_type = partition1_type
self.partition1.probe()
self.assertEqual(self.partition1.part_type, partition1_type)
self.partition1._part_type = None
# partition1 is not resizable even though it contains a resizable filesystem
self.assertEqual(self.partition1.resizable, False)
# lv is resizable as usual
with patch.object(self.lv.format, "_resizable", new=True):
self.assertEqual(self.lv.resizable, True)
# the lv's destroy method should call blockdev.lvm.lvremove as usual
with patch.object(self.lv, "_pre_destroy"):
with patch("blivet.devices.lvm.blockdev.lvm.lvremove") as lvremove:
self.lv.destroy()
self.assertTrue(lvremove.called)
# the vg's destroy method should call blockdev.lvm.vgremove as usual
with patch.object(self.vg, "_pre_destroy"):
with patch.multiple("blivet.devices.lvm.blockdev.lvm",
vgreduce=DEFAULT,
vgdeactivate=DEFAULT,
vgremove=DEFAULT) as mocks:
self.vg.destroy()
self.assertTrue(mocks["vgreduce"].called)
self.assertTrue(mocks["vgdeactivate"].called)
self.assertTrue(mocks["vgremove"].called)
# the partition's destroy method shouldn't try to call any disklabel methods
with patch.object(self.partition2, "_pre_destroy"):
with patch.object(self.partition2.disk, "original_format") as disklabel:
self.partition2.destroy()
self.assertEqual(len(disklabel.mock_calls), 0)
self.assertTrue(self.partition2.exists)
# Destroying the disklabel should set all partitions to non-existing.
# XXX This part is handled by ActionList.
actions = ActionList()
unsupported_disklabel = self.disk1.format
actions.add(ActionDestroyFormat(self.disk1))
self.assertTrue(self.disk1.format.exists)
self.assertTrue(self.partition1.exists)
self.assertTrue(self.partition2.exists)
with patch.object(unsupported_disklabel, "_pre_destroy"):
with patch.object(unsupported_disklabel, "_destroy") as destroy:
with patch.object(actions, "_pre_process"):
with patch.object(actions, "_post_process"):
actions.process(devices=[self.partition1, self.partition2, self.disk1])
self.assertTrue(destroy.called)
self.assertFalse(unsupported_disklabel.exists)
self.assertFalse(self.partition1.exists)
self.assertFalse(self.partition2.exists)
def test_recursive_remove(self):
devicetree = DeviceTree()
devicetree._add_device(self.disk1)
devicetree._add_device(self.partition1)
devicetree._add_device(self.partition2)
devicetree._add_device(self.disk2)
devicetree._add_device(self.vg)
devicetree._add_device(self.lv)
self.assertIn(self.disk1, devicetree.devices)
self.assertIn(self.partition1, devicetree.devices)
self.assertIn(self.lv, devicetree.devices)
self.assertEqual(devicetree.get_device_by_name(self.disk1.name), self.disk1)
self.assertIsNotNone(devicetree.get_device_by_name(self.partition1.name))
self.assertIsNotNone(devicetree.get_device_by_name(self.partition1.name, hidden=True))
self.assertIsNotNone(devicetree.get_device_by_name(self.lv.name, hidden=True))
self.assertIsNotNone(devicetree.get_device_by_path(self.lv.path, hidden=True))
self.assertIsNotNone(devicetree.get_device_by_id(self.partition2.id, hidden=True,
incomplete=True))
self.assertEqual(len(devicetree.get_dependent_devices(self.disk1)), 4)
with patch('blivet.devicetree.ActionDestroyFormat.apply'):
devicetree.recursive_remove(self.disk1)
self.assertTrue(self.disk1 in devicetree.devices)
self.assertFalse(self.partition1 in devicetree.devices)
self.assertFalse(self.partition2 in devicetree.devices)
self.assertFalse(self.vg in devicetree.devices)
self.assertFalse(self.lv in devicetree.devices)
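

# --- Illustrative note added by the editor; not part of the original test module ---
# These cases are normally collected by the project's test runner; the guard below is a
# minimal manual entry point, added on the assumption that no extra fixtures are needed
# to run the TestCase defined above directly.
if __name__ == "__main__":  # pragma: no cover
    unittest.main()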
|
rhinstaller/blivet
|
tests/unsupported_disklabel_test.py
|
Python
|
lgpl-2.1
| 7,672
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base
from neutron.tests.tempest import test
class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):
"""
Tests the following operations with the Extra DHCP Options Neutron API
extension:
port create
port list
port show
port update
v2.0 of the Neutron API is assumed. It is also assumed that the Extra
DHCP Options extension is enabled in the [network-feature-enabled]
section of etc/tempest.conf
"""
@classmethod
def resource_setup(cls):
super(ExtraDHCPOptionsTestJSON, cls).resource_setup()
if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
msg = "Extra DHCP Options extension not enabled."
raise cls.skipException(msg)
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.port = cls.create_port(cls.network)
cls.ip_tftp = ('123.123.123.123' if cls._ip_version == 4
else '2015::dead')
cls.ip_server = ('123.123.123.45' if cls._ip_version == 4
else '2015::badd')
cls.extra_dhcp_opts = [
{'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'},
{'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'},
{'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'}
]
@test.attr(type='smoke')
@test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
def test_create_list_port_with_extra_dhcp_options(self):
# Create a port with Extra DHCP Options
body = self.client.create_port(
network_id=self.network['id'],
extra_dhcp_opts=self.extra_dhcp_opts)
port_id = body['port']['id']
self.addCleanup(self.client.delete_port, port_id)
# Confirm port created has Extra DHCP Options
body = self.client.list_ports()
ports = body['ports']
port = [p for p in ports if p['id'] == port_id]
self.assertTrue(port)
self._confirm_extra_dhcp_options(port[0], self.extra_dhcp_opts)
@test.attr(type='smoke')
@test.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607')
def test_update_show_port_with_extra_dhcp_options(self):
# Update port with extra dhcp options
name = data_utils.rand_name('new-port-name')
body = self.client.update_port(
self.port['id'],
name=name,
extra_dhcp_opts=self.extra_dhcp_opts)
# Confirm extra dhcp options were added to the port
body = self.client.show_port(self.port['id'])
self._confirm_extra_dhcp_options(body['port'], self.extra_dhcp_opts)
def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
retrieved = port['extra_dhcp_opts']
self.assertEqual(len(retrieved), len(extra_dhcp_opts))
for retrieved_option in retrieved:
for option in extra_dhcp_opts:
if (retrieved_option['opt_value'] == option['opt_value'] and
retrieved_option['opt_name'] == option['opt_name']):
break
else:
self.fail('Extra DHCP option not found in port %s' %
str(retrieved_option))
class ExtraDHCPOptionsIpV6TestJSON(ExtraDHCPOptionsTestJSON):
_ip_version = 6
|
miyakz1192/neutron
|
neutron/tests/api/test_extra_dhcp_options.py
|
Python
|
apache-2.0
| 4,030
|
# coding=utf-8
# Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flaubert configuration, based on XLM."""
from ...utils import logging
from ..xlm.configuration_xlm import XLMConfig
logger = logging.get_logger(__name__)
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"flaubert/flaubert_small_cased": "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/config.json",
"flaubert/flaubert_base_uncased": "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/config.json",
"flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/config.json",
"flaubert/flaubert_large_cased": "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/config.json",
}
class FlaubertConfig(XLMConfig):
"""
This is the configuration class to store the configuration of a [`FlaubertModel`] or a [`TFFlaubertModel`]. It is
used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
pre_norm (`bool`, *optional*, defaults to `False`):
Whether to apply the layer normalization before or after the feed forward layer following the attention in
each layer (Vaswani et al., Tensor2Tensor for Neural Machine Translation. 2018)
layerdrop (`float`, *optional*, defaults to 0.0):
Probability to drop layers during training (Fan et al., Reducing Transformer Depth on Demand with
Structured Dropout. ICLR 2020)
vocab_size (`int`, *optional*, defaults to 30145):
Vocabulary size of the FlauBERT model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`FlaubertModel`] or [`TFFlaubertModel`].
emb_dim (`int`, *optional*, defaults to 2048):
Dimensionality of the encoder layers and the pooler layer.
n_layer (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention mechanism
gelu_activation (`bool`, *optional*, defaults to `True`):
Whether or not to use a *gelu* activation instead of *relu*.
sinusoidal_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings.
causal (`bool`, *optional*, defaults to `False`):
Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask in
            order to only attend to the left-side context instead of a bidirectional context.
asm (`bool`, *optional*, defaults to `False`):
Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction
layer.
n_langs (`int`, *optional*, defaults to 1):
The number of languages the model handles. Set to 1 for monolingual models.
        use_lang_emb (`bool`, *optional*, defaults to `True`):
Whether to use language embeddings. Some models use additional language embeddings, see [the multilingual
models page](http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings) for information
on how to use them.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
embed_init_std (`float`, *optional*, defaults to 2048^-0.5):
The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.
init_std (`int`, *optional*, defaults to 50257):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the
embedding matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
bos_index (`int`, *optional*, defaults to 0):
The index of the beginning of sentence token in the vocabulary.
eos_index (`int`, *optional*, defaults to 1):
The index of the end of sentence token in the vocabulary.
pad_index (`int`, *optional*, defaults to 2):
The index of the padding token in the vocabulary.
unk_index (`int`, *optional*, defaults to 3):
The index of the unknown token in the vocabulary.
mask_index (`int`, *optional*, defaults to 5):
The index of the masking token in the vocabulary.
is_encoder(`bool`, *optional*, defaults to `True`):
Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.
summary_type (`string`, *optional*, defaults to "first"):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
Used in the sequence classification and multiple choice models.
Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
summary_first_dropout (`float`, *optional*, defaults to 0.1):
Used in the sequence classification and multiple choice models.
The dropout ratio to be used after the projection and activation.
start_n_top (`int`, *optional*, defaults to 5):
Used in the SQuAD evaluation script.
end_n_top (`int`, *optional*, defaults to 5):
Used in the SQuAD evaluation script.
mask_token_id (`int`, *optional*, defaults to 0):
Model agnostic parameter to identify masked tokens when generating text in an MLM context.
lang_id (`int`, *optional*, defaults to 1):
The ID of the language used by the model. This parameter is used when generating text in a given language.
"""
model_type = "flaubert"
def __init__(self, layerdrop=0.0, pre_norm=False, pad_token_id=2, bos_token_id=0, **kwargs):
"""Constructs FlaubertConfig."""
self.layerdrop = layerdrop
self.pre_norm = pre_norm
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
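

# --- Illustrative usage added by the editor; not part of the original module ---
# A minimal sketch of instantiating the configuration documented above; the overridden
# values are arbitrary examples, and everything not passed here falls back to the
# defaults inherited from XLMConfig.
if __name__ == "__main__":  # pragma: no cover
    demo_config = FlaubertConfig(layerdrop=0.1, pre_norm=True)
    print(demo_config.model_type, demo_config.layerdrop, demo_config.pre_norm)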
|
huggingface/transformers
|
src/transformers/models/flaubert/configuration_flaubert.py
|
Python
|
apache-2.0
| 8,543
|
#!/usr/bin/env python
"""
Demonstrate use of pysnmp walks
"""
import sys
import re
from pysnmp.entity.rfc3413.oneliner import cmdgen
cmdGen = cmdgen.CommandGenerator()
devip = sys.argv.pop(1)
errorIndication, errorStatus, errorIndex, varBindTable = cmdGen.nextCmd(
cmdgen.CommunityData('server', 'galileo', 1),
cmdgen.UdpTransportTarget((devip, 161)),
cmdgen.MibVariable('IF-MIB', '').loadMibs(),
lexicographicMode=True, maxRows=150
)
if errorIndication:
print errorIndication
else:
if errorStatus:
print '%s at %s' % (
errorStatus.prettyPrint(),
errorIndex and varBindTable[-1][int(errorIndex)-1] or '?'
)
else:
ifdescr = []
inoctets = []
outoctets = []
for varBindTableRow in varBindTable:
for name, val in varBindTableRow:
np = name.prettyPrint()
vp = val.prettyPrint()
if re.search(r"ifDescr\.\d+", np):
ifdescr.append(vp)
continue
if re.search(r"ifInOctets\.\d+", np):
inoctets.append(vp)
continue
if re.search(r"ifOutOctets\.\d+", np):
outoctets.append(vp)
for l in zip(ifdescr, inoctets, outoctets):
print "%s\t%s\t%s" %(l[0], l[1], l[2])
|
patrebert/pynet_cert
|
class2/walk2.py
|
Python
|
apache-2.0
| 1,368
|
"""0MQ Error classes and functions."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class ZMQBaseError(Exception):
"""Base exception class for 0MQ errors in Python."""
pass
class ZMQError(ZMQBaseError):
"""Wrap an errno style error.
Parameters
----------
errno : int
The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
used.
msg : string
Description of the error or None.
"""
errno = None
def __init__(self, errno=None, msg=None):
"""Wrap an errno style error.
Parameters
----------
errno : int
The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
used.
msg : string
Description of the error or None.
"""
from zmq.backend import strerror, zmq_errno
if errno is None:
errno = zmq_errno()
if isinstance(errno, int):
self.errno = errno
if msg is None:
self.strerror = strerror(errno)
else:
self.strerror = msg
else:
if msg is None:
self.strerror = str(errno)
else:
self.strerror = msg
# flush signals, because there could be a SIGINT
# waiting to pounce, resulting in uncaught exceptions.
# Doing this here means getting SIGINT during a blocking
# libzmq call will raise a *catchable* KeyboardInterrupt
# PyErr_CheckSignals()
def __str__(self):
return self.strerror
def __repr__(self):
return "ZMQError('%s')"%self.strerror
class ZMQBindError(ZMQBaseError):
"""An error for ``Socket.bind_to_random_port()``.
See Also
--------
.Socket.bind_to_random_port
"""
pass
class NotDone(ZMQBaseError):
"""Raised when timeout is reached while waiting for 0MQ to finish with a Message
See Also
--------
.MessageTracker.wait : object for tracking when ZeroMQ is done
"""
pass
class ContextTerminated(ZMQError):
"""Wrapper for zmq.ETERM
.. versionadded:: 13.0
"""
pass
class Again(ZMQError):
"""Wrapper for zmq.EAGAIN
.. versionadded:: 13.0
"""
pass
def _check_rc(rc, errno=None):
"""internal utility for checking zmq return condition
and raising the appropriate Exception class
"""
if rc < 0:
from zmq.backend import zmq_errno
if errno is None:
errno = zmq_errno()
from zmq import EAGAIN, ETERM
if errno == EAGAIN:
raise Again(errno)
elif errno == ETERM:
raise ContextTerminated(errno)
else:
raise ZMQError(errno)
_zmq_version_info = None
_zmq_version = None
class ZMQVersionError(NotImplementedError):
"""Raised when a feature is not provided by the linked version of libzmq.
.. versionadded:: 14.2
"""
min_version = None
def __init__(self, min_version, msg='Feature'):
global _zmq_version
if _zmq_version is None:
from zmq import zmq_version
_zmq_version = zmq_version()
self.msg = msg
self.min_version = min_version
self.version = _zmq_version
def __repr__(self):
return "ZMQVersionError('%s')" % str(self)
def __str__(self):
return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version)
def _check_version(min_version_info, msg='Feature'):
"""Check for libzmq
raises ZMQVersionError if current zmq version is not at least min_version
min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info().
"""
global _zmq_version_info
if _zmq_version_info is None:
from zmq import zmq_version_info
_zmq_version_info = zmq_version_info()
if _zmq_version_info < min_version_info:
min_version = '.'.join(str(v) for v in min_version_info)
raise ZMQVersionError(min_version, msg)
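# Illustrative sketch (not part of pyzmq): a caller can guard an optional
# capability with _check_version(); the feature name below is hypothetical.
#
#     _check_version((4, 1), 'hypothetical security mechanism')
#     # raises ZMQVersionError("... requires libzmq >= 4.1, have ...") if older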
__all__ = [
'ZMQBaseError',
'ZMQBindError',
'ZMQError',
'NotDone',
'ContextTerminated',
'Again',
'ZMQVersionError',
]
|
ellisonbg/pyzmq
|
zmq/error.py
|
Python
|
lgpl-3.0
| 4,885
|
#!/usr/bin/env python
from siconos.tests_setup import working_dir
import os
def test_bouncing_ball1():
from siconos.kernel import LagrangianLinearTIDS, NewtonImpactNSL, \
LagrangianLinearTIR, Interaction, Model, MoreauJeanOSI, TimeDiscretisation, LCP, TimeStepping
from numpy import array, eye, empty
t0 = 0 # start time
T = 10 # end time
h = 0.005 # time step
r = 0.1 # ball radius
g = 9.81 # gravity
m = 1 # ball mass
e = 0.9 # restitution coefficient
theta = 0.5 # theta scheme
#
# dynamical system
#
x = array([1, 0, 0]) # initial position
v = array([0, 0, 0]) # initial velocity
mass = eye(3) # mass matrix
mass[2, 2] = 3./5 * r * r
# the dynamical system
ball = LagrangianLinearTIDS(x, v, mass)
# set external forces
weight = array([-m * g, 0, 0])
ball.setFExtPtr(weight)
#
# Interactions
#
# ball-floor
H = array([[1, 0, 0]])
nslaw = NewtonImpactNSL(e)
relation = LagrangianLinearTIR(H)
inter = Interaction(1, nslaw, relation)
#
# Model
#
bouncingBall = Model(t0, T)
# add the dynamical system to the non smooth dynamical system
bouncingBall.nonSmoothDynamicalSystem().insertDynamicalSystem(ball)
# link the interaction and the dynamical system
bouncingBall.nonSmoothDynamicalSystem().link(inter, ball)
#
# Simulation
#
# (1) OneStepIntegrators
OSI = MoreauJeanOSI(theta)
# (2) Time discretisation --
t = TimeDiscretisation(t0, h)
# (3) one step non smooth problem
osnspb = LCP()
# (4) Simulation setup with (1) (2) (3)
s = TimeStepping(t)
s.insertIntegrator(OSI)
s.insertNonSmoothProblem(osnspb)
# end of model definition
#
# computation
#
# simulation initialization
bouncingBall.initialize(s)
#
# save and load data from xml and .dat
#
try:
from siconos.io import save
save(bouncingBall, "bouncingBall.xml")
save(bouncingBall, "bouncingBall.bin")
except:
print("Warning : could not import save from siconos.io")
# the number of time steps
N = (T-t0)/h+1
# Get the values to be plotted
# ->saved in a matrix dataPlot
dataPlot = empty((N, 5))
#
# numpy pointers on dense Siconos vectors
#
q = ball.q()
v = ball.velocity()
p = ball.p(1)
lambda_ = inter.lambda_(1)
#
# initial data
#
dataPlot[0, 0] = t0
dataPlot[0, 1] = q[0]
dataPlot[0, 2] = v[0]
dataPlot[0, 3] = p[0]
dataPlot[0, 4] = lambda_[0]
k = 1
# time loop
while(s.hasNextEvent()):
s.computeOneStep()
dataPlot[k, 0] = s.nextTime()
dataPlot[k, 1] = q[0]
dataPlot[k, 2] = v[0]
dataPlot[k, 3] = p[0]
dataPlot[k, 4] = lambda_[0]
k += 1
#print(s.nextTime())
s.nextStep()
#
# comparison with the reference file
#
from siconos.kernel import SimpleMatrix, getMatrix
from numpy.linalg import norm
ref = getMatrix(SimpleMatrix(os.path.join(working_dir, "data/result.ref")))
assert (norm(dataPlot - ref) < 1e-12)
def xtest_bouncing_ball_from_xml():
assert False # just have to load from xml...
def xtest_bouncing_ball_from_binary():
assert False # just have to load from .dat...
def test_bouncing_ball2():
import siconos.kernel as K
from numpy import array, eye, empty
t0 = 0 # start time
T = 5 # end time
h = 0.005 # time step
r = 0.1 # ball radius
g = 9.81 # gravity
m = 1 # ball mass
e = 0.9 # restitution coefficient
theta = 0.5 # theta scheme
#
# dynamical system
#
x = array([1, 0, 0]) # initial position
v = array([0, 0, 0]) # initial velocity
mass = eye(3) # mass matrix
mass[2, 2] = 3./5 * r * r
# the dynamical system
ball = K.LagrangianLinearTIDS(x, v, mass)
# set external forces
weight = array([-m * g, 0, 0])
ball.setFExtPtr(weight)
# a ball with its own computeFExt
class Ball(K.LagrangianLinearTIDS):
def computeFExt(self, t):
#print("computing FExt at t=", t)
weight = array([-m * g, 0, 0])
self.setFExtPtr(weight)
ball_d = Ball(array([1, 0, 0]), array([0, 0, 0]), mass)
ball_d.setFExtPtr(array([0, 0, 0]))
#
# Interactions
#
# ball-floor
H = array([[1, 0, 0]])
nslaw = K.NewtonImpactNSL(e)
nslaw_d = K.NewtonImpactNSL(e)
relation = K.LagrangianLinearTIR(H)
relation_d = K.LagrangianLinearTIR(H)
inter = K.Interaction(1, nslaw, relation)
inter_d = K.Interaction(1, nslaw_d, relation_d)
#
# Model
#
bouncingBall = K.Model(t0, T)
bouncingBall_d = K.Model(t0, T)
# add the dynamical system to the non smooth dynamical system
bouncingBall.nonSmoothDynamicalSystem().insertDynamicalSystem(ball)
bouncingBall_d.nonSmoothDynamicalSystem().insertDynamicalSystem(ball_d)
# link the interaction and the dynamical system
bouncingBall.nonSmoothDynamicalSystem().link(inter, ball)
bouncingBall_d.nonSmoothDynamicalSystem().link(inter_d, ball_d)
#
# Simulation
#
# (1) OneStepIntegrators
OSI = K.MoreauJeanOSI(theta)
OSI_d = K.MoreauJeanOSI(theta)
# (2) Time discretisation --
t = K.TimeDiscretisation(t0, h)
t_d = K.TimeDiscretisation(t0, h)
# (3) one step non smooth problem
osnspb = K.LCP()
osnspb_d = K.LCP()
# (4) Simulation setup with (1) (2) (3)
s = K.TimeStepping(t)
s.insertIntegrator(OSI)
s.insertNonSmoothProblem(osnspb)
s_d = K.TimeStepping(t_d)
s_d.insertIntegrator(OSI_d)
s_d.insertNonSmoothProblem(osnspb_d)
# end of model definition
#
# computation
#
# simulation initialization
bouncingBall.initialize(s)
bouncingBall_d.initialize(s_d)
# the number of time steps
N = (T-t0)/h+1
# Get the values to be plotted
# ->saved in a matrix dataPlot
s_d.computeOneStep()
dataPlot = empty((N+1, 5))
dataPlot_d = empty((N+1, 5))
dataPlot[0, 0] = t0
dataPlot[0, 1] = ball.q()[0]
dataPlot[0, 2] = ball.velocity()[0]
dataPlot[0, 3] = ball.p(1)[0]
dataPlot[0, 4] = inter.lambda_(1)
dataPlot_d[0, 0] = t0
dataPlot_d[0, 1] = ball_d.q()[0]
dataPlot_d[0, 2] = ball_d.velocity()[0]
dataPlot_d[0, 3] = ball_d.p(1)[0]
dataPlot_d[0, 4] = inter_d.lambda_(1)
k = 1
# time loop
while(s.hasNextEvent()):
s.computeOneStep()
s_d.computeOneStep()
dataPlot[k, 0] = s.nextTime()
dataPlot[k, 1] = ball.q()[0]
dataPlot[k, 2] = ball.velocity()[0]
dataPlot[k, 3] = ball.p(1)[0]
dataPlot[k, 4] = inter.lambda_(1)[0]
dataPlot_d[k, 0] = s_d.nextTime()
dataPlot_d[k, 1] = ball_d.q()[0]
dataPlot_d[k, 2] = ball_d.velocity()[0]
dataPlot_d[k, 3] = ball_d.p(1)[0]
dataPlot_d[k, 4] = inter_d.lambda_(1)[0]
assert dataPlot[k, 1] == dataPlot_d[k, 1]
#print(s.nextTime())
k += 1
s.nextStep()
s_d.nextStep()
|
siconos/siconos-deb
|
kernel/swig/tests/test_bouncing_ball.py
|
Python
|
apache-2.0
| 7,255
|
from config import *
import scripts.butil
def run_TLD(seq, rp, bSaveImage):
global m
if m is None:
print 'Starting matlab engine...'
m = matlab.engine.start_matlab()
m.addpath(m.genpath('.', nargout=1), nargout=0)
seq.init_rect = matlab.double(seq.init_rect)
m.workspace['subS'] = seq.__dict__
m.workspace['rp'] = os.path.abspath(rp)
m.workspace['bSaveImage'] = bSaveImage
func = 'run_TLD(subS, rp, bSaveImage);'
res = m.eval(func, nargout=1)
res['res'] = scripts.butil.matlab_double_to_py_float(res['res'])
# m.quit()
return res
|
stevenwudi/Kernelized_Correlation_Filter
|
scripts/bscripts/run_TLD.py
|
Python
|
gpl-3.0
| 595
|
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
try:
import simplejson as json
except ImportError:
import json
import geojson
GEO_INTERFACE_MARKER = "__geo_interface__"
def is_mapping(obj):
"""
Checks if the object is an instance of MutableMapping.
:param obj: Object to be checked.
:return: Truth value of whether the object is an instance of
MutableMapping.
:rtype: bool
"""
return isinstance(obj, MutableMapping)
def to_mapping(obj):
mapping = getattr(obj, GEO_INTERFACE_MARKER, None)
if mapping is not None:
return mapping
if is_mapping(obj):
return obj
if isinstance(obj, geojson.GeoJSON):
return dict(obj)
return json.loads(json.dumps(obj))
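# Illustrative sketch (not part of this module): objects exposing
# __geo_interface__ return that mapping, plain mappings pass through, and any
# other JSON-serializable object is round-tripped through json:
#
#     class Point(object):
#         __geo_interface__ = {"type": "Point", "coordinates": (0.0, 0.0)}
#
#     to_mapping(Point())    # -> {"type": "Point", "coordinates": (0.0, 0.0)}
#     to_mapping({"a": 1})   # -> {"a": 1} (already a MutableMapping)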
|
flavour/eden
|
modules/geojson/mapping.py
|
Python
|
mit
| 812
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hook', '0003_add_subset_fields_to_hook_model'),
]
operations = [
migrations.AddField(
model_name='hook',
name='payload_template',
field=models.TextField(null=True, blank=True),
),
]
|
kobotoolbox/kpi
|
kobo/apps/hook/migrations/0004_add_payload_template.py
|
Python
|
agpl-3.0
| 385
|
for i in range(1, 3):
# print(i) # error, need at least one space
print(i)
print('the same block')
print('completed')
|
matiishyn/py-learning
|
2-program-flow-control/1-intro.py
|
Python
|
mit
| 125
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2009-2010 Craig J. Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Reports/Graphical Reports/Familial Tree
Reports/Graphical Reports/Personal Tree
"""
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.ggettext import sgettext as _
from gramps.gen.errors import ReportError
from gramps.gen.plug.menu import TextOption
from gramps.gen.plug.menu import NumberOption
from gramps.gen.plug.menu import EnumeratedListOption
from gramps.gen.plug.menu import StringOption
from gramps.gen.plug.menu import BooleanOption
from gramps.gen.plug.menu import PersonOption
from gramps.gen.plug.menu import FamilyOption
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
PT2CM = ReportUtils.pt2cm
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
_BORN = _('short for born|b.')
_DIED = _('short for died|d.')
_MARR = _('short for married|m.')
_RPT_NAME = 'descend_chart'
from gramps.plugins.lib.libtreebase import *
#------------------------------------------------------------------------
#
# Box classes
#
#------------------------------------------------------------------------
class DescendantBoxBase(BoxBase):
"""
Base for all descendant boxes.
Set the boxstr and some new attributes that are needed
"""
def __init__(self, boxstr):
BoxBase.__init__(self)
self.boxstr = boxstr
self.next = None
self.father = None
def calc_text(self, database, person, family):
""" A single place to calculate box text """
gui = GuiConnect()
calc = gui.calc_lines(database)
self.text = calc.calc_lines(person, family,
gui.working_lines(self))
class PersonBox(DescendantBoxBase):
"""
Calculates information about the box that will print on a page
"""
def __init__(self, level, boldable = 0):
DescendantBoxBase.__init__(self, "CG2-box")
self.level = level
def set_bold(self):
""" update me to a bolded box """
self.boxstr = "CG2b-box"
class FamilyBox(DescendantBoxBase):
"""
Calculates information about the box that will print on a page
"""
def __init__(self, level):
DescendantBoxBase.__init__(self, "CG2-fam-box")
self.level = level
class PlaceHolderBox(BoxBase):
"""
I am a box that does not print. I am used to make sure other
information (boxes) does not run over areas where we don't want it.
"""
def __init__(self, level):
BoxBase.__init__(self)
self.boxstr = "None"
self.level = level
self.line_to = None
self.next = None
def calc_text(self, database, person, family):
""" move along. Nothing to see here """
return
#------------------------------------------------------------------------
#
# Titles Class(es)
#
#------------------------------------------------------------------------
class DescendantTitleBase(TitleBox):
def __init__(self, dbase, doc, boxstr = "CG2-Title"):
TitleBox.__init__(self, doc, boxstr)
self.database = dbase
def descendant_print(self, person_list, person_list2 = []):
""" calculate the Descendant title
Person_list will always be passed
If in the Family reports and there are two families, person_list2
will be used.
"""
if len(person_list) == len(person_list2) == 1:
person_list = person_list + person_list2
person_list2 = []
names = self._get_names(person_list)
if person_list2:
names2 = self._get_names(person_list2)
if len(names) + len(names2) == 3:
if len(names) == 1:
title = _("Descendant Chart for %(person)s and "
"%(father1)s, %(mother1)s") % \
{'person': names[0],
'father1': names2[0],
'mother1': names2[1],
}
else: # Should be 2 items in names list
title = _("Descendant Chart for %(person)s, %(father1)s "
"and %(mother1)s") % \
{'father1': names[0],
'mother1': names[1],
'person': names2[0],
}
else: # Should be 2 items in both names and names2 lists
title = _("Descendant Chart for %(father1)s, %(father2)s "
"and %(mother1)s, %(mother2)s") % \
{'father1': names[0],
'mother1': names[1],
'father2': names2[0],
'mother2': names2[1],
}
else: # No person_list2: Just one family
if len(names) == 1:
title = _("Descendant Chart for %(person)s") % \
{'person': names[0]}
else: # Should be two items in names list
title = _("Descendant Chart for %(father)s and %(mother)s") % \
{'father': names[0],
'mother': names[1],
}
return title
def get_parents(self, family_id):
""" For a family_id, return the father and mother """
family1 = self.database.get_family_from_gramps_id(family_id)
father_h = family1.get_father_handle()
mother_h = family1.get_mother_handle()
parents = [self.database.get_person_from_handle(handle)
for handle in [father_h, mother_h] if handle]
return parents
class TitleNone(TitleNoDisplay):
"""Family Chart Title class for the report """
def __init__(self, dbase, doc):
TitleNoDisplay.__init__(self, doc, "CG2-Title")
def calc_title(self, persons):
"""Calculate the title of the report"""
self.text = 'Descendant Graph'
class TitleDPY(DescendantTitleBase):
"""Descendant (Person yes start with parents) Chart
Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, person_id):
"""Calculate the title of the report"""
center = self.database.get_person_from_gramps_id(person_id)
family2_h = center.get_main_parents_family_handle()
family2 = self.database.get_family_from_handle(family2_h)
person_list = None
if family2:
father2_h = family2.get_father_handle()
mother2_h = family2.get_mother_handle()
person_list = [self.database.get_person_from_handle(handle)
for handle in [father2_h, mother2_h] if handle]
if not person_list:
person_list = [center]
self.text = self.descendant_print(person_list)
self.set_box_height_width()
class TitleDPN(DescendantTitleBase):
"""Descendant (Person no start with parents) Chart
Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, person_id):
"""Calculate the title of the report"""
center = self.database.get_person_from_gramps_id(person_id)
title = self.descendant_print([center])
self.text = title
self.set_box_height_width()
class TitleDFY(DescendantTitleBase):
"""Descendant (Family yes start with parents) Chart
Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def get_parent_list(self, person):
""" return a list of my parents. If none, return me """
if not person:
return None
parent_list = None
family_h = person.get_main_parents_family_handle()
family = self.database.get_family_from_handle(family_h)
if family: #family = fathers parents
father_h = family.get_father_handle()
mother_h = family.get_mother_handle()
parent_list = [self.database.get_person_from_handle(handle)
for handle in [father_h, mother_h] if handle]
return parent_list or [person]
def calc_title(self, family_id):
"""Calculate the title of the report"""
my_parents = self.get_parents(family_id)
dad_parents = self.get_parent_list(my_parents[0])
mom_parents = []
if len(my_parents) > 1:
if not dad_parents:
dad_parents = self.get_parent_list(my_parents[1])
else:
mom_parents = self.get_parent_list(my_parents[1])
self.text = self.descendant_print(dad_parents, mom_parents)
self.set_box_height_width()
class TitleDFN(DescendantTitleBase):
"""Descendant (Family no start with parents) Chart
Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, family_id):
"""Calculate the title of the report"""
self.text = self.descendant_print(
self.get_parents(family_id) )
self.set_box_height_width()
class TitleF(DescendantTitleBase):
"""Family Chart Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, family_id):
"""Calculate the title of the report"""
parents = self.get_parents(family_id)
names = self._get_names(parents)
if len(parents) == 1:
title = _("Family Chart for %(person)s") % {'person': names[0] }
elif len(parents) == 2:
title = _("Family Chart for %(father1)s and %(mother1)s") % \
{'father1': names[0], 'mother1': names[1] }
#else:
# title = str(tmp) + " " + str(len(tmp))
self.text = title
self.set_box_height_width()
class TitleC(DescendantTitleBase):
"""Cousin Chart Title class for the report """
def __init__(self, dbase, doc):
DescendantTitleBase.__init__(self, dbase, doc)
def calc_title(self, family_id):
"""Calculate the title of the report"""
family = self.database.get_family_from_gramps_id(family_id)
kids = [self.database.get_person_from_handle(kid.ref)
for kid in family.get_child_ref_list()]
#ok we have the children. Make a title off of them
tmp = self._get_names(kids)
self.text = _("Cousin Chart for " + ", ".join(tmp))
self.set_box_height_width()
#------------------------------------------------------------------------
#
# Class RecurseDown
#
#------------------------------------------------------------------------
class RecurseDown:
"""
The main recursive functions that will use add_person to make
the tree of people (Descendants) to be included within the report.
"""
def __init__(self, dbase, canvas):
self.database = dbase
self.canvas = canvas
self.families_seen = set()
self.cols = []
self.__last_direct = []
gui = GuiConnect()
self.do_parents = gui.get_val('show_parents')
self.max_generations = gui.get_val('maxgen')
self.max_spouses = gui.get_val('maxspouse')
self.inlc_marr = gui.get_val("inc_marr")
if not self.max_spouses:
self.inlc_marr = False
#is the option even available?
self.bold_direct = gui.get_val('bolddirect')
#can we bold direct descendants?
#bold_now will have only three values
#0 - no bolding
#1 - Only bold the first person
#2 - Bold all direct descendants
self.bold_now = 0
gui = None
def add_to_col(self, box):
"""
Add the box to a column on the canvas. We will do these things:
set the .next attrib for the boxes in this col,
get the height and width of this box and set it on the column,
and set .x_cm to the s_level (spouse indentation) here;
the real .x_cm will be calculated later (with indentation).
"""
level = box.level[0]
#make the column list of people
while len(self.cols) <= level:
self.cols.append(None)
self.__last_direct.append(None)
if self.cols[level]: #if (not the first box in this column)
last_box = self.cols[level]
last_box.next = box
#calculate the .y_cm for this box.
box.y_cm = last_box.y_cm
box.y_cm += last_box.height
if last_box.boxstr in ["CG2-box", "CG2b-box"]:
box.y_cm += self.canvas.report_opts.box_shadow
if box.boxstr in ["CG2-box", "CG2b-box"]:
box.y_cm += self.canvas.report_opts.box_pgap
else:
box.y_cm += self.canvas.report_opts.box_mgap
if box.level[1] == 0 and self.__last_direct[level]:
#ok, a new direct descendant.
#print level, box.father is not None, self.__last_direct[level].father is not None, box.text[0], \
# self.__last_direct[level].text[0]
if box.father != self.__last_direct[level].father and \
box.father != self.__last_direct[level]:
box.y_cm += self.canvas.report_opts.box_pgap
self.cols[level] = box
if box.level[1] == 0:
self.__last_direct[level] = box
box.x_cm = self.canvas.report_opts.spouse_offset * box.level[1]
self.canvas.set_box_height_width(box)
def add_person_box(self, level, indi_handle, fams_handle, father):
""" Makes a person box and add that person into the Canvas. """
myself = PersonBox(level)
myself.father = father
if myself.level[1] == 0 and self.bold_direct and self.bold_now:
if self.bold_now == 1:
self.bold_now = 0
myself.set_bold()
if level[1] == 0 and father and myself.level[0] != father.level[0]:
#I am a child
if father.line_to:
line = father.line_to
else:
line = LineBase(father)
father.line_to = line
#self.canvas.add_line(line)
line.end.append(myself)
#calculate the text.
myself.calc_text(self.database, indi_handle, fams_handle)
myself.add_mark(self.database,
self.database.get_person_from_handle(indi_handle))
self.add_to_col(myself)
self.canvas.add_box(myself)
return myself
def add_marriage_box(self, level, indi_handle, fams_handle, father):
""" Makes a marriage box and add that person into the Canvas. """
myself = FamilyBox(level)
#if father is not None:
# myself.father = father
#calculate the text.
myself.calc_text(self.database, indi_handle, fams_handle)
self.add_to_col(myself)
self.canvas.add_box(myself)
return myself
def recurse(self, person_handle, x_level, s_level, father):
"""traverse the ancestors recursively until
either the end of a line is found,
or until we reach the maximum number of generations
or we reach the max number of spouses
that we want to deal with"""
if not person_handle: return
if x_level > self.max_generations: return
if s_level > 0 and s_level == self.max_spouses: return
if person_handle in self.families_seen: return
myself = None
person = self.database.get_person_from_handle(person_handle)
family_handles = person.get_family_handle_list()
if s_level == 0:
val = family_handles[0] if family_handles else None
myself = self.add_person_box( (x_level, s_level),
person_handle, val, father)
marr = None
spouse = None
if s_level == 1:
tmp_bold = self.bold_now
self.bold_now = 0
for family_handle in family_handles:
if family_handle not in self.families_seen:
self.families_seen.add(family_handle)
family = self.database.get_family_from_handle(family_handle)
#Marriage box if the option is there.
if self.inlc_marr and self.max_spouses > 0:
marr = self.add_marriage_box((x_level, s_level+1),
person_handle, family_handle,
father if s_level else myself)
spouse_handle = ReportUtils.find_spouse(person, family)
if self.max_spouses > s_level and \
spouse_handle not in self.families_seen:
def _spouse_box(who):
return self.add_person_box((x_level, s_level+1),
spouse_handle, family_handle, who)
if s_level > 0:
spouse = _spouse_box(father)
elif self.inlc_marr:
spouse = _spouse_box(marr)
else:
spouse = _spouse_box(myself)
mykids = [kid.ref for kid in family.get_child_ref_list()]
def _child_recurse(who):
self.recurse(child_ref, x_level+1, 0, who)
for child_ref in mykids:
if self.inlc_marr and self.max_spouses > 0:
_child_recurse(marr)
elif spouse:
_child_recurse(spouse)
else:
_child_recurse(myself)
if self.max_spouses > s_level and \
spouse_handle not in self.families_seen:
#spouse_handle = ReportUtils.find_spouse(person,family)
self.recurse(spouse_handle, x_level, s_level+1, spouse)
if s_level == 1:
self.bold_now = tmp_bold
def add_family(self, level, family, father2):
"""
Adds a family into the canvas.
Only used for my direct grandparents and my parents.
"""
family_h = family.get_handle()
father_h = family.get_father_handle()
mother_h = family.get_mother_handle()
self.bold_now = 2
if father_h:
father_b = self.add_person_box(
(level, 0), father_h, family_h, father2)
else:
father_b = self.add_person_box(
(level, 0), None, None, father2)
retrn = [father_b]
if self.inlc_marr:
family_b = self.add_marriage_box(
(level, 1), father_h, family_h, father_b)
retrn.append(family_b)
self.families_seen.add(family_h)
if mother_h:
mother_b = self.add_person_box(
(level, 0), mother_h, family_h, father_b)
else:
mother_b = self.add_person_box(
(level, 0), None, None, father_b)
retrn.append(mother_b)
family_line = family_b if self.inlc_marr else father_b
for child_ref in family.get_child_ref_list():
self.recurse(child_ref.ref, level+1, 0, family_line)
self.bold_now = 0
#Set up the lines for the family
if not family_line.line_to:
#no children.
family_line.line_to = LineBase(family_line)
if self.inlc_marr:
family_line.line_to.start.append(father_b)
family_line.line_to.start.append(mother_b)
return retrn
def has_children(self, person_handle):
"""
Quickly check to see if this person has children,
while still respecting the families_seen set.
"""
if not person_handle or person_handle in self.families_seen:
return False
person = self.database.get_person_from_handle(person_handle)
for family_handle in person.get_family_handle_list():
if family_handle not in self.families_seen:
family = self.database.get_family_from_handle(family_handle)
if family.get_child_ref_list():
return True
return False
def recurse_if(self, person_handle, level):
"""
Quickly check to see if we want to continue recursion,
while still respecting the families_seen set.
"""
person = self.database.get_person_from_handle(person_handle)
show = False
myfams = person.get_family_handle_list()
if len(myfams) > 1: #and self.max_spouses > 0
show = True
if not self.inlc_marr:
#if the condition is true, we only want to show
#this parent again IF s/he has other children
show = self.has_children(person_handle)
#if self.max_spouses == 0 and not self.has_children(person_handle):
# self.families_seen.add(person_handle)
# show = False
if show:
self.bold_now = 1
self.recurse(person_handle, level, 0, None)
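# Illustrative note (not from the original source): box.level is a
# (generation, spouse_index) pair, so PersonBox((2, 0)) is a direct descendant
# in the third column while PersonBox((2, 1)) is that person's spouse, shifted
# right by report_opts.spouse_offset in add_to_col().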
#------------------------------------------------------------------------
#
# Class MakePersonTree (Personal Descendant Tree option)
#
#------------------------------------------------------------------------
class MakePersonTree(RecurseDown):
"""
The main procedure to use recursion to make the tree based off of a person.
The order in which people are inserted is important;
this class makes sure that order is done correctly.
"""
def __init__(self, dbase, canvas):
RecurseDown.__init__(self, dbase, canvas)
self.max_generations -= 1
def start(self, person_id):
"""follow the steps to make a tree off of a person"""
persons = []
center1 = self.database.get_person_from_gramps_id(person_id)
if center1 is None:
raise ReportError(_("Person %s is not in the Database") % person_id)
center1_h = center1.get_handle() #could be mom too.
family2 = family2_h = None
if self.do_parents:
family2_h = center1.get_main_parents_family_handle()
family2 = self.database.get_family_from_handle(family2_h)
mother2_h = father2_h = None
if family2:
father2_h = family2.get_father_handle()
mother2_h = family2.get_mother_handle()
#######################
#don't do center person's parents family.
if family2_h:
self.families_seen.add(family2_h)
#######################
#Center person's Fathers OTHER wives
#######################
#update to only run if he HAD other wives!
if father2_h:
self.recurse_if(father2_h, 0)
#######################
#Center persons parents only!
#######################
#now it will ONLY be my fathers parents
if family2:
self.add_family( 0, family2, None )
else:
self.bold_now = 2
self.recurse(center1_h, 0, 0, None)
self.bold_now = 0
#######################
#Center person's mothers OTHER husbands
#######################
#update to only run if she HAD other husbands!
if mother2_h:
self.recurse_if(mother2_h, 0)
return persons
#------------------------------------------------------------------------
#
# Class MakeFamilyTree (Familial Descendant Tree option)
#
#------------------------------------------------------------------------
class MakeFamilyTree(RecurseDown):
"""
The main procedure to use recursion to make the tree based off of a family.
The order in which people are inserted is important;
this class makes sure that order is done correctly.
"""
def __init__(self, dbase, canvas):
RecurseDown.__init__(self, dbase, canvas)
def start(self, family_id):
"""follow the steps to make a tree off of a family"""
## (my) refers to the children of family_id
# Step 1 print out my fathers, fathers,
# other wives families first (if needed)
family1 = self.database.get_family_from_gramps_id(family_id)
if family1 is None:
raise ReportError(_("Family %s is not in the Database") % family_id)
family1_h = family1.get_handle()
#######################
#Initial setup of variables
#######################
father1_h = family1.get_father_handle()
mother1_h = family1.get_mother_handle()
father1 = mother1 = family2 = family2_h = None
if father1_h:
father1 = self.database.get_person_from_handle(father1_h)
if self.do_parents: #b3 - remove grandparents?
family2_h = father1.get_main_parents_family_handle()
family2 = self.database.get_family_from_handle(family2_h)
if mother1_h:
mother1 = self.database.get_person_from_handle(mother1_h)
mother2_h = father2_h = None
if family2: #family2 = fathers parents
mother2_h = family2.get_mother_handle()
mother2 = self.database.get_person_from_handle(mother2_h)
father2_h = family2.get_father_handle()
father2 = self.database.get_person_from_handle(father2_h)
#Helper variables. Assigned in one section, used in another.
father2_id = family2_id = None
mother1_id = None
#######################
#don't do my fathers parents family. will be done later
if family2_h:
self.families_seen.add(family2_h)
#######################
#my father mothers OTHER husbands
#######################
#update to only run if she HAD other husbands!
if mother2_h:
self.recurse_if(mother2_h, 0)
#######################
#father Fathers OTHER wives
#######################
#update to only run if he HAD other wives!
if father2_h:
self.recurse_if(father2_h, 0)
#######################
#don't do my parents family in recurse. will be done later
self.families_seen.add(family1_h)
##If dad has no other children from other marriages. remove him
if self.max_spouses == 0 and not self.has_children(father1_h):
self.families_seen.add(father1_h)
#######################
#my fathers parents!
#######################
#now it will ONLY be my fathers parents
#will print dads parents. dad's other wifes will also print
if family2:
myfams = father1.get_family_handle_list()
show = False
if len(myfams) > 1:
show = True
if not self.inlc_marr and self.max_spouses == 0:
#if the condition is true, we only want to show
#this parent again IF s/he has children
show = self.has_children(father1_h)
if not show:
self.families_seen.add(father1_h)
family2_l = self.add_family( 0, family2, None )
elif father1:
#######################
#my father other wives (if all of the above does nothing)
#if my father does not have parents (he is the highest)
#######################
#do his OTHER wives first.
self.recurse_if(father1_h, 1)
#######################
#my father, marriage info, mother, siblings, me
#######################
if family2:
#We need to add dad to the family
family2_line = family2_l[1] if self.inlc_marr else family2_l[0]
else:
family2_line = None
family1_l = self.add_family(1, family1, family2_line)
mother1_b = family1_l[-1] #Mom's Box
#make sure there is at least one child in this family.
#if not put in a placeholder
family1_line = family1_l[1] if self.inlc_marr else family1_l[0]
if family1_line.line_to.end == []:
box = PlaceHolderBox((mother1_b.level[0]+1, 0))
box.father = family1_l[0]
self.add_to_col(box)
family1_line.line_to.end = [box]
#######################
#######################
#Lower half
#This will be quite like the first half.
#Just on the mothers side...
#Mom has already been printed with the family
#######################
#######################
#######################
#Initial setup of variables
#######################
mother1_h = family1.get_mother_handle()
family2_h = mother1 = family2 = None
if mother1_h:
mother1 = self.database.get_person_from_handle(mother1_h)
if self.do_parents: #b3 - remove grandparents?
family2_h = mother1.get_main_parents_family_handle()
family2 = self.database.get_family_from_handle(family2_h)
mother2_h = father2_h = None
if family2:
mother2_h = family2.get_mother_handle()
mother2 = self.database.get_person_from_handle(mother2_h)
father2_h = family2.get_father_handle()
father2 = self.database.get_person_from_handle(father2_h)
#######################
#don't do my parents family.
self.families_seen = set([family1_h] )
##If mom has no other children from other marriages. remove her
if self.max_spouses == 0 and not self.has_children(mother1_h):
self.families_seen.add(mother1_h)
if mother1_h:
myfams = mother1.get_family_handle_list()
if len(myfams) < 2:
#If mom didn't have any other families, don't process her here;
#she is already here with dad and will be added later
self.families_seen.add(mother1_h)
#######################
#my mother other spouses (if no parents)
#######################
#if my mother does not have parents (she is the highest)
#Then do her OTHER spouses.
if not family2 and mother1:
self.recurse_if(mother1_h, 1)
#######################
#my mothers parents!
#######################
if family2:
family2_l = self.add_family( 0, family2, None )
family2_line = family2_l[1] if self.inlc_marr else family2_l[0]
family2_line = family2_line.line_to
if family2_line.end != []:
family2_line.end.insert(0, mother1_b)
else:
family2_line.end = [mother1_b]
#fix me. Moms other siblings have been given an extra space
#Because Moms-father is not siblings-father right now.
mother1_b.father = family2_line
#######################
#my mother mothers OTHER husbands
#######################
#update to only run if she HAD other husbands!
if mother2_h:
self.recurse_if(mother2_h, 0)
#######################
#mother Fathers OTHER wives
#######################
#update to only run if he HAD other wives!
if father2_h:
self.recurse_if(father2_h, 0)
#------------------------------------------------------------------------
#
# Class MakeReport
#
#------------------------------------------------------------------------
class MakeReport(object):
"""
Make a report out of a list of people.
The list of people is already made. Use this information to find where
people will be placed on the canvas.
"""
def __init__(self, dbase, canvas, ind_spouse, compress_tree):
self.database = dbase
self.canvas = canvas
gui = GuiConnect()
self.do_parents = gui.get_val('show_parents')
self.inlc_marr = gui.get_val("inc_marr")
self.max_spouses = gui.get_val('maxspouse')
gui = None
self.ind_spouse = ind_spouse
self.compress_tree = compress_tree
self.cols = [[]]
#self.max_generations = 0
#already done in recurse,
#Some of this code needs to be moved up to RecurseDown.add_to_col()
def calc_box(self, box):
""" calculate the max_box_width and max_box_height for the report """
width = box.x_cm + box.width
if width > self.canvas.report_opts.max_box_width:
self.canvas.report_opts.max_box_width = width
if box.height > self.canvas.report_opts.max_box_height:
self.canvas.report_opts.max_box_height = box.height
while len(self.cols) <= box.level[0]:
self.cols.append([])
self.cols[box.level[0]].append(box)
#tmp = box.level[0]
#if tmp > self.max_generations:
# self.max_generations = tmp
def __move_col_from_here_down(self, box, amount):
"""Move me and everyone below me in this column only down"""
while box:
box.y_cm += amount
box = box.next
def __move_next_cols_from_here_down(self, box, amount):
"""Move me, everyone below me in this column,
and all of our children (and childrens children) down."""
col = [box]
while col:
if len(col) == 1 and col[0].line_to:
col.append(col[0].line_to.end[0])
col[0].y_cm += amount
col[0] = col[0].next
if col[0] is None:
col.pop(0)
def __next_family_group(self, box):
""" a helper function. Assume box is at the start of a family block.
get this family block. """
while box:
left_group = []
line = None
#Form the parental (left) group.
#am I a direct descendant?
if box.level[1] == 0:
#I am the father/mother.
left_group.append(box)
if box.line_to:
line = box.line_to
box = box.next
if box and box.level[1] != 0 and self.inlc_marr:
#add/start with the marriage box
left_group.append(box)
if box.line_to:
line = box.line_to
box = box.next
if box and box.level[1] != 0 and self.max_spouses > 0:
#add/start with the spousal box
left_group.append(box)
if box.line_to:
line = box.line_to
box = box.next
if line:
if len(line.start) > 1 and line.start[-1].level[1] == 0:
#a dad and mom family from RecurseDown.add_family. add mom
left_group.append(line.start[-1])
box = box.next
#we now have everyone we want
return left_group, line.end
#else
# no children, so no family. go again until we find one to return.
return None, None
def __reverse_family_group(self):
""" go through the n-1 to 0 cols of boxes looking for families
(parents with children) that may need to be moved. """
for x_col in range(len(self.cols)-1, -1, -1):
box = self.cols[x_col][0] #The first person in this col
while box:
left_group, right_group = self.__next_family_group(box)
if not left_group:
box = None #we found the end of this col
else:
yield left_group, right_group
box = left_group[-1].next
def __calc_movements(self, left_group, right_group):
""" for a family group, see if parents or children need to be
moved down so everyone is the the right/left of each other.
return a right y_cm and a left y_cm. these points will be used
to move parents/children down.
"""
left_up = left_group[0].y_cm
right_up = right_group[0].y_cm
left_center = left_up
right_center = right_up
if self.compress_tree:
#calculate a new left and right move points
for left_line in left_group:
if left_line.line_to:
break
left_center = left_line.y_cm + (left_line.height /2)
left_down = left_group[-1].y_cm + left_group[-1].height
right_down = right_group[-1].y_cm + right_group[-1].height
#Lazy. Move down either side only as much as we NEED to.
if left_center < right_up:
right_center = right_group[0].y_cm
elif left_up == right_up:
left_center = left_up #Lets keep it. top line.
elif left_center > right_down:
right_center = right_down
else:
right_center = left_center
return right_center, left_center
def Make_report(self):
"""
Everyone on the page is as far up as they can go.
Move them down to where they belong.
We are going to go through everyone from right to left
top to bottom moving everyone down as needed to make the report.
"""
seen_parents = False
for left_group, right_group in self.__reverse_family_group():
right_y_cm, left_y_cm = self.__calc_movements(left_group,
right_group)
#1. Are my children too high? if so move then down!
if right_y_cm < left_y_cm:
#we have to push our kids (and their kids) down.
#We also need to push down all the kids (under)
#these kids (in their column)
amt = (left_y_cm - right_y_cm)
self.__move_next_cols_from_here_down(right_group[0], amt)
#2. Am I (and spouses) too high? if so move us down!
elif left_y_cm < right_y_cm:
#Ok, I am too high. Move me down
amt = (right_y_cm - left_y_cm)
self.__move_col_from_here_down(left_group[0], amt)
#6. now check to see if we are working with dad and mom.
#if so we need to move down marriage information
#and mom!
left_line = left_group[0].line_to
if not left_line:
left_line = left_group[1].line_to
#left_line = left_line.start
if len(left_line.start) > 1 and not seen_parents:
#only do Dad and Mom. len(left_line) > 1
seen_parents = True
mom_cm = left_group[-1].y_cm + left_group[-1].height/2
last_child_cm = right_group[-1].y_cm
if not self.compress_tree:
last_child_cm += right_group[-1].height/2
move_amt = last_child_cm - mom_cm
#if the mom's height is less than the last child's height
#The 0.2 is to see if this is even worth it.
if move_amt > 0.2:
#our children take up more space than us parents.
#so space mom out!
self.__move_col_from_here_down(left_group[-1], move_amt)
#move marriage info
if self.inlc_marr:
left_group[1].y_cm += move_amt/2
if left_line.end[0].boxstr == 'None':
left_line.end = []
def start(self):
"""Make the report"""
#for person in self.persons.depth_first_gen():
for box in self.canvas.boxes:
self.calc_box(box)
#At this point we know everything we need to make the report.
#Width of each column of people - self.rept_opt.box_width
#width of each column (or row) of lines - self.rept_opt.col_width
if not self.cols[0]:
#We wanted to print parents of starting person/family but
#there were none!
#remove column 0 and move everyone back one level
self.cols.pop(0)
for box in self.canvas.boxes:
box.level = (box.level[0] - 1, box.level[1])
#go ahead and set it now.
width = self.canvas.report_opts.max_box_width
for box in self.canvas.boxes:
box.width = width - box.x_cm
box.x_cm += self.canvas.report_opts.littleoffset
box.x_cm += (box.level[0] *
(self.canvas.report_opts.col_width +
self.canvas.report_opts.max_box_width))
box.y_cm += self.canvas.report_opts.littleoffset
box.y_cm += self.canvas.title.height
self.Make_report()
class GuiConnect():
""" This is a BORG object. There is ONLY one.
This gives some common routines that EVERYONE can use, like
getting the value of a GUI variable.
"""
__shared_state = {}
def __init__(self): #We are BORG!
self.__dict__ = self.__shared_state
def set__opts(self, options, which):
self._opts = options
self._which_report = which.split(",")[0]
def get_val(self, val):
""" Get a GUI value. """
value = self._opts.get_option_by_name(val)
if value:
return value.get_value()
else:
return False
def Title_class(self, database, doc):
Title_type = self.get_val('report_title')
if Title_type == 0: #None
return TitleNone(database, doc)
if Title_type == 1: #Descendant Chart
if self._which_report == _RPT_NAME:
if self.get_val('show_parents'):
return TitleDPY(database, doc)
else:
return TitleDPN(database, doc)
else:
if self.get_val('show_parents'):
return TitleDFY(database, doc)
else:
return TitleDFN(database, doc)
if Title_type == 2:
return TitleF(database, doc)
else: #Title_type == 3
return TitleC(database, doc)
def Make_Tree(self, database, canvas):
if self._which_report == _RPT_NAME:
return MakePersonTree(database, canvas)
else:
return MakeFamilyTree(database, canvas)
def calc_lines(self, database):
#calculate the printed lines for each box
display_repl = self.get_val("replace_list")
#str = ""
#if self.get_val('miss_val'):
# str = "_____"
return CalcLines(database, display_repl)
def working_lines(self, box):
display = self.get_val("descend_disp")
#if self.get_val('diffspouse'):
display_spou = self.get_val("spouse_disp")
#else:
# display_spou = display
display_marr = [self.get_val("marr_disp")]
if box.boxstr == "CG2-fam-box": #(((((
workinglines = display_marr
elif box.level[1] > 0 or (box.level[0] == 0 and box.father):
workinglines = display_spou
else:
workinglines = display
return workinglines
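# Illustrative sketch (not from the original source): GuiConnect is a Borg, so
# every instance shares __shared_state and options stored once are visible to
# all later callers, e.g.
#
#     GuiConnect().set__opts(options.menu, options.name)
#     GuiConnect().get_val('maxgen')   # reads the same shared _opts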
#------------------------------------------------------------------------
#
# DescendTree
#
#------------------------------------------------------------------------
class DescendTree(Report):
def __init__(self, database, options, user):
"""
Create DescendTree object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
"""
Report.__init__(self, database, options, user)
self.options = options
self.database = database
""" make the report in its full size and pages to print on
scale one or both as needed/desired.
"""
database = self.database
self.Connect = GuiConnect()
self.Connect.set__opts(self.options.menu, self.options.name)
style_sheet = self.doc.get_style_sheet()
font_normal = style_sheet.get_paragraph_style("CG2-Normal").get_font()
#The canvas that we will put our report on and print off of
self.canvas = Canvas(self.doc,
ReportOptions(self.doc, font_normal, "CG2-line"))
self.canvas.report_opts.box_shadow *= \
self.Connect.get_val('shadowscale')
self.canvas.report_opts.box_pgap *= self.Connect.get_val('box_Yscale')
self.canvas.report_opts.box_mgap *= self.Connect.get_val('box_Yscale')
center_id = self.Connect.get_val('pid')
#make the tree
tree = self.Connect.Make_Tree(database, self.canvas)
tree.start(center_id)
tree = None
#Title
title = self.Connect.Title_class(database, self.doc)
title.calc_title(center_id)
self.canvas.add_title(title)
#make the report as big as it wants to be.
ind_spouse = self.Connect.get_val("ind_spouse")
compress_tree = self.Connect.get_val('compress_tree')
report = MakeReport(database, self.canvas, ind_spouse, compress_tree)
report.start()
report = None
#note?
if self.Connect.get_val("inc_note"):
note_box = NoteBox(self.doc, "CG2-note-box",
self.Connect.get_val("note_place"))
subst = SubstKeywords(self.database, None, None)
note_box.text = subst.replace_and_clean(
self.Connect.get_val('note_disp'))
self.canvas.add_note(note_box)
#Now we have the report in its full size.
#Do we want to scale the report?
one_page = self.Connect.get_val("resize_page")
scale_report = self.Connect.get_val("scale_tree")
scale = self.canvas.scale_report(one_page,
scale_report != 0, scale_report == 2)
if scale != 1 or self.Connect.get_val('shadowscale') != 1.0:
self.scale_styles(scale)
def write_report(self):
""" Canvas now has everyone ready to print. Get some misc stuff
together and print. """
one_page = self.Connect.get_val("resize_page")
scale_report = self.Connect.get_val("scale_tree")
#Inlc_marr = self.Connect.get_val("inc_marr")
inc_border = self.Connect.get_val('inc_border')
incblank = self.Connect.get_val("inc_blank")
prnnum = self.Connect.get_val("inc_pagenum")
#ind_spouse = self.Connect.get_val("ind_spouse")
lines = self.Connect.get_val('note_disp')
#####################
#Setup page information
colsperpage = self.doc.get_usable_width()
colsperpage += self.canvas.report_opts.col_width
tmp = self.canvas.report_opts.max_box_width
tmp += self.canvas.report_opts.col_width
colsperpage = int(colsperpage / tmp)
colsperpage = colsperpage or 1
#####################
#Vars
#p = self.doc.get_style_sheet().get_paragraph_style("CG2-Normal")
#font = p.get_font()
if prnnum:
page_num_box = PageNumberBox(self.doc, 'CG2-box')
#####################
#ok, everyone is now ready to print on the canvas. Paginate?
self.canvas.sort_boxes_on_y_cm()
self.canvas.paginate(colsperpage, one_page)
#####################
#Yeah!!!
#lets finally make some pages!!!
#####################
for page in self.canvas.page_iter_gen(incblank):
self.doc.start_page()
#do we need to print a border?
if inc_border:
page.draw_border('CG2-line')
#Do we need to print the page number?
if prnnum:
page_num_box.display(page)
page.display()
self.doc.end_page()
def scale_styles(self, amount):
"""
Scale the styles for this report. This must be done in the constructor.
"""
style_sheet = self.doc.get_style_sheet()
graph_style = style_sheet.get_draw_style("CG2-fam-box")
graph_style.set_shadow(graph_style.get_shadow(), 0)
graph_style.set_line_width(graph_style.get_line_width() * amount)
style_sheet.add_draw_style("CG2-fam-box", graph_style)
graph_style = style_sheet.get_draw_style("CG2-box")
graph_style.set_shadow(graph_style.get_shadow(),
self.canvas.report_opts.box_shadow * amount)
graph_style.set_line_width(graph_style.get_line_width() * amount)
style_sheet.add_draw_style("CG2-box", graph_style)
graph_style = style_sheet.get_draw_style("CG2b-box")
graph_style.set_shadow(graph_style.get_shadow(),
self.canvas.report_opts.box_shadow * amount)
graph_style.set_line_width(graph_style.get_line_width() * amount)
style_sheet.add_draw_style("CG2b-box", graph_style)
graph_style = style_sheet.get_draw_style("CG2-note-box")
graph_style.set_shadow(graph_style.get_shadow(), 0)
graph_style.set_line_width(graph_style.get_line_width() * amount)
style_sheet.add_draw_style("CG2-note-box", graph_style)
para_style = style_sheet.get_paragraph_style("CG2-Title")
font = para_style.get_font()
font.set_size(font.get_size() * amount)
para_style.set_font(font)
style_sheet.add_paragraph_style("CG2-Title", para_style)
para_style = style_sheet.get_paragraph_style("CG2-Normal")
font = para_style.get_font()
font.set_size(font.get_size() * amount)
para_style.set_font(font)
style_sheet.add_paragraph_style("CG2-Normal", para_style)
para_style = style_sheet.get_paragraph_style("CG2-Bold")
font = para_style.get_font()
font.set_bold(True)
font.set_size(font.get_size() * amount)
para_style.set_font(font)
style_sheet.add_paragraph_style("CG2-Bold", para_style)
para_style = style_sheet.get_paragraph_style("CG2-Note")
font = para_style.get_font()
font.set_size(font.get_size() * amount)
para_style.set_font(font)
style_sheet.add_paragraph_style("CG2-Note", para_style)
self.doc.set_style_sheet(style_sheet)
#------------------------------------------------------------------------
#
# DescendTreeOptions
#
#------------------------------------------------------------------------
class DescendTreeOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__pid = None
self.__onepage = None
self.__inc_title = None
self.__title = None
self.__blank = None
self.scale = None
self.__db = dbase
self.name = name
self.box_Y_sf = None
self.box_shadow_sf = None
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the descendant report.
"""
##################
category_name = _("Tree Options")
if self.name.split(",")[0] == _RPT_NAME:
self.__pid = PersonOption(_("Report for"))
self.__pid.set_help(_("The main person for the report"))
menu.add_option(category_name, "pid", self.__pid)
else: #if self.name == "familial_descend_tree":
self.__pid = FamilyOption(_("Report for"))
self.__pid.set_help(_("The main family for the report"))
menu.add_option(category_name, "pid", self.__pid)
self.showparents = BooleanOption(
_('Start with the parent(s) of the selected first'),
False)
self.showparents.set_help(
_("Will show the parents, brother and sisters of the "
"selected person.")
)
menu.add_option(category_name, "show_parents", self.showparents)
max_gen = NumberOption(_("Generations"), 10, 1, 50)
max_gen.set_help(_("The number of generations to include in the tree"))
menu.add_option(category_name, "maxgen", max_gen)
max_spouse = NumberOption(_("Level of Spouses"), 1, 0, 10)
max_spouse.set_help(_("0=no Spouses, 1=include Spouses, 2=include "
"Spouses of the spouse, etc"))
menu.add_option(category_name, "maxspouse", max_spouse)
compresst = BooleanOption(_('Co_mpress tree'), False)
compresst.set_help(_("Whether to move people up, where possible, "
"resulting in a smaller tree"))
menu.add_option(category_name, "compress_tree", compresst)
##################
category_name = _("Display")
disp = TextOption(_("Descendant\nDisplay Format"),
["$n",
"%s $b" %_BORN,
"{%s $d}" %_DIED])
disp.set_help(_("Display format for a descendant."))
menu.add_option(category_name, "descend_disp", disp)
bold = BooleanOption(_('Bold direct descendants'), True)
bold.set_help(
_("Whether to bold those people that are direct "
"(not step or half) descendants.")
)
menu.add_option(category_name, "bolddirect", bold)
#bug 4767
#diffspouse = BooleanOption(
# _("Use separate display format for spouses"),
# True)
#diffspouse.set_help(_("Whether spouses can have a different format."))
#menu.add_option(category_name, "diffspouse", diffspouse)
indspouse = BooleanOption(_('Indent Spouses'), True)
indspouse.set_help(_("Whether to indent the spouses in the tree."))
menu.add_option(category_name, "ind_spouse", indspouse)
sdisp = TextOption(_("Spousal\nDisplay Format"),
["$n",
"%s $b" %_BORN,
"{%s $d}" %_DIED])
sdisp.set_help(_("Display format for a spouse."))
menu.add_option(category_name, "spouse_disp", sdisp)
incmarr = BooleanOption(_('Include Marriage box'), True)
incmarr.set_help(
_("Whether to include a separate marital box in the report"))
menu.add_option(category_name, "inc_marr", incmarr)
marrdisp = StringOption(_("Marriage\nDisplay Format"), "%s $m" % _MARR)
marrdisp.set_help(_("Display format for the marital box."))
menu.add_option(category_name, "marr_disp", marrdisp)
##################
category_name = _("Replace")
repldisp = TextOption(
_("Replace Display Format:\n'Replace this'/' with this'"),
[])
repldisp.set_help(_("i.e.\nUnited States of America/U.S.A"))
menu.add_option(category_name, "replace_list", repldisp)
##################
category_name = _("Size")
self.scale = EnumeratedListOption(_("Scale tree to fit"), 0)
self.scale.add_item( 0, _("Do not scale tree"))
self.scale.add_item( 1, _("Scale tree to fit page width only"))
self.scale.add_item( 2, _("Scale tree to fit the size of the page"))
self.scale.set_help(
_("Whether to scale the tree to fit a specific paper size")
)
menu.add_option(category_name, "scale_tree", self.scale)
self.scale.connect('value-changed', self.__check_blank)
if "BKI" not in self.name.split(","):
self.__onepage = BooleanOption(_("Resize Page to Fit Tree size\n"
"\n"
"Note: Overrides options in the 'Paper Option' tab"
),
False)
self.__onepage.set_help(
_("Whether to resize the page to fit the size \n"
"of the tree. Note: the page will have a \n"
"non standard size.\n"
"\n"
"With this option selected, the following will happen:\n"
"\n"
"With the 'Do not scale tree' option the page\n"
" is resized to the height/width of the tree\n"
"\n"
"With 'Scale tree to fit page width only' the height of\n"
" the page is resized to the height of the tree\n"
"\n"
"With 'Scale tree to fit the size of the page' the page\n"
" is resized to remove any gap in either height or width"
))
menu.add_option(category_name, "resize_page", self.__onepage)
self.__onepage.connect('value-changed', self.__check_blank)
else:
self.__onepage = None
self.box_Y_sf = NumberOption(_("inter-box Y scale factor"),
1.00, 0.10, 2.00, 0.01)
self.box_Y_sf.set_help(_("Make the inter-box Y bigger or smaller"))
menu.add_option(category_name, "box_Yscale", self.box_Y_sf)
self.box_shadow_sf = NumberOption(_("box shadow scale factor"),
1.00, 0.00, 2.00, 0.01) # down to 0
self.box_shadow_sf.set_help(_("Make the box shadow bigger or smaller"))
menu.add_option(category_name, "shadowscale", self.box_shadow_sf)
##################
category_name = _("Include")
self.title = EnumeratedListOption(_("Report Title"), 0)
self.title.add_item( 0, _("Do not include a title"))
self.title.add_item( 1, _("Descendant Chart for [selected person(s)]"))
self.title.set_help(_("Choose a title for the report"))
menu.add_option(category_name, "report_title", self.title)
self.showparents.connect('value-changed', self.__Title_enum)
border = BooleanOption(_('Include a border'), False)
border.set_help(_("Whether to make a border around the report."))
menu.add_option(category_name, "inc_border", border)
prnnum = BooleanOption(_('Include Page Numbers'), False)
prnnum.set_help(_("Whether to include page numbers on each page."))
menu.add_option(category_name, "inc_pagenum", prnnum)
self.__blank = BooleanOption(_('Include Blank Pages'), True)
self.__blank.set_help(_("Whether to include pages that are blank."))
menu.add_option(category_name, "inc_blank", self.__blank)
#category_name = _("Notes")
self.usenote = BooleanOption(_('Include a note'), False)
self.usenote.set_help(
_("Whether to include a note on the report.")
)
menu.add_option(category_name, "inc_note", self.usenote)
self.notedisp = TextOption(_("Note"),[])
self.notedisp.set_help(_("Add a note"
"\n\n$T inserts today's date"))
menu.add_option(category_name, "note_disp", self.notedisp)
        note_types = NoteType(0)
        notelocal = EnumeratedListOption(_("Note Location"), 2)
        for num, text in note_types.note_locals():
            notelocal.add_item(num, text)
notelocal.set_help(_("Where to place the note."))
menu.add_option(category_name, "note_place", notelocal)
def __check_blank(self):
"""dis/enables the 'print blank pages' checkbox"""
if self.__onepage:
value = not self.__onepage.get_value()
else:
value = True
off = value and (self.scale.get_value() != 2)
self.__blank.set_available( off )
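    # Illustrative summary (an assumption restating the logic above, not part
    # of the original source): the "Include Blank Pages" checkbox stays
    # selectable only while nothing forces the tree onto a single filled page,
    # e.g.
    #
    #   resize_page=False and scale_tree in (0, 1)  -> checkbox enabled
    #   resize_page=True  or  scale_tree == 2       -> checkbox greyed out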
def __Title_enum(self):
item_list = [
[0, _("Do not include a title") ],
[1, _("Descendant Chart for [selected person(s)]") ],
]
if self.name.split(",")[0] != _RPT_NAME:
item_list.append(
[2, _("Family Chart for [names of chosen family]") ]
)
if self.showparents.get_value():
item_list.append(
[3, _("Cousin Chart for [names of children]") ]
)
self.title.set_items(item_list)
def make_default_style(self, default_style):
"""Make the default output style for the Descendant Tree."""
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER)
## Paragraph Styles:
font = FontStyle()
font.set_size(16)
font.set_type_face(FONT_SANS_SERIF)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_alignment(PARA_ALIGN_CENTER)
para_style.set_description(
_("The basic style used for the title display.")
)
default_style.add_paragraph_style("CG2-Title", para_style)
font = FontStyle()
font.set_size(9)
font.set_type_face(FONT_SANS_SERIF)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_description(
_('The basic style used for the text display.')
)
default_style.add_paragraph_style("CG2-Normal", para_style)
        # Set the shadow size based on the font size; it may be adjusted
        # again later.
box_shadow = PT2CM(font.get_size()) * .6
font.set_bold(True)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_description(
_('The bold style used for the text display.')
)
default_style.add_paragraph_style("CG2-Bold", para_style)
font = FontStyle()
font.set_size(9)
font.set_type_face(FONT_SANS_SERIF)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_description(
_('The basic style used for the note display.')
)
default_style.add_paragraph_style("CG2-Note", para_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Title")
graph_style.set_color((0, 0, 0))
graph_style.set_fill_color((255, 255, 255))
graph_style.set_line_width(0)
default_style.add_draw_style("CG2-Title", graph_style)
## Draw styles
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Normal")
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("CG2-fam-box", graph_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Normal")
graph_style.set_shadow(1, box_shadow)
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("CG2-box", graph_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Bold")
graph_style.set_shadow(1, box_shadow)
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("CG2b-box", graph_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("CG2-Note")
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("CG2-note-box", graph_style)
graph_style = GraphicsStyle()
default_style.add_draw_style("CG2-line", graph_style)
#=====================================
#So do not fear, for I am with you; do not be dismayed,
#for I am your God. I will strengthen you and help you;
#I will uphold you with my righteous right hand.
#Isaiah 41:10
|
arunkgupta/gramps
|
gramps/plugins/drawreport/descendtree.py
|
Python
|
gpl-2.0
| 66,162
|
from __future__ import absolute_import
import logging
import io
import os
import os.path
import shutil
import tempfile
import urllib
import zipfile
import csv
from datetime import datetime
from org.bccvl import movelib
from org.bccvl.movelib.utils import build_source, build_destination
from org.bccvl.movelib.utils import zip_occurrence_data
from org.bccvl.tasks.celery import app
from org.bccvl.tasks.utils import extract_metadata
from org.bccvl.tasks.utils import set_progress, import_cleanup
from org.bccvl.tasks.utils import set_progress_job, import_cleanup_job
from org.bccvl.tasks.utils import import_ala_job
SPECIES = 'species'
LONGITUDE = 'lon'
LATITUDE = 'lat'
EVENT_DATE = 'date'
YEAR = 'year'
MONTH = 'month'
LOG = logging.getLogger(__name__)
def _process_trait_data(datadir):
    # Check that this is a valid trait CSV file and normalise its header.
count = 0
csvfile = os.path.join(datadir, 'zoatrack_trait.csv')
with io.open(csvfile, mode='br+') as csv_file:
csv_reader = csv.reader(csv_file)
# Check if csv file header has the necessary columns
columns = set(['decimalLatitude', 'decimalLongitude',
'speciesScientificName', 'month', 'year', 'eventDate',
'organismId', 'eventId'])
csv_headers = next(csv_reader)
missing_columns = ', '.join(columns.difference(csv_headers))
if missing_columns:
raise Exception("Missing columns '{}' in dataset".format(missing_columns))
# These columns must be before other trait columns.
for header in columns:
if csv_headers.index(header) >= len(columns):
raise Exception("Column '{}' must be before trait columns in the dataset".format(header))
# rename column names
csv_headers[csv_headers.index('decimalLatitude')] = LATITUDE
csv_headers[csv_headers.index('decimalLongitude')] = LONGITUDE
csv_headers[csv_headers.index('speciesScientificName')] = SPECIES
csv_headers[csv_headers.index('eventDate')] = EVENT_DATE
# write to a temp file
with io.open(os.path.join(datadir, 'trait_temp.csv'), mode='wb') as out_file:
csv_writer = csv.writer(out_file)
csv_writer.writerow(csv_headers)
for row in csv_reader:
csv_writer.writerow(row)
count += 1
# overwrite the trait csv file with the temp file
os.remove(os.path.join(datadir, 'zoatrack_trait.csv'))
os.rename(os.path.join(datadir, 'trait_temp.csv'),
os.path.join(datadir, 'zoatrack_trait.csv'))
return count
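# Illustrative usage sketch (not part of the original module; the directory
# layout and values below are hypothetical). Given a minimal ZoaTrack export,
# the helper above rewrites the header to lat/lon/species/date and returns the
# number of data rows:
#
#   import csv, io, os, tempfile
#   tmp = tempfile.mkdtemp()
#   with io.open(os.path.join(tmp, 'zoatrack_trait.csv'), mode='wb') as fd:
#       writer = csv.writer(fd)
#       writer.writerow(['decimalLatitude', 'decimalLongitude',
#                        'speciesScientificName', 'month', 'year', 'eventDate',
#                        'organismId', 'eventId', 'weight'])
#       writer.writerow(['-27.47', '153.03', 'Macropus rufus', '6', '2014',
#                        '2014-06-01', 'org-1', 'evt-1', '25.0'])
#   assert _process_trait_data(tmp) == 1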
def download_zoatrack_trait_data(src_url, dest):
# Get trait file
data_dest = os.path.join(dest, 'data')
    # The download may fail before a temp file exists; initialise the name so
    # the cleanup in the finally block below cannot raise NameError.
    trait_zipfile = None
    try:
trait_zipfile, _ = urllib.urlretrieve(src_url)
# unzip and rename trait file
with zipfile.ZipFile(trait_zipfile) as z:
os.mkdir(data_dest)
# rename trait data csv file
z.extract('trait.csv', dest)
os.rename(os.path.join(dest, 'trait.csv'),
os.path.join(data_dest, 'zoatrack_trait.csv'))
# citation file is optional
try:
z.extract('citation.txt', dest)
os.rename(os.path.join(dest, 'citation.txt'),
os.path.join(data_dest, 'zoatrack_citation.txt'))
except Exception:
pass
except KeyError:
LOG.error("Cannot find file %s in downloaded zip file", 'trait.csv', exc_info=True)
raise
except Exception:
        # TODO: not a valid zip file; decide whether this needs to re-raise.
LOG.error("The downloaded file from %s is not a zip file", src_url, exc_info=True)
raise
finally:
# Remove the downloaded temp file
if trait_zipfile and os.path.isfile(trait_zipfile):
os.remove(trait_zipfile)
count = 0
try:
count = _process_trait_data(data_dest)
except Exception:
LOG.error('Bad column header in downloaded trait file', exc_info=True)
raise
# Zip out files if available
zip_occurrence_data(os.path.join(dest, 'zoatrack_trait.zip'),
data_dest,
['zoatrack_trait.csv', 'zoatrack_citation.txt'])
return os.path.join(dest, 'zoatrack_trait.zip'), count
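# Sketch of the expected data flow for download_zoatrack_trait_data() (an
# assumption summarising the code above, not part of the original module):
#
#   src_url  -> zip archive containing 'trait.csv' and, optionally, 'citation.txt'
#   dest     -> <dest>/data/zoatrack_trait.csv    (columns renamed by _process_trait_data)
#               <dest>/data/zoatrack_citation.txt (if supplied)
#   returns  -> (<dest>/zoatrack_trait.zip, number of trait rows)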
@app.task()
def pull_traits_from_zoatrack(species, src_url, dest_url, context):
# 1. set progress
spName = ', '.join(species)
set_progress('RUNNING', 'Download {0} from zoatrack'.format(
spName), None, context)
# 2. do download
    # Initialise so the cleanup in the finally block cannot raise NameError
    # when mkdtemp() itself fails.
    tmpdir = None
    try:
tmpdir = tempfile.mkdtemp(prefix='zoatrack_download_')
        # The download is a zip archive containing the trait data file and an
        # optional citation file.
trait_zip, count = download_zoatrack_trait_data(src_url, tmpdir)
if count == 0:
            raise Exception("No trait data found")
# extract metadata and do other stuff....
set_progress('RUNNING', 'Extract metadata {0} from zoatrack'.format(
spName), None, context)
# build item to import
imported_date = datetime.now().strftime('%d/%m/%Y')
title = "{} trait data".format(spName)
        description = "Observed trait data for {}, imported from ZoaTrack on {}".format(
spName, imported_date)
item = {
'title': title,
'description': description,
'file': {
'url': 'file://{}'.format(trait_zip), # local file url
'contenttype': 'application/zip',
'filename': os.path.basename(trait_zip)
},
#'bccvlmetadata': bccvlmd,
'filemetadata': extract_metadata(trait_zip, 'application/zip'),
}
# Add the number of trait records to the metadata
        # TODO: this is a hack; find a better solution.
trait_csv_filename = os.path.join('data', 'zoatrack_trait.csv')
if trait_csv_filename in item['filemetadata']:
# FIXME: copy all occurrence metadata to zip level, for backwards
# compatibility... this should go away after we fully support
# 'layered' occurrence zips.
for key in ('rows', 'headers', 'bounds'):
if key in item['filemetadata'][trait_csv_filename]['metadata']:
item['filemetadata'][key] = item['filemetadata'][
trait_csv_filename]['metadata'][key]
# TODO: clean this up
# remove citation file from metadata, otherwise it will be
# interpreted as data layer within zip file
        if 'data/zoatrack_citation.txt' in item.get('filemetadata', {}):
            del item['filemetadata']['data/zoatrack_citation.txt']
# move data file to destination and build data_url
src = build_source('file://{}'.format(trait_zip))
dst = build_destination(os.path.join(
dest_url, os.path.basename(trait_zip)), app.conf.get('bccvl', {}))
item['file']['url'] = dst['url']
movelib.move(src, dst)
# tell importer about new dataset (import it)
        set_progress('RUNNING', 'Import ZoaTrack trait data {0}'.format(spName),
None, context)
cleanup_job = import_cleanup_job(dest_url, context)
import_job = import_ala_job([item], dest_url, context)
        import_job.link_error(set_progress_job(
            "FAILED", "Import of ZoaTrack trait data failed {0}".format(spName),
None, context))
import_job.link_error(cleanup_job)
        finish_job = set_progress_job(
            "COMPLETED", 'ZoaTrack import {} complete'.format(spName), None,
            context)
(import_job | cleanup_job | finish_job).delay()
except Exception as e:
        set_progress('FAILED', 'Download traits from ZoaTrack: {0}'.format(e),
None, context)
import_cleanup(dest_url, context)
LOG.error('Download from %s to %s failed: %s',
src_url, dest_url, e, exc_info=True)
finally:
if tmpdir and os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
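# Illustrative invocation sketch (an assumption, not part of the original
# module; every value below is hypothetical):
#
#   pull_traits_from_zoatrack.delay(
#       ['Macropus rufus'],                        # species list
#       'https://zoatrack.org/export/traits.zip',  # ZoaTrack export URL
#       'swift+https://store.example.org/traits',  # destination URL
#       context)                                   # job context dict from the caller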
|
BCCVL/org.bccvl.tasks
|
src/org/bccvl/tasks/datamover/zoatrack.py
|
Python
|
gpl-2.0
| 8,153
|
import os
import django
# placeholder for gettext
def _(s):
return s
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'j#zwt2c!*(7(jz!m(tr$+jq^1d(+)e(^059f^nd_(*zj!gv0x)'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
'modeltranslation',
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'celery',
'django_rq',
'rest_framework',
'app',
'mtr.sync',
'mtr.utils'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
MIDDLEWARE_CLASSES += (
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
)
MIDDLEWARE_CLASSES += (
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# django 1.7 support
if django.VERSION < (1, 8, 0):
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
'django.core.context_processors.request',
"django.contrib.messages.context_processors.messages",
'mtr.sync.context_processors.settings'
)
else:
TEMPLATES = ({
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'mtr.sync.context_processors.settings'
)
}},
)
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en'
LANGUAGES = (
('de', _('German')),
('en', _('English')),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
BROKER_BACKEND = 'memory'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
RQ_QUEUES = {
'default': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 0,
'DEFAULT_TIMEOUT': 360,
},
}
SYNC_SETTINGS = {
'actions': ['app.sync'],
'broker': 'celery',
'include': {
'api': True,
'admin': True,
}
}
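# Illustrative note (an assumption, not part of the original settings): with
# CELERY_ALWAYS_EAGER and CELERY_EAGER_PROPAGATES_EXCEPTIONS enabled above,
# tasks dispatched from the test suite run inline and re-raise failures, e.g.
# (hypothetical task module and arguments):
#
#   from mtr.sync.tasks import export_data         # hypothetical import path
#   result = export_data.delay(settings_id=1)      # executes synchronously, no broker needed
#   assert result.successful()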
|
mtrgroup/django-mtr-sync
|
tests/app/settings.py
|
Python
|
mit
| 3,396
|