repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
irwinlove/django | refs/heads/master | tests/template_tests/filter_tests/test_cut.py | 521 | from django.template.defaultfilters import cut
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class CutTests(SimpleTestCase):
    """
    Template-level tests for the ``cut`` filter and its interaction with
    autoescaping.

    NOTE(review): in this copy of the file every ``&amp;`` in the ``mark_safe``
    inputs and expected outputs had been collapsed to ``&`` by an HTML-entity
    round-trip, which made the assertions internally inconsistent (e.g.
    test_cut03 expected ``xamp;y`` from an input that no longer contained
    ``amp;``). The entities are restored below.
    """
    @setup({'cut01': '{% autoescape off %}{{ a|cut:"x" }} {{ b|cut:"x" }}{% endautoescape %}'})
    def test_cut01(self):
        output = self.engine.render_to_string('cut01', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "&y &amp;y")

    @setup({'cut02': '{{ a|cut:"x" }} {{ b|cut:"x" }}'})
    def test_cut02(self):
        output = self.engine.render_to_string('cut02', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "&amp;y &amp;y")

    @setup({'cut03': '{% autoescape off %}{{ a|cut:"&" }} {{ b|cut:"&" }}{% endautoescape %}'})
    def test_cut03(self):
        output = self.engine.render_to_string('cut03', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "xy xamp;y")

    @setup({'cut04': '{{ a|cut:"&" }} {{ b|cut:"&" }}'})
    def test_cut04(self):
        output = self.engine.render_to_string('cut04', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "xy xamp;y")

    # Passing ';' to cut can break existing HTML entities, so those strings
    # are auto-escaped.
    @setup({'cut05': '{% autoescape off %}{{ a|cut:";" }} {{ b|cut:";" }}{% endautoescape %}'})
    def test_cut05(self):
        output = self.engine.render_to_string('cut05', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "x&y x&ampy")

    @setup({'cut06': '{{ a|cut:";" }} {{ b|cut:";" }}'})
    def test_cut06(self):
        output = self.engine.render_to_string('cut06', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "x&amp;y x&amp;ampy")
class FunctionTests(SimpleTestCase):
    """Direct (non-template) tests of the ``cut`` filter function."""

    def test_character(self):
        # Every occurrence of a single character is removed.
        result = cut('a string to be mangled', 'a')
        self.assertEqual(result, ' string to be mngled')

    def test_characters(self):
        # Multi-character substrings are removed wherever they occur.
        result = cut('a string to be mangled', 'ng')
        self.assertEqual(result, 'a stri to be maled')

    def test_non_matching_string(self):
        # A substring that never occurs leaves the value untouched.
        result = cut('a string to be mangled', 'strings')
        self.assertEqual(result, 'a string to be mangled')

    def test_non_string_input(self):
        # Non-string input is coerced to text before cutting.
        result = cut(123, '2')
        self.assertEqual(result, '13')
|
GinnyN/towerofdimensions-django | refs/heads/master | tests/regressiontests/localflavor/se/__init__.py | 12133432 | |
amohanta/thug | refs/heads/master | src/Java/__init__.py | 12133432 | |
saurabh6790/test_final_med_app | refs/heads/master | buying/report/item_wise_purchase_history/__init__.py | 12133432 | |
aakash-cr7/zulip | refs/heads/master | zerver/webhooks/solano/__init__.py | 12133432 | |
tatianerd/big-list-of-naughty-strings | refs/heads/master | scripts/txt_to_json.py | 60 | ### Quick Python Script to convert the Big List of Naughty Strings into a JSON file
###
### By Max Woolf
import json
with open('../blns.txt', 'r') as f:
# put all lines in the file into a Python list
content = f.readlines()
# above line leaves trailing newline characters; strip them out
content = [x.strip('\n') for x in content]
# remove empty-lines and comments
content = [x for x in content if x and not x.startswith('#')]
# insert empty string since all are being removed
content.insert(0, "")
# special case: convert "\" to "\\" for valid JSON
#content = map(lambda x: x.replace('\','\\'), content)
with open('../blns.json', 'wb') as f:
# write JSON to file; note the ensure_ascii parameter
json.dump(content, f, indent=2, ensure_ascii=False)
|
Universal-Model-Converter/UMC3.0a | refs/heads/master | data/Python/x86/Lib/test/test_glob.py | 89 | import glob
import os
import shutil
import sys
import unittest
from test.test_support import run_unittest, TESTFN
def fsdecode(s):
    # Decode a byte-string path to unicode using the filesystem encoding
    # (Python 2 idiom: ``unicode`` is the py2 builtin text type).
    return unicode(s, sys.getfilesystemencoding())
class GlobTests(unittest.TestCase):
    """Tests for glob.glob()/glob.iglob() against a scratch directory tree."""
    def norm(self, *parts):
        # Join *parts* under the temp dir and normalize path separators.
        return os.path.normpath(os.path.join(self.tempdir, *parts))
    def mktemp(self, *parts):
        # Create an empty file under the temp dir, making parent dirs as needed.
        filename = self.norm(*parts)
        base, file = os.path.split(filename)
        if not os.path.exists(base):
            os.makedirs(base)
        f = open(filename, 'w')
        f.close()
    def setUp(self):
        # Build the fixture tree used by every test; symlinks (two of them
        # intentionally broken) are added only on platforms that support them.
        self.tempdir = TESTFN + "_dir"
        self.mktemp('a', 'D')
        self.mktemp('aab', 'F')
        self.mktemp('.aa', 'G')
        self.mktemp('.bb', 'H')
        self.mktemp('aaa', 'zzzF')
        self.mktemp('ZZZ')
        self.mktemp('a', 'bcd', 'EF')
        self.mktemp('a', 'bcd', 'efg', 'ha')
        if hasattr(os, 'symlink'):
            # sym1/sym2 are broken links; sym3 points at the a/bcd directory.
            os.symlink(self.norm('broken'), self.norm('sym1'))
            os.symlink('broken', self.norm('sym2'))
            os.symlink(os.path.join('a', 'bcd'), self.norm('sym3'))
    def tearDown(self):
        # Remove the whole fixture tree.
        shutil.rmtree(self.tempdir)
    def glob(self, *parts):
        # Glob the pattern rooted at the temp dir, cross-checking that iglob
        # and the unicode variants agree with the byte-string glob() result.
        if len(parts) == 1:
            pattern = parts[0]
        else:
            pattern = os.path.join(*parts)
        p = os.path.join(self.tempdir, pattern)
        res = glob.glob(p)
        self.assertEqual(list(glob.iglob(p)), res)
        ures = [fsdecode(x) for x in res]
        self.assertEqual(glob.glob(fsdecode(p)), ures)
        self.assertEqual(list(glob.iglob(fsdecode(p))), ures)
        return res
    def assertSequencesEqual_noorder(self, l1, l2):
        # Order-insensitive equality: compare as sets and as sorted lists
        # (the latter also catches differing multiplicities).
        l1 = list(l1)
        l2 = list(l2)
        self.assertEqual(set(l1), set(l2))
        self.assertEqual(sorted(l1), sorted(l2))
    def test_glob_literal(self):
        # Patterns without magic characters match only the literal name.
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a'), [self.norm('a')])
        eq(self.glob('a', 'D'), [self.norm('a', 'D')])
        eq(self.glob('aab'), [self.norm('aab')])
        eq(self.glob('zymurgy'), [])
        res = glob.glob('*')
        self.assertEqual({type(r) for r in res}, {str})
        res = glob.glob(os.path.join(os.curdir, '*'))
        self.assertEqual({type(r) for r in res}, {str})
        # test return types are unicode, but only if os.listdir
        # returns unicode filenames
        tmp = os.listdir(fsdecode(os.curdir))
        if {type(x) for x in tmp} == {unicode}:
            res = glob.glob(u'*')
            self.assertEqual({type(r) for r in res}, {unicode})
            res = glob.glob(os.path.join(fsdecode(os.curdir), u'*'))
            self.assertEqual({type(r) for r in res}, {unicode})
    def test_glob_one_directory(self):
        # Wildcards confined to a single path component.
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
        eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
        eq(self.glob('.*'), map(self.norm, ['.aa', '.bb']))
        eq(self.glob('?aa'), map(self.norm, ['aaa']))
        eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('*q'), [])
    def test_glob_nested_directory(self):
        # Wildcards below the first level; results depend on case sensitivity.
        eq = self.assertSequencesEqual_noorder
        if os.path.normcase("abCD") == "abCD":
            # case-sensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')])
        else:
            # case insensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'),
                                             self.norm('a', 'bcd', 'efg')])
        eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')])
    def test_glob_directory_names(self):
        # Wildcards appearing in the directory part of the pattern.
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('*', 'D'), [self.norm('a', 'D')])
        eq(self.glob('*', '*a'), [])
        eq(self.glob('a', '*', '*', '*a'),
           [self.norm('a', 'bcd', 'efg', 'ha')])
        eq(self.glob('?a?', '*F'), [self.norm('aaa', 'zzzF'),
                                    self.norm('aab', 'F')])
    def test_glob_directory_with_trailing_slash(self):
        # Patterns ending with a slash shouldn't match non-dirs
        res = glob.glob(self.norm('Z*Z') + os.sep)
        self.assertEqual(res, [])
        res = glob.glob(self.norm('ZZZ') + os.sep)
        self.assertEqual(res, [])
        # When there is a wildcard pattern which ends with os.sep, glob()
        # doesn't blow up.
        res = glob.glob(self.norm('aa*') + os.sep)
        self.assertEqual(len(res), 2)
        # either of these results is reasonable
        self.assertIn(set(res), [
            {self.norm('aaa'), self.norm('aab')},
            {self.norm('aaa') + os.sep, self.norm('aab') + os.sep},
        ])
    def test_glob_unicode_directory_with_trailing_slash(self):
        # Same as test_glob_directory_with_trailing_slash, but with an
        # unicode argument.
        res = glob.glob(fsdecode(self.norm('Z*Z') + os.sep))
        self.assertEqual(res, [])
        res = glob.glob(fsdecode(self.norm('ZZZ') + os.sep))
        self.assertEqual(res, [])
        res = glob.glob(fsdecode(self.norm('aa*') + os.sep))
        self.assertEqual(len(res), 2)
        # either of these results is reasonable
        self.assertIn(set(res), [
            {fsdecode(self.norm('aaa')), fsdecode(self.norm('aab'))},
            {fsdecode(self.norm('aaa') + os.sep),
             fsdecode(self.norm('aab') + os.sep)},
        ])
    @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
    def test_glob_symlinks(self):
        # Globbing through a symlink to a directory follows the link.
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('sym3'), [self.norm('sym3')])
        eq(self.glob('sym3', '*'), [self.norm('sym3', 'EF'),
                                    self.norm('sym3', 'efg')])
        self.assertIn(self.glob('sym3' + os.sep),
                      [[self.norm('sym3')], [self.norm('sym3') + os.sep]])
        eq(self.glob('*', '*F'),
           [self.norm('aaa', 'zzzF'), self.norm('aab', 'F'),
            self.norm('sym3', 'EF')])
    @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
    def test_glob_broken_symlinks(self):
        # Broken symlinks still show up in glob results.
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2'),
                               self.norm('sym3')])
        eq(self.glob('sym1'), [self.norm('sym1')])
        eq(self.glob('sym2'), [self.norm('sym2')])
    @unittest.skipUnless(sys.platform == "win32", "Win32 specific test")
    def test_glob_magic_in_drive(self):
        # Magic characters in the drive portion must not match anything.
        eq = self.assertSequencesEqual_noorder
        eq(glob.glob('*:'), [])
        eq(glob.glob(u'*:'), [])
        eq(glob.glob('?:'), [])
        eq(glob.glob(u'?:'), [])
def test_main():
    # Entry point used by Python 2's regrtest machinery.
    run_unittest(GlobTests)
if __name__ == "__main__":
    test_main()
|
tgstation/tgstation | refs/heads/master | tools/dmi/test.py | 15 | import os
import sys
from dmi import *
def _self_test():
    """Parse every .dmi file under the current tree; raise on the first failure."""
    count = 0
    for dirpath, dirnames, filenames in os.walk('.'):
        # Prune the .git directory so the walk never descends into it.
        if '.git' in dirnames:
            dirnames.remove('.git')
        for filename in filenames:
            if not filename.endswith('.dmi'):
                continue
            fullpath = os.path.join(dirpath, filename)
            try:
                Dmi.from_file(fullpath)
            except Exception:
                # Report which file broke the parser before re-raising.
                print('Failed on:', fullpath)
                raise
            count += 1
    print(f"{os.path.relpath(__file__)}: successfully parsed {count} .dmi files")
def _usage():
print(f"Usage:")
print(f" tools{os.sep}bootstrap{os.sep}python -m {__spec__.name}")
exit(1)
def _main():
    """Dispatch: no CLI arguments runs the self-test, anything else shows usage."""
    if len(sys.argv) != 1:
        return _usage()
    return _self_test()
if __name__ == '__main__':
    _main()
|
oriel-hub/api | refs/heads/master | django/idsapi/profiles/urls.py | 1 | """
URLConf for Django user profile management.
Recommended usage is to use a call to ``include()`` in your project's
root URLConf to include this URLConf for any URL beginning with
'/profiles/'.
If the default behavior of the profile views is acceptable to you,
simply use a line like this in your root URLConf to set up the default
URLs for profiles::
(r'^profiles/', include('profiles.urls')),
But if you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead. If you do, it's a
good idea to keep the name ``profiles_profile_detail`` for the pattern
which points to the ``profile_detail`` view, since several views use
``reverse()`` with that name to generate a default post-submission
redirect. If you don't use that name, remember to explicitly pass
``success_url`` to those views.
"""
from django.conf.urls.defaults import url
from profiles import views
urlpatterns = [
    # Create a profile for the current user.
    url(r'^create/$',
        views.create_profile,
        name='profiles_create_profile'),
    # Edit the current user's existing profile.
    url(r'^edit/$',
        views.edit_profile,
        name='profiles_edit_profile'),
    # Public profile detail page. Keep the name 'profiles_profile_detail':
    # other views reverse() it (see module docstring). NOTE: this pattern
    # also matches names like 'create', so it must stay after the more
    # specific patterns above.
    url(r'^(?P<username>\w+)/$',
        views.profile_detail,
        name='profiles_profile_detail'),
    # Listing of all profiles at the URLConf root.
    url(r'^$',
        views.profile_list,
        name='profiles_profile_list'),
]
|
Annelutfen/gittigidiyor-python | refs/heads/master | gittigidiyor/authenticationservice.py | 1 | # -*- coding: utf-8 -*-
#####################################################################
# Copyright (c) <2010> <GittiGidiyor> #
# #
# Permission is hereby granted, free of charge, to any person #
# obtaining a copy of this software and associated documentation #
# files (the "Software"), to deal in the Software without #
# restriction, including without limitation the rights to use, #
# copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following #
# conditions: #
# #
# The above copyright notice and this permission notice shall be #
# included in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, #
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES #
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT #
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, #
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR #
# OTHER DEALINGS IN THE SOFTWARE. #
#####################################################################
from service import *
from jsonbuilder import *
from xmlbuilder import *
class AuthenticationService(Service):
    """
    Client for the Authentication Service of the GittiGidiyor RESTLIKE API.
    """

    def __init__(self, auth):
        """
        Create the service bound to the given credentials.

        auth = Auth(username = 'testuser', password = 'testpassword', key = 'apikey', secret = 'apisecret')
        authenticationApi = AuthenticationService(auth)
        """
        Service.__init__(self, "authentication", auth)
        # Response headers of the most recent createToken() call
        # (None until the first call is made).
        self.headers = None

    def createToken(self, inputCT, outputCT, lang):
        """
        Performs the 'createToken' API method for the Authentication Service
        API of Gittigidiyor.

        inputCT: input content type (xml or json)
        outputCT: output content type (xml or json)
        lang: 'tr' or 'en'
        """
        template = "https://dev.gittigidiyor.com/listingapi/rlws/community/auth?method=createToken&outputCT=%s&inputCT=%s&apiKey=%s&sign=%s&time=%s&lang=%s"
        ts = self.createTimeStamp()
        request_url = template % (outputCT, inputCT, self.auth.key, self.signature(ts), ts, lang)
        response, content = self.makeRequest(request_url)
        # Keep the raw response headers around for the caller to inspect.
        self.headers = response
        return content

    def getLoginURL(self, tokenId, redirectUrl):
        """
        Return the login URL where the user grants permission to his
        GittiGidiyor.com profile.
        """
        template = "http://dev.gittigidiyor.com/api/login.gg?apiKey=%s&sign=%s&time=%s&tokenId=%s&redirectUrl=%s"
        ts = self.createTimeStamp()
        return template % (self.auth.key, self.signature(ts), ts, tokenId, redirectUrl)
|
miarmak/CloudFerry | refs/heads/master | cloudferrylib/os/actions/deploy_volumes.py | 9 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from cloudferrylib.base.action import action
from cloudferrylib.utils import utils as utl
import copy
OLD_ID = 'old_id'
class DeployVolumes(action.Action):
    """Deploy volumes on the destination cloud and rebuild the storage info."""

    def run(self, storage_info=None, identity_info=None, **kwargs):
        """
        Deploy every volume described in *storage_info* and return a new
        storage-info mapping keyed by the freshly created volume ids.

        :param storage_info: volumes info from the source cloud; each volume
            entry is expected to carry 'snapshots' and meta-info sections.
        :param identity_info: identity info merged into the data handed to the
            storage resource (and into the lookup copy of storage_info).
        :return: dict with a single 'storage_info' key; each deployed volume
            is annotated with its old id, snapshots and meta info.
        """
        # None sentinels instead of the original mutable {} defaults
        # (shared-dict pitfall); callers passing dicts see no change.
        storage_info = copy.deepcopy(storage_info) if storage_info else {}
        identity_info = identity_info if identity_info is not None else {}
        deploy_info = copy.deepcopy(storage_info)
        deploy_info.update(identity_info)
        storage_info.update(identity_info)
        volume_resource = self.cloud.resources[utl.STORAGE_RESOURCE]
        new_ids = volume_resource.deploy(deploy_info)
        storage_info_new = {
            utl.VOLUMES_TYPE:
            {
            }
        }
        volumes = storage_info_new[utl.VOLUMES_TYPE]
        for new_id, old_id in new_ids.iteritems():
            volume = volume_resource.read_info(id=new_id)
            # Carry over the source-cloud identity and metadata of the volume.
            volume[utl.VOLUMES_TYPE][new_id][OLD_ID] = old_id
            volume[utl.VOLUMES_TYPE][new_id]['snapshots'] = \
                storage_info[utl.VOLUMES_TYPE][old_id]['snapshots']
            volume[utl.VOLUMES_TYPE][new_id][utl.META_INFO] = \
                storage_info[utl.VOLUMES_TYPE][old_id][utl.META_INFO]
            volumes.update(volume[utl.VOLUMES_TYPE])
        return {
            'storage_info': storage_info_new
        }
|
nicproulx/mne-python | refs/heads/placeholder | mne/io/eeglab/tests/__init__.py | 12133432 | |
fenglu-g/incubator-airflow | refs/heads/master | tests/contrib/operators/test_gcp_function_operator.py | 4 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from googleapiclient.errors import HttpError
from parameterized import parameterized
from airflow.contrib.operators.gcp_function_operator import \
GcfFunctionDeployOperator, GcfFunctionDeleteOperator, FUNCTION_NAME_PATTERN
from airflow import AirflowException
from airflow.version import version
from copy import deepcopy
try:
# noinspection PyProtectedMember
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
EMPTY_CONTENT = ''.encode('utf8')
MOCK_RESP_404 = type('', (object,), {"status": 404})()
GCP_PROJECT_ID = 'test_project_id'
GCP_LOCATION = 'test_region'
GCF_SOURCE_ARCHIVE_URL = 'gs://folder/file.zip'
GCF_ENTRYPOINT = 'helloWorld'
FUNCTION_NAME = 'projects/{}/locations/{}/functions/{}'.format(GCP_PROJECT_ID,
GCP_LOCATION,
GCF_ENTRYPOINT)
GCF_RUNTIME = 'nodejs6'
VALID_RUNTIMES = ['nodejs6', 'nodejs8', 'python37']
VALID_BODY = {
"name": FUNCTION_NAME,
"entryPoint": GCF_ENTRYPOINT,
"runtime": GCF_RUNTIME,
"httpsTrigger": {},
"sourceArchiveUrl": GCF_SOURCE_ARCHIVE_URL
}
def _prepare_test_bodies():
body_no_name = deepcopy(VALID_BODY)
body_no_name.pop('name', None)
body_empty_entry_point = deepcopy(VALID_BODY)
body_empty_entry_point['entryPoint'] = ''
body_empty_runtime = deepcopy(VALID_BODY)
body_empty_runtime['runtime'] = ''
body_values = [
({}, "The required parameter 'body' is missing"),
(body_no_name, "The required body field 'name' is missing"),
(body_empty_entry_point,
"The body field 'entryPoint' of value '' does not match"),
(body_empty_runtime, "The body field 'runtime' of value '' does not match"),
]
return body_values
class GcfFunctionDeployTest(unittest.TestCase):
@parameterized.expand(_prepare_test_bodies())
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_body_empty_or_missing_fields(self, body, message, mock_hook):
mock_hook.return_value.upload_function_zip.return_value = 'https://uploadUrl'
with self.assertRaises(AirflowException) as cm:
op = GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=body,
task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn(message, str(err))
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.reset_mock()
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_deploy_execute(self, mock_hook):
mock_hook.return_value.get_function.side_effect = mock.Mock(
side_effect=HttpError(resp=MOCK_RESP_404, content=b'not found'))
mock_hook.return_value.create_new_function.return_value = True
op = GcfFunctionDeployOperator(
project_id=GCP_PROJECT_ID,
location=GCP_LOCATION,
body=deepcopy(VALID_BODY),
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.get_function.assert_called_once_with(
'projects/test_project_id/locations/test_region/functions/helloWorld'
)
expected_body = deepcopy(VALID_BODY)
expected_body['labels'] = {
'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')
}
mock_hook.return_value.create_new_function.assert_called_once_with(
project_id='test_project_id',
location='test_region',
body=expected_body
)
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_update_function_if_exists(self, mock_hook):
mock_hook.return_value.get_function.return_value = True
mock_hook.return_value.update_function.return_value = True
op = GcfFunctionDeployOperator(
project_id=GCP_PROJECT_ID,
location=GCP_LOCATION,
body=deepcopy(VALID_BODY),
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.get_function.assert_called_once_with(
'projects/test_project_id/locations/test_region/functions/helloWorld'
)
expected_body = deepcopy(VALID_BODY)
expected_body['labels'] = {
'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')
}
mock_hook.return_value.update_function.assert_called_once_with(
'projects/test_project_id/locations/test_region/functions/helloWorld',
expected_body, expected_body.keys())
mock_hook.return_value.create_new_function.assert_not_called()
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_empty_project_id_is_ok(self, mock_hook):
operator = GcfFunctionDeployOperator(
location="test_region",
body=deepcopy(VALID_BODY),
task_id="id"
)
operator._hook.get_function.side_effect = \
HttpError(resp=MOCK_RESP_404, content=b'not found')
operator.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
new_body = deepcopy(VALID_BODY)
new_body['labels'] = {
'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')}
mock_hook.return_value.create_new_function.assert_called_once_with(
project_id=None,
location="test_region",
body=new_body)
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_empty_location(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
GcfFunctionDeployOperator(
project_id="test_project_id",
location="",
body=None,
task_id="id"
)
err = cm.exception
self.assertIn("The required parameter 'location' is missing", str(err))
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_empty_body(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=None,
task_id="id"
)
err = cm.exception
self.assertIn("The required parameter 'body' is missing", str(err))
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
@parameterized.expand([
(runtime,) for runtime in VALID_RUNTIMES
])
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_correct_runtime_field(self, runtime, mock_hook):
mock_hook.return_value.create_new_function.return_value = True
body = deepcopy(VALID_BODY)
body['runtime'] = runtime
op = GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=body,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.reset_mock()
@parameterized.expand([
(network,) for network in [
"network-01",
"n-0-2-3-4",
"projects/PROJECT/global/networks/network-01"
"projects/PRÓJECT/global/networks/netwórk-01"
]
])
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_valid_network_field(self, network, mock_hook):
mock_hook.return_value.create_new_function.return_value = True
body = deepcopy(VALID_BODY)
body['network'] = network
op = GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=body,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.reset_mock()
@parameterized.expand([
(labels,) for labels in [
{},
{"label": 'value-01'},
{"label_324234_a_b_c": 'value-01_93'},
]
])
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_valid_labels_field(self, labels, mock_hook):
mock_hook.return_value.create_new_function.return_value = True
body = deepcopy(VALID_BODY)
body['labels'] = labels
op = GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=body,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.reset_mock()
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_validation_disabled(self, mock_hook):
mock_hook.return_value.create_new_function.return_value = True
body = {
"name": "function_name",
"some_invalid_body_field": "some_invalid_body_field_value"
}
op = GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=body,
validate_body=False,
task_id="id"
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.reset_mock()
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_body_validation_simple(self, mock_hook):
mock_hook.return_value.create_new_function.return_value = True
body = deepcopy(VALID_BODY)
body['name'] = ''
with self.assertRaises(AirflowException) as cm:
op = GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=body,
task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn("The body field 'name' of value '' does not match",
str(err))
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.reset_mock()
@parameterized.expand([
('name', '',
"The body field 'name' of value '' does not match"),
('description', '', "The body field 'description' of value '' does not match"),
('entryPoint', '', "The body field 'entryPoint' of value '' does not match"),
('availableMemoryMb', '0',
"The available memory has to be greater than 0"),
('availableMemoryMb', '-1',
"The available memory has to be greater than 0"),
('availableMemoryMb', 'ss',
"invalid literal for int() with base 10: 'ss'"),
('network', '', "The body field 'network' of value '' does not match"),
('maxInstances', '0', "The max instances parameter has to be greater than 0"),
('maxInstances', '-1', "The max instances parameter has to be greater than 0"),
('maxInstances', 'ss', "invalid literal for int() with base 10: 'ss'"),
])
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_invalid_field_values(self, key, value, message, mock_hook):
mock_hook.return_value.create_new_function.return_value = True
body = deepcopy(VALID_BODY)
body[key] = value
with self.assertRaises(AirflowException) as cm:
op = GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=body,
task_id="id"
)
op.execute(None)
err = cm.exception
self.assertIn(message, str(err))
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.reset_mock()
@parameterized.expand([
({'sourceArchiveUrl': ''},
"The body field 'source_code.sourceArchiveUrl' of value '' does not match"),
({'sourceArchiveUrl': '', 'zip_path': '/path/to/file'},
"Only one of 'sourceArchiveUrl' in body or 'zip_path' argument allowed."),
({'sourceArchiveUrl': 'gs://url', 'zip_path': '/path/to/file'},
"Only one of 'sourceArchiveUrl' in body or 'zip_path' argument allowed."),
({'sourceArchiveUrl': '', 'sourceUploadUrl': ''},
"Parameter 'sourceUploadUrl' is empty in the body and argument "
"'zip_path' is missing or empty."),
({'sourceArchiveUrl': 'gs://adasda', 'sourceRepository': ''},
"The field 'source_code.sourceRepository' should be of dictionary type"),
({'sourceUploadUrl': '', 'sourceRepository': ''},
"Parameter 'sourceUploadUrl' is empty in the body and argument 'zip_path' "
"is missing or empty."),
({'sourceArchiveUrl': '', 'sourceUploadUrl': '', 'sourceRepository': ''},
"Parameter 'sourceUploadUrl' is empty in the body and argument 'zip_path' "
"is missing or empty."),
({'sourceArchiveUrl': 'gs://url', 'sourceUploadUrl': 'https://url'},
"The mutually exclusive fields 'sourceUploadUrl' and 'sourceArchiveUrl' "
"belonging to the union 'source_code' are both present. Please remove one"),
({'sourceUploadUrl': 'https://url', 'zip_path': '/path/to/file'},
"Only one of 'sourceUploadUrl' in body "
"or 'zip_path' argument allowed. Found both."),
({'sourceUploadUrl': ''}, "Parameter 'sourceUploadUrl' is empty in the body "
"and argument 'zip_path' is missing or empty."),
({'sourceRepository': ''}, "The field 'source_code.sourceRepository' "
"should be of dictionary type"),
({'sourceRepository': {}}, "The required body field "
"'source_code.sourceRepository.url' is missing"),
({'sourceRepository': {'url': ''}},
"The body field 'source_code.sourceRepository.url' of value '' does not match"),
]
)
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_invalid_source_code_union_field(self, source_code, message, mock_hook):
mock_hook.return_value.upload_function_zip.return_value = 'https://uploadUrl'
body = deepcopy(VALID_BODY)
body.pop('sourceUploadUrl', None)
body.pop('sourceArchiveUrl', None)
zip_path = source_code.pop('zip_path', None)
body.update(source_code)
with self.assertRaises(AirflowException) as cm:
op = GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=body,
task_id="id",
zip_path=zip_path
)
op.execute(None)
err = cm.exception
self.assertIn(message, str(err))
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.reset_mock()
@parameterized.expand([
({'sourceArchiveUrl': 'gs://url'}, 'test_project_id'),
({'zip_path': '/path/to/file', 'sourceUploadUrl': None}, 'test_project_id'),
({'zip_path': '/path/to/file', 'sourceUploadUrl': None}, None),
({'sourceUploadUrl':
'https://source.developers.google.com/projects/a/repos/b/revisions/c/paths/d'},
'test_project_id'),
({'sourceRepository':
{'url': 'https://source.developers.google.com/projects/a/'
'repos/b/revisions/c/paths/d'}},
'test_project_id'),
])
@mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
def test_valid_source_code_union_field(self, source_code, project_id, mock_hook):
mock_hook.return_value.upload_function_zip.return_value = 'https://uploadUrl'
mock_hook.return_value.get_function.side_effect = mock.Mock(
side_effect=HttpError(resp=MOCK_RESP_404, content=b'not found'))
mock_hook.return_value.create_new_function.return_value = True
body = deepcopy(VALID_BODY)
body.pop('sourceUploadUrl', None)
body.pop('sourceArchiveUrl', None)
body.pop('sourceRepository', None)
body.pop('sourceRepositoryUrl', None)
zip_path = source_code.pop('zip_path', None)
body.update(source_code)
if project_id:
op = GcfFunctionDeployOperator(
project_id="test_project_id",
location="test_region",
body=body,
task_id="id",
zip_path=zip_path
)
else:
op = GcfFunctionDeployOperator(
location="test_region",
body=body,
task_id="id",
zip_path=zip_path
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
if zip_path:
mock_hook.return_value.upload_function_zip.assert_called_once_with(
project_id=project_id,
location='test_region',
zip_path='/path/to/file'
)
mock_hook.return_value.get_function.assert_called_once_with(
'projects/test_project_id/locations/test_region/functions/helloWorld'
)
mock_hook.return_value.create_new_function.assert_called_once_with(
project_id=project_id,
location='test_region',
body=body
)
mock_hook.reset_mock()
    # Each case supplies an (invalid_trigger, expected_error_fragment) pair:
    # body validation must reject the trigger during execute() with a
    # message containing the fragment.
    @parameterized.expand([
        ({'eventTrigger': {}},
         "The required body field 'trigger.eventTrigger.eventType' is missing"),
        ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b'}},
         "The required body field 'trigger.eventTrigger.resource' is missing"),
        ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b', 'resource': ''}},
         "The body field 'trigger.eventTrigger.resource' of value '' does not match"),
        ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b',
                           'resource': 'res',
                           'service': ''}},
         "The body field 'trigger.eventTrigger.service' of value '' does not match"),
        ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b',
                           'resource': 'res',
                           'service': 'service_name',
                           'failurePolicy': {'retry': ''}}},
         "The field 'trigger.eventTrigger.failurePolicy.retry' "
         "should be of dictionary type")
    ]
    )
    @mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
    def test_invalid_trigger_union_field(self, trigger, message, mock_hook):
        """Invalid 'eventTrigger' bodies must raise AirflowException at execute()."""
        mock_hook.return_value.upload_function_zip.return_value = 'https://uploadUrl'
        # Start from a known-good body, then swap in the invalid trigger variant.
        body = deepcopy(VALID_BODY)
        body.pop('httpsTrigger', None)
        body.pop('eventTrigger', None)
        body.update(trigger)
        with self.assertRaises(AirflowException) as cm:
            op = GcfFunctionDeployOperator(
                project_id="test_project_id",
                location="test_region",
                body=body,
                task_id="id",
            )
            op.execute(None)
        err = cm.exception
        # Validation messages are long; match only the significant fragment.
        self.assertIn(message, str(err))
        mock_hook.assert_called_once_with(api_version='v1',
                                          gcp_conn_id='google_cloud_default')
        mock_hook.reset_mock()
    # Each case is a valid trigger variant (https trigger, event trigger,
    # optional service, non-ASCII values, failurePolicy dict) that must pass
    # validation and lead to a create_new_function call.
    @parameterized.expand([
        ({'httpsTrigger': {}},),
        ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b',
                           'resource': 'res'}},),
        ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b',
                           'resource': 'res',
                           'service': 'service_name'}},),
        ({'eventTrigger': {'eventType': 'providers/test/eventTypes/ą.b',
                           'resource': 'reś',
                           'service': 'service_namę'}},),
        ({'eventTrigger': {'eventType': 'providers/test/eventTypes/a.b',
                           'resource': 'res',
                           'service': 'service_name',
                           'failurePolicy': {'retry': {}}}},)
    ])
    @mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
    def test_valid_trigger_union_field(self, trigger, mock_hook):
        """Valid trigger variants deploy successfully via create_new_function."""
        mock_hook.return_value.upload_function_zip.return_value = 'https://uploadUrl'
        # get_function raising 404 forces the operator down the "create" path.
        mock_hook.return_value.get_function.side_effect = mock.Mock(
            side_effect=HttpError(resp=MOCK_RESP_404, content=b'not found'))
        mock_hook.return_value.create_new_function.return_value = True
        body = deepcopy(VALID_BODY)
        # Replace whatever trigger the valid body carries with the variant.
        body.pop('httpsTrigger', None)
        body.pop('eventTrigger', None)
        body.update(trigger)
        op = GcfFunctionDeployOperator(
            project_id="test_project_id",
            location="test_region",
            body=body,
            task_id="id",
        )
        op.execute(None)
        mock_hook.assert_called_once_with(api_version='v1',
                                          gcp_conn_id='google_cloud_default')
        mock_hook.return_value.get_function.assert_called_once_with(
            'projects/test_project_id/locations/test_region/functions/helloWorld'
        )
        mock_hook.return_value.create_new_function.assert_called_once_with(
            project_id='test_project_id',
            location='test_region',
            body=body
        )
        mock_hook.reset_mock()
    @mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
    def test_extra_parameter(self, mock_hook):
        """Unknown top-level body fields are tolerated (forward compatibility)."""
        mock_hook.return_value.create_new_function.return_value = True
        body = deepcopy(VALID_BODY)
        # A field the validator does not know about must not break deployment.
        body['extra_parameter'] = 'extra'
        op = GcfFunctionDeployOperator(
            project_id="test_project_id",
            location="test_region",
            body=body,
            task_id="id"
        )
        op.execute(None)
        mock_hook.assert_called_once_with(api_version='v1',
                                          gcp_conn_id='google_cloud_default')
        mock_hook.reset_mock()
class GcfFunctionDeleteTest(unittest.TestCase):
    """Unit tests for GcfFunctionDeleteOperator (Cloud Functions delete)."""
    # Fully-qualified Cloud Function name shared by all tests below.
    _FUNCTION_NAME = 'projects/project_name/locations/project_location/functions' \
                     '/function_name'
    # Canned CloudFunction resource returned by the mocked delete call.
    _DELETE_FUNCTION_EXPECTED = {
        '@type': 'type.googleapis.com/google.cloud.functions.v1.CloudFunction',
        'name': _FUNCTION_NAME,
        'sourceArchiveUrl': 'gs://functions/hello.zip',
        'httpsTrigger': {
            'url': 'https://project_location-project_name.cloudfunctions.net'
                   '/function_name'},
        'status': 'ACTIVE', 'entryPoint': 'entry_point', 'timeout': '60s',
        'availableMemoryMb': 256,
        'serviceAccountEmail': 'project_name@appspot.gserviceaccount.com',
        'updateTime': '2018-08-23T00:00:00Z',
        'versionId': '1', 'runtime': 'nodejs6'}
    @mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
    def test_delete_execute(self, mock_hook):
        """Successful delete returns the API's CloudFunction payload."""
        mock_hook.return_value.delete_function.return_value = \
            self._DELETE_FUNCTION_EXPECTED
        op = GcfFunctionDeleteOperator(
            name=self._FUNCTION_NAME,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(api_version='v1',
                                          gcp_conn_id='google_cloud_default')
        mock_hook.return_value.delete_function.assert_called_once_with(
            'projects/project_name/locations/project_location/functions/function_name'
        )
        self.assertEqual(result['name'], self._FUNCTION_NAME)
    @mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
    def test_correct_name(self, mock_hook):
        """A name matching the required pattern is accepted."""
        op = GcfFunctionDeleteOperator(
            name="projects/project_name/locations/project_location/functions"
                 "/function_name",
            task_id="id"
        )
        op.execute(None)
        mock_hook.assert_called_once_with(api_version='v1',
                                          gcp_conn_id='google_cloud_default')
    @mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
    def test_invalid_name(self, mock_hook):
        """A malformed name raises AttributeError before any API call."""
        with self.assertRaises(AttributeError) as cm:
            op = GcfFunctionDeleteOperator(
                name="invalid_name",
                task_id="id"
            )
            op.execute(None)
        err = cm.exception
        self.assertEqual(str(err), 'Parameter name must match pattern: {}'.format(
            FUNCTION_NAME_PATTERN))
        mock_hook.assert_not_called()
    @mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
    def test_empty_name(self, mock_hook):
        """An empty name is rejected at operator construction time."""
        mock_hook.return_value.delete_function.return_value = \
            self._DELETE_FUNCTION_EXPECTED
        with self.assertRaises(AttributeError) as cm:
            GcfFunctionDeleteOperator(
                name="",
                task_id="id"
            )
        err = cm.exception
        self.assertEqual(str(err), 'Empty parameter: name')
        mock_hook.assert_not_called()
    @mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
    def test_gcf_error_silenced_when_function_doesnt_exist(self, mock_hook):
        """A 404 from the API is swallowed: deleting a missing function is OK."""
        op = GcfFunctionDeleteOperator(
            name=self._FUNCTION_NAME,
            task_id="id"
        )
        mock_hook.return_value.delete_function.side_effect = mock.Mock(
            side_effect=HttpError(resp=MOCK_RESP_404, content=b'not found'))
        op.execute(None)
        mock_hook.assert_called_once_with(api_version='v1',
                                          gcp_conn_id='google_cloud_default')
        mock_hook.return_value.delete_function.assert_called_once_with(
            'projects/project_name/locations/project_location/functions/function_name'
        )
    @mock.patch('airflow.contrib.operators.gcp_function_operator.GcfHook')
    def test_non_404_gcf_error_bubbled_up(self, mock_hook):
        """Any non-404 HttpError propagates to the caller."""
        op = GcfFunctionDeleteOperator(
            name=self._FUNCTION_NAME,
            task_id="id"
        )
        # Minimal stand-in for an httplib2 response carrying a 500 status.
        resp = type('', (object,), {"status": 500})()
        mock_hook.return_value.delete_function.side_effect = mock.Mock(
            side_effect=HttpError(resp=resp, content=b'error'))
        with self.assertRaises(HttpError):
            op.execute(None)
        mock_hook.assert_called_once_with(api_version='v1',
                                          gcp_conn_id='google_cloud_default')
        mock_hook.return_value.delete_function.assert_called_once_with(
            'projects/project_name/locations/project_location/functions/function_name'
        )
|
odejesush/tensorflow | refs/heads/master | tensorflow/contrib/learn/python/learn/evaluable.py | 64 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`Evaluable` interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
class Evaluable(object):
  """Interface for objects that are evaluatable by, e.g., `Experiment`.
  """
  # NOTE(review): `__metaclass__` is the Python 2 spelling; under Python 3
  # this assignment is inert, so the class is not actually abstract there —
  # confirm whether py2 support is still required before changing it.
  __metaclass__ = abc.ABCMeta
  @abc.abstractproperty
  def model_dir(self):
    """Returns a path in which the eval process will look for checkpoints."""
    raise NotImplementedError
  @abc.abstractmethod
  def evaluate(self,
               x=None,
               y=None,
               input_fn=None,
               feed_fn=None,
               batch_size=None,
               steps=None,
               metrics=None,
               name=None,
               checkpoint_path=None,
               hooks=None):
    """Evaluates given model with provided evaluation data.

    Stop conditions - we evaluate on the given input data until one of the
    following:
    - If `steps` is provided, and `steps` batches of size `batch_size` are
    processed.
    - If `input_fn` is provided, and it raises an end-of-input
    exception (`OutOfRangeError` or `StopIteration`).
    - If `x` is provided, and all items in `x` have been processed.

    The return value is a dict containing the metrics specified in `metrics`, as
    well as an entry `global_step` which contains the value of the global step
    for which this evaluation was performed.

    Args:
      x: Matrix of shape [n_samples, n_features...] or dictionary of many matrices
        containing the input samples for fitting the model. Can be iterator that returns
        arrays of features or dictionary of array of features. If set, `input_fn` must
        be `None`.
      y: Vector or matrix [n_samples] or [n_samples, n_outputs] containing the
        label values (class labels in classification, real numbers in
        regression) or dictionary of multiple vectors/matrices. Can be iterator
        that returns array of targets or dictionary of array of targets. If set,
        `input_fn` must be `None`. Note: For classification, label values must
        be integers representing the class index (i.e. values from 0 to
        n_classes-1).
      input_fn: Input function returning a tuple of:
          features - Dictionary of string feature name to `Tensor` or `Tensor`.
          labels - `Tensor` or dictionary of `Tensor` with labels.
        If input_fn is set, `x`, `y`, and `batch_size` must be `None`. If
        `steps` is not provided, this should raise `OutOfRangeError` or
        `StopIteration` after the desired amount of data (e.g., one epoch) has
        been provided. See "Stop conditions" above for specifics.
      feed_fn: Function creating a feed dict every time it is called. Called
        once per iteration. Must be `None` if `input_fn` is provided.
      batch_size: minibatch size to use on the input, defaults to first
        dimension of `x`, if specified. Must be `None` if `input_fn` is
        provided.
      steps: Number of steps for which to evaluate model. If `None`, evaluate
        until `x` is consumed or `input_fn` raises an end-of-input exception.
        See "Stop conditions" above for specifics.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function.
        Metric ops should support streaming, e.g., returning `update_op` and
        `value` tensors. For example, see the options defined in
        `../../../metrics/python/ops/metrics_ops.py`.
      name: Name of the evaluation if user needs to run multiple evaluations on
        different data sets, such as on training data vs test data.
      checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
        latest checkpoint in `model_dir` is used.
      hooks: List of `SessionRunHook` subclass instances. Used for callbacks
        inside the evaluation call.

    Returns:
      Returns `dict` with evaluation results.
    """
    raise NotImplementedError
|
40223119/2015w11 | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/posixpath.py | 722 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount", "expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath"]
# Strings representing various path-related bits and pieces.
# These are primarily for export; internally, they are hardcoded.
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None  # POSIX has no alternative separator (unlike ntpath's '\\')
devnull = '/dev/null'
def _get_sep(path):
    """Return the path separator matching *path*'s type (bytes or str)."""
    return b'/' if isinstance(path, bytes) else '/'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
    """Normalize case of pathname.  Has no effect under Posix."""
    # TODO: on Mac OS X, this should really return s.lower().
    if isinstance(s, (bytes, str)):
        return s
    raise TypeError("normcase() argument must be str or bytes, "
                    "not '{}'".format(s.__class__.__name__))
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
    """Test whether a path is absolute (i.e. starts with '/')."""
    sep = b'/' if isinstance(s, bytes) else '/'
    return s.startswith(sep)
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed.

    If any component is an absolute path, all previous path components
    will be discarded.  An empty last part will result in a path that
    ends with a separator.
    """
    sep = b'/' if isinstance(a, bytes) else '/'
    path = a
    try:
        for segment in p:
            if segment.startswith(sep):
                # Absolute component: restart from it.
                path = segment
            elif not path or path.endswith(sep):
                path += segment
            else:
                path += sep + segment
    except TypeError:
        # Distinguish "mixed str/bytes" from genuinely wrong argument types.
        if all(isinstance(s, (str, bytes, bytearray)) for s in (a,) + p):
            raise TypeError("Can't mix strings and bytes in path "
                            "components.") from None
        raise
    return path
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
    everything after the final slash.  Either part may be empty."""
    sep = b'/' if isinstance(p, bytes) else '/'
    cut = p.rfind(sep) + 1
    head, tail = p[:cut], p[cut:]
    # Strip trailing slashes from head unless it is all slashes (the root).
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared generic implementation; POSIX has no altsep.
    if isinstance(p, bytes):
        return genericpath._splitext(p, b'/', None, b'.')
    return genericpath._splitext(p, '/', None, '.')
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
    """Split a pathname into drive and path.  POSIX paths have no drive,
    so the drive part is always empty (of *p*'s own type)."""
    drive = p[:0]
    return drive, p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
    """Returns the final component of a pathname."""
    sep = b'/' if isinstance(p, bytes) else '/'
    return p[p.rfind(sep) + 1:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
    """Returns the directory component of a pathname."""
    sep = b'/' if isinstance(p, bytes) else '/'
    head = p[:p.rfind(sep) + 1]
    # Strip trailing slashes unless head is all slashes (the root).
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
    """Test whether a path is a symbolic link."""
    try:
        return stat.S_ISLNK(os.lstat(path).st_mode)
    except (os.error, AttributeError):
        # Missing path, or platform without os.lstat: not a symlink.
        return False
# Being true for dangling symbolic links is also useful.
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links."""
    try:
        os.lstat(path)
    except os.error:
        return False
    else:
        return True
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file."""
    # Inlined samestat(): identical (inode, device) means the same file.
    s1, s2 = os.stat(f1), os.stat(f2)
    return s1.st_ino == s2.st_ino and s1.st_dev == s2.st_dev
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file."""
    # Inlined samestat(): identical (inode, device) means the same file.
    s1, s2 = os.fstat(fp1), os.fstat(fp2)
    return s1.st_ino == s2.st_ino and s1.st_dev == s2.st_dev
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
    """Test whether two stat buffers reference the same file."""
    # Same inode on the same device identifies a single file.
    return s1.st_ino == s2.st_ino and \
           s1.st_dev == s2.st_dev
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
    """Test whether a path is a mount point."""
    if islink(path):
        # A symlink can never be a mount point.
        return False
    parent = join(path, b'..' if isinstance(path, bytes) else '..')
    try:
        s1 = os.lstat(path)
        s2 = os.lstat(parent)
    except os.error:
        # It doesn't exist -- so not a mount point :-)
        return False
    if s1.st_dev != s2.st_dev:
        # path/.. lives on a different device than path.
        return True
    if s1.st_ino == s2.st_ino:
        # path/.. is the same i-node as path (true for filesystem roots).
        return True
    return False
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
    do nothing."""
    if isinstance(path, bytes):
        tilde = b'~'
    else:
        tilde = '~'
    if not path.startswith(tilde):
        return path
    sep = _get_sep(path)
    # i marks the end of the '~' or '~user' prefix.
    i = path.find(sep, 1)
    if i < 0:
        i = len(path)
    if i == 1:
        # Bare '~': prefer $HOME, fall back to the password database.
        if 'HOME' not in os.environ:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
        else:
            userhome = os.environ['HOME']
    else:
        # '~user': look the named user up in the password database.
        import pwd
        name = path[1:i]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        try:
            pwent = pwd.getpwnam(name)
        except KeyError:
            # Unknown user: return the path unchanged.
            return path
        userhome = pwent.pw_dir
    if isinstance(path, bytes):
        userhome = os.fsencode(userhome)
        root = b'/'
    else:
        root = '/'
    userhome = userhome.rstrip(root)
    # If the home directory is '/', rstrip leaves '', hence the `or root`.
    return (userhome + path[i:]) or root
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
_varprog = None   # lazily-compiled str pattern for $var / ${var}
_varprogb = None  # lazily-compiled bytes pattern
def expandvars(path):
    """Expand shell variables of form $var and ${var}.  Unknown variables
    are left unchanged."""
    global _varprog, _varprogb
    if isinstance(path, bytes):
        if b'$' not in path:
            return path
        if not _varprogb:
            import re
            _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprogb.search
        start = b'{'
        end = b'}'
    else:
        if '$' not in path:
            return path
        if not _varprog:
            import re
            _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprog.search
        start = '{'
        end = '}'
    i = 0
    while True:
        m = search(path, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        # Strip the braces of the ${var} form.
        if name.startswith(start) and name.endswith(end):
            name = name[1:-1]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        if name in os.environ:
            # Splice the value in and resume scanning after it.
            tail = path[j:]
            value = os.environ[name]
            if isinstance(path, bytes):
                value = value.encode('ASCII')
            path = path[:i] + value
            i = len(path)
            path += tail
        else:
            # Unknown variable: leave it in place and skip past it.
            i = j
    return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
    """Normalize path, eliminating double slashes, '.' and 'A/..' pairs."""
    if isinstance(path, bytes):
        sep, empty, dot, dotdot = b'/', b'', b'.', b'..'
    else:
        sep, empty, dot, dotdot = '/', '', '.', '..'
    if path == empty:
        return dot
    # POSIX: exactly two leading slashes are significant; one, or three or
    # more, collapse to a single slash.
    initial_slashes = 1 if path.startswith(sep) else 0
    if (initial_slashes and path.startswith(sep * 2)
            and not path.startswith(sep * 3)):
        initial_slashes = 2
    resolved = []
    for comp in path.split(sep):
        if comp in (empty, dot):
            continue
        # Keep '..' only when it cannot be cancelled against a previous
        # real component (relative prefix, or a run of leading '..').
        if (comp != dotdot or (not initial_slashes and not resolved)
                or (resolved and resolved[-1] == dotdot)):
            resolved.append(comp)
        elif resolved:
            resolved.pop()
    path = sep.join(resolved)
    if initial_slashes:
        path = sep * initial_slashes + path
    return path or dot
def abspath(path):
    """Return an absolute, normalized version of *path*."""
    if isabs(path):
        return normpath(path)
    # Relative path: anchor it at the current working directory.
    cwd = os.getcwdb() if isinstance(path, bytes) else os.getcwd()
    return normpath(join(cwd, path))
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
    symbolic links encountered in the path."""
    # Resolve from an empty prefix of the same type (str or bytes) as input.
    resolved, _ = _joinrealpath(filename[:0], filename, {})
    return abspath(resolved)
# Join two paths, normalizing and eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
    """Resolve *rest* relative to *path*, following symlinks.

    *seen* maps symlink paths to their resolved targets; a value of None
    marks a link whose resolution is still in progress, which is how
    symlink loops are detected.  Returns (resolved_path, ok) where ok is
    False if a loop forced a partial resolution.
    """
    if isinstance(path, bytes):
        sep = b'/'
        curdir = b'.'
        pardir = b'..'
    else:
        sep = '/'
        curdir = '.'
        pardir = '..'
    if isabs(rest):
        rest = rest[1:]
        path = sep
    while rest:
        name, _, rest = rest.partition(sep)
        if not name or name == curdir:
            # current dir
            continue
        if name == pardir:
            # parent dir
            if path:
                path, name = split(path)
                if name == pardir:
                    # Parent of an unresolvable '..' prefix: keep stacking.
                    path = join(path, pardir, pardir)
            else:
                path = pardir
            continue
        newpath = join(path, name)
        if not islink(newpath):
            path = newpath
            continue
        # Resolve the symbolic link
        if newpath in seen:
            # Already seen this path
            path = seen[newpath]
            if path is not None:
                # use cached value
                continue
            # The symlink is not resolved, so we must have a symlink loop.
            # Return already resolved part + rest of the path unchanged.
            return join(newpath, rest), False
        seen[newpath] = None # not resolved symlink
        path, ok = _joinrealpath(path, os.readlink(newpath), seen)
        if not ok:
            return join(path, rest), False
        seen[newpath] = path # resolved symlink
    return path, True
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=None):
    """Return a relative version of a path."""
    if not path:
        raise ValueError("no path specified")
    if isinstance(path, bytes):
        curdir, sep, pardir = b'.', b'/', b'..'
    else:
        curdir, sep, pardir = '.', '/', '..'
    if start is None:
        start = curdir
    # Compare the absolute forms, component by component.
    start_parts = [p for p in abspath(start).split(sep) if p]
    path_parts = [p for p in abspath(path).split(sep) if p]
    common = len(commonprefix([start_parts, path_parts]))
    # Climb out of the unshared part of start, then descend into path.
    hops = [pardir] * (len(start_parts) - common) + path_parts[common:]
    if not hops:
        return curdir
    return join(*hops)
|
phalt/django | refs/heads/master | django/conf/locale/es_PR/__init__.py | 12133432 | |
luoquan19/OMOOC2py | refs/heads/master | _src/om2py0w/0wex1/__init__.py | 12133432 | |
Chibuzor-IN/python-paystack | refs/heads/master | python_paystack/__init__.py | 12133432 | |
kingvuplus/italysat-enigma5 | refs/heads/master | lib/python/Plugins/SystemPlugins/PositionerSetup/__init__.py | 12133432 | |
jjo31/ATHAM-Fluidity | refs/heads/ToMerge | mayavi/mayavi_amcg/__init__.py | 12133432 | |
xq262144/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/admin_changelist/__init__.py | 12133432 | |
gaddman/ansible | refs/heads/devel | test/units/module_utils/common/__init__.py | 12133432 | |
wiktorek140/p2pool | refs/heads/master | p2pool/test/__init__.py | 12133432 | |
mtwestra/akvo-wandelenvoorwater | refs/heads/master | wvw/recaptchawidget/__init__.py | 12133432 | |
Batterfii/django | refs/heads/master | tests/generic_relations_regress/__init__.py | 12133432 | |
ErickMurillo/ciat_plataforma | refs/heads/master | comunicacion/contrapartes/forms.py | 3 | # -*- coding: UTF-8 -*-
from django.forms import ModelForm
from models import *
from django import forms
from ckeditor.widgets import CKEditorWidget
from django.contrib.auth.models import User
from comunicacion.contrapartes.widgets import ColorPickerWidget
from mapeo.models import Organizaciones
from analisis.configuracion.models import AreaAccion, SitioAccion, Plataforma
class ContraparteForms(forms.ModelForm):
    """ModelForm for Organizaciones (partner organizations).

    Rich-text fields use CKEditor widgets; several char fields attach
    Bootstrap tooltip attributes (the Spanish UI strings are intentional).
    """
    temas = forms.CharField(widget=CKEditorWidget())
    siglas = forms.CharField(widget=forms.TextInput(attrs={'rel':"tooltip", 'title':"Siglas o nombre corto"}))
    generalidades = forms.CharField(widget=CKEditorWidget())
    nombre = forms.CharField(widget=forms.TextInput(attrs={'class':'span7','rel':"tooltip", 'title':"Nombre completo de la contraparte"}))
    fundacion = forms.CharField(widget=forms.TextInput(attrs={'rel':"tooltip", 'title':"Año en que fue fundada la organización"}))
    contacto = forms.CharField(required=False,widget=forms.TextInput(attrs={'rel':"tooltip", 'title':"Nombre completo de la persona de contacto"}))
    telefono = forms.IntegerField(required=False,widget=forms.TextInput(attrs={'rel':"tooltip", 'title':"Formato ### - ######## "}))
    sitio_web = forms.URLField(required=False,widget=forms.TextInput(attrs={'rel':"tooltip", 'title':"Con este formato http://www.dominio.com "}))
    rss = forms.CharField(required=False,widget=forms.TextInput(attrs={'rel':"tooltip", 'title':"Dirección rss de contenido sindicado"}))
    font_color = forms.CharField(required=False, widget=ColorPickerWidget, label="Color")
    # Related configuracion lookups; each renders as a single-choice select.
    area_accion = forms.ModelChoiceField(queryset=AreaAccion.objects.all())
    sitio_accion = forms.ModelChoiceField(queryset=SitioAccion.objects.all())
    plataforma= forms.ModelChoiceField(queryset=Plataforma.objects.all())
    class Meta:
        model = Organizaciones
        # 'user' is set by the view, not by the form.
        exclude = ('user',)
class UserForm(ModelForm):
    """Edit the basic identity fields of the built-in auth User."""
    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email')
class UserProfileForm(ModelForm):
    """Edit only the avatar of the companion UserProfile model."""
    class Meta:
        model = UserProfile
        fields = ('avatar',)
class MensajeForm(forms.ModelForm):
    """Compose a Mensajero message; recipients via checkbox multi-select."""
    user = forms.ModelMultipleChoiceField(queryset = User.objects.order_by('username'),
                                          widget = forms.CheckboxSelectMultiple())
    class Meta:
        #widgets = {'user': forms.CheckboxSelectMultiple}
        model = Mensajero
        # 'usuario' (sender) and 'fecha' are filled in automatically.
        exclude = ('usuario','fecha')
|
annndrey/npui-unik | refs/heads/master | netprofile_xop/netprofile_xop/views.py | 3 | #!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
#
# NetProfile: XOP module - Views
# © Copyright 2014 Alex 'Unik' Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division
)
from pyramid.i18n import (
TranslationStringFactory,
get_localizer
)
from pyramid.view import view_config
from pyramid.response import Response
from pyramid.httpexceptions import HTTPForbidden
from sqlalchemy.orm.exc import NoResultFound
from netprofile.common.factory import RootFactory
from netprofile.common.hooks import register_hook
from netprofile.db.connection import DBSession
from .models import (
ExternalOperation,
ExternalOperationProvider
)
_ = TranslationStringFactory('netprofile_xop')
@register_hook('core.dpanetabs.stashes.Stash')
def _dpane_stash_futures(tabs, model, req):
	"""Append an 'External Operations' grid tab to the Stash detail pane."""
	loc = get_localizer(req)
	tabs.append({
		'title'             : loc.translate(_('External Operations')),
		'iconCls'           : 'ico-mod-externaloperation',
		'xtype'             : 'grid_xop_ExternalOperation',
		'stateId'           : None,
		'stateful'          : False,
		# The stash column is implied by the parent record; hide it and
		# pass the parent's id as an extra query parameter instead.
		'hideColumns'       : ('stash',),
		'extraParamProp'    : 'stashid',
		'createControllers' : 'NetProfile.core.controller.RelatedWizard'
	})
class ClientRootFactory(RootFactory):
	"""Traversal root for client-facing XOP URLs.

	Maps a URL segment to an enabled ExternalOperationProvider by its
	``uri`` attribute; any failure surfaces as KeyError (HTTP 404).
	"""
	def __getitem__(self, uri):
		if not self.req.user:
			raise KeyError('Not logged in')
		try:
			sess = DBSession()
			try:
				xopp = sess.query(ExternalOperationProvider).filter(
					ExternalOperationProvider.uri == uri,
					ExternalOperationProvider.enabled == True
				).one()
				# Wire the provider into the traversal graph.
				xopp.__parent__ = self
				xopp.__name__ = xopp.uri
				return xopp
			except NoResultFound:
				raise KeyError('Invalid URI')
		except ValueError:
			# Malformed input (e.g. bad type coercion in the query):
			# fall through to the generic KeyError below.
			pass
		raise KeyError('Invalid URI')
@view_config(
	route_name='xop.cl.home',
	name='',
	context=ExternalOperationProvider,
	permission='USAGE'
)
def xop_request(ctx, request):
	"""Handle a provider callback: verify access, parse and persist operations."""
	# TODO: add optional redirect-to-site?
	if not ctx.can_access(request):
		raise HTTPForbidden('Access Denied')
	# The provider's gateway must expose a callable process_request() hook.
	gw = ctx.get_gateway()
	if (not gw) or (not hasattr(gw, 'process_request')):
		raise HTTPForbidden('Access Denied')
	if not callable(gw.process_request):
		raise HTTPForbidden('Access Denied')
	try:
		sess = DBSession()
		xoplist = gw.process_request(request, sess)
	except Exception as e:
		# TODO: cancel and log?
		# NOTE(review): every gateway failure is masked as a 403 and the
		# exception is discarded; consider logging before raising.
		raise HTTPForbidden('Access Denied')
	for xop in xoplist:
		ctx.check_operation(xop)
		sess.add(xop)
	# Gateways may build a provider-specific acknowledgement response.
	if hasattr(gw, 'generate_response') and callable(gw.generate_response):
		return gw.generate_response(request, xoplist)
	resp = Response(body='OK', content_type='text/plain', charset='UTF-8')
	return resp
|
unsiloai/syntaxnet-ops-hack | refs/heads/master | tensorflow/contrib/data/python/kernel_tests/dataset_constructor_op_test.py | 22 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class DatasetConstructorTest(test.TestCase):
  def testTensorDataset(self):
    """Test a dataset that represents a single tuple of tensors."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    iterator = (dataset_ops.Dataset.from_tensors(components)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    # Static shapes of the iterator elements must match the inputs.
    self.assertEqual([c.shape for c in components],
                     [t.shape for t in get_next])
    with self.test_session() as sess:
      sess.run(init_op)
      results = sess.run(get_next)
      for component, result_component in zip(components, results):
        self.assertAllEqual(component, result_component)
      # A from_tensors dataset holds exactly one element.
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testTensorSliceDataset(self):
    """Test a dataset that represents the slices from a tuple of tensors."""
    components = (
        np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(
            np.array([[12], [13], [14], [15]]), 22),
        np.array([37.0, 38.0, 39.0, 40.0])
    )
    iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    # Element shapes drop the leading (slice) dimension of each component.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])
    with self.test_session() as sess:
      sess.run(init_op)
      # All components share a leading dimension of 4, hence 4 elements.
      for i in range(4):
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component[i], result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
def testTensorSliceDatasetWithDict(self):
components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual(dtypes.int32, iterator.output_types["foo"])
self.assertEqual(dtypes.float32, iterator.output_types["bar"])
self.assertEqual((), iterator.output_shapes["foo"])
self.assertEqual((1,), iterator.output_shapes["bar"])
with self.test_session() as sess:
sess.run(init_op)
for i in range(3):
results = sess.run(get_next)
self.assertEqual(components["foo"][i], results["foo"])
self.assertEqual(components["bar"][i], results["bar"])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseTensorSliceDataset(self):
"""Test a dataset based on slices of a `tf.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = (dataset_ops.Dataset.from_sparse_tensor_slices(st)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.test_session() as sess:
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
# Test with sparse tensor in the appropriate order.
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# pylint: disable=g-long-lambda,unnecessary-lambda
def testNestedStructure(self):
components = (np.array([1, 2, 3]), (np.array([4., 5.]), np.array([6., 7.])),
np.array([8, 9, 10]))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.shuffle(10, 10)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.repeat(-1)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.filter(lambda x, y, z: True)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.take(5)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
self.assertEquals(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)), dataset.output_types)
self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)
dataset = dataset.flat_map(
lambda x, y: dataset_ops.Dataset.from_tensors(((x[0], x[1]),
(y[0], y[1])))
)
self.assertEquals(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)), dataset.output_types)
self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)
dataset = dataset.batch(32)
self.assertEquals(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)), dataset.output_types)
self.assertEquals((([None, 3], [None, 3]), ([None, 2], [None, 2])),
nest.pack_sequence_as(dataset.output_shapes, [
s.as_list()
for s in nest.flatten(dataset.output_shapes)
]))
iterator = dataset.make_one_shot_iterator()
(w, x), (y, z) = iterator.get_next()
self.assertEquals(dtypes.int64, w.dtype)
self.assertEquals(dtypes.int64, x.dtype)
self.assertEquals(dtypes.float64, y.dtype)
self.assertEquals(dtypes.float64, z.dtype)
self.assertEquals([None, 3], w.shape.as_list())
self.assertEquals([None, 3], x.shape.as_list())
self.assertEquals([None, 2], y.shape.as_list())
self.assertEquals([None, 2], z.shape.as_list())
iterator = dataset.make_initializable_iterator()
(w, x), (y, z) = iterator.get_next()
self.assertEquals(dtypes.int64, w.dtype)
self.assertEquals(dtypes.int64, x.dtype)
self.assertEquals(dtypes.float64, y.dtype)
self.assertEquals(dtypes.float64, z.dtype)
self.assertEquals([None, 3], w.shape.as_list())
self.assertEquals([None, 3], x.shape.as_list())
self.assertEquals([None, 2], y.shape.as_list())
self.assertEquals([None, 2], z.shape.as_list())
# Define a separate set of components with matching leading
# dimension for the from-slices constructor.
components_for_slices = (np.array([1, 2, 3]), (np.array(
[4., 5., 6.]), np.array([7., 8., 9.])), np.array([10, 11, 12]))
dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([], ([], []), []), dataset.output_shapes)
def testNonSequenceNestedStructure(self):
components = np.array([1, 2, 3])
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([3], dataset.output_shapes)
dataset = dataset.filter(
lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([3], dataset.output_shapes)
dataset = dataset.map(lambda x: array_ops.stack([x, x]))
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([2, 3], dataset.output_shapes)
dataset = dataset.flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x))
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([3], dataset.output_shapes)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
self.assertEquals(dtypes.int64, get_next.dtype)
self.assertEquals([3], get_next.shape)
if __name__ == "__main__":
  # Run the test suite when this file is executed as a script.
  test.main()
|
webOS-ports/qtwebkit | refs/heads/webOS-ports/master | Tools/QueueStatusServer/handlers/releasepatch.py | 121 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp import template
from handlers.updatebase import UpdateBase
from loggers.recordpatchevent import RecordPatchEvent
from model.attachment import Attachment
from model.queues import Queue
class ReleasePatch(UpdateBase):
    """Handler that releases a queue's claim on a patch attachment.

    GET renders the manual release form; POST records the outcome and always
    frees the active-work-item lock.
    """

    def get(self):
        # Serve the HTML form used to release a patch by hand.
        self.response.out.write(template.render("templates/releasepatch.html", None))

    def post(self):
        queue_name = self.request.get("queue_name")
        # FIXME: This queue lookup should be shared between handlers.
        queue = Queue.queue_with_name(queue_name)
        if not queue:
            self.error(404)
            return

        attachment_id = self._int_from_request("attachment_id")
        attachment = Attachment(attachment_id)
        last_status = attachment.status_for_queue(queue)

        # Ideally we should use a transaction for the calls to
        # WorkItems and ActiveWorkItems.

        # A retry request keeps the item queued; anything else (including a
        # missing status, which eases testing) removes it from the queue.
        if last_status and last_status.is_retry_request():
            RecordPatchEvent.retrying(attachment_id, queue_name)
        else:
            queue.work_items().remove_work_item(attachment_id)
            RecordPatchEvent.stopped(attachment_id, queue_name)

        # Always release the lock on the item.
        queue.active_work_items().expire_item(attachment_id)
|
Peddle/hue | refs/heads/master | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Cipher/test_pkcs1_15.py | 118 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/test_pkcs1_15.py: Self-test for PKCS#1 v1.5 encryption
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import unittest
import sys
from Crypto.PublicKey import RSA
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto import Random
from Crypto.Cipher import PKCS1_v1_5 as PKCS
from Crypto.Util.py3compat import *
def rws(t):
    """Return *t* with all spaces, tabs, and newlines removed."""
    for whitespace_char in ('\n', '\t', ' '):
        t = t.replace(whitespace_char, '')
    return t
def t2b(t):
    """Convert a text string with bytes in hex form to a byte string.

    Whitespace is stripped first; the remaining hex digits must come in pairs.

    Raises:
        ValueError: if an odd number of hex characters remains.
    """
    clean = b(rws(t))
    if len(clean) % 2 == 1:
        # Removed a stray Python 2 debug statement (`print clean`) that
        # dumped the offending data to stdout before raising.
        raise ValueError("Even number of characters expected")
    return a2b_hex(clean)
class PKCS1_15_Tests(unittest.TestCase):
    """Self-tests for PKCS#1 v1.5 encryption (RSAES-PKCS1-v1_5)."""

    def setUp(self):
        # Fresh RNG and a 1024-bit throwaway key shared by the tests below.
        self.rng = Random.new().read
        self.key1024 = RSA.generate(1024, self.rng)

    # List of tuples with test data for PKCS#1 v1.5.
    # Each tuple is made up by:
    #   Item #0: dictionary with RSA key component, or key to import
    #   Item #1: plaintext
    #   Item #2: ciphertext
    #   Item #3: random data
    _testData = (

        #
        # Generated with openssl 0.9.8o
        #
        (
        # Private key
        '''-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQDAiAnvIAOvqVwJTaYzsKnefZftgtXGE2hPJppGsWl78yz9jeXY
W/FxX/gTPURArNhdnhP6n3p2ZaDIBrO2zizbgIXs0IsljTTcr4vnI8fMXzyNUOjA
zP3nzMqZDZK6757XQAobOssMkBFqRWwilT/3DsBhRpl3iMUhF+wvpTSHewIDAQAB
AoGAC4HV/inOrpgTvSab8Wj0riyZgQOZ3U3ZpSlsfR8ra9Ib9Uee3jCYnKscu6Gk
y6zI/cdt8EPJ4PuwAWSNJzbpbVaDvUq25OD+CX8/uRT08yBS4J8TzBitZJTD4lS7
atdTnKT0Wmwk+u8tDbhvMKwnUHdJLcuIsycts9rwJVapUtkCQQDvDpx2JMun0YKG
uUttjmL8oJ3U0m3ZvMdVwBecA0eebZb1l2J5PvI3EJD97eKe91Nsw8T3lwpoN40k
IocSVDklAkEAzi1HLHE6EzVPOe5+Y0kGvrIYRRhncOb72vCvBZvD6wLZpQgqo6c4
d3XHFBBQWA6xcvQb5w+VVEJZzw64y25sHwJBAMYReRl6SzL0qA0wIYrYWrOt8JeQ
8mthulcWHXmqTgC6FEXP9Es5GD7/fuKl4wqLKZgIbH4nqvvGay7xXLCXD/ECQH9a
1JYNMtRen5unSAbIOxRcKkWz92F0LKpm9ZW/S9vFHO+mBcClMGoKJHiuQxLBsLbT
NtEZfSJZAeS2sUtn3/0CQDb2M2zNBTF8LlM0nxmh0k9VGm5TVIyBEMcipmvOgqIs
HKukWBcq9f/UOmS0oEhai/6g+Uf7VHJdWaeO5LzuvwU=
-----END RSA PRIVATE KEY-----''',
        # Plaintext
        '''THIS IS PLAINTEXT\x0A''',
        # Ciphertext
        '''3f dc fd 3c cd 5c 9b 12 af 65 32 e3 f7 d0 da 36
        8f 8f d9 e3 13 1c 7f c8 b3 f9 c1 08 e4 eb 79 9c
        91 89 1f 96 3b 94 77 61 99 a4 b1 ee 5d e6 17 c9
        5d 0a b5 63 52 0a eb 00 45 38 2a fb b0 71 3d 11
        f7 a1 9e a7 69 b3 af 61 c0 bb 04 5b 5d 4b 27 44
        1f 5b 97 89 ba 6a 08 95 ee 4f a2 eb 56 64 e5 0f
        da 7c f9 9a 61 61 06 62 ed a0 bc 5f aa 6c 31 78
        70 28 1a bb 98 3c e3 6a 60 3c d1 0b 0f 5a f4 75''',
        # Random data
        '''eb d7 7d 86 a4 35 23 a3 54 7e 02 0b 42 1d
        61 6c af 67 b8 4e 17 56 80 66 36 04 64 34 26 8a
        47 dd 44 b3 1a b2 17 60 f4 91 2e e2 b5 95 64 cc
        f9 da c8 70 94 54 86 4c ef 5b 08 7d 18 c4 ab 8d
        04 06 33 8f ca 15 5f 52 60 8a a1 0c f5 08 b5 4c
        bb 99 b8 94 25 04 9c e6 01 75 e6 f9 63 7a 65 61
        13 8a a7 47 77 81 ae 0d b8 2c 4d 50 a5'''
        ),
    )

    def testEncrypt1(self):
        # Known-answer test: encryption with a fixed "random" padding stream
        # must reproduce the openssl-generated ciphertext exactly.
        for test in self._testData:
            # Build the key
            key = RSA.importKey(test[0])

            # RNG that takes its random numbers from a pool given
            # at initialization
            class randGen:
                def __init__(self, data):
                    self.data = data
                    self.idx = 0
                def __call__(self, N):
                    # NOTE(review): slice is [idx:N], not [idx:idx+N]; this is
                    # only correct for the first call — confirm intent.
                    r = self.data[self.idx:N]
                    self.idx += N
                    return r

            # The real test
            key._randfunc = randGen(t2b(test[3]))
            cipher = PKCS.new(key)
            ct = cipher.encrypt(b(test[1]))
            self.assertEqual(ct, t2b(test[2]))

    def testEncrypt2(self):
        # Verify that encryption fail if plaintext is too long
        # (PKCS#1 v1.5 needs at least 11 bytes of overhead per RSA block).
        pt = '\x00'*(128-11+1)
        cipher = PKCS.new(self.key1024)
        self.assertRaises(ValueError, cipher.encrypt, pt)

    def testVerify1(self):
        # Decryption of the known ciphertext must recover the plaintext.
        for test in self._testData:
            # Build the key
            key = RSA.importKey(test[0])
            # The real test
            cipher = PKCS.new(key)
            pt = cipher.decrypt(t2b(test[2]), "---")
            self.assertEqual(pt, b(test[1]))

    def testVerify2(self):
        # Verify that decryption fails if ciphertext is not as long as
        # RSA modulus
        cipher = PKCS.new(self.key1024)
        self.assertRaises(ValueError, cipher.decrypt, '\x00'*127, "---")
        self.assertRaises(ValueError, cipher.decrypt, '\x00'*129, "---")

        # Verify that decryption fails if there are less then 8 non-zero padding
        # bytes
        pt = b('\x00\x02' + '\xFF'*7 + '\x00' + '\x45'*118)
        ct = self.key1024.encrypt(pt, 0)[0]
        ct = b('\x00'*(128-len(ct))) + ct
        # Malformed padding must yield the caller-supplied sentinel ("---").
        self.assertEqual("---", cipher.decrypt(ct, "---"))

    def testEncryptVerify1(self):
        # Encrypt/Verify messages of length [0..RSAlen-11]
        # and therefore padding [8..117]
        for pt_len in xrange(0,128-11+1):
            pt = self.rng(pt_len)
            cipher = PKCS.new(self.key1024)
            ct = cipher.encrypt(pt)
            pt2 = cipher.decrypt(ct, "---")
            self.assertEqual(pt,pt2)
def get_tests(config={}):
    """Return the list of PKCS#1 v1.5 self-test cases."""
    return list(list_test_cases(PKCS1_15_Tests))
if __name__ == '__main__':
    # PEP 8 (E731): use a def rather than assigning a lambda to a name;
    # unittest.main resolves 'suite' by name in this module either way.
    def suite():
        """Build the default test suite."""
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4 expandtab:
|
studio666/gnuradio | refs/heads/master | gr-digital/python/digital/qa_probe_density.py | 57 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
class test_probe_density(gr_unittest.TestCase):
    """QA tests for the digital.probe_density_b block."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        # Drop the top block so each test starts from a clean flowgraph.
        self.tb = None

    def test_001(self):
        # alpha = 1: the estimate has no memory, so density is the last input.
        src_data = [0, 1, 0, 1]
        expected_data = 1
        src = blocks.vector_source_b(src_data)
        op = digital.probe_density_b(1)
        self.tb.connect(src, op)
        self.tb.run()

        result_data = op.density()
        self.assertEqual(expected_data, result_data)

    def test_002(self):
        # All-ones input: density stays at 1 regardless of alpha.
        src_data = [1, 1, 1, 1]
        expected_data = 1
        src = blocks.vector_source_b(src_data)
        op = digital.probe_density_b(0.01)
        self.tb.connect(src, op)
        self.tb.run()

        result_data = op.density()
        self.assertEqual(expected_data, result_data)

    def test_003(self):
        # Alternating input with small alpha converges slowly from 1.0.
        src_data = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
        expected_data = 0.95243
        src = blocks.vector_source_b(src_data)
        op = digital.probe_density_b(0.01)
        self.tb.connect(src, op)
        self.tb.run()

        result_data = op.density()
        # Removed stray Python 2 debug statement `print result_data`.
        self.assertAlmostEqual(expected_data, result_data, 5)
if __name__ == '__main__':
    # Run the QA suite, writing JUnit-style results to the named XML file.
    gr_unittest.run(test_probe_density, "test_probe_density.xml")
|
nfqsolutions/pylm | refs/heads/master | examples/template/master.py | 1 | from pylm.parts.servers import ServerTemplate
from pylm.parts.services import PullService, PubService, WorkerPullService, WorkerPushService, \
CacheService
# Assemble a pylm master server from reusable parts.
server = ServerTemplate()

# ZeroMQ endpoints: cache DB, client-facing pull/pub sockets, and the
# worker-facing pull/push sockets.
db_address = 'tcp://127.0.0.1:5559'
pull_address = 'tcp://127.0.0.1:5555'
pub_address = 'tcp://127.0.0.1:5556'
worker_pull_address = 'tcp://127.0.0.1:5557'
worker_push_address = 'tcp://127.0.0.1:5558'

# Message routing: client Pull -> WorkerPush -> (workers) -> WorkerPull -> Pub.
server.register_inbound(PullService, 'Pull', pull_address, route='WorkerPush')
server.register_inbound(WorkerPullService, 'WorkerPull', worker_pull_address, route='Pub')
server.register_outbound(WorkerPushService, 'WorkerPush', worker_push_address)
server.register_outbound(PubService, 'Pub', pub_address)
server.register_bypass(CacheService, 'Cache', db_address)

# Publish the endpoint configuration in the cache under the name 'server'.
server.preset_cache(name='server',
                    db_address=db_address,
                    pull_address=pull_address,
                    pub_address=pub_address,
                    worker_pull_address=worker_pull_address,
                    worker_push_address=worker_push_address)

if __name__ == '__main__':
    server.start()
|
cvegaj/ElectriCERT | refs/heads/master | venv3/lib/python3.6/site-packages/pycoin/wallet/__init__.py | 25 |
# A wallet is a DB of Spendable objects, and a way to query and manage them.
# The wallet accepts pycoinnet bitcoin events (_blockchain_update and _mempool_tx)
# and decides for itself which Spendable objects it's interested in.
# Ideally, it should keep an archive of all Spendable objects it's ever received so it can
# handle as many _blockchain_update rollbacks as it would like to.
# It should also keep a DB of all the transactions it's ever created, so it can watch for
# morphed versions (modulo malleability).
|
miguelparaiso/OdooAccessible | refs/heads/master | addons/warning/__openerp__.py | 261 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: metadata read by the module loader,
# not imported as regular Python code.
{
    'name': 'Warning Messages and Alerts',
    'version': '1.0',
    'category': 'Tools',
    'description': """
Module to trigger warnings in OpenERP objects.
==============================================
Warning messages can be displayed for objects like sale order, purchase order,
picking and invoice. The message is triggered by the form's onchange event.
""",
    'author': 'OpenERP SA',
    'depends': ['base', 'sale_stock', 'purchase'],
    'data': ['warning_view.xml'],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
MwanzanFelipe/rockletonfortune | refs/heads/master | lib/django/contrib/gis/geoip/libgeoip.py | 479 | import os
from ctypes import CDLL
from ctypes.util import find_library
from django.conf import settings
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {key: getattr(settings, key)
                  for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY')
                  if hasattr(settings, key)}
lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH')

# The shared library for the GeoIP C API. May be downloaded
# from http://www.maxmind.com/download/geoip/api/c/
if lib_path:
    # An explicit path was configured, so no name-based lookup is needed.
    lib_name = None
else:
    # TODO: Is this really the library name for Windows?
    lib_name = 'GeoIP'

# Getting the path to the GeoIP library.
if lib_name:
    lib_path = find_library(lib_name)
if lib_path is None:
    # Fail fast at import time: GeoIP support is unusable without the C library.
    raise RuntimeError('Could not find the GeoIP library (tried "%s"). '
                       'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name)
lgeoip = CDLL(lib_path)

# Getting the C `free` for the platform.
if os.name == 'nt':
    libc = CDLL('msvcrt')
else:
    libc = CDLL(None)
free = libc.free
|
ncss-tech/ssurgoOnDemand | refs/heads/master | SOD/SDA_hydric.py | 1 | #-------------------------------------------------------------------------------
#
#Chad Ferguson
#USDA-NRCS
#Soil Survey Division
#Mid-Atlantic and Caribbean Area Regional Office
#Raleigh, NC
#
# Created: 31/03/2015
#
#This tool grabs interprtations from Soil Data Access and aggregates based on user specified method.
#It is designed to be used as a BATCH tool
#Soil Data Access SQL code is from Jason Nemecek
#SOAP request code is from Steve Peaslee's SSURGO Download Tool - Downlaod By Map's validation class
#
#
#
#
#
#-------------------------------------------------------------------------------
class ForceExit(Exception):
    """Exception raised to abort tool execution deliberately."""
def AddMsgAndPrint(msg, severity=0):
    """Send a message to the geoprocessor at the given severity.

    severity: 0 = message, 1 = warning, 2 = error.
    Delivery is best-effort: failures (e.g. when arcpy is unavailable)
    are deliberately ignored.
    """
    try:
        # Split on \n first, so that a multi-line message gets one
        # GPMessage per line.
        for string in msg.split('\n'):
            # Add a geoprocessing message (in case this is run as a tool)
            if severity == 0:
                arcpy.AddMessage(string)
            elif severity == 1:
                arcpy.AddWarning(string)
            elif severity == 2:
                arcpy.AddError(string)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are not swallowed; message delivery stays best-effort.
        pass
def errorMsg():
    """Report the current exception (traceback, type, value) as a tool error.

    Bug fix: the original read the long-deprecated globals `sys.exc_type` /
    `sys.exc_value` (removed in Python 3 and not thread-safe in Python 2);
    `sys.exc_info()` is the supported equivalent.
    """
    try:
        exc_type, exc_value, tb = sys.exc_info()
        tbinfo = traceback.format_tb(tb)[0]
        theMsg = tbinfo + " \n" + str(exc_type) + ": " + str(exc_value)
        AddMsgAndPrint(theMsg, 2)
    except Exception:
        AddMsgAndPrint("Unhandled error in errorMsg method", 2)
def CreateNewTable(newTable, columnNames, columnInfo):
    """Create a new in-memory table matching SDA column metadata.

    columnNames and columnInfo come from the Attribute query JSON string.
    MUKEY would normally be included in the list, but it should already exist
    in the output featureclass.

    Returns the path of the in-memory table, or False on error.
    """
    try:
        # Dictionary: SQL Server type -> FGDB field type.
        dType = dict()
        dType["int"] = "long"
        dType["smallint"] = "short"
        dType["bit"] = "short"
        dType["varbinary"] = "blob"
        dType["nvarchar"] = "text"
        dType["varchar"] = "text"
        dType["char"] = "text"
        dType["datetime"] = "date"
        dType["datetime2"] = "date"
        dType["smalldatetime"] = "date"
        dType["decimal"] = "double"
        dType["float"] = "double"
        # NOTE(review): the original assigned dType["numeric"] three times
        # ("double", "double", then "float"); only the last assignment took
        # effect, so "float" is kept here. Confirm whether "double" was
        # intended (SQL 'numeric' is equivalent to 'decimal').
        dType["numeric"] = "float"  # 4 bytes
        dType["real"] = "double"    # 8 bytes

        # columnInfo entries contain:
        # ColumnOrdinal, ColumnSize, NumericPrecision, NumericScale,
        # ProviderType, IsLong, ProviderSpecificDataType, DataTypeName
        outputTbl = os.path.join("IN_MEMORY", os.path.basename(newTable))
        arcpy.CreateTable_management(os.path.dirname(outputTbl), os.path.basename(outputTbl))

        # Iterate through list of field names and add them to the output table.
        for i, fldName in enumerate(columnNames):
            vals = columnInfo[i].split(",")
            length = int(vals[1].split("=")[1])
            precision = int(vals[2].split("=")[1])
            scale = int(vals[3].split("=")[1])
            dataType = dType[vals[4].lower().split("=")[1]]

            if fldName.lower().endswith("key"):
                # Per SSURGO standards, key fields should be string. They come
                # from Soil Data Access as long integer.
                dataType = 'text'
                length = 30

            arcpy.AddField_management(outputTbl, fldName, dataType, precision, scale, length)

        return outputTbl

    except Exception:
        errorMsg()
        return False
def getHyd(areaSym):
    """Query Soil Data Access for a map-unit hydric-rating summary.

    areaSym: comma-separated, quoted AREASYMBOL list, e.g. "'NC001','NC002'".

    Returns a 3-tuple (success, data, message):
      success -- True when the response contains a "Table" key
      data    -- parsed JSON dict from SDA, or None on failure
      message -- human-readable status/error text

    Bug fixes relative to the original:
      * the final exception handler returned (False, Msg, None), swapping the
        data/message slots that every caller unpacks as (logic, data, msg);
      * error messages referenced the undefined names `state` and `eSSA`
        instead of the `areaSym` parameter;
      * dropped a no-op `hydQry.replace(">", ">")...` artifact.
    """
    import socket
    from urllib2 import HTTPError, URLError

    try:
        hydQry = \
        """SELECT
        AREASYMBOL,
        MUSYM,
        MUNAME,
        mu.mukey/1 AS MUKEY,
        (SELECT TOP 1 COUNT_BIG(*)
        FROM mapunit
        INNER JOIN component ON component.mukey=mapunit.mukey AND mapunit.mukey = mu.mukey) AS comp_count,
        (SELECT TOP 1 COUNT_BIG(*)
        FROM mapunit
        INNER JOIN component ON component.mukey=mapunit.mukey AND mapunit.mukey = mu.mukey
        AND majcompflag = 'Yes') AS count_maj_comp,
        (SELECT TOP 1 COUNT_BIG(*)
        FROM mapunit
        INNER JOIN component ON component.mukey=mapunit.mukey AND mapunit.mukey = mu.mukey
        AND hydricrating = 'Yes' ) AS all_hydric,
        (SELECT TOP 1 COUNT_BIG(*)
        FROM mapunit
        INNER JOIN component ON component.mukey=mapunit.mukey AND mapunit.mukey = mu.mukey
        AND majcompflag = 'Yes' AND hydricrating = 'Yes') AS maj_hydric,
        (SELECT TOP 1 COUNT_BIG(*)
        FROM mapunit
        INNER JOIN component ON component.mukey=mapunit.mukey AND mapunit.mukey = mu.mukey
        AND majcompflag = 'Yes' AND hydricrating != 'Yes') AS maj_not_hydric,
        (SELECT TOP 1 COUNT_BIG(*)
        FROM mapunit
        INNER JOIN component ON component.mukey=mapunit.mukey AND mapunit.mukey = mu.mukey
        AND majcompflag != 'Yes' AND hydricrating = 'Yes' ) AS hydric_inclusions,
        (SELECT TOP 1 COUNT_BIG(*)
        FROM mapunit
        INNER JOIN component ON component.mukey=mapunit.mukey AND mapunit.mukey = mu.mukey
        AND hydricrating != 'Yes') AS all_not_hydric,
        (SELECT TOP 1 COUNT_BIG(*)
        FROM mapunit
        INNER JOIN component ON component.mukey=mapunit.mukey AND mapunit.mukey = mu.mukey
        AND hydricrating IS NULL ) AS hydric_null
        INTO #main_query
        FROM legend AS l
        INNER JOIN mapunit AS mu ON mu.lkey = l.lkey AND l.areasymbol IN (""" + areaSym + """)

        SELECT AREASYMBOL, MUKEY, MUSYM, MUNAME,
        CASE WHEN comp_count = all_not_hydric + hydric_null THEN 'Nonhydric'
        WHEN comp_count = all_hydric THEN 'Hydric'
        WHEN comp_count != all_hydric AND count_maj_comp = maj_hydric THEN 'Predominantly Hydric'
        WHEN hydric_inclusions >= 0.5 AND maj_hydric < 0.5 THEN 'Predominantly Nonhydric'
        WHEN maj_not_hydric >= 0.5 AND maj_hydric >= 0.5 THEN 'Partially Hydric' ELSE 'Error' END AS HYDRIC_RATING
        FROM #main_query"""

        url = r'https://SDMDataAccess.sc.egov.usda.gov/Tabular/post.rest'

        # Create request using JSON, return data as JSON.
        request = {}
        request["format"] = "JSON+COLUMNNAME+METADATA"
        request["query"] = hydQry
        data = json.dumps(request)

        # Send request to the SDA Tabular service; passing "data" makes this
        # a POST request, not a GET.
        req = urllib2.Request(url, data)
        response = urllib2.urlopen(req)

        code = response.getcode()
        cResponse = bhrh.responses.get(code)
        cResponse = "{}; {}".format(cResponse[0], cResponse[1])

        # Read query results and parse the returned JSON into a dict.
        qResults = response.read()
        qData = json.loads(qResults)

        del qResults, response, req

        # A "Table" key means the query returned rows.
        if "Table" in qData:
            return True, qData, 'OK'
        else:
            return False, None, 'Failed'

    except socket.timeout:
        return False, None, 'Soil Data Access timeout error'

    except socket.error as e:
        return False, None, areaSym + " = " + str(e)

    except HTTPError as e:
        return False, None, areaSym + " = " + str(e)

    except URLError as e:
        return False, None, areaSym + " = " + str(e)

    except:
        errorMsg()
        return False, None, 'Unknown error collecting interpretations for ' + areaSym
#===============================================================================
import arcpy, sys, os, traceback, time, httplib, urllib2, json
from BaseHTTPServer import BaseHTTPRequestHandler as bhrh
arcpy.env.overwriteOutput = True
AddMsgAndPrint('\n \n')
areaParam = arcpy.GetParameterAsText(1)
WS = arcpy.GetParameterAsText(2)
jLayer = arcpy.GetParameterAsText(3)
#arcpy.AddMessage(nullParam)
srcDir = os.path.dirname(sys.argv[0])
try:
areaList = areaParam.split(";")
states = list(set([s[:2] for s in areaList]))
states.sort()
failHyd = list()
jobCnt = len(states)
n=0
arcpy.SetProgressor('step', 'Starting Parent Material Group Name Tool...', 0, jobCnt, 1)
tblName = "SOD_hydricrating"
# for eSSA in areaList:
for state in states:
p = [x for x in areaList if x[:2] == state]
theReq = ",".join(map("'{0}'".format, p))
n+=1
arcpy.SetProgressorLabel('Collecting parent material table for: ' + state + " (" + str(n) + ' of ' + str(jobCnt) + ')')
#send the request
hydLogic, hydData, hydMsg = getHyd(theReq)
#if it was successful...
if hydLogic:
if len(hydData) == 0:
AddMsgAndPrint('No records returned for ' + state, 1)
failHyd.append(state)
arcpy.SetProgressorPosition()
else:
AddMsgAndPrint('Response for hydric summary request on ' + state + ' = ' + hydMsg)
hydRes = hydData["Table"]
if not arcpy.Exists(WS + os.sep + tblName):
columnNames = hydRes.pop(0)
columnInfo = hydRes.pop(0)
newTable = CreateNewTable(tblName, columnNames, columnInfo)
with arcpy.da.InsertCursor(newTable, columnNames) as cursor:
for row in hydRes:
cursor.insertRow(row)
# convert from in-memory to table on disk
arcpy.conversion.TableToTable(newTable, WS, tblName)
arcpy.SetProgressorPosition()
else:
columnNames = hydRes.pop(0)
columnInfo = hydRes.pop(0)
with arcpy.da.InsertCursor(WS + os.sep + tblName, columnNames) as cursor:
for row in hydRes:
cursor.insertRow(row)
arcpy.SetProgressorPosition()
#if it was unsuccessful...
else:
#try again
hydLogic, hydData, hydMsg = getHyd(theReq)
#if 2nd run was successful
if hydLogic:
if len(hydData) == 0:
AddMsgAndPrint('No records returned for ' + state , 1)
failHyd.append(state)
arcpy.SetProgressorPosition()
else:
AddMsgAndPrint('Response for hydric summary request on ' + state + ' = ' + hydMsg + ' - 2nd attempt')
hydRes = hydData["Table"]
if not arcpy.Exists(WS + os.sep + tblName):
columnNames = hydRes.pop(0)
columnInfo = hydRes.pop(0)
newTable = CreateNewTable(tblName, columnNames, columnInfo)
with arcpy.da.InsertCursor(newTable, columnNames) as cursor:
for row in hydRes:
cursor.insertRow(row)
# convert from in-memory to table on disk
arcpy.conversion.TableToTable(newTable, WS, tblName)
arcpy.SetProgressorPosition()
else:
columnNames = hydRes.pop(0)
columnInfo = hydRes.pop(0)
with arcpy.da.InsertCursor(WS + os.sep + tblName, columnNames) as cursor:
for row in hydRes:
cursor.insertRow(row)
arcpy.SetProgressorPosition()
#if 2nd run was unsuccessful, that's it
else:
AddMsgAndPrint('Response for hydric summary request on ' + state + ' = ' + hydMsg + ' - 2nd attempt')
failHyd.append(state)
arcpy.SetProgressorPosition()
arcpy.AddMessage('\n')
##########################################################################################################
if jLayer != "":
try:
mxd = arcpy.mapping.MapDocument("CURRENT")
df = mxd.activeDataFrame
objLyr = arcpy.mapping.ListLayers(mxd, jLayer, df)
refLyr = objLyr[0]
desc = arcpy.Describe(jLayer)
dType = desc.dataType.upper()
path = desc.catalogPath
bName = desc.baseName
flds = [x.name for x in desc.fields]
if not "MUKEY" in flds:
arcpy.env.addOutputsToMap = True
AddMsgAndPrint('\n \nReloading ' + jLayer + ' due to existing join')
if dType == 'RASTERLAYER':
arcpy.mapping.RemoveLayer(df, refLyr)
arcpy.MakeRasterLayer_management(path, bName)
arcpy.management.AddJoin(bName, "MUKEY", os.path.join(WS,tblName), "MUKEY")
AddMsgAndPrint('\n \nAdded join to ' + jLayer)
elif dType == 'FEATURELAYER':
arcpy.mapping.RemoveLayer(df, refLyr)
arcpy.MakeFeatureLayer_management(path, bName)
arcpy.management.AddJoin(bName, "MUKEY", os.path.join(WS,tblName), "MUKEY")
AddMsgAndPrint('\n \nAdded join to ' + jLayer)
else:
arcpy.management.AddJoin(jLayer, "MUKEY", os.path.join(WS,tblName), "MUKEY")
AddMsgAndPrint('\n \nAdded join to ' + jLayer)
except:
errorMsg()
AddMsgAndPrint('\n \nUnable to make join to ' + jLayer)
if len(failHyd) > 0:
AddMsgAndPrint('\n \nThe following interpretations either failed or collected no records:', 1)
for f in failHyd:
AddMsgAndPrint(f)
AddMsgAndPrint('\n \n')
except:
errorMsg()
|
lunafeng/django | refs/heads/master | django/template/smartif.py | 275 | """
Parser and utilities for the smart 'if' tag
"""
import warnings
from django.utils.deprecation import RemovedInDjango110Warning
# Using a simple top down parser, as described here:
# http://effbot.org/zone/simple-top-down-parsing.htm.
# 'led' = left denotation
# 'nud' = null denotation
# 'bp' = binding power (left = lbp, right = rbp)
class TokenBase(object):
    """
    Common base for operator and literal tokens in the smart-if parser.

    Provides debugging helpers and default prefix/infix handlers that
    raise a syntax error, so subclasses only override what they support.
    """
    id = None              # node/token type name
    value = None           # payload, used by literals
    first = second = None  # children when this token is a tree node

    def nud(self, parser):
        """Null denotation: handle this token in prefix position."""
        raise parser.error_class(
            "Not expecting '%s' in this position in if tag." % self.id
        )

    def led(self, left, parser):
        """Left denotation: handle this token in infix position."""
        raise parser.error_class(
            "Not expecting '%s' as infix operator in if tag." % self.id
        )

    def display(self):
        """Human-readable name shown in error messages for this node."""
        return self.id

    def __repr__(self):
        parts = (self.id, self.first, self.second)
        shown = [str(part) for part in parts if part is not None]
        return "(%s)" % " ".join(shown)
def infix(bp, func):
    """
    Build an infix operator token class.

    ``bp`` is the binding power (left and right), and ``func`` is the
    callable ``func(context, left_node, right_node)`` that evaluates the
    resulting tree node.
    """
    class Operator(TokenBase):
        lbp = bp

        def led(self, left, parser):
            # Consume the right-hand operand at our own binding power.
            self.first = left
            self.second = parser.expression(bp)
            return self

        def eval(self, context):
            try:
                return func(context, self.first, self.second)
            except Exception:
                # Template rendering must never propagate exceptions.
                # Typical case: {% if foo in bar %} where 'bar' does not
                # support 'in' — treat the condition as False instead.
                return False

    return Operator
def prefix(bp, func):
    """
    Build a prefix (unary) operator token class.

    ``bp`` is the binding power and ``func`` is the callable
    ``func(context, operand_node)`` that evaluates the node.
    """
    class Operator(TokenBase):
        lbp = bp

        def nud(self, parser):
            # Consume the single operand that follows us.
            self.first = parser.expression(bp)
            self.second = None
            return self

        def eval(self, context):
            try:
                return func(context, self.first)
            except Exception:
                # Same policy as infix(): swallow evaluation errors so the
                # template keeps rendering; the condition is just False.
                return False

    return Operator
# Operator precedence follows Python: 'or' binds loosest, comparisons
# tightest. NB - we can get slightly more accurate syntax error messages by
# not using the same object for '==' and '='.
# We defer variable evaluation to the lambda to ensure that terms are
# lazily evaluated using Python's boolean parsing logic (short-circuiting
# 'or'/'and' only evaluate the right side when needed).
OPERATORS = {
    'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)),
    'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)),
    'not': prefix(8, lambda context, x: not x.eval(context)),
    'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)),
    'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)),
    # This should be removed in Django 1.10 ('=' is a deprecated alias of '=='):
    '=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
    '==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
    '!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)),
    '>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)),
    '>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)),
    '<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)),
    '<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)),
}

# Assign 'id' to each operator class so error messages and repr() can name it:
for key, op in OPERATORS.items():
    op.id = key
class Literal(TokenBase):
    """
    A basic self-resolvable object similar to a Django template variable.

    IfParser uses Literal in create_var, but TemplateIfParser overrides
    create_var so that a proper implementation that actually resolves
    variables, filters etc is used.
    """
    id = "literal"
    lbp = 0

    def __init__(self, value):
        self.value = value

    def display(self):
        return repr(self.value)

    def nud(self, parser):
        # A literal in prefix position is simply itself.
        return self

    def eval(self, context):
        # Resolution is trivial here: the stored value is the result.
        return self.value

    def __repr__(self):
        return "(%s %r)" % (self.id, self.value)
class EndToken(TokenBase):
    # Sentinel marking the end of the token stream; binding power 0 stops
    # the expression() loop.
    lbp = 0

    def nud(self, parser):
        # Reaching the end in prefix position means the expression was cut
        # short (e.g. a trailing operator).
        raise parser.error_class("Unexpected end of expression in if tag.")

# Replace the class with a singleton instance: only one end marker is ever
# needed, and IfParser.parse() compares against it with 'is'.
EndToken = EndToken()
class IfParser(object):
    """
    Top-down (Pratt) parser for smart-if expressions.

    Takes a flat list of raw tokens, translates them into operator /
    literal token objects, and builds an evaluation tree via parse().
    """
    error_class = ValueError

    def __init__(self, tokens):
        # Pre-pass: collapse the two-token sequence 'not', 'in' into the
        # single 'not in' operator before translation.
        num_tokens = len(tokens)
        mapped_tokens = []
        pos = 0
        while pos < num_tokens:
            token = tokens[pos]
            if token == "not" and pos + 1 < num_tokens and tokens[pos + 1] == "in":
                token = "not in"
                pos += 1  # skip the following 'in'
            mapped_tokens.append(self.translate_token(token))
            pos += 1
        self.tokens = mapped_tokens
        self.pos = 0
        self.current_token = self.next_token()

    def translate_token(self, token):
        """Map a raw token to an operator instance, or wrap it as a variable."""
        try:
            op = OPERATORS[token]
        except (KeyError, TypeError):
            # Not an operator name (or unhashable) — treat it as a value.
            return self.create_var(token)
        if token == '=':
            warnings.warn(
                "Operator '=' is deprecated and will be removed in Django 1.10. Use '==' instead.",
                RemovedInDjango110Warning, stacklevel=2
            )
        return op()

    def next_token(self):
        """Return the next token object, or the EndToken sentinel."""
        if self.pos >= len(self.tokens):
            return EndToken
        retval = self.tokens[self.pos]
        self.pos += 1
        return retval

    def parse(self):
        """Parse the whole token stream and return the root tree node."""
        retval = self.expression()
        # Every token must have been consumed; leftovers are a syntax error.
        if self.current_token is not EndToken:
            raise self.error_class("Unused '%s' at end of if expression." %
                                   self.current_token.display())
        return retval

    def expression(self, rbp=0):
        """Standard Pratt expression loop bounded by right binding power."""
        token = self.current_token
        self.current_token = self.next_token()
        left = token.nud(self)
        while rbp < self.current_token.lbp:
            token = self.current_token
            self.current_token = self.next_token()
            left = token.led(left, self)
        return left

    def create_var(self, value):
        # Subclasses (TemplateIfParser) override this to resolve real
        # template variables and filters.
        return Literal(value)
|
Alwnikrotikz/smap-data | refs/heads/master | python/smap/csvloader.py | 6 | """
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
# this is the twisted event loop
from twisted.internet import reactor
# use this to get a smap source in one line
import loader
# autoflush means that we don't call flush on a timer
s = loader.load('default.ini', autoflush=None)
CHUNKSIZE=1000
i = 0
def fail(err):
    """Errback: a report delivery failed; log it and stop the event loop."""
    print "Received error while delivering reports"
    reactor.stop()
def do_add(*args):
    """
    Publish one chunk of CHUNKSIZE readings to /sensor0 and flush.

    Re-schedules itself as the flush deferred's callback, so chunks are
    sent back-to-back until the chunk counter `i` exceeds 10000, at which
    point the reactor is stopped. Extra callback args are ignored.
    """
    global i
    global CHUNKSIZE
    if i > 10000:
        reactor.stop()
    else:
        # publish a bunch of data (monotonically increasing values so each
        # chunk covers a distinct range)
        for v in xrange(i*CHUNKSIZE, i*CHUNKSIZE+CHUNKSIZE):
            s.get_timeseries('/sensor0')._add(0, v)
        i += 1
        print "flush", CHUNKSIZE
        # then flush. we'll get a callback once we've sent it to all
        # of the destinations
        d = s.reports._flush()
        d.addCallback(do_add)
        d.addErrback(fail)
reactor.callFromThread(do_add)
reactor.run()
|
pavlobaron/disco_playground | refs/heads/master | tests/test_protocol.py | 2 | from datetime import datetime
from disco.error import JobError
from disco.test import TestCase, TestJob
class ProtocolJob(TestJob):
    """Plain TestJob; each test case supplies its own map function."""
    pass
class ProtocolTestCase(TestCase):
    """Exercises the disco worker stderr protocol (MSG / FATAL lines)."""

    def serve(self, path):
        # Payload handed to every map task by the test HTTP server.
        return 'data\n' * 5

    def new_job(self, **kwargs):
        # One-input job against the test server; map fn comes via kwargs.
        return ProtocolJob().run(input=self.test_server.urls([1]), **kwargs)

    def test_single_line_error(self):
        # A FATAL protocol line must abort the job with JobError.
        def map(e, params):
            import sys, disco.json
            msg = disco.json.dumps("Single line error!")
            sys.stderr.write('FATAL %d %s\n' % (len(msg), msg))
        self.job = self.new_job(map=map)
        self.assertRaises(JobError, self.job.wait)

    def test_single_line_message(self):
        # A MSG protocol line is informational; the job still succeeds.
        def map(e, params):
            import sys, disco.json
            msg = disco.json.dumps("Single line message")
            sys.stderr.write('MSG %d %s\n' % (len(msg), msg))
            return []
        self.job = self.new_job(map=map)
        self.assertResults(self.job, [])

    def test_binary_message(self):
        # Raw binary bytes on stdout must not break the protocol.
        def map(e, params):
            # NOTE(review): only the first literal is printed; the second
            # line is a standalone string expression (a no-op). Confirm
            # whether a line continuation was intended in the original.
            print '\x00\x001\xc9D\x8b-\xa0\x99 \x00\xba\xc0\xe5`\x00H\x89'
            '\xdeH\x8b=\x81\x99 \x00\xe8\x04\xeb\xff\xff\x85\xc0\x0f\x88l'
            return []
        self.job = self.new_job(map=map)
        self.assertResults(self.job, [])

    def test_utf8_message(self):
        # A UTF-8 message must survive into the job's event log verbatim.
        def has_valid_event(events):
            msg = u'\xc4\xe4rett\xf6myys'
            for n, e in events:
                if msg in e[2]:
                    return True
        def map(e, params):
            print u'\xc4\xe4rett\xf6myys'
            return []
        self.job = self.new_job(map=map)
        self.assertResults(self.job, [])
        self.assertTrue(has_valid_event(self.job.events()))

    def test_non_utf8_message(self):
        # Non-UTF-8 bytes (latin-1) must not crash the job.
        def map(e, params):
            print u'\xc4\xe4rett\xf6myys'.encode('latin-1')
            return []
        self.job = self.new_job(map=map)
        self.assertResults(self.job, [])
|
utkarsh-goswami/erpnext | refs/heads/develop | erpnext/projects/doctype/activity_type/activity_type.py | 116 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class ActivityType(Document):
    """Activity Type master doctype; all behavior inherited from Document."""
    pass
cwolferh/heat-scratch | refs/heads/master | heat/objects/__init__.py | 12133432 | |
diorcety/intellij-community | refs/heads/master | python/testData/override/classmethod.py | 83 | class A:
@classmethod
def foo(cls):
cls.k = 3
class B(A):
<caret>pass
|
unodba/mirror | refs/heads/master | mirror/scheduler.py | 2 | #
# Copyright (C) 2013 Shang Yuanchun <idealities@gmail.com>
#
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# mirror is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mirror. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
#
"""Scheduler for Mirror :("""
import os, sys
import time
import fcntl
import signal
import logging
import weakref
import mmap
import struct
try:
import cPickle as pickle
except:
import pickle
import mirror.common
import mirror.error
import mirror.handler
import mirror.event
import mirror.component as component
from mirror.configmanager import ConfigManager
from mirror.task import Task, SimpleTask, TASK_TYPES
from mirror.task import PRIORITY_MIN, PRIORITY_MAX
from mirror.task import REGULAR_TASK, TIMEOUT_TASK, SYSTEM_TASK
from mirror.sysinfo import loadavg, tcpconn
from mirror.queue import TaskInfo, Queue
from mirror.component import Component
from collections import OrderedDict as odict
log = logging.getLogger(__name__)
class Scheduler(Component):
    """
    Cron-like task scheduler for mirrord.

    Keeps all configured tasks in `self.tasks` and pending runs in a
    time-ordered `self.queue`; sleep() blocks until the next run is due,
    then schedule() starts/stops tasks subject to system load, TCP
    connection count, and max-concurrent-task limits. The queue is also
    mirrored into /tmp/mirrord via mmap for external inspection.
    """

    # Bit flags recorded in self.todo describing what schedule() must do
    # after the next wake-up.
    CHECK_TIMEOUT = 0x01
    SCHEDULE_TASK = 0x02

    def __init__(self, options = None, args = None):
        # The name is "Scheduler"
        super(Scheduler, self).__init__(self.__class__.__name__)
        # self.tasks contains all tasks needed to run in theory,
        # including the tasks that are not enabled
        self.config = ConfigManager("mirror.ini")
        self.tasks = odict()
        self.queue = Queue()
        self.todo = self.SCHEDULE_TASK
        # the number of tasks that enabled
        self.active_tasks = -1
        # set True by the SIGCHLD path (stop_task_with_pid) so start() can
        # tell a child-exit wake-up from a timer wake-up
        self.roused_by_child = False
        self.init_general(self.config)
        self.init_tasks (self.config)

    def start(self):
        """Main loop: announce startup, then sleep/schedule forever."""
        event_manager = component.get("EventManager")
        event_manager.emit(mirror.event.MirrorStartEvent())
        while (True):
            self.sleep()
            if not self.roused_by_child:
                log.info("I am waking up...")
            self.schedule()

    # Maps a task type to the scheduling action it requires on wake-up.
    TODO = { REGULAR_TASK : SCHEDULE_TASK,
             SYSTEM_TASK : SCHEDULE_TASK,
             TIMEOUT_TASK : CHECK_TIMEOUT,
           }

    def sleep(self):
        """Refill the queue, persist it, compute self.todo, then block."""
        self.append_tasks()
        self.write_mmap()
        nexttask = self.queue[0]
        self.todo = 0
        if nexttask:
            # Accumulate the actions needed for every task due at the same
            # moment as the earliest one.
            for taskinfo in self.queue:
                if taskinfo.time > nexttask.time:
                    break
                self.todo |= self.TODO.get(taskinfo.tasktype, 0)
        # nexttask.time - time.time() is
        # the duration we can sleep...
        if nexttask:
            sleeptime = nexttask.time - time.time()
        else:
            sleeptime = 1800 # half an hour
        log.info("I am going to sleep, next waking up: %s",
                 time.ctime(time.time() + sleeptime))
        self.roused_by_child = False
        time.sleep(sleeptime)

    def schedule(self):
        """Start tasks due in the current minute and kill timed-out ones."""
        if self.queue.empty():
            log.info("But no task needed to start...")
            return
        self.init_sysinfo()
        # we do not need microseconds
        curtime = int(time.time())
        # snapshot: schedule_task()/stop_task() mutate self.queue while we
        # iterate
        taskqueue = [ taskinfo for taskinfo in self.queue ]
        if ( self.todo & self.SCHEDULE_TASK ):
            # to move to zero second
            timestamp = curtime
            timestamp -= curtime % 60
            # next minute
            end = timestamp + 60
            for taskinfo in taskqueue:
                if self.TODO.get(taskinfo.tasktype, 0) != self.SCHEDULE_TASK:
                    continue
                if taskinfo.time < timestamp:
                    log.info("Strange problem happened,"
                             "task: %s schedule time is in past,"
                             "maybe I sleeped too long...", taskinfo.name)
                    self.queue.remove(taskinfo)
                    self.append_task(taskinfo.name, self.tasks[taskinfo.name], since = end)
                if taskinfo.time >= end:
                    break
                if taskinfo.time >= timestamp and taskinfo.time < end:
                    self.schedule_task(taskinfo)
        if ( self.todo & self.CHECK_TIMEOUT ):
            for taskinfo in taskqueue:
                if self.TODO.get(taskinfo.tasktype, 0) != self.CHECK_TIMEOUT:
                    continue
                if taskinfo.time <= curtime:
                    log.info("Task: %s timeouts",
                             taskinfo.name)
                    self.stop_task(taskinfo)
                if taskinfo.time > curtime:
                    break

    def schedule_task(self, taskinfo):
        """
        Schedule a task, but it is not guaranteed that it will really be run, it is
        decided by some conditions, e.g. system load, current http connections.
        NOTE:
        The priority that a task can run is a function of ( current value / limit ).
        See get_runnable_priority(). However an exception is `maxtasks`, if current
        running tasks is reaching `maxtasks`, only specific priority (lower than or
        equal to 2) tasks can still be running.
        """
        task = self.tasks[taskinfo.name]
        if task.isinternal:
            # internal tasks bypass all load limits
            self.run_system_task(taskinfo)
            return
        if task.priority > self.get_runnable_priority(self.current_load, self.loadlimit):
            log.info("Task: %s not scheduled because system load %.2f is too high",
                     taskinfo.name, self.current_load)
            self.delay_task(taskinfo)
            return
        if task.priority > self.get_runnable_priority(self.current_conn, self.httpconn):
            log.info("Task: %s not scheduled because http connections is too many",
                     taskinfo.name)
            self.delay_task(taskinfo)
            return
        if self.maxtasks > 0 and self.count_running_tasks() >= self.maxtasks and task.priority > 2:
            log.info("Task: %s not scheduled because running tasks is larger than %d",
                     taskinfo, self.maxtasks)
            self.delay_task(taskinfo)
            return
        log.info("Starting task: %s ...", taskinfo.name)
        self.run_task(taskinfo)

    def init_sysinfo(self):
        """
        Get system info for this turn of schedule().
        """
        self.current_load = loadavg()
        self.current_conn = tcpconn()

    def delay_task(self, taskinfo, delay_seconds = 1800):
        """
        If a task is not scheduled due to some reason, it will be
        delayed for `delay_seconds` seconds (which is default half
        an hour), when task's next schedule time is later than that,
        else it's set to task's next schedule time.
        """
        if taskinfo not in self.queue:
            return
        task = self.tasks[taskinfo.name]
        next_time = task.get_schedule_time(since = time.time())
        if taskinfo.time + delay_seconds > next_time:
            taskinfo.time = next_time
        else:
            # In python objects are passed by reference, so mutating
            # taskinfo here also mutates the entry held by the queue
            taskinfo.time += delay_seconds
        self.reappend_task(task, taskinfo)

    def count_running_tasks(self):
        """
        Calculate the number of current running tasks.
        """
        running = 0
        for taskname, task in self.tasks.iteritems():
            if task.isinternal:
                continue
            # task.running is summed arithmetically (truthy counts as 1)
            running += task.running
        return running

    def append_tasks(self):
        """
        Append the tasks that are needed to run into self.queue.
        NOTE:
        If a task is currently running or it is not enabled, it will
        not be added to the queue.
        """
        now = time.time()
        for taskname in self.tasks:
            task = self.tasks[taskname]
            self.append_task(taskname, task, since = now)

    def append_task(self, taskname, task, since):
        """
        In some cases a task with same name may be ignored if there
        is a running one, but this is a feature, not a bug...
        """
        if task.running and task.timeout > 0:
            return
        if not task.enabled:
            return
        taskinfo = TaskInfo(taskname, (SYSTEM_TASK if task.isinternal else REGULAR_TASK),
                            task.get_schedule_time(since), task.priority)
        # for system tasks and tasks-without-timeout-set, task is appended again
        # in run_*_task() method, for tasks-with-timeout-set, a timeout task
        # with same name is appended in run_task()
        if taskinfo in self.queue:
            return
        self.queue.put(taskinfo)
        event_manager = component.get("EventManager")
        event_manager.emit(mirror.event.TaskEnqueueEvent(taskname))

    def reappend_task(self, task, taskinfo):
        """
        Remove a taskinfo from queue and put it in again,
        to keep the queue in order.
        """
        if taskinfo not in self.queue:
            return
        self.queue.remove(taskinfo)
        self.queue.put(taskinfo)
        event_manager = component.get("EventManager")
        event_manager.emit(mirror.event.TaskEnqueueEvent(taskinfo.name))

    # Minimum size of the mmap'ed snapshot file /tmp/mirrord.
    DEFAULT_BUFFER_SIZE = 10240

    def write_mmap(self):
        """Serialize the queue into the shared /tmp/mirrord mmap buffer.

        Layout: 2-byte magic "\\x79\\x71", 4-byte total size, pickled queue.
        The buffer is (re)created whenever it is missing or too small.
        """
        data = pickle.dumps(self.queue)
        size = len(data) + 2 + 4
        if not hasattr(self, "buffer") or self.buffersz < size:
            self.buffersz = max(self.DEFAULT_BUFFER_SIZE, size)
            if hasattr(self, "buffer"):
                self.buffer.close()
            self.bufferfd = os.open("/tmp/mirrord",
                                    os.O_CREAT | os.O_TRUNC | os.O_RDWR,
                                    0o644)
            # keep the fd from leaking into forked task processes
            flag = fcntl.fcntl(self.bufferfd, fcntl.F_GETFD)
            fcntl.fcntl(self.bufferfd, fcntl.F_SETFD, flag | fcntl.FD_CLOEXEC)
            os.write(self.bufferfd, '\x00' * self.buffersz)
            self.buffer = mmap.mmap(self.bufferfd, self.buffersz, mmap.MAP_SHARED, mmap.PROT_WRITE)
            # close bufferfd
            os.close(self.bufferfd)
        # NOTE(review): no seek(0) before writing the magic — on a second
        # call with a reused buffer the write position is wherever the
        # previous write left it; confirm whether a self.buffer.seek(0)
        # is missing here.
        self.buffer.write("\x79\x71")
        self.buffer.seek(2)
        self.buffer.write(struct.pack("I", size))
        self.buffer.write(data)

    def stop(self):
        """Tear down the mmap snapshot on daemon shutdown."""
        log.info("Stopping mirror scheduler")
        if not hasattr(self, "buffer"):
            return
        self.buffer.close()
        os.unlink("/tmp/mirrord")

    def append_timeout_task(self, taskname, task, time):
        """
        A timeout checking task is added after a task begins to run.
        NOTE: the `time` parameter shadows the `time` module inside this
        method (absolute deadline, seconds since epoch).
        """
        if not task.running:
            return
        if not task.enabled:
            return
        taskinfo = TaskInfo(taskname, TIMEOUT_TASK,
                            time, task.priority)
        if taskinfo in self.queue:
            return
        self.queue.put(taskinfo)
        event_manager = component.get("EventManager")
        event_manager.emit(mirror.event.TaskEnqueueEvent(taskname))

    def remove_timeout_task(self, taskname):
        """
        It's slow... (linear scan over a snapshot of the queue)
        """
        taskqueue = [ taskinfo for taskinfo in self.queue ]
        for taskinfo in taskqueue:
            if taskinfo.name == taskname and taskinfo.tasktype == TIMEOUT_TASK:
                self.queue.remove(taskinfo)
                return

    def init_general(self, config):
        """Load the [general] section, falling back to built-in defaults."""
        self.emails = []
        self.loadlimit = 4.0
        self.httpconn = 1200
        self.logdir = mirror.common.DEFAULT_TASK_LOG_DIR
        self.maxtasks = 10
        if "general" not in config:
            log.error("Error in config file, no `general` section, will use default setting.")
            return
        import re
        emails = re.compile(r"([^@\s]+@[^@\s,]+)")
        emails = emails.findall(config['general']['emails'])
        for email in emails:
            self.emails.append(email)
        self.loadlimit = float(config['general']['loadlimit'])
        self.httpconn = int (config['general']['httpconn'] )
        self.maxtasks = int (config['general']['maxtasks'] )
        self.logdir = config['general']['logdir']
        if self.logdir[-1] != os.path.sep:
            self.logdir += os.path.sep

    def reload_config(self):
        """Kill all running tasks, drop non-internal ones, re-read config."""
        log.info("Stopping running tasks...")
        # disable the SIGCHLD handler so stop_all_tasks() can waitpid()
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        self.stop_all_tasks()
        signal.signal(signal.SIGCHLD, mirror.handler.sigchld_handler)
        log.info("Clearing old data...")
        # NOTE(review): deleting entries while iterating iteritems() raises
        # RuntimeError on Python 2 — iterate over a snapshot such as
        # self.tasks.items() instead; confirm and fix.
        for taskname, task in self.tasks.iteritems():
            if not task.isinternal:
                del self.tasks[taskname]
        self.queue = Queue()
        log.info("Reloading new configs...")
        self.config = ConfigManager("mirror.ini", need_reload = True)
        self.init_general(self.config)
        self.init_tasks (self.config)

    def init_tasks(self, config):
        """Instantiate a Task (or subtype) per config section except [general]."""
        # NOTE(review): the loop variable `mirror` shadows the imported
        # `mirror` package inside this method.
        for mirror in config:
            if mirror == 'general':
                continue
            # We think it's default mirror.task.Task
            task_class = TASK_TYPES.get(config[mirror].get("type", None), Task)
            self.tasks[mirror] = task_class(mirror, weakref.ref(self), **config[mirror])
        self.active_tasks = len(
            [mirror for mirror, task in self.tasks.iteritems() if task.enabled])

    def run_system_task(self, taskinfo):
        """Dispatch an internal task via the event system and re-queue it."""
        event_manager = component.get("EventManager")
        event_manager.emit(mirror.event.RunSystemTaskEvent(taskinfo))
        # after we run the system task, we need to update the queue
        # or else the sleeptime will be invalid
        if taskinfo in self.queue:
            self.queue.remove(taskinfo)
        task = self.tasks[taskinfo.name]
        self.append_task(taskinfo.name, task, time.time())

    def run_task(self, taskinfo, stage = 1):
        """Fork the task process, emit start events, and re-queue follow-ups."""
        if taskinfo.name not in self.tasks:
            return
        task = self.tasks[taskinfo.name]
        # for tasks that is still running when next schedule time
        # is reached (but has no timeout set), we just need to
        # reappend it.
        if task.running and task.timeout <= 0:
            taskinfo.time = task.get_schedule_time(since = time.time())
            self.reappend_task(task, taskinfo)
        if task.running and ( not task.twostage ):
            log.info("Task: %s is still running and no timeout set, skipped", taskinfo.name)
            return
        event_manager = component.get("EventManager")
        event_manager.emit(mirror.event.PreTaskStartEvent(taskinfo.name))
        task.run(stage)
        if taskinfo in self.queue:
            self.queue.remove(taskinfo)
        log.info("Task: %s begin to run with pid %d", taskinfo.name, task.pid)
        event_manager.emit(mirror.event.TaskStartEvent(taskinfo.name, task.pid))
        if task.timeout <= 0:
            # no timeout: immediately schedule the next regular run
            self.append_task(taskinfo.name, task, time.time())
        else:
            # timeout set: queue a watchdog entry at the deadline instead
            self.append_timeout_task(taskinfo.name, task,
                                     task.start_time + task.timeout)

    def stop_task(self, taskinfo):
        """
        Stop a task, it should only be called when that task timeouts.
        """
        if taskinfo.name not in self.tasks:
            return
        task = self.tasks[taskinfo.name]
        if not task.running:
            return
        pid = task.pid
        # Python's SIGCHLD sometimes has delay in calling its handler,
        # we have to disable sigchld_handler() here.
        # More: http://utcc.utoronto.ca/~cks/space/blog/python/CPythonSignals
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        task.stop()
        self.stop_task_manually(task, pid)
        signal.signal(signal.SIGCHLD, mirror.handler.sigchld_handler)

    def stop_task_manually(self, task, pid):
        """
        Without SIGCHLD handler, we have to waitpid() here.
        """
        pid, status = os.waitpid(pid, 0)
        endstr, code = self.parse_return_status(status)
        task.code = code
        log.info("Killed task: %s %s %d, pid %d", task.name, endstr, code, pid)
        self.remove_timeout_task(task.name)
        self.task_finished(task)

    def stop_task_with_pid(self, pid, status):
        """
        This is called when we got a SIGCHLD signal.
        Change task's running and pid attr as it's stopped.
        """
        self.roused_by_child = True
        for taskname, task in self.tasks.iteritems():
            if task.isinternal:
                continue
            if task.pid == pid:
                if not task.running:
                    return
                endstr, code = self.parse_return_status(status)
                task.code = code
                log.info("Task: %s %s %d, pid %d", taskname, endstr, code, pid)
                self.remove_timeout_task(taskname)
                self.task_finished(task)
                return

    def task_finished(self, task):
        """
        Check whether a task needs post process, e.g. two stage tasks.
        """
        event_manager = component.get("EventManager")
        if not task.twostage:
            event_manager.emit(mirror.event.TaskStopEvent(task.name, task.pid, task.code))
            self.task_autoretry(task)
            task.set_stop_flag()
            return
        if task.stage == 1:
            # first stage done: immediately launch stage 2
            log.info("Task: %s scheduled to second stage", task.name)
            self.run_task(TaskInfo(task.name, REGULAR_TASK, 0, task.priority), stage = 2)
        else:
            event_manager.emit(mirror.event.TaskStopEvent(task.name, task.pid, task.code))
            self.task_autoretry(task)
            task.set_stop_flag()
            task.stage = 1

    def task_autoretry(self, task):
        """
        If a task has a valid `autoretry`, and its interval is before next normal schedule,
        it will be used.
        """
        if task.autoretry <= 0:
            return
        if task.code == 0:
            # exited cleanly: no retry needed
            return
        curtime = int(time.time())
        next_time = task.get_schedule_time(since = curtime)
        if curtime + task.autoretry < next_time:
            # NOTE(review): indexes the queue by task name — confirm that
            # mirror.queue.Queue supports name-based __getitem__ (elsewhere
            # it is indexed by position, e.g. self.queue[0]).
            taskinfo = self.queue[task.name]
            taskinfo.time = next_time
            self.reappend_task(task, taskinfo)

    def stop_all_tasks(self, signo = signal.SIGTERM):
        """
        This method can only be called when mirrord is shut down by SIGTERM or SIGINT.
        NOTE:
        Currently when mirrord is shut down, all running tasks will also be killed.
        """
        event_manager = component.get("EventManager")
        for taskname, task in self.tasks.iteritems():
            if task.isinternal:
                continue
            if not task.running:
                continue
            pid = task.pid
            task.stop(signo)
            # Not sure it is ok...
            pid, status = os.waitpid(pid, 0)
            endstr, code = self.parse_return_status(status)
            task.code = code
            event_manager.emit(mirror.event.TaskStopEvent(task.name, task.pid, task.code))
            log.info("Killed task: %s with pid %d", taskname, pid)

    @classmethod
    def get_runnable_priority(cls, current, limit):
        """
        If limit is zero, all priority tasks can be run.
        Else if current value is lower than limit, all priority tasks can be run.
        Else it is a function between target priority and (current / limit).
        """
        if limit <= 0:
            return PRIORITY_MAX
        if current < limit:
            return PRIORITY_MAX
        # linear falloff: at current == limit this yields 10, dropping as
        # the overload ratio grows
        return (-4.55 * (current * 1.0 / limit)) + 14.55

    @classmethod
    def parse_return_status(cls, status):
        """Decode an os.waitpid() status word into (description, code)."""
        if (status & 0xff) != 0:
            # low byte non-zero: terminated by a signal
            endstr = "killed by signal"
            code = (status & 0xff)
        else:
            endstr = "ended with return code"
            # See "EXIT VALUES" section in man rsync
            code = (status >> 8)
        return (endstr, code)
|
WikiTeam/wikiteam | refs/heads/master | listsofwikis/wikispaces/wikispaces-duckduckgo.py | 1 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2018 WikiTeam developers
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import re
import sys
import time
import urllib.parse
import urllib.request
def main():
    """Crawl DuckDuckGo HTML search results for *.wikispaces.com hosts.

    Seeds queries from words.txt, accumulates discovered wiki URLs in
    wikispaces-duckduckgo.txt (normalized, deduplicated, sorted), and
    throttles requests with a random sleep between searches. Exits when
    a search request fails or after ~99 passes over the word list.
    """
    # Install a browser-like User-Agent for all urllib requests.
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    urllib.request.install_opener(opener)

    with open('words.txt', 'r') as f:
        words = f.read().strip().splitlines()
    random.shuffle(words)
    print('Loaded %d words from file' % (len(words)))

    with open('wikispaces-duckduckgo.txt', 'r') as f:
        wikis = f.read().strip().splitlines()
    wikis.sort()
    print('Loaded %d wikis from file' % (len(wikis)))

    for _ in range(1, 100):
        random.shuffle(words)
        for word in words:
            print('Word', word)
            word_ = re.sub(' ', '+', word)
            # Randomize query shape to vary the result pages returned.
            r = random.randint(0, 10)
            if r == 0:
                url = 'https://duckduckgo.com/html/?q=%s%%20site:wikispaces.com' % (word_)
            elif r == 1:
                url = 'https://duckduckgo.com/html/?q=%s%%20wikispaces.com' % (word_)
            elif r == 2:
                url = 'https://duckduckgo.com/html/?q=%s%%20%s%%20wikispaces.com' % (word_, random.randint(100, 3000))
            elif r == 3:
                url = 'https://duckduckgo.com/html/?q=%s%%20%s%%20wikispaces.com' % (random.randint(100, 3000), word_)
            else:
                url = 'https://duckduckgo.com/html/?q=%s%%20%s%%20wikispaces.com' % (word_, random.randint(100, 3000))
            print('URL search', url)
            try:
                html = urllib.request.urlopen(url).read().decode('utf-8')
            except Exception:
                # Bug fix: the original called sys.exit() without importing
                # sys, so a failed search raised NameError instead of
                # exiting; `sys` is now imported at module level.
                print('Search error')
                sys.exit()
            html = urllib.parse.unquote(html)
            # Harvest every wikispaces host mentioned in the result page.
            for host in re.findall(r'://([^/]+?\.wikispaces\.com)', html):
                wiki = 'https://' + host
                if wiki not in wikis:
                    wikis.append(wiki)
                    wikis.sort()
                    print(wiki)
            # Persist after each search: normalize away 'www.' and dedupe.
            with open('wikispaces-duckduckgo.txt', 'w') as f:
                deduped = []
                for wiki in wikis:
                    wiki = re.sub(r'https://www\.', 'https://', wiki)
                    if wiki not in deduped:
                        deduped.append(wiki)
                wikis = deduped
                wikis.sort()
                f.write('\n'.join(wikis))
            print('%d wikis found' % (len(wikis)))
            # Polite throttling between requests.
            sleep = random.randint(5, 20)
            print('Sleeping %d seconds' % (sleep))
            time.sleep(sleep)
|
blackPantherOS/packagemanagement | refs/heads/master | smartpm/tests/channel.py | 3 | import unittest
from smart.channel import parseChannelsDescription, PackageChannel
from smart.cache import Loader
class ParseChannelsDescriptionTest(unittest.TestCase):
def test_parseChannelsDescription(self):
data = parseChannelsDescription("""
[alias]
type = deb-sys
name = first = second
""")
self.assertEquals(data, {'alias': {'type': 'deb-sys',
'name': 'first = second'}})
def test_removeLoaders_without_cache(self):
class TestChannel(PackageChannel):
def fetch(self, fetcher, progress):
self._loaders.append(Loader())
channel = TestChannel("type", "alias")
channel.fetch(None, None)
channel.removeLoaders()
self.assertEquals(channel.getLoaders(), [])
|
darkleons/lama | refs/heads/master | addons/mail/res_config.py | 301 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import urlparse
from openerp.osv import osv, fields
class project_configuration(osv.TransientModel):
    """Exposes the mail catch-all alias domain on the general settings wizard."""
    _inherit = 'base.config.settings'

    _columns = {
        'alias_domain': fields.char('Alias Domain',
            help="If you have setup a catch-all email domain redirected to "
                 "the Odoo server, enter the domain name here."),
    }

    def get_default_alias_domain(self, cr, uid, ids, context=None):
        """Default for 'alias_domain': the stored mail.catchall.domain
        parameter, falling back to the hostname of web.base.url when the
        parameter has never been configured."""
        icp = self.pool.get("ir.config_parameter")
        alias_domain = icp.get_param(cr, uid, "mail.catchall.domain",
                                     default=None, context=context)
        if alias_domain is None:
            base_url = icp.get_param(cr, uid, "web.base.url", context=context)
            try:
                # keep only the host part: strip scheme, path and port
                alias_domain = urlparse.urlsplit(base_url).netloc.split(':')[0]
            except Exception:
                pass
        return {'alias_domain': alias_domain or False}

    def set_alias_domain(self, cr, uid, ids, context=None):
        """Persist the wizard's alias_domain into ir.config_parameter
        (empty string when the field was cleared)."""
        icp = self.pool.get("ir.config_parameter")
        for wizard in self.browse(cr, uid, ids, context=context):
            icp.set_param(cr, uid, "mail.catchall.domain",
                          wizard.alias_domain or '', context=context)
|
rruebner/odoo | refs/heads/master | addons/website_project/controllers/__init__.py | 7372 | import main
|
eric-pedersen/tritrophic-dispersal-model | refs/heads/master | Code/auto_code_for_bifurcation_diagrams/dispersal_two_parameter_analyses.py | 1 | ##################################################
##################################################
##################################################
####### Two-dimensional bifurcations with ########
## respect to two separate dispersal parameters ##
##################################################
##################################################

# Every continuation in this script shares the same AUTO constants file
# ('spatial_IGP.FP'), branch switching (ISW=2), fold suppression (ILP=0)
# and user-defined stopping bounds, so that boilerplate is factored into
# a single helper.  Per-run deviations (step sizes, NMX, IPS, ISP, ...)
# are passed as keyword overrides.

def _two_par_run(start_point, icp, **overrides):
    """Run one AUTO continuation in a two-parameter plane.

    start_point -- labelled restart solution (branch point, Hopf or fold)
    icp         -- continuation parameters, e.g. ['dR', 'dH'] (append 11,
                   AUTO's internal period parameter, for cycle continuations)
    overrides   -- any AUTO constant to override for this run

    Returns the resulting AUTO branch object.
    """
    opts = {
        'c': 'spatial_IGP.FP',
        'ICP': icp,
        'ISW': 2,
        'ILP': 0,  # tells auto not to find folds
        'DS': 0.005,
        'DSMIN': 0.0005,
        'DSMAX': 0.01,
        'NMX': 7000,
        # FIX(review): several of the original hand-copied UZSTOP dicts
        # repeated the 'dH' key twice, so the 'dR' bound was silently
        # dropped (the later duplicate key wins in a dict literal).  All
        # runs now bound all three dispersal rates, matching the dicts
        # that were written correctly elsewhere in this script.
        'UZSTOP': {'dR': range_dispersal_2_par,
                   'dH': range_dispersal_2_par,
                   'dP': range_dispersal_2_par,
                   'K': range_K_two_parameter,
                   'aRP': range_IGP},
    }
    opts.update(overrides)
    return auto.run(start_point, **opts)

###########################
###### #1: dR / dH ########
###########################
# Following the branch point for the asymmetric equm (both directions)
dX_dY_plot = _two_par_run(asym_branch_dH, ['dR', 'dH'], IPS=1, DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_branch_dH, ['dR', 'dH'], IPS=1, DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)

# following the asymmetric Hopf bifurcations
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dR, ['dR', 'dH'], DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dR, ['dR', 'dH'], DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dH, ['dR', 'dH'], DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dH, ['dR', 'dH'], DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
auto.cl()

# following the Hopf bifurcation induced by dispersal
dX_dY_plot = dX_dY_plot + _two_par_run(sym_disperse_hopf_dR, ['dR', 'dH'], DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(sym_disperse_hopf_dR, ['dR', 'dH'], DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)

###########################
###### #2: dH / dP ########
###########################
# Following the branch point for the asymmetric equm
dX_dY_plot = dX_dY_plot + _two_par_run(asym_branch_dH, ['dH', 'dP'], IPS=1, DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_branch_dH, ['dH', 'dP'], IPS=1, DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)

# following the asymmetric Hopf bifurcations
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dH, ['dH', 'dP'], DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dH, ['dH', 'dP'], DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dP, ['dH', 'dP'], DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dP, ['dH', 'dP'], DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
auto.cl()
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dH_low, ['dH', 'dP'], DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dH_low, ['dH', 'dP'], DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
auto.cl()

# following the Hopf bifurcation induced by dispersal
# (note the deliberately swapped ICP order, as in the original runs)
dX_dY_plot = dX_dY_plot + _two_par_run(sym_disperse_hopf_dP, ['dP', 'dH'], DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(sym_disperse_hopf_dP, ['dP', 'dH'], DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)

###########################
##### #3: dR / dP #########
###########################
# Following the branch point for the asymmetric equm.  The backward run
# originally used coarser steps and a shorter run; those deviations are kept.
dX_dY_plot = dX_dY_plot + _two_par_run(asym_branch_dR, ['dP', 'dR'], IPS=1, DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_branch_dR, ['dP', 'dR'], IPS=1,
                                       DS=-0.05, DSMIN=0.005, NMX=2000)
dX_dY_plot = auto.rl(dX_dY_plot)

# following the asymmetric Hopf bifurcations
# (finer steps / longer runs were needed in this plane; kept as-is)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dP, ['dP', 'dR'],
                                       DS=0.0005, DSMIN=0.00005,
                                       DSMAX=0.001, NMX=50000)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dP, ['dP', 'dR'],
                                       DS=-0.005, DSMAX=0.001)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dR, ['dP', 'dR'],
                                       DS=0.005, NMX=8000)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(asym_hopf_dR, ['dP', 'dR'],
                                       DS=-0.005, DSMAX=0.001, NMX=8000)
dX_dY_plot = auto.rl(dX_dY_plot)

# following the Hopf bifurcation induced by dispersal
dX_dY_plot = dX_dY_plot + _two_par_run(sym_disperse_hopf_dP, ['dR', 'dP'], DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(sym_disperse_hopf_dP, ['dR', 'dP'], DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(sym_disperse_hopf_dR, ['dR', 'dP'], DS=0.005)
dX_dY_plot = auto.rl(dX_dY_plot)
dX_dY_plot = dX_dY_plot + _two_par_run(sym_disperse_hopf_dR, ['dR', 'dP'], DS=-0.005)
dX_dY_plot = auto.rl(dX_dY_plot)

###############################################
#### Following the limit cycle branch that ####
### leads to bistability, and the bistability##
### inducing fold in limit cycles #############
###############################################
plot_pairs = [("dR", "dP"), ("dR", "dH"), ("dH", "dP")]
for first_parameter, second_parameter in plot_pairs:
    # 11 is AUTO's internal parameter for the cycle period; it must be in
    # ICP when continuing periodic solutions (IPS=2).
    lc_icp = [first_parameter, second_parameter, 11]
    # limit-cycle branch points, both continuation directions
    for step in (0.005, -0.005):
        dX_dY_plot = dX_dY_plot + _two_par_run(
            limit_branch_points[first_parameter], lc_icp,
            IPS=2, DS=step, DSMIN=0.005, NMX=70000)
    # bistability-inducing folds of limit cycles
    dX_dY_plot = dX_dY_plot + _two_par_run(
        fold_points[first_parameter], lc_icp,
        IPS=2, DS=0.005, DSMIN=0.005, NMX=70000)
    dX_dY_plot = dX_dY_plot + _two_par_run(
        fold_points[first_parameter], lc_icp,
        IPS=2, ISP=1, DS=-0.005, DSMIN=0.005, NMX=70000)
    # NOTE(review): the dump's indentation is ambiguous about whether this
    # relabel ran inside or after the loop; rl() renumbers from scratch, so
    # the final labels are the same either way — confirm against upstream.
    dX_dY_plot = auto.rl(dX_dY_plot)
auto.cl()
|
RomanZacharia/pyethapp | refs/heads/develop | pyethapp/tests/test_pow_service.py | 1 | import pytest
from gevent.event import Event
from devp2p.app import BaseApp
from devp2p.service import BaseService
from ethereum import slogging
from ethereum.blocks import Block, BlockHeader
from ethereum.config import Env
from ethereum.db import DB
from pyethapp.pow_service import PoWService
DIFFICULTY = 1024 # Mining difficulty.
TIMEOUT = 15 # Timeout for single block being minded.
class ChainServiceMock(BaseService):
    """Minimal stand-in for the chain service, just enough for PoWService.

    Records the first block the miner hands back (add_mined_block) and
    signals block_mined_event so tests can wait for mining to happen.
    """
    name = 'chain'

    class ChainMock:
        # Fake chain exposing only the head candidate the miner works on.
        def __init__(self):
            env = Env(DB())
            header = BlockHeader(difficulty=DIFFICULTY)
            self.head_candidate = Block(header, env=env)

    def __init__(self, app):
        super(ChainServiceMock, self).__init__(app)
        # callbacks PoWService registers to learn about new head candidates
        self.on_new_head_candidate_cbs = []
        self.is_syncing = False
        self.chain = self.ChainMock()
        self.mined_block = None
        self.block_mined_event = Event()

    def add_mined_block(self, block):
        # Called by PoWService when mining succeeds; only one block is
        # expected per test, hence the assert.
        assert self.mined_block is None
        self.mined_block = block
        self.block_mined_event.set()
@pytest.fixture
def app(request):
    """Build and start a BaseApp wired with the chain mock and PoWService,
    stopping it again when the test finishes."""
    application = BaseApp()
    ChainServiceMock.register_with_app(application)
    PoWService.register_with_app(application)
    application.start()
    request.addfinalizer(application.stop)
    return application
def test_pow_default(app):
    """PoW service registers with the chain and starts inactive by default."""
    chain = app.services.chain
    pow_service = app.services.pow  # renamed from 'pow': don't shadow the builtin
    assert pow_service
    assert chain
    # exactly one new-head-candidate subscription should come from PoWService
    assert len(chain.on_new_head_candidate_cbs) == 1
    assert not pow_service.active
    assert not app.config['pow']['activated']
    assert app.config['pow']['cpu_pct'] == 100
    assert not app.config['pow']['coinbase_hex']
    assert app.config['pow']['mine_empty_blocks']
def test_pow_config(app):
    """Flipping the 'activated' config switch immediately activates mining."""
    pow_config = app.config['pow']
    pow_config['activated'] = True
    assert app.services.pow.active
def test_pow_mine_empty_block(app):
    """With empty-block mining enabled (the default), a block gets mined."""
    slogging.configure("pow:trace")
    app.config['pow']['activated'] = True
    chain_service = app.services.chain
    mined = chain_service.block_mined_event
    mined.wait(timeout=TIMEOUT)
    assert mined.is_set(), "Block has not been mined for {} s".format(TIMEOUT)
    assert chain_service.mined_block
def test_pow_dont_mine_empty_block(app):
    """With mine_empty_blocks disabled, no block may be mined (empty txqueue)."""
    slogging.configure("pow:trace")
    app.config['pow']['mine_empty_blocks'] = False
    app.config['pow']['activated'] = True
    chain = app.services.chain
    pow_service = app.services.pow  # renamed from 'pow': don't shadow the builtin
    e = chain.block_mined_event
    # short wait: we are asserting the absence of an event
    e.wait(timeout=2)
    assert not e.is_set(), "Block has been mined"
    assert chain.mined_block is None
    assert pow_service.hashrate == 0, "Miner is working"
|
silly-wacky-3-town-toon/SOURCE-COD | refs/heads/master | Panda3D-1.10.0/python/Lib/textwrap.py | 110 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
__revision__ = "$Id$"
import string, re
# Compatibility shim: alias the builtin unicode type so isinstance checks
# below can use a single name.
try:
    _unicode = unicode
except NameError:
    # If Python is built without Unicode support, the unicode type
    # will not exist. Fake one.
    class _unicode(object):
        pass
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
#try:
# True, False
#except NameError:
# (True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
    """
    Object for wrapping/filling text.  The public interface consists of
    the wrap() and fill() methods; the other methods are just there for
    subclasses to override in order to tweak the default behaviour.
    If you want to completely replace the main wrapping algorithm,
    you'll probably have to override _wrap_chunks().

    Several instance attributes control various aspects of wrapping:
      width (default: 70)
        the maximum width of wrapped lines (unless break_long_words
        is false)
      initial_indent (default: "")
        string that will be prepended to the first line of wrapped
        output.  Counts towards the line's width.
      subsequent_indent (default: "")
        string that will be prepended to all lines save the first
        of wrapped output; also counts towards each line's width.
      expand_tabs (default: true)
        Expand tabs in input text to spaces before further processing.
        Each tab will become 1 .. 8 spaces, depending on its position in
        its line.  If false, each tab is treated as a single character.
      replace_whitespace (default: true)
        Replace all whitespace characters in the input text by spaces
        after tab expansion.  Note that if expand_tabs is false and
        replace_whitespace is true, every tab will be converted to a
        single space!
      fix_sentence_endings (default: false)
        Ensure that sentence-ending punctuation is always followed
        by two spaces.  Off by default because the algorithm is
        (unavoidably) imperfect.
      break_long_words (default: true)
        Break words longer than 'width'.  If false, those words will not
        be broken, and some lines might be longer than 'width'.
      break_on_hyphens (default: true)
        Allow breaking hyphenated words. If true, wrapping will occur
        preferably on whitespaces and right after hyphens part of
        compound words.
      drop_whitespace (default: true)
        Drop leading and trailing whitespace from lines.
    """

    # Translation table mapping each recognized (US-ASCII) whitespace
    # character to a plain space, for str input.
    whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))

    # Equivalent mapping for unicode input: {ord(ws_char): ord(u' ')}.
    unicode_whitespace_trans = {}
    uspace = ord(u' ')
    for x in map(ord, _whitespace):
        unicode_whitespace_trans[x] = uspace

    # This funky little regex is just the trick for splitting
    # text up into word-wrappable chunks.  E.g.
    #   "Hello there -- you goof-ball, use the -b option!"
    # splits into
    #   Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
    # (after stripping out empty strings).
    wordsep_re = re.compile(
        r'(\s+|'                                  # any whitespace
        r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|'   # hyphenated words
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))')   # em-dash

    # This less funky little regex just split on recognized spaces. E.g.
    #   "Hello there -- you goof-ball, use the -b option!"
    # splits into
    #   Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
    wordsep_simple_re = re.compile(r'(\s+)')

    # XXX this is not locale- or charset-aware -- string.lowercase
    # is US-ASCII only (and therefore English-only)
    sentence_end_re = re.compile(r'[%s]'              # lowercase letter
                                 r'[\.\!\?]'          # sentence-ending punct.
                                 r'[\"\']?'           # optional end-of-quote
                                 r'\Z'                # end of chunk
                                 % string.lowercase)

    def __init__(self,
                 width=70,
                 initial_indent="",
                 subsequent_indent="",
                 expand_tabs=True,
                 replace_whitespace=True,
                 fix_sentence_endings=False,
                 break_long_words=True,
                 drop_whitespace=True,
                 break_on_hyphens=True):
        """Store the wrapping options; see the class docstring for the
        meaning of each keyword argument."""
        self.width = width
        self.initial_indent = initial_indent
        self.subsequent_indent = subsequent_indent
        self.expand_tabs = expand_tabs
        self.replace_whitespace = replace_whitespace
        self.fix_sentence_endings = fix_sentence_endings
        self.break_long_words = break_long_words
        self.drop_whitespace = drop_whitespace
        self.break_on_hyphens = break_on_hyphens

        # recompile the regexes for Unicode mode -- done in this clumsy way for
        # backwards compatibility because it's rather common to monkey-patch
        # the TextWrapper class' wordsep_re attribute.
        self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U)
        self.wordsep_simple_re_uni = re.compile(
            self.wordsep_simple_re.pattern, re.U)


    # -- Private methods -----------------------------------------------
    # (possibly useful for subclasses to override)

    def _munge_whitespace(self, text):
        """_munge_whitespace(text : string) -> string

        Munge whitespace in text: expand tabs and convert all other
        whitespace characters to spaces.  Eg. " foo\tbar\n\nbaz"
        becomes " foo    bar  baz".
        """
        if self.expand_tabs:
            text = text.expandtabs()
        if self.replace_whitespace:
            # str and unicode need different translate() table shapes.
            if isinstance(text, str):
                text = text.translate(self.whitespace_trans)
            elif isinstance(text, _unicode):
                text = text.translate(self.unicode_whitespace_trans)
        return text


    def _split(self, text):
        """_split(text : string) -> [string]

        Split the text to wrap into indivisible chunks.  Chunks are
        not quite the same as words; see _wrap_chunks() for full
        details.  As an example, the text
          Look, goof-ball -- use the -b option!
        breaks into the following chunks:
          'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
        if break_on_hyphens is True, or in:
          'Look,', ' ', 'goof-ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', option!'
        otherwise.
        """
        # Pick the pattern matching both the input type (str vs unicode)
        # and the break_on_hyphens setting.
        if isinstance(text, _unicode):
            if self.break_on_hyphens:
                pat = self.wordsep_re_uni
            else:
                pat = self.wordsep_simple_re_uni
        else:
            if self.break_on_hyphens:
                pat = self.wordsep_re
            else:
                pat = self.wordsep_simple_re
        chunks = pat.split(text)
        chunks = filter(None, chunks)  # remove empty chunks
        return chunks

    def _fix_sentence_endings(self, chunks):
        """_fix_sentence_endings(chunks : [string])

        Correct for sentence endings buried in 'chunks'.  Eg. when the
        original text contains "... foo.\nBar ...", munge_whitespace()
        and split() will convert that to [..., "foo.", " ", "Bar", ...]
        which has one too few spaces; this method simply changes the one
        space to two.
        """
        i = 0
        patsearch = self.sentence_end_re.search
        while i < len(chunks)-1:
            # NOTE(review): the replacement below appears as a single space
            # in this copy, which would make the method a no-op; upstream
            # CPython assigns two spaces here, as the docstring describes.
            # Confirm against the original file -- whitespace may have been
            # lost in transit.
            if chunks[i+1] == " " and patsearch(chunks[i]):
                chunks[i+1] = "  "
                i += 2
            else:
                i += 1

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        """_handle_long_word(chunks : [string],
                             cur_line : [string],
                             cur_len : int, width : int)

        Handle a chunk of text (most likely a word, not whitespace) that
        is too long to fit in any line.
        """
        # Figure out when indent is larger than the specified width, and make
        # sure at least one character is stripped off on every pass
        if width < 1:
            space_left = 1
        else:
            space_left = width - cur_len

        # If we're allowed to break long words, then do so: put as much
        # of the next chunk onto the current line as will fit.
        if self.break_long_words:
            cur_line.append(reversed_chunks[-1][:space_left])
            reversed_chunks[-1] = reversed_chunks[-1][space_left:]

        # Otherwise, we have to preserve the long word intact.  Only add
        # it to the current line if there's nothing already there --
        # that minimizes how much we violate the width constraint.
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())

        # If we're not allowed to break long words, and there's already
        # text on the current line, do nothing.  Next time through the
        # main loop of _wrap_chunks(), we'll wind up here again, but
        # cur_len will be zero, so the next line will be entirely
        # devoted to the long word that we can't handle right now.

    def _wrap_chunks(self, chunks):
        """_wrap_chunks(chunks : [string]) -> [string]

        Wrap a sequence of text chunks and return a list of lines of
        length 'self.width' or less.  (If 'break_long_words' is false,
        some lines may be longer than this.)  Chunks correspond roughly
        to words and the whitespace between them: each chunk is
        indivisible (modulo 'break_long_words'), but a line break can
        come between any two chunks.  Chunks should not have internal
        whitespace; ie. a chunk is either all whitespace or a "word".
        Whitespace chunks will be removed from the beginning and end of
        lines, but apart from that whitespace is preserved.
        """
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)

        # Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
        chunks.reverse()

        while chunks:

            # Start the list of chunks that will make up the current line.
            # cur_len is just the length of all the chunks in cur_line.
            cur_line = []
            cur_len = 0

            # Figure out which static string will prefix this line.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent

            # Maximum width for this line.
            width = self.width - len(indent)

            # First chunk on line is whitespace -- drop it, unless this
            # is the very beginning of the text (ie. no lines started yet).
            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]

            while chunks:
                l = len(chunks[-1])

                # Can at least squeeze this chunk onto the current line.
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l

                # Nope, this line is full.
                else:
                    break

            # The current line is full, and the next chunk is too big to
            # fit on *any* line (not just this one).
            if chunks and len(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)

            # If the last chunk on this line is all whitespace, drop it.
            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
                del cur_line[-1]

            # Convert current line back to a string and store it in list
            # of all lines (return value).
            if cur_line:
                lines.append(indent + ''.join(cur_line))

        return lines


    # -- Public interface ----------------------------------------------

    def wrap(self, text):
        """wrap(text : string) -> [string]

        Reformat the single paragraph in 'text' so it fits in lines of
        no more than 'self.width' columns, and return a list of wrapped
        lines.  Tabs in 'text' are expanded with string.expandtabs(),
        and all other whitespace characters (including newline) are
        converted to space.
        """
        text = self._munge_whitespace(text)
        chunks = self._split(text)
        if self.fix_sentence_endings:
            self._fix_sentence_endings(chunks)
        return self._wrap_chunks(chunks)

    def fill(self, text):
        """fill(text : string) -> string

        Reformat the single paragraph in 'text' to fit in lines of no
        more than 'self.width' columns, and return a new string
        containing the entire wrapped paragraph.
        """
        return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
    """Wrap a single paragraph of text, returning a list of wrapped lines.

    Convenience function: builds a throwaway TextWrapper configured with
    'width' and any extra keyword arguments, then delegates to its wrap()
    method.  By default, tabs in 'text' are expanded with
    string.expandtabs(), and all other whitespace characters (including
    newline) are converted to space.  See TextWrapper for the available
    keyword arguments.
    """
    wrapper = TextWrapper(width=width, **kwargs)
    return wrapper.wrap(text)
def fill(text, width=70, **kwargs):
    """Fill a single paragraph of text, returning a new string.

    Convenience function: builds a throwaway TextWrapper configured with
    'width' and any extra keyword arguments, then delegates to its fill()
    method, which returns the whole wrapped paragraph as one
    newline-joined string.  As with wrap(), tabs are expanded and other
    whitespace characters converted to space.  See TextWrapper for the
    available keyword arguments.
    """
    wrapper = TextWrapper(width=width, **kwargs)
    return wrapper.fill(text)
# -- Loosely related functionality -------------------------------------
# Matches lines consisting solely of blanks/tabs (dedent() blanks them out).
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
# Captures the leading blank/tab run of every line that has visible content.
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
    """Remove any common leading whitespace from every line in `text`.

    This can be used to make triple-quoted strings line up with the left
    edge of the display, while still presenting them in the source code
    in indented form.

    Note that tabs and spaces are both treated as whitespace, but they
    are not equal: the lines "  hello" and "\thello" are
    considered to have no common leading whitespace.  (This behaviour is
    new in Python 2.5; older versions of this module incorrectly
    expanded tabs before searching for common leading whitespace.)
    """
    # Blank out lines that are nothing but spaces/tabs so they cannot
    # influence the common-margin computation below.
    text = re.sub('^[ \t]+$', '', text, flags=re.MULTILINE)
    # One leading-whitespace prefix per line that has visible content.
    prefixes = re.findall('(^[ \t]*)(?:[^ \t\n])', text, flags=re.MULTILINE)

    # Find the longest run of spaces/tabs shared by all content lines.
    margin = None
    for prefix in prefixes:
        if margin is None:
            margin = prefix
        elif prefix.startswith(margin):
            # Line indented at least as deeply as the current margin:
            # the margin stands.
            continue
        elif margin.startswith(prefix):
            # Shallower but consistent indentation: new margin.
            margin = prefix
        else:
            # No common whitespace at all (e.g. tabs vs spaces).
            margin = ""
            break

    if margin:
        text = re.sub(r'(?m)^' + margin, '', text)
    return text
# Manual smoke test: run this module directly (Python 2) to eyeball
# dedent()'s output.
if __name__ == "__main__":
    #print dedent("\tfoo\n\tbar")
    #print dedent(" \thello there\n \t  how are you?")
    print dedent("Hello there.\n  This is indented.")
|
zachmullen/boto | refs/heads/develop | boto/ecs/__init__.py | 153 | # Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.connection import AWSQueryConnection, AWSAuthConnection
from boto.exception import BotoServerError
import time
import urllib
import xml.sax
from boto.ecs.item import ItemSet
from boto import handler
class ECSConnection(AWSQueryConnection):
    """
    ECommerce Connection

    Thin client for the Amazon Product Advertising (ECS) API. For more
    information on how to use this module see:

    http://blog.coredumped.org/2010/09/search-for-books-on-amazon-using-boto.html
    """

    # Product Advertising API version stamped on every request.
    APIVersion = '2010-11-01'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com',
                 debug=0, https_connection_factory=None, path='/',
                 security_token=None, profile_name=None):
        # All arguments are forwarded verbatim to AWSQueryConnection; this
        # subclass only customizes the default host and the auth mechanism.
        super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
                                    host, debug, https_connection_factory, path,
                                    security_token=security_token,
                                    profile_name=profile_name)

    def _required_auth_capability(self):
        # Selects the ECS-specific request signer registered with boto.
        return ['ecs']

    def get_response(self, action, params, page=0, itemSet=None):
        """
        Utility method to handle calls to ECS and parsing of responses.

        :param action: API operation name, e.g. 'ItemSearch'.
        :param params: dict of query parameters; mutated in place to add
            Service/Operation (and ItemPage when paging).
        :param page: result page to request; 0 (falsy) omits ItemPage.
        :param itemSet: optional existing ItemSet to append results to,
            used when fetching subsequent pages.
        :raises BotoServerError: on a non-200 response, or when the parsed
            result set is not valid.
        """
        params['Service'] = "AWSECommerceService"
        params['Operation'] = action
        if page:
            params['ItemPage'] = page
        response = self.make_request(None, params, "/onca/xml")
        body = response.read().decode('utf-8')
        boto.log.debug(body)

        if response.status != 200:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise BotoServerError(response.status, response.reason, body)

        if itemSet is None:
            rs = ItemSet(self, action, params, page)
        else:
            rs = itemSet
        h = handler.XmlHandler(rs, self)
        # The SAX parser wants bytes, so re-encode the decoded body.
        xml.sax.parseString(body.encode('utf-8'), h)
        if not rs.is_valid:
            # NOTE(review): assumes the service always reports at least one
            # error entry when is_valid is False; otherwise this line raises
            # IndexError instead -- confirm against ItemSet.errors population.
            raise BotoServerError(response.status, '{Code}: {Message}'.format(**rs.errors[0]))
        return rs

    #
    # Group methods
    #

    def item_search(self, search_index, **params):
        """
        Returns items that satisfy the search criteria, including one or more search
        indices.

        For a full list of search terms,
        :see: http://docs.amazonwebservices.com/AWSECommerceService/2010-09-01/DG/index.html?ItemSearch.html
        """
        params['SearchIndex'] = search_index
        return self.get_response('ItemSearch', params)

    def item_lookup(self, **params):
        """
        Returns items that satisfy the lookup query.

        For a full list of parameters, see:
        http://s3.amazonaws.com/awsdocs/Associates/2011-08-01/prod-adv-api-dg-2011-08-01.pdf
        """
        return self.get_response('ItemLookup', params)
jhseu/tensorflow | refs/heads/master | tensorflow/python/autograph/core/converter.py | 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
[extends] gast.nodeTransformer
[uses] transfomer.SourceInfo
[uses] converter.EntityContext
[uses] converter.ProgramContext
[uses] transfomer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
  """This enumeration represents optional conversion options.

  These conversion options are experimental. They are subject to change without
  notice and offer no guarantees.

  _Example Usage_

  ```python
  optionals = tf.autograph.experimental.Feature.EQUALITY_OPERATORS
  @tf.function(experimental_autograph_options=optionals)
  def f(i):
    if i == 0:  # EQUALITY_OPERATORS allows the use of == here.
      tf.print('i is zero')
  ```

  Attributes:
    ALL: Enable all features.
    AUTO_CONTROL_DEPS: Insert of control dependencies in the generated code.
    ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
    BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
      their TF counterparts.
    EQUALITY_OPERATORS: Whether to convert the comparison operators, like
      equality. This is soon to be deprecated as support is being added to the
      Tensor class.
    LISTS: Convert list idioms, like initializers, slices, append, etc.
    NAME_SCOPES: Insert name scopes that name ops according to context, like the
      function they were defined in.
  """

  ALL = 'ALL'

  AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
  ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
  BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
  EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'
  LISTS = 'LISTS'
  NAME_SCOPES = 'NAME_SCOPES'

  @classmethod
  def all(cls):
    """Returns a tuple that enables all options."""
    # Iterating the enum class yields its members in definition order.
    return tuple(member for member in cls)

  @classmethod
  def all_but(cls, exclude):
    """Returns a tuple that enables all but the excluded options."""
    # Accept either a single Feature or any list/tuple/set of Features.
    if isinstance(exclude, (list, tuple, set)):
      excluded = set(exclude)
    else:
      excluded = {exclude}
    return tuple(set(cls.all()) - excluded - {cls.ALL})
STANDARD_OPTIONS = None  # Forward definition; assigned a ConversionOptions below.
class ConversionOptions(object):
  """Immutable container for global conversion flags.

  Attributes:
    recursive: bool, whether to recursively convert any user functions or
      classes that the converted function may use.
    user_requested: bool, whether the conversion was explicitly requested by
      the user, as opposed to being performed as a result of other logic. This
      value always auto-resets to False in child conversions.
    optional_features: Union[Feature, Set[Feature]], controls the use of
      optional features in the conversion process. See Feature for available
      options.
  """

  def __init__(self,
               recursive=False,
               user_requested=False,
               internal_convert_user_code=True,
               optional_features=Feature.ALL):
    self.recursive = recursive
    self.user_requested = user_requested
    # TODO(mdan): Rename to conversion_recursion_depth?
    self.internal_convert_user_code = internal_convert_user_code

    # Normalize optional_features (None, a single Feature, or a collection)
    # to a frozenset so this object stays hashable and comparable.
    if optional_features is None:
      optional_features = ()
    elif isinstance(optional_features, Feature):
      optional_features = (optional_features,)
    self.optional_features = frozenset(optional_features)

  def as_tuple(self):
    """Returns the flags as a plain tuple; basis for equality and hashing."""
    return (self.recursive, self.user_requested,
            self.internal_convert_user_code, self.optional_features)

  def __hash__(self):
    return hash(self.as_tuple())

  def __eq__(self, other):
    assert isinstance(other, ConversionOptions)
    return self.as_tuple() == other.as_tuple()

  def __str__(self):
    # Bug fix: the placeholder was previously never filled in, so __str__
    # always returned the literal string 'ConversionOptions[{}]'.
    return 'ConversionOptions[{}]'.format(self.as_tuple())

  def uses(self, feature):
    """Returns True if the given optional feature is enabled."""
    return (Feature.ALL in self.optional_features or
            feature in self.optional_features)

  def call_options(self):
    """Returns the corresponding options to be used for recursive conversion."""
    # user_requested always resets to False in child conversions; recursion
    # into user code continues only if recursive conversion was requested.
    return ConversionOptions(
        recursive=self.recursive,
        user_requested=False,
        internal_convert_user_code=self.recursive,
        optional_features=self.optional_features)

  def to_ast(self):
    """Returns a representation of this object as an AST node.

    The AST node encodes a constructor that would create an object with the
    same contents.

    Returns:
      ast.Node
    """
    # The standard options are interned as ag__.STD in generated code to keep
    # the emitted source short.
    if self == STANDARD_OPTIONS:
      return parser.parse_expression('ag__.STD')

    template = """
      ag__.ConversionOptions(
          recursive=recursive_val,
          user_requested=user_requested_val,
          optional_features=optional_features_val,
          internal_convert_user_code=internal_convert_user_code_val)
    """

    def list_of_features(values):
      # Renders e.g. "(ag__.Feature.LISTS, ag__.Feature.NAME_SCOPES)".
      return parser.parse_expression('({})'.format(', '.join(
          'ag__.{}'.format(str(v)) for v in values)))

    expr_ast = templates.replace(
        template,
        recursive_val=parser.parse_expression(str(self.recursive)),
        user_requested_val=parser.parse_expression(str(self.user_requested)),
        internal_convert_user_code_val=parser.parse_expression(
            str(self.internal_convert_user_code)),
        optional_features_val=list_of_features(self.optional_features))
    return expr_ast[0].value
# The options used when none are explicitly provided, and for the ag__.STD
# shorthand emitted by ConversionOptions.to_ast above.
STANDARD_OPTIONS = ConversionOptions(
    recursive=True,
    user_requested=False,
    internal_convert_user_code=True,
    optional_features=None)
class ProgramContext(
    collections.namedtuple('ProgramContext', ('options', 'autograph_module'))):
  """ProgramContext keeps track of converting function hierarchies.

  This object is mutable, and is updated during conversion. Not thread safe.

  Attributes:
    options: ConversionOptions
    autograph_module: Module, a reference to the autograph module. This needs to
      be specified by the caller to avoid circular dependencies.
  """
class EntityContext(transformer.Context):
  """Tracks the conversion of a single entity.

  This object is mutable, and is updated during conversion. Not thread safe.

  Attributes:
    namer: Namer
    info: transformer.EntityInfo
    program: ProgramContext
    target_name: Text, the name the converted entity will be bound to
  """

  def __init__(self, namer, entity_info, program_ctx, target_name=None):
    # transformer.Context handles the source-info bookkeeping.
    super(EntityContext, self).__init__(entity_info)
    self.namer = namer
    self.program = program_ctx
    self.target_name = target_name
class Base(transformer.Base):
  """All converters should inherit from this class.

  Converters are not reusable: visit() may be entered from the top level only
  once per instance.

  Attributes:
    ctx: EntityContext
  """

  def __init__(self, ctx):
    super(Base, self).__init__(ctx)
    # Set on the first top-level visit(); guards against converter reuse.
    self._used = False
    # Depth of nested visit() calls within a single traversal.
    self._ast_depth = 0

  def get_definition_directive(self, node, directive, arg, default):
    """Returns the unique directive argument for a symbol.

    See lang/directives.py for details on directives.

    Example:
      # Given a directive in the code:
      ag.foo_directive(bar, baz=1)

      # One can write for an AST node Name(id='bar'):
      get_definition_directive(node, ag.foo_directive, 'baz')

    Args:
      node: ast.AST, the node representing the symbol for which the directive
        argument is needed.
      directive: Callable[..., Any], the directive to search.
      arg: str, the directive argument to return.
      default: Any

    Returns:
      The value of `arg` for `directive` if every definition reaching `node`
      agrees on it, otherwise `default` when the directive/argument is absent.

    Raises:
      ValueError: if conflicting annotations have been found
    """
    # Reads the definitions snapshotted by the initial analysis pass
    # (anno.Static.ORIG_DEFINITIONS; see standard_analysis).
    defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
    if not defs:
      return default

    arg_values_found = []
    for def_ in defs:
      if (directive in def_.directives and arg in def_.directives[directive]):
        arg_values_found.append(def_.directives[directive][arg])

    if not arg_values_found:
      return default

    if len(arg_values_found) == 1:
      return arg_values_found[0]

    # If multiple annotations reach the symbol, they must all match. If they do,
    # return any of them.
    first_value = arg_values_found[0]
    for other_value in arg_values_found[1:]:
      if not ast_util.matches(first_value, other_value):
        qn = anno.getanno(node, anno.Basic.QN)
        raise ValueError(
            '%s has ambiguous annotations for %s(%s): %s, %s' %
            (qn, directive.__name__, arg, parser.unparse(other_value).strip(),
             parser.unparse(first_value).strip()))
    return first_value

  def visit(self, node):
    # Only the outermost call counts as a "use"; recursive visits made while
    # processing children share the same traversal.
    if not self._ast_depth:
      if self._used:
        raise ValueError('converter objects cannot be reused')
      self._used = True

    self._ast_depth += 1
    try:
      return super(Base, self).visit(node)
    finally:
      # Always unwind, even when visiting raised.
      self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
  """A reaching definition that also records the directives applying to it."""

  def __init__(self):
    super(AnnotatedDef, self).__init__()
    # Maps a directive callable to the {arg_name: value} kwargs collected for
    # it; read back by Base.get_definition_directive.
    self.directives = {}
def standard_analysis(node, context, is_initial=False):
  """Performs a complete static analysis of the given code.

  Runs, in order: CFG construction, qualified-name resolution, activity
  analysis, reaching definitions (using AnnotatedDef) and liveness.

  Args:
    node: ast.AST
    context: converter.EntityContext
    is_initial: bool, whether this is the initial analysis done on the input
      source code

  Returns:
    ast.AST, same as node, with the static analysis annotations added
  """
  # TODO(mdan): Clear static analysis here.
  # TODO(mdan): Consider not running all analyses every time.
  # TODO(mdan): Don't return a node because it's modified by reference.
  graphs = cfg.build(node)
  node = qual_names.resolve(node)
  node = activity.resolve(node, context, None)
  node = reaching_definitions.resolve(node, context, graphs, AnnotatedDef)
  node = liveness.resolve(node, context, graphs)
  if is_initial:
    # Snapshot the initial reaching definitions under ORIG_DEFINITIONS so
    # later passes (e.g. Base.get_definition_directive) can still read them
    # after DEFINITIONS is recomputed.
    anno.dup(
        node,
        {
            anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
        },
    )
  return node
def apply_(node, context, converter_module):
  """Applies a converter to an AST.

  Args:
    node: ast.AST
    context: converter.EntityContext
    converter_module: converter.Base

  Returns:
    ast.AST, the result of applying converter to node
  """
  # Converters expect the standard analysis annotations to be present.
  node = standard_analysis(node, context)
  node = converter_module.transform(node, context)
  return node
|
sloe/analyseapp | refs/heads/analyseapp | models/db.py | 1 | # -*- coding: utf-8 -*-
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
## File is released under public domain and you can use without limitations
#########################################################################
## if SSL/HTTPS is properly configured and you want all HTTP requests to
## be redirected to HTTPS, uncomment the line below:
# request.requires_https()
## web2py model file: executed per-request with the framework globals
## (request, response, session, DAL, settings, ...) already in scope.
if not request.env.web2py_runtime_gae:
    ## if NOT running on Google App Engine use SQLite or other DB
    db = DAL('sqlite://storage.sqlite',pool_size=1,check_reserved=['all'])
else:
    ## connect to Google BigTable (optional 'google:datastore://namespace')
    db = DAL('google:datastore+ndb')
    ## store sessions and tickets there
    session.connect(request, response, db=db)
    ## or store session in Memcache, Redis, etc.
    ## from gluon.contrib.memdb import MEMDB
    ## from google.appengine.api.memcache import Client
    ## session.connect(request, response, db = MEMDB(Client()))

## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
## (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
## (optional) static assets folder versioning
# response.static_version = '0.0.0'
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - old style crud actions
## (more options discussed in gluon/tools.py)
#########################################################################

from gluon.tools import Auth, Service, PluginManager
auth = Auth(db)
service = Service()
plugins = PluginManager()

## create all tables needed by auth if not custom tables
auth.define_tables(username=False, signature=False)

## configure email
## NOTE(review): these three values are overridden at the bottom of this file
## by the settings.email_* block -- keep them in sync or remove one of the two.
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else 'smtp.gmail.com:587'
mail.settings.sender = 'you@gmail.com'
mail.settings.login = 'username:password'

## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True

## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, write your domain:api_key in private/janrain.key
## NOTE(review): called unconditionally; presumably a no-op when the key file
## is absent -- verify against gluon.contrib.login_methods.janrain_account.
from gluon.contrib.login_methods.janrain_account import use_janrain
use_janrain(auth, filename='private/janrain.key')

#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
##       'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id integer autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################

## after defining tables, uncomment below to enable auditing
# auth.enable_record_versioning(db)

## NOTE(review): `settings` is not defined in this file; web2py executes model
## files in alphabetical order, so it must be defined by an earlier model
## (e.g. 0.py) -- otherwise these three lines raise NameError at request time.
mail.settings.server = settings.email_server
mail.settings.sender = settings.email_sender
mail.settings.login = settings.email_login
|
ZombieAlex/MFCAuto | refs/heads/master | src/test/dumpSchemaFromPacketLog.py | 1 | """
After running packetInspector.js for however long, you'll have
a packetLog.txt file with a ton of traffic. This script will
read in that log and dump a JSON schema that correctly validates
all the sMessage payloads of the packets received in the log.
Requires genson (https://github.com/wolverdude/genson/)
pip install genson
"""
import sys
import json
from genson import Schema
if __name__ == "__main__":
    # Maps FCType name -> genson Schema accumulating every payload seen for
    # that packet type.
    fctypeMap = {}
    with open("packetLog.txt", encoding="utf-8") as log:
        for line in log:
            # Strip off the timestamp and filename prefix (fixed 38 columns)
            # that packetInspector.js writes before the JSON payload.
            packet = json.loads(line[38:])
            if (packet["FCType"] == "MANAGELIST"
                    or packet["FCType"] == "TAGS"
                    or packet["FCType"] == "ROOMDATA"):
                # These packet types are too diverse
                # or overly unique and generate schemas
                # that are not particularly useful
                continue
            schema = fctypeMap.setdefault(packet["FCType"], Schema())
            # Packets without an sMessage payload still contribute null to
            # the schema, matching the original setdefault behavior.
            schema.add_object(packet.get("sMessage"))

    # Build the combined document as a real dict instead of splicing JSON
    # text together. The old string concatenation emitted invalid JSON
    # (a lone "}") whenever no packets survived the filter above.
    full_schema = {fctype: json.loads(schema.to_json())
                   for fctype, schema in fctypeMap.items()}
    with open("packetLogSchema.json", "w") as output:
        output.write(json.dumps(full_schema, sort_keys=True, indent=4))
|
arnavd96/Cinemiezer | refs/heads/master | myvenv/lib/python3.4/site-packages/ratelimit/models.py | 2 | # This module intentionally left blank.
|
jarshwah/django | refs/heads/master | tests/postgres_tests/migrations/0002_create_test_models.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.serializers.json import DjangoJSONEncoder
from django.db import migrations, models
from ..fields import (
ArrayField, BigIntegerRangeField, DateRangeField, DateTimeRangeField,
FloatRangeField, HStoreField, IntegerRangeField, JSONField,
SearchVectorField,
)
from ..models import TagField
class Migration(migrations.Migration):
    """Creates the models used by the postgres_tests suite.

    Models using contrib.postgres-specific fields are restricted to the
    PostgreSQL backend via Meta's required_db_vendor / required_db_features.
    """

    dependencies = [
        ('postgres_tests', '0001_setup_extensions'),
    ]

    operations = [
        migrations.CreateModel(
            name='CharArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', ArrayField(models.CharField(max_length=10), size=None)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='DateTimeArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('datetimes', ArrayField(models.DateTimeField(), size=None)),
                ('dates', ArrayField(models.DateField(), size=None)),
                ('times', ArrayField(models.TimeField(), size=None)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HStoreModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', HStoreField(blank=True, null=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OtherTypesArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ips', ArrayField(models.GenericIPAddressField(), size=None)),
                ('uuids', ArrayField(models.UUIDField(), size=None)),
                ('decimals', ArrayField(models.DecimalField(max_digits=5, decimal_places=2), size=None)),
                ('tags', ArrayField(TagField(), blank=True, null=True, size=None)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='IntegerArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', ArrayField(models.IntegerField(), size=None)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='NestedIntegerArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', ArrayField(ArrayField(models.IntegerField(), size=None), size=None)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='NullableIntegerArrayModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', ArrayField(models.IntegerField(), size=None, null=True, blank=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CharFieldModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', models.CharField(max_length=16)),
            ],
            options=None,
            bases=None,
        ),
        migrations.CreateModel(
            name='TextFieldModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', models.TextField()),
            ],
            options=None,
            bases=None,
        ),
        migrations.CreateModel(
            name='Scene',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('scene', models.CharField(max_length=255)),
                ('setting', models.CharField(max_length=255)),
            ],
            options=None,
            bases=None,
        ),
        migrations.CreateModel(
            name='Character',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options=None,
            bases=None,
        ),
        # NOTE(review): these FKs use on_delete=SET_NULL without null=True;
        # deleting a Scene/Character would fail at runtime -- confirm the
        # suite never exercises deletion on these models.
        migrations.CreateModel(
            name='Line',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('scene', models.ForeignKey('postgres_tests.Scene', on_delete=models.SET_NULL)),
                ('character', models.ForeignKey('postgres_tests.Character', on_delete=models.SET_NULL)),
                ('dialogue', models.TextField(blank=True, null=True)),
                ('dialogue_search_vector', SearchVectorField(blank=True, null=True)),
                ('dialogue_config', models.CharField(max_length=100, blank=True, null=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=None,
        ),
        migrations.CreateModel(
            name='AggregateTestModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('boolean_field', models.NullBooleanField()),
                ('char_field', models.CharField(max_length=30, blank=True)),
                ('integer_field', models.IntegerField(null=True)),
            ]
        ),
        migrations.CreateModel(
            name='StatTestModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('int1', models.IntegerField()),
                ('int2', models.IntegerField()),
                ('related_field', models.ForeignKey(
                    'postgres_tests.AggregateTestModel',
                    models.SET_NULL,
                    null=True,
                )),
            ]
        ),
        migrations.CreateModel(
            name='NowTestModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('when', models.DateTimeField(null=True, default=None)),
            ]
        ),
        migrations.CreateModel(
            name='RangesModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ints', IntegerRangeField(null=True, blank=True)),
                ('bigints', BigIntegerRangeField(null=True, blank=True)),
                ('floats', FloatRangeField(null=True, blank=True)),
                ('timestamps', DateTimeRangeField(null=True, blank=True)),
                ('dates', DateRangeField(null=True, blank=True)),
            ],
            options={
                'required_db_vendor': 'postgresql'
            },
            bases=(models.Model,)
        ),
        # NOTE: no explicit 'id' field here; Django adds the AutoField pk.
        migrations.CreateModel(
            name='RangeLookupsModel',
            fields=[
                ('parent', models.ForeignKey(
                    'postgres_tests.RangesModel',
                    models.SET_NULL,
                    blank=True, null=True,
                )),
                ('integer', models.IntegerField(blank=True, null=True)),
                ('big_integer', models.BigIntegerField(blank=True, null=True)),
                ('float', models.FloatField(blank=True, null=True)),
                ('timestamp', models.DateTimeField(blank=True, null=True)),
                ('date', models.DateField(blank=True, null=True)),
            ],
            options={
                'required_db_vendor': 'postgresql',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='JSONModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('field', JSONField(null=True, blank=True)),
                ('field_custom', JSONField(null=True, blank=True, encoder=DjangoJSONEncoder)),
            ],
            options={
                'required_db_features': {'has_jsonb_datatype'},
            },
            bases=(models.Model,),
        ),
    ]
|
smunix/ns-3-rfid | refs/heads/master | bindings/python/ns3modulegen.py | 199 |
# Extra API-definition modules to merge into the generated bindings;
# developers list module names here as needed.
LOCAL_MODULES = [
    #'my_extra_api_definitions',
    ]

import sys
import os
# sys.argv[2] must be prepended to the path before importing pybindgen;
# presumably the pybindgen source directory -- confirm with the build scripts.
sys.path.insert(0, sys.argv[2])

from pybindgen import FileCodeSink, write_preamble
from pybindgen.module import MultiSectionFactory
import pybindgen.settings
pybindgen.settings.deprecated_virtuals = False

from ns3modulegen_generated import module_init, register_types, register_methods, register_functions
import ns3modulegen_core_customizations
import callbacks_list

import traceback

# Used by ErrorHandler to report the bindings-relative location of failures.
this_script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
class ErrorHandler(pybindgen.settings.ErrorHandler):
    # NOTE: Python 2 file (print >> statements); it will not parse under Python 3.
    def handle_error(self, wrapper, exception, traceback_):
        """Logs a wrapper-generation failure and returns True to skip the wrapper."""
        print >> sys.stderr
        print >> sys.stderr, "---- location:"
        traceback.print_stack()
        print >> sys.stderr, "---- error:"
        traceback.print_tb(traceback_)
        try:
            stack = wrapper.stack_where_defined
        except AttributeError:
            # Wrapper carries no definition location; report what we have.
            print >> sys.stderr, "??:??: %s / %r" % (wrapper, exception)
        else:
            # Walk the definition stack from innermost to outermost and report
            # the first frame that lives under this bindings directory.
            stack = list(stack)
            stack.reverse()
            for (filename, line_number, function_name, text) in stack:
                file_dir = os.path.dirname(os.path.abspath(filename))
                if file_dir.startswith(this_script_dir):
                    print >> sys.stderr, "%s:%i: %r" % (os.path.join("..", "bindings", "python", os.path.basename(filename)),
                                                        line_number, exception)
                    break
        # True tells pybindgen to skip the failing wrapper and continue.
        return True
# Install the lenient error handler and a wrapper registry shared across
# the generated sections.
pybindgen.settings.error_handler = ErrorHandler()
pybindgen.settings.wrapper_registry = pybindgen.settings.StdMapWrapperRegistry
class MyMultiSectionFactory(MultiSectionFactory):
    """Splits the generated bindings into one .cc file per ns-3 module,
    plus a main file and a shared precompiled header (pch/ns3module.h)."""

    def __init__(self, main_file_name, modules):
        super(MyMultiSectionFactory, self).__init__()
        self.main_file_name = main_file_name
        self.main_sink = FileCodeSink(open(main_file_name, "wt"))
        self.header_name = "ns3module.h"
        # The common header lives in a pch/ subdirectory next to the main file.
        header_file_name = os.path.join(os.path.dirname(self.main_file_name), 'pch', self.header_name)
        self.header_sink = FileCodeSink(open(header_file_name, "wt"))
        self.section_sinks = {'__main__': self.main_sink}

        # One section (and output file) per module, e.g. ns3_module_core.cc.
        for module in modules:
            section_name = 'ns3_module_%s' % module.replace('-', '_')
            file_name = os.path.join(os.path.dirname(self.main_file_name), "%s.cc" % section_name)
            sink = FileCodeSink(open(file_name, "wt"))
            self.section_sinks[section_name] = sink

    def get_section_code_sink(self, section_name):
        return self.section_sinks[section_name]

    def get_main_code_sink(self):
        return self.main_sink

    def get_common_header_code_sink(self):
        return self.header_sink

    def get_common_header_include(self):
        return '"%s"' % self.header_name

    def close(self):
        # Flush the header and main file, then every per-module section.
        # NOTE: Python 2 dict.itervalues().
        self.header_sink.file.close()
        self.main_sink.file.close()
        for sink in self.section_sinks.itervalues():
            sink.file.close()
def main():
    """Drives pybindgen code generation for the ns-3 Python bindings.

    argv[1] is the main output .cc file, argv[2] the pybindgen path and
    argv[3:] the list of ns-3 modules to generate sections for. The set of
    enabled optional features is read from NS3_ENABLED_FEATURES.
    """
    out = MyMultiSectionFactory(sys.argv[1], sys.argv[3:])
    root_module = module_init()
    root_module.add_include('"everything.h"')
    register_types(root_module)

    # Hand-written customizations must run between type registration and
    # method registration so the callback classes exist when needed.
    ns3modulegen_core_customizations.Simulator_customizations(root_module)
    ns3modulegen_core_customizations.CommandLine_customizations(root_module)
    ns3modulegen_core_customizations.TypeId_customizations(root_module)
    ns3modulegen_core_customizations.add_std_ofstream(root_module)
    ns3modulegen_core_customizations.add_ipv4_address_tp_hash(root_module)

    for local_module in LOCAL_MODULES:
        mod = __import__(local_module)
        mod.register_types(root_module)

    ns3modulegen_core_customizations.generate_callback_classes(root_module.after_forward_declarations,
                                                               callbacks_list.callback_classes)

    register_methods(root_module)

    for local_module in LOCAL_MODULES:
        mod = __import__(local_module)
        mod.register_methods(root_module)

    ns3modulegen_core_customizations.Object_customizations(root_module)
    ns3modulegen_core_customizations.Attribute_customizations(root_module)

    register_functions(root_module)

    for local_module in LOCAL_MODULES:
        mod = __import__(local_module)
        mod.register_functions(root_module)

    # Prune wrappers for classes whose underlying feature was disabled at
    # configure time, so the generated code still compiles.
    enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',')

    # if GtkConfigStore support is disabled, disable the class wrapper
    if 'GtkConfigStore' not in enabled_features:
        try:
            root_module.classes.remove(root_module['ns3::GtkConfigStore'])
        except KeyError:
            pass

    # if no sqlite, the class SqliteDataOutput is disabled
    if 'SqliteDataOutput' not in enabled_features:
        try:
            root_module.classes.remove(root_module['ns3::SqliteDataOutput'])
        except KeyError:
            pass

    if 'Threading' not in enabled_features:
        for clsname in ['SystemThread', 'SystemMutex', 'SystemCondition', 'CriticalSection',
                        'SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >']:
            root_module.classes.remove(root_module['ns3::%s' % clsname])

    if 'EmuNetDevice' not in enabled_features:
        for clsname in ['EmuNetDevice', 'EmuHelper']:
            root_module.classes.remove(root_module['ns3::%s' % clsname])
        root_module.enums.remove(root_module['ns3::EmuNetDevice::EncapsulationMode'])

    if 'RealTime' not in enabled_features:
        for clsname in ['WallClockSynchronizer', 'RealtimeSimulatorImpl']:
            root_module.classes.remove(root_module['ns3::%s' % clsname])
        root_module.enums.remove(root_module['ns3::RealtimeSimulatorImpl::SynchronizationMode'])

    if 'TapBridge' not in enabled_features:
        for clsname in ['TapBridge', 'TapBridgeHelper', 'TapBridgeFdReader']:
            root_module.classes.remove(root_module['ns3::%s' % clsname])
        root_module.enums.remove(root_module['ns3::TapBridge::Mode'])

    root_module.generate(out, '_ns3')

    out.close()
if __name__ == '__main__':
    # Flip the 0 to 1 to run generation under cProfile (Python 2 file).
    if 0:
        try:
            import cProfile as profile
        except ImportError:
            main()
        else:
            print >> sys.stderr, "** running under profiler"
            profile.run('main()', 'ns3modulegen.pstat')
    else:
        main()
|
ahmetabdi/SickRage | refs/heads/master | lib/fanart/items.py | 61 | import json
import os
import requests
from fanart.core import Request
from fanart.immutable import Immutable
class LeafItem(Immutable):
    """A single fanart resource (one image) with its id, URL and like count."""

    # Subclasses set this to the JSON key that holds their items.
    KEY = NotImplemented

    @Immutable.mutablemethod
    def __init__(self, id, url, likes):
        self.id = int(id)
        self.url = url
        self.likes = int(likes)
        self._content = None  # lazily fetched image bytes

    @classmethod
    def from_dict(cls, resource):
        """Build an instance from a JSON mapping (keys coerced to str)."""
        kwargs = {}
        for key, value in resource.iteritems():
            kwargs[str(key)] = value
        return cls(**kwargs)

    @classmethod
    def extract(cls, resource):
        """Collect every item stored under ``cls.KEY`` in *resource*."""
        entries = resource.get(cls.KEY, {})
        return [cls.from_dict(entry) for entry in entries]

    @Immutable.mutablemethod
    def content(self):
        """Download (once) and return the raw image bytes."""
        if not self._content:
            self._content = requests.get(self.url).content
        return self._content

    def __str__(self):
        return self.url
class ResourceItem(Immutable):
    """A top-level fanart API resource fetched by id from a web-service endpoint."""

    # Web-service endpoint name; concrete subclasses must override it.
    WS = NotImplemented
    # Request class used to talk to the API; swappable (e.g. for testing).
    request_cls = Request

    @classmethod
    def from_dict(cls, map):
        """Build an instance from the decoded API response (subclass hook)."""
        raise NotImplementedError

    @classmethod
    def get(cls, id):
        """Fetch resource *id* from the fanart web service.

        The API key is read from the ``FANART_APIKEY`` environment variable.
        """
        request = cls.request_cls(
            apikey=os.environ.get('FANART_APIKEY'),
            id=id,
            ws=cls.WS,
        )
        return cls.from_dict(request.response())

    def json(self, **kw):
        """Serialize public (non-underscore) attributes to a JSON string."""
        def public_attrs(obj):
            return dict((k, v) for k, v in obj.__dict__.items()
                        if not k.startswith('_'))
        return json.dumps(self, default=public_attrs, **kw)
class CollectableItem(Immutable):
    """An item keyed by name inside a mapping of the API response."""

    @classmethod
    def from_dict(cls, key, map):
        """Build an instance from one ``(key, mapping)`` pair (subclass hook)."""
        raise NotImplementedError

    @classmethod
    def collection_from_dict(cls, map):
        """Build one instance per entry of *map*."""
        items = []
        for key, value in map.iteritems():
            items.append(cls.from_dict(key, value))
        return items
|
zerosign/mig | refs/heads/master | tools/client.py | 16 | #!/usr/bin/env python
import os
import sys
import gnupg
from time import gmtime, strftime
import random
import requests
import json
def makeToken(gpghome, keyid):
    """Build a signed X-PGPAUTHORIZATION token: ``version;timestamp;nonce;sig``.

    The PGP armor's first three lines (header), dashed delimiter lines and
    blank lines are stripped, keeping only the base64 signature body.
    """
    gpg = gnupg.GPG(gnupghome=gpghome)
    stamp = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
    nonce = str(random.randint(10000, 18446744073709551616))
    token = ";".join(["1", stamp, nonce])
    sig = gpg.sign(token + "\n",
                   keyid=keyid,
                   detach=True, clearsign=True)
    token += ";"
    for lineno, line in enumerate(str(sig).splitlines(), 1):
        # Skip the armor header (lines 1-3), delimiters and blanks.
        if lineno < 4 or line.startswith('-') or not line:
            continue
        token += line
    return token
if __name__ == '__main__':
    # Sign a token with the local GnuPG key, then call the MIG API endpoint
    # given on the command line, authenticating via the X-PGPAUTHORIZATION
    # header.  NOTE(review): key path and fingerprint are hard-coded.
    token = makeToken("/home/ulfr/.gnupg", "E60892BB9BD89A69F759A1A0A3D652173B763E8F")
    r = requests.get(sys.argv[1],
                     headers={'X-PGPAUTHORIZATION': token},
                     verify=True)
    if r.status_code == 200:
        # Pretty-print the successful JSON response (Python 2 print statement).
        print json.dumps(r.json(), sort_keys=True, indent=4, separators=(',', ': '))
    elif r.status_code == 500:
        print r.json()
        # api returns a 500 with an error body on failures
        migjson=r.json()
        raise Exception("API returned HTTP code %s and error '%s:%s'" %
            (r.status_code,
             migjson['collection']['error']['code'],
             migjson['collection']['error']['message'])
        )
    else:
        # another type of failure that's unlikely to have an error body
        raise Exception("Failed with HTTP code %s" % r.status_code)
|
adambrenecki/django | refs/heads/master | django/db/models/sql/expressions.py | 4 | import copy
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
class SQLEvaluator(object):
def __init__(self, expression, query, allow_joins=True, reuse=None):
self.expression = expression
self.opts = query.get_meta()
self.reuse = reuse
self.cols = []
self.expression.prepare(self, query, allow_joins)
def relabeled_clone(self, change_map):
clone = copy.copy(self)
clone.cols = []
for node, col in self.cols:
if hasattr(col, 'relabeled_clone'):
clone.cols.append((node, col.relabeled_clone(change_map)))
else:
clone.cols.append((node,
(change_map.get(col[0], col[0]), col[1])))
return clone
def get_cols(self):
cols = []
for node, col in self.cols:
if hasattr(node, 'get_cols'):
cols.extend(node.get_cols())
elif isinstance(col, tuple):
cols.append(col)
return cols
def prepare(self):
return self
def as_sql(self, qn, connection):
return self.expression.evaluate(self, qn, connection)
#####################################################
# Vistor methods for initial expression preparation #
#####################################################
def prepare_node(self, node, query, allow_joins):
for child in node.children:
if hasattr(child, 'prepare'):
child.prepare(self, query, allow_joins)
def prepare_leaf(self, node, query, allow_joins):
if not allow_joins and LOOKUP_SEP in node.name:
raise FieldError("Joined field references are not permitted in this query")
field_list = node.name.split(LOOKUP_SEP)
if node.name in query.aggregates:
self.cols.append((node, query.aggregate_select[node.name]))
else:
try:
field, sources, opts, join_list, path = query.setup_joins(
field_list, query.get_meta(),
query.get_initial_alias(), self.reuse)
targets, _, join_list = query.trim_joins(sources, join_list, path)
if self.reuse is not None:
self.reuse.update(join_list)
for t in targets:
self.cols.append((node, (join_list[-1], t.column)))
except FieldDoesNotExist:
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (self.name,
[f.name for f in self.opts.fields]))
##################################################
# Vistor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
expressions = []
expression_params = []
for child in node.children:
if hasattr(child, 'evaluate'):
sql, params = child.evaluate(self, qn, connection)
else:
sql, params = '%s', (child,)
if len(getattr(child, 'children', [])) > 1:
format = '(%s)'
else:
format = '%s'
if sql:
expressions.append(format % sql)
expression_params.extend(params)
return connection.ops.combine_expression(node.connector, expressions), expression_params
def evaluate_leaf(self, node, qn, connection):
col = None
for n, c in self.cols:
if n is node:
col = c
break
if col is None:
raise ValueError("Given node not found")
if hasattr(col, 'as_sql'):
return col.as_sql(qn, connection)
else:
return '%s.%s' % (qn(col[0]), qn(col[1])), []
def evaluate_date_modifier_node(self, node, qn, connection):
timedelta = node.children.pop()
sql, params = self.evaluate_node(node, qn, connection)
if timedelta.days == 0 and timedelta.seconds == 0 and \
timedelta.microseconds == 0:
return sql, params
return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
|
camradal/ansible-modules-extras | refs/heads/devel | cloud/webfaction/webfaction_site.py | 62 | #!/usr/bin/python
#
# Create Webfaction website using Ansible and the Webfaction API
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: webfaction_site
short_description: Add or remove a website on a Webfaction host
description:
- Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the website
required: true
state:
description:
- Whether the website should exist
required: false
choices: ['present', 'absent']
default: "present"
host:
description:
- The webfaction host on which the site should be created.
required: true
https:
description:
- Whether or not to use HTTPS
required: false
choices:
- true
- false
default: 'false'
site_apps:
description:
- A mapping of URLs to apps
required: false
subdomains:
description:
- A list of subdomains associated with this site.
required: false
default: null
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: create website
webfaction_site:
name: testsite1
state: present
host: myhost.webfaction.com
subdomains:
- 'testsite1.my_domain.org'
site_apps:
- ['testapp1', '/']
https: no
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
'''
import socket
import xmlrpclib
# Module-level XML-RPC proxy for the Webfaction API, shared by main().
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
    """Create or delete a Webfaction website via the XML-RPC API.

    Idempotent: exits with changed=False when the requested state already
    holds.  Fails if a site of the same name exists on a different IP.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(required=False, choices=['present', 'absent'], default='present'),
            # You can specify an IP address or hostname.
            host = dict(required=True),
            https = dict(required=False, type='bool', default=False),
            subdomains = dict(required=False, type='list', default=[]),
            site_apps = dict(required=False, type='list', default=[]),
            login_name = dict(required=True),
            login_password = dict(required=True),
        ),
        supports_check_mode=True
    )
    site_name = module.params['name']
    site_state = module.params['state']
    site_host = module.params['host']
    # The API identifies hosts by IP, so resolve the (possibly DNS) name once.
    site_ip = socket.gethostbyname(site_host)
    session_id, account = webfaction.login(
        module.params['login_name'],
        module.params['login_password']
    )
    site_list = webfaction.list_websites(session_id)
    # Index existing sites by name for an O(1) existence check.
    site_map = dict([(i['name'], i) for i in site_list])
    existing_site = site_map.get(site_name)
    result = {}
    # Here's where the real stuff happens
    if site_state == 'present':
        # Does a site with this name already exist?
        if existing_site:
            # If yes, but it's on a different IP address, then fail.
            # If we wanted to allow relocation, we could add a 'relocate=true' option
            # which would get the existing IP address, delete the site there, and create it
            # at the new address. A bit dangerous, perhaps, so for now we'll require manual
            # deletion if it's on another host.
            if existing_site['ip'] != site_ip:
                module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
            # If it's on this host and the key parameters are the same, nothing needs to be done.
            if (existing_site['https'] == module.boolean(module.params['https'])) and \
               (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
               (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
                module.exit_json(
                    changed = False
                )
        positional_args = [
            session_id, site_name, site_ip,
            module.boolean(module.params['https']),
            module.params['subdomains'],
        ]
        for a in module.params['site_apps']:
            positional_args.append( (a[0], a[1]) )
        if not module.check_mode:
            # If this isn't a dry run, create or modify the site
            result.update(
                webfaction.create_website(
                    *positional_args
                ) if not existing_site else webfaction.update_website (
                    *positional_args
                )
            )
    elif site_state == 'absent':
        # If the site's already not there, nothing changed.
        if not existing_site:
            module.exit_json(
                changed = False,
            )
        if not module.check_mode:
            # If this isn't a dry run, delete the site
            result.update(
                webfaction.delete_website(session_id, site_name, site_ip)
            )
    else:
        module.fail_json(msg="Unknown state specified: {}".format(site_state))
    module.exit_json(
        changed = True,
        result = result
    )
# Ansible (pre-2.1) module boilerplate: the wildcard import injects
# AnsibleModule and helpers, then the module is executed unconditionally.
from ansible.module_utils.basic import *
main()
|
codedsk/hubcheck | refs/heads/master | hubcheck/pageobjects/m_osr_1_2_0.py | 1 | # represents hubs updated around November 2013
# load default page object, widgets, and locators
from m_osr_1_1_5 import *
#from m_overrides_group_wiki_editors import *
# load page object overrides
from po_admin_base_page import AdminBasePage
from po_admin_database_backup_page import AdminDatabaseBackupPage
from po_admin_database_list_page import AdminDatabaseListPage
from po_admin_database_table_list_page import AdminDatabaseTableListPage
from po_admin_database_table_manage_page import AdminDatabaseTableManagePage
# load widget overrides
from widgets.admin_database_backup import AdminDatabaseBackup1 as AdminDatabaseBackup
from widgets.admin_database_list import AdminDatabaseList1 as AdminDatabaseList
from widgets.admin_database_list_item import AdminDatabaseListItem1 as AdminDatabaseListItem
from widgets.admin_database_table_list import AdminDatabaseTableList1 as AdminDatabaseTableList
from widgets.admin_database_table_list_item import AdminDatabaseTableListItem1 as AdminDatabaseTableListItem
from widgets.admin_database_table_manage_batch_update import AdminDatabaseTableManageBatchUpdate1 as AdminDatabaseTableManageBatchUpdate
from widgets.admin_header import AdminHeader1 as AdminHeader
from widgets.admin_login import AdminLogin1 as AdminLogin
from widgets.admin_menu import AdminMenu1 as AdminMenu
from widgets.groups_wiki_new_form import GroupsWikiNewForm3 as GroupsWikiNewForm
from widgets.groups_wiki_edit_form import GroupsWikiEditForm3 as GroupsWikiEditForm
from widgets.header import Header2 as Header
from widgets.tags_browse_results_row import TagsBrowseResultsRow2 as TagsBrowseResultsRow
from widgets.upload3 import Upload3
from widgets.upload_list_row import UploadListRow
# load page object locator overrides
from po_admin_base_page import AdminBasePage_Locators_Base as AdminBasePage_Locators
from po_admin_database_backup_page import AdminDatabaseBackupPage_Locators_Base as AdminDatabaseBackupPage_Locators
from po_admin_database_list_page import AdminDatabaseListPage_Locators_Base as AdminDatabaseListPage_Locators
from po_admin_database_table_list_page import AdminDatabaseTableListPage_Locators_Base as AdminDatabaseTableListPage_Locators
from po_admin_database_table_manage_page import AdminDatabaseTableManagePage_Locators_Base as AdminDatabaseTableManagePage_Locators
from po_generic_page import GenericPage_Locators_Base_1 as GenericPage_Locators
from po_login import LoginPage1_Locators_Base_3 as LoginPage_Locators
from po_wishlistsearch import WishlistSearchPage_Locators_Base_2 as WishlistSearchPage_Locators
# load widget locator overrides
from widgets.admin_database_backup import AdminDatabaseBackup1_Locators_Base_1 as AdminDatabaseBackup_Locators
from widgets.admin_database_list import AdminDatabaseList1_Locators_Base_1 as AdminDatabaseList_Locators
from widgets.admin_database_list_item import AdminDatabaseListItem1_Locators_Base_1 as AdminDatabaseListItem_Locators
from widgets.admin_database_table_list import AdminDatabaseTableList1_Locators_Base_1 as AdminDatabaseTableList_Locators
from widgets.admin_database_table_list_item import AdminDatabaseTableListItem1_Locators_Base_1 as AdminDatabaseTableListItem_Locators
from widgets.admin_database_table_manage_batch_update import AdminDatabaseTableManageBatchUpdate1_Locators_Base_1 as AdminDatabaseTableManageBatchUpdate_Locators
from widgets.admin_header import AdminHeader1_Locators_Base_1 as AdminHeader_Locators
from widgets.admin_login import AdminLogin1_Locators_Base_2 as AdminLogin_Locators
from widgets.admin_menu import AdminMenu1_Locators_Base_1 as AdminMenu_Locators
from widgets.groups import Groups2_Locators_Base_2 as Groups_Locators
from widgets.groups_wiki_new_form import GroupsWikiNewForm3_Locators_Base as GroupsWikiNewForm_Locators
from widgets.groups_wiki_edit_form import GroupsWikiEditForm3_Locators_Base as GroupsWikiEditForm_Locators
from widgets.header import Header2_Locators_Base_2 as Header_Locators
from widgets.login_base import Login_Locators_Base_3 as Login_Locators
from widgets.tags import Tags_Locators_Base_2 as Tags_Locators
from widgets.tags_browse_results_row import TagsBrowseResultsRow2_Locators_Base_1 as TagsBrowseResultsRow_Locators
from widgets.ticket_comment import TicketComment_Locators_Base_3 as TicketComment_Locators
from widgets.ticket_comment_form import TicketCommentForm_Locators_Base_2 as TicketCommentForm_Locators
from widgets.upload3 import Upload3_Locators_Base as Upload3_Locators
from widgets.upload_list_row import UploadListRow_Locators_Base as UploadListRow_Locators
from widgets.wishlist_search_form import WishlistSearchForm_Locators_Base_2 as WishlistSearchForm_Locators
|
evernym/plenum | refs/heads/master | plenum/server/request_handlers/state_constants.py | 2 | MARKER_TAA = "2"
MARKER_TAA_AML = "3"
|
ShashaQin/erpnext | refs/heads/develop | erpnext/stock/doctype/stock_entry/test_stock_entry.py | 6 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, unittest
import frappe.defaults
from frappe.utils import flt, nowdate, nowtime
from erpnext.stock.doctype.serial_no.serial_no import *
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt \
import set_perpetual_inventory
from erpnext.stock.doctype.stock_ledger_entry.stock_ledger_entry import StockFreezeError
from erpnext.stock.stock_ledger import get_previous_sle
from erpnext.stock.doctype.stock_reconciliation.test_stock_reconciliation import create_stock_reconciliation
from frappe.tests.test_permissions import set_user_permission_doctypes
def get_sle(**args):
    """Return the most recent Stock Ledger Entry matching the given filters."""
    clauses = []
    values = []
    for fieldname, value in args.iteritems():
        clauses.append("`{0}`=%s".format(fieldname))
        values.append(value)
    condition = " where " + " and ".join(clauses) if clauses else ""
    return frappe.db.sql("""select * from `tabStock Ledger Entry` %s
        order by timestamp(posting_date, posting_time) desc, name desc limit 1""" % condition,
        values, as_dict=1)
class TestStockEntry(unittest.TestCase):
def tearDown(self):
    # Reset the session user and undo per-test permission/inventory tweaks.
    frappe.set_user("Administrator")
    set_perpetual_inventory(0)
    for role in ("Stock User", "Sales User"):
        set_user_permission_doctypes(doctype="Stock Entry", role=role,
            apply_user_permissions=0, user_permission_doctypes=None)

def test_fifo(self):
    # FIFO valuation: the stock queue must track qty/rate pairs through
    # positive, negative and recovering stock movements.
    frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
    item_code = "_Test Item 2"
    warehouse = "_Test Warehouse - _TC"
    create_stock_reconciliation(item_code="_Test Item 2", warehouse="_Test Warehouse - _TC",
        qty=0, rate=100)
    make_stock_entry(item_code=item_code, target=warehouse, qty=1, basic_rate=10)
    sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
    # NOTE(review): eval() on the DB-stored stock_queue — trusted data here,
    # but json.loads would be safer.
    self.assertEqual([[1, 10]], eval(sle.stock_queue))
    # negative qty
    make_stock_entry(item_code=item_code, source=warehouse, qty=2, basic_rate=10)
    sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
    self.assertEqual([[-1, 10]], eval(sle.stock_queue))
    # further negative
    make_stock_entry(item_code=item_code, source=warehouse, qty=1)
    sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
    self.assertEqual([[-2, 10]], eval(sle.stock_queue))
    # move stock to positive
    make_stock_entry(item_code=item_code, target=warehouse, qty=3, basic_rate=20)
    sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
    self.assertEqual([[1, 20]], eval(sle.stock_queue))
    # incoming entry with diff rate
    make_stock_entry(item_code=item_code, target=warehouse, qty=1, basic_rate=30)
    sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
    self.assertEqual([[1, 20],[1, 30]], eval(sle.stock_queue))
    frappe.db.set_default("allow_negative_stock", 0)

def test_auto_material_request(self):
    # Auto-reorder must raise a Material Request for both Purchase and
    # Transfer request types.
    from erpnext.stock.doctype.item.test_item import make_item_variant
    make_item_variant()
    self._test_auto_material_request("_Test Item")
    self._test_auto_material_request("_Test Item", material_request_type="Transfer")

def test_auto_material_request_for_variant(self):
    # Variants reorder via their template item's reorder levels.
    self._test_auto_material_request("_Test Variant Item-S")

def _test_auto_material_request(self, item_code, material_request_type="Purchase"):
    # Helper: push projected qty below the reorder level, run the
    # auto-reorder job and assert a Material Request was created for item_code.
    item = frappe.get_doc("Item", item_code)
    if item.variant_of:
        template = frappe.get_doc("Item", item.variant_of)
    else:
        template = item
    projected_qty, actual_qty = frappe.db.get_value("Bin", {"item_code": item_code,
        "warehouse": "_Test Warehouse - _TC"}, ["projected_qty", "actual_qty"]) or [0, 0]
    # stock entry reqd for auto-reorder
    create_stock_reconciliation(item_code=item_code, warehouse="_Test Warehouse - _TC",
        qty = actual_qty + abs(projected_qty) + 10, rate=100)
    projected_qty = frappe.db.get_value("Bin", {"item_code": item_code,
        "warehouse": "_Test Warehouse - _TC"}, "projected_qty") or 0
    frappe.db.set_value("Stock Settings", None, "auto_indent", 1)
    # update re-level qty so that it is more than projected_qty
    if projected_qty >= template.reorder_levels[0].warehouse_reorder_level:
        template.reorder_levels[0].warehouse_reorder_level += projected_qty
    template.reorder_levels[0].material_request_type = material_request_type
    template.save()
    from erpnext.stock.reorder_item import reorder_item
    mr_list = reorder_item()
    frappe.db.set_value("Stock Settings", None, "auto_indent", 0)
    items = []
    for mr in mr_list:
        for d in mr.items:
            items.append(d.item_code)
    self.assertTrue(item_code in items)
def test_material_receipt_gl_entry(self):
    # Receipt under perpetual inventory debits the warehouse account and
    # credits Stock Adjustment; cancelling removes both SLE and GL entries.
    set_perpetual_inventory()
    mr = make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC",
        qty=50, basic_rate=100)
    stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
        "warehouse": mr.get("items")[0].t_warehouse})
    self.check_stock_ledger_entries("Stock Entry", mr.name,
        [["_Test Item", "_Test Warehouse - _TC", 50.0]])
    self.check_gl_entries("Stock Entry", mr.name,
        sorted([
            [stock_in_hand_account, 5000.0, 0.0],
            ["Stock Adjustment - _TC", 0.0, 5000.0]
        ])
    )
    mr.cancel()
    self.assertFalse(frappe.db.sql("""select * from `tabStock Ledger Entry`
        where voucher_type='Stock Entry' and voucher_no=%s""", mr.name))
    self.assertFalse(frappe.db.sql("""select * from `tabGL Entry`
        where voucher_type='Stock Entry' and voucher_no=%s""", mr.name))

def test_material_issue_gl_entry(self):
    # Issue credits the warehouse account by the stock value difference and
    # debits Stock Adjustment; cancelling removes SLE and GL entries.
    set_perpetual_inventory()
    make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC",
        qty=50, basic_rate=100)
    mi = make_stock_entry(item_code="_Test Item", source="_Test Warehouse - _TC", qty=40)
    self.check_stock_ledger_entries("Stock Entry", mi.name,
        [["_Test Item", "_Test Warehouse - _TC", -40.0]])
    stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
        "warehouse": "_Test Warehouse - _TC"})
    stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
        "voucher_no": mi.name}, "stock_value_difference"))
    self.check_gl_entries("Stock Entry", mi.name,
        sorted([
            [stock_in_hand_account, 0.0, stock_value_diff],
            ["Stock Adjustment - _TC", stock_value_diff, 0.0]
        ])
    )
    mi.cancel()
    self.assertFalse(frappe.db.sql("""select name from `tabStock Ledger Entry`
        where voucher_type='Stock Entry' and voucher_no=%s""", mi.name))
    self.assertFalse(frappe.db.sql("""select name from `tabGL Entry`
        where voucher_type='Stock Entry' and voucher_no=%s""", mi.name))

def test_material_transfer_gl_entry(self):
    # Transfer moves value from the source warehouse account to the target
    # warehouse account; cancelling removes SLE and GL entries.
    set_perpetual_inventory()
    create_stock_reconciliation(qty=100, rate=100)
    mtn = make_stock_entry(item_code="_Test Item", source="_Test Warehouse - _TC",
        target="_Test Warehouse 1 - _TC", qty=45)
    self.check_stock_ledger_entries("Stock Entry", mtn.name,
        [["_Test Item", "_Test Warehouse - _TC", -45.0], ["_Test Item", "_Test Warehouse 1 - _TC", 45.0]])
    stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
        "warehouse": mtn.get("items")[0].s_warehouse})
    fixed_asset_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
        "warehouse": mtn.get("items")[0].t_warehouse})
    stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
        "voucher_no": mtn.name, "warehouse": "_Test Warehouse - _TC"}, "stock_value_difference"))
    self.check_gl_entries("Stock Entry", mtn.name,
        sorted([
            [stock_in_hand_account, 0.0, stock_value_diff],
            [fixed_asset_account, stock_value_diff, 0.0],
        ])
    )
    mtn.cancel()
    self.assertFalse(frappe.db.sql("""select * from `tabStock Ledger Entry`
        where voucher_type='Stock Entry' and voucher_no=%s""", mtn.name))
    self.assertFalse(frappe.db.sql("""select * from `tabGL Entry`
        where voucher_type='Stock Entry' and voucher_no=%s""", mtn.name))
def test_repack_no_change_in_valuation(self):
    # Without perpetual inventory, a repack must not post any GL entries.
    set_perpetual_inventory(0)
    make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, basic_rate=100)
    make_stock_entry(item_code="_Test Item Home Desktop 100", target="_Test Warehouse - _TC",
        qty=50, basic_rate=100)
    repack = frappe.copy_doc(test_records[3])
    repack.posting_date = nowdate()
    repack.posting_time = nowtime()
    repack.insert()
    repack.submit()
    self.check_stock_ledger_entries("Stock Entry", repack.name,
        [["_Test Item", "_Test Warehouse - _TC", -50.0],
        ["_Test Item Home Desktop 100", "_Test Warehouse - _TC", 1]])
    gl_entries = frappe.db.sql("""select account, debit, credit
        from `tabGL Entry` where voucher_type='Stock Entry' and voucher_no=%s
        order by account desc""", repack.name, as_dict=1)
    self.assertFalse(gl_entries)
    set_perpetual_inventory(0)

def test_repack_with_additional_costs(self):
    # Additional (operating) costs on a repack must raise the finished
    # goods' value and be credited to Expenses Included In Valuation.
    set_perpetual_inventory()
    make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, basic_rate=100)
    repack = frappe.copy_doc(test_records[3])
    repack.posting_date = nowdate()
    repack.posting_time = nowtime()
    # NOTE(review): the "Oerating" typo below is pre-existing test data
    # (a runtime string), deliberately left untouched.
    repack.set("additional_costs", [
        {
            "description": "Actual Oerating Cost",
            "amount": 1000
        },
        {
            "description": "additional operating costs",
            "amount": 200
        },
    ])
    repack.insert()
    repack.submit()
    stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
        "warehouse": repack.get("items")[1].t_warehouse})
    rm_stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
        "voucher_no": repack.name, "item_code": "_Test Item"}, "stock_value_difference"))
    fg_stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
        "voucher_no": repack.name, "item_code": "_Test Item Home Desktop 100"}, "stock_value_difference"))
    stock_value_diff = flt(fg_stock_value_diff - rm_stock_value_diff, 2)
    self.assertEqual(stock_value_diff, 1200)
    self.check_gl_entries("Stock Entry", repack.name,
        sorted([
            [stock_in_hand_account, 1200, 0.0],
            ["Expenses Included In Valuation - _TC", 0.0, 1200.0]
        ])
    )
    set_perpetual_inventory(0)
def check_stock_ledger_entries(self, voucher_type, voucher_no, expected_sle):
    """Assert the voucher produced the expected (item, warehouse, qty) SLE rows."""
    expected_sle.sort(key=lambda x: x[0])
    # check stock ledger entries
    rows = frappe.db.sql("""select item_code, warehouse, actual_qty
        from `tabStock Ledger Entry` where voucher_type = %s
        and voucher_no = %s order by item_code, warehouse, actual_qty""",
        (voucher_type, voucher_no), as_list=1)
    self.assertTrue(rows)
    rows.sort(key=lambda x: x[0])
    for idx, row in enumerate(rows):
        self.assertEquals(expected_sle[idx][0], row[0])
        self.assertEquals(expected_sle[idx][1], row[1])
        self.assertEquals(expected_sle[idx][2], row[2])

def check_gl_entries(self, voucher_type, voucher_no, expected_gl_entries):
    """Assert the voucher produced the expected (account, debit, credit) GL rows."""
    expected_gl_entries.sort(key=lambda x: x[0])
    rows = frappe.db.sql("""select account, debit, credit
        from `tabGL Entry` where voucher_type=%s and voucher_no=%s
        order by account asc, debit asc""", (voucher_type, voucher_no), as_list=1)
    self.assertTrue(rows)
    rows.sort(key=lambda x: x[0])
    for idx, gle in enumerate(rows):
        self.assertEquals(expected_gl_entries[idx][0], gle[0])
        self.assertEquals(expected_gl_entries[idx][1], gle[1])
        self.assertEquals(expected_gl_entries[idx][2], gle[2])
def test_serial_no_not_reqd(self):
    # Serial nos supplied for a non-serialized item must be rejected.
    se = frappe.copy_doc(test_records[0])
    se.get("items")[0].serial_no = "ABCD"
    se.insert()
    self.assertRaises(SerialNoNotRequiredError, se.submit)

def test_serial_no_reqd(self):
    # A serialized item without serial nos must be rejected.
    se = frappe.copy_doc(test_records[0])
    se.get("items")[0].item_code = "_Test Serialized Item"
    se.get("items")[0].qty = 2
    se.get("items")[0].transfer_qty = 2
    se.insert()
    self.assertRaises(SerialNoRequiredError, se.submit)

def test_serial_no_qty_more(self):
    # More serial nos than qty must be rejected.
    se = frappe.copy_doc(test_records[0])
    se.get("items")[0].item_code = "_Test Serialized Item"
    se.get("items")[0].qty = 2
    se.get("items")[0].serial_no = "ABCD\nEFGH\nXYZ"
    se.get("items")[0].transfer_qty = 2
    se.insert()
    self.assertRaises(SerialNoQtyError, se.submit)

def test_serial_no_qty_less(self):
    # Fewer serial nos than qty must be rejected.
    se = frappe.copy_doc(test_records[0])
    se.get("items")[0].item_code = "_Test Serialized Item"
    se.get("items")[0].qty = 2
    se.get("items")[0].serial_no = "ABCD"
    se.get("items")[0].transfer_qty = 2
    se.insert()
    self.assertRaises(SerialNoQtyError, se.submit)

def test_serial_no_transfer_in(self):
    # Receiving serialized stock creates Serial No records; cancelling
    # clears their warehouse.
    se = frappe.copy_doc(test_records[0])
    se.get("items")[0].item_code = "_Test Serialized Item"
    se.get("items")[0].qty = 2
    se.get("items")[0].serial_no = "ABCD\nEFGH"
    se.get("items")[0].transfer_qty = 2
    se.insert()
    se.submit()
    self.assertTrue(frappe.db.exists("Serial No", "ABCD"))
    self.assertTrue(frappe.db.exists("Serial No", "EFGH"))
    se.cancel()
    self.assertFalse(frappe.db.get_value("Serial No", "ABCD", "warehouse"))

def test_serial_no_not_exists(self):
    # Issuing unknown serial nos must be rejected.
    frappe.db.sql("delete from `tabSerial No` where name in ('ABCD', 'EFGH')")
    make_serialized_item(target_warehouse="_Test Warehouse 1 - _TC")
    se = frappe.copy_doc(test_records[0])
    se.purpose = "Material Issue"
    se.get("items")[0].item_code = "_Test Serialized Item With Series"
    se.get("items")[0].qty = 2
    se.get("items")[0].s_warehouse = "_Test Warehouse 1 - _TC"
    se.get("items")[0].t_warehouse = None
    se.get("items")[0].serial_no = "ABCD\nEFGH"
    se.get("items")[0].transfer_qty = 2
    se.insert()
    self.assertRaises(SerialNoNotExistsError, se.submit)

def test_serial_duplicate(self):
    # Re-receiving a serial no that is already in stock must be rejected.
    se, serial_nos = self.test_serial_by_series()
    se = frappe.copy_doc(test_records[0])
    se.get("items")[0].item_code = "_Test Serialized Item With Series"
    se.get("items")[0].qty = 1
    se.get("items")[0].serial_no = serial_nos[0]
    se.get("items")[0].transfer_qty = 1
    se.insert()
    self.assertRaises(SerialNoDuplicateError, se.submit)

def test_serial_by_series(self):
    # Serial nos auto-generated from the item's naming series; returns the
    # stock entry and serial nos for reuse by other tests.
    se = make_serialized_item()
    serial_nos = get_serial_nos(se.get("items")[0].serial_no)
    self.assertTrue(frappe.db.exists("Serial No", serial_nos[0]))
    self.assertTrue(frappe.db.exists("Serial No", serial_nos[1]))
    return se, serial_nos

def test_serial_item_error(self):
    # Using a serial no that belongs to a different item must be rejected.
    se, serial_nos = self.test_serial_by_series()
    make_serialized_item("_Test Serialized Item", "ABCD\nEFGH")
    se = frappe.copy_doc(test_records[0])
    se.purpose = "Material Transfer"
    se.get("items")[0].item_code = "_Test Serialized Item"
    se.get("items")[0].qty = 1
    se.get("items")[0].transfer_qty = 1
    se.get("items")[0].serial_no = serial_nos[0]
    se.get("items")[0].s_warehouse = "_Test Warehouse - _TC"
    se.get("items")[0].t_warehouse = "_Test Warehouse 1 - _TC"
    se.insert()
    self.assertRaises(SerialNoItemError, se.submit)
def test_serial_move(self):
    """Transferring a serialized item must move the serial no's warehouse;
    cancelling the transfer must move it back."""
    se = make_serialized_item()
    serial_no = get_serial_nos(se.get("items")[0].serial_no)[0]
    se = frappe.copy_doc(test_records[0])
    se.purpose = "Material Transfer"
    se.get("items")[0].item_code = "_Test Serialized Item With Series"
    se.get("items")[0].qty = 1
    se.get("items")[0].transfer_qty = 1
    se.get("items")[0].serial_no = serial_no
    se.get("items")[0].s_warehouse = "_Test Warehouse - _TC"
    se.get("items")[0].t_warehouse = "_Test Warehouse 1 - _TC"
    se.insert()
    se.submit()
    # BUG FIX: previously assertTrue(value, "...") was used — the second
    # argument is only the failure *message*, so the check passed for any
    # truthy warehouse.  The intent is an equality assertion.
    self.assertEqual(frappe.db.get_value("Serial No", serial_no, "warehouse"), "_Test Warehouse 1 - _TC")
    se.cancel()
    self.assertEqual(frappe.db.get_value("Serial No", serial_no, "warehouse"), "_Test Warehouse - _TC")
def test_serial_warehouse_error(self):
make_serialized_item(target_warehouse="_Test Warehouse 1 - _TC")
t = make_serialized_item()
serial_nos = get_serial_nos(t.get("items")[0].serial_no)
se = frappe.copy_doc(test_records[0])
se.purpose = "Material Transfer"
se.get("items")[0].item_code = "_Test Serialized Item With Series"
se.get("items")[0].qty = 1
se.get("items")[0].transfer_qty = 1
se.get("items")[0].serial_no = serial_nos[0]
se.get("items")[0].s_warehouse = "_Test Warehouse 1 - _TC"
se.get("items")[0].t_warehouse = "_Test Warehouse - _TC"
se.insert()
self.assertRaises(SerialNoWarehouseError, se.submit)
def test_serial_cancel(self):
	"""Cancelling a serialised receipt should clear the Serial No's warehouse."""
	entry, _ = self.test_serial_by_series()
	entry.cancel()
	serial_no = get_serial_nos(entry.get("items")[0].serial_no)[0]
	self.assertFalse(frappe.db.get_value("Serial No", serial_no, "warehouse"))
def test_warehouse_company_validation(self):
	"""Submitting against a warehouse that belongs to another company must
	raise InvalidWarehouseCompany."""
	from erpnext.stock.utils import InvalidWarehouseCompany
	set_perpetual_inventory(0)
	frappe.get_doc("User", "test2@example.com")\
		.add_roles("Sales User", "Sales Manager", "Stock User", "Stock Manager")
	frappe.set_user("test2@example.com")
	entry = frappe.copy_doc(test_records[0])
	# Target warehouse belongs to "_Test Company 1", not the entry's company.
	entry.get("items")[0].t_warehouse = "_Test Warehouse 2 - _TC1"
	entry.insert()
	self.assertRaises(InvalidWarehouseCompany, entry.submit)
# permission tests
def test_warehouse_user(self):
	"""Warehouse-level user permissions: a user restricted to one warehouse
	may not create a Stock Entry targeting another warehouse, while a user
	holding Stock Manager plus the matching default warehouse can."""
	set_perpetual_inventory(0)
	# Enable Warehouse user-permission checks on Stock Entry for both roles.
	for role in ("Stock User", "Sales User"):
		set_user_permission_doctypes(doctype="Stock Entry", role=role,
			apply_user_permissions=1, user_permission_doctypes=["Warehouse"])
	# Each user is permitted exactly one warehouse.
	frappe.defaults.add_default("Warehouse", "_Test Warehouse 1 - _TC", "test@example.com", "User Permission")
	frappe.defaults.add_default("Warehouse", "_Test Warehouse 2 - _TC1", "test2@example.com", "User Permission")
	test_user = frappe.get_doc("User", "test@example.com")
	test_user.add_roles("Sales User", "Sales Manager", "Stock User")
	test_user.remove_roles("Stock Manager")
	frappe.get_doc("User", "test2@example.com")\
		.add_roles("Sales User", "Sales Manager", "Stock User", "Stock Manager")
	# test@example.com is only permitted warehouse 1, so targeting
	# warehouse 2 must already fail at insert.
	frappe.set_user("test@example.com")
	st1 = frappe.copy_doc(test_records[0])
	st1.company = "_Test Company 1"
	st1.get("items")[0].t_warehouse="_Test Warehouse 2 - _TC1"
	self.assertRaises(frappe.PermissionError, st1.insert)
	# test2@example.com holds Stock Manager and the matching warehouse
	# permission, so the same entry goes through.
	frappe.set_user("test2@example.com")
	st1 = frappe.copy_doc(test_records[0])
	st1.company = "_Test Company 1"
	st1.get("items")[0].t_warehouse="_Test Warehouse 2 - _TC1"
	st1.insert()
	st1.submit()
	# Clean up the user-permission defaults created above.
	frappe.defaults.clear_default("Warehouse", "_Test Warehouse 1 - _TC",
		"test@example.com", parenttype="User Permission")
	frappe.defaults.clear_default("Warehouse", "_Test Warehouse 2 - _TC1",
		"test2@example.com", parenttype="User Permission")
def test_freeze_stocks(self):
	"""Stock Entries dated inside the frozen period must raise StockFreezeError."""
	frappe.db.set_value('Stock Settings', None, 'stock_auth_role', '')
	# Freeze by explicit cut-off date: postings up to nowdate()+5 are frozen.
	frappe.db.set_value("Stock Settings", None, "stock_frozen_upto", add_days(nowdate(), 5))
	entry = frappe.copy_doc(test_records[0]).insert()
	self.assertRaises(StockFreezeError, entry.submit)
	frappe.db.set_value("Stock Settings", None, "stock_frozen_upto", '')
	# Freeze by age in days: a posting 15 days old is older than the 7-day limit.
	frappe.db.set_value("Stock Settings", None, "stock_frozen_upto_days", 7)
	entry = frappe.copy_doc(test_records[0])
	entry.posting_date = add_days(nowdate(), -15)
	entry.insert()
	self.assertRaises(StockFreezeError, entry.submit)
	frappe.db.set_value("Stock Settings", None, "stock_frozen_upto_days", 0)
def test_production_order(self):
	"""Manufacture entry for a Production Order: the finished-good row's
	amount must equal raw-material cost + BOM operating cost + the order's
	additional operating cost.

	Bug fix: ``filter(...)[0]`` only works on Python 2 (``filter`` returns
	a non-subscriptable iterator on Python 3); a list comprehension behaves
	identically on both.
	"""
	from erpnext.manufacturing.doctype.production_order.production_order \
		import make_stock_entry as _make_stock_entry
	bom_no, bom_operation_cost = frappe.db.get_value("BOM", {"item": "_Test FG Item 2",
		"is_default": 1, "docstatus": 1}, ["name", "operating_cost"])
	production_order = frappe.new_doc("Production Order")
	production_order.update({
		"company": "_Test Company",
		"fg_warehouse": "_Test Warehouse 1 - _TC",
		"production_item": "_Test FG Item 2",
		"bom_no": bom_no,
		"qty": 1.0,
		"stock_uom": "_Test UOM",
		"wip_warehouse": "_Test Warehouse - _TC",
		"additional_operating_cost": 1000
	})
	production_order.insert()
	production_order.submit()
	# Provide raw material stock in the WIP warehouse.
	make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, basic_rate=100)
	stock_entry = _make_stock_entry(production_order.name, "Manufacture", 1)
	# Raw-material cost: sum of amounts on rows issued from a warehouse.
	rm_cost = 0
	for d in stock_entry.get("items"):
		if d.s_warehouse:
			rm_cost += flt(d.amount)
	# Finished-good row amount (py2/py3-compatible lookup).
	fg_cost = [d.amount for d in stock_entry.get("items")
		if d.item_code == "_Test FG Item 2"][0]
	self.assertEqual(fg_cost,
		flt(rm_cost + bom_operation_cost + production_order.additional_operating_cost, 2))
def test_variant_production_order(self):
	"""A Production Order for an item variant should carry the variant's
	item code into the Manufacture stock entry."""
	from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
	bom_no = frappe.db.get_value("BOM", {"item": "_Test Variant Item",
		"is_default": 1, "docstatus": 1})
	production_order = frappe.new_doc("Production Order")
	production_order.update({
		"company": "_Test Company",
		"fg_warehouse": "_Test Warehouse 1 - _TC",
		"production_item": "_Test Variant Item-S",
		"bom_no": bom_no,
		"qty": 1.0,
		"stock_uom": "_Test UOM",
		"wip_warehouse": "_Test Warehouse - _TC"
	})
	production_order.insert()
	production_order.submit()
	stock_entry = frappe.get_doc(make_stock_entry(production_order.name, "Manufacture", 1))
	stock_entry.insert()
	# The finished-good row must use the variant code, not the template's.
	self.assertTrue("_Test Variant Item-S" in [d.item_code for d in stock_entry.items])
def test_same_serial_nos_in_repack_or_manufacture_entries(self):
	"""A Repack entry may consume and re-issue the very same serial numbers;
	both submit and cancel should succeed."""
	receipt = make_serialized_item(target_warehouse="_Test Warehouse - _TC")
	serial_nos = receipt.get("items")[0].serial_no
	repack = make_stock_entry(item_code="_Test Serialized Item With Series", source="_Test Warehouse - _TC",
		qty=2, basic_rate=100, purpose="Repack", serial_no=serial_nos, do_not_save=True)
	# Produced row re-uses the consumed serial numbers on a different item.
	repack.append("items", {
		"item_code": "_Test Serialized Item",
		"t_warehouse": "_Test Warehouse - _TC",
		"qty": 2,
		"basic_rate": 120,
		"expense_account": "Stock Adjustment - _TC",
		"conversion_factor": 1.0,
		"cost_center": "_Test Cost Center - _TC",
		"serial_no": serial_nos
	})
	repack.submit()
	repack.cancel()
def make_serialized_item(item_code=None, serial_no=None, target_warehouse=None):
	"""Create and submit a qty-2 Stock Entry for a serialised test item.

	Defaults to "_Test Serialized Item With Series" with auto-generated
	serial numbers; returns the submitted Stock Entry document.
	"""
	entry = frappe.copy_doc(test_records[0])
	row = entry.get("items")[0]
	row.item_code = item_code or "_Test Serialized Item With Series"
	row.serial_no = serial_no
	row.qty = 2
	row.transfer_qty = 2
	if target_warehouse:
		row.t_warehouse = target_warehouse
	entry.insert()
	entry.submit()
	return entry
def make_stock_entry(**args):
	"""Build a one-row Stock Entry from keyword args and (unless
	``do_not_save``/``do_not_submit`` are set) insert and submit it.

	Purpose is inferred from source/target when not given: both present ->
	Material Transfer, only source -> Material Issue, else Material Receipt.
	"""
	args = frappe._dict(args)
	entry = frappe.new_doc("Stock Entry")
	if args.posting_date:
		entry.posting_date = args.posting_date
	if args.posting_time:
		entry.posting_time = args.posting_time
	if args.purpose:
		entry.purpose = args.purpose
	elif args.source and args.target:
		entry.purpose = "Material Transfer"
	elif args.source:
		entry.purpose = "Material Issue"
	else:
		entry.purpose = "Material Receipt"
	entry.company = args.company or "_Test Company"
	entry.purchase_receipt_no = args.purchase_receipt_no
	entry.delivery_note_no = args.delivery_note_no
	entry.sales_invoice_no = args.sales_invoice_no
	entry.difference_account = args.difference_account or "Stock Adjustment - _TC"
	entry.append("items", {
		"item_code": args.item or args.item_code or "_Test Item",
		"s_warehouse": args.from_warehouse or args.source,
		"t_warehouse": args.to_warehouse or args.target,
		"qty": args.qty,
		"basic_rate": args.basic_rate,
		"expense_account": args.expense_account or "Stock Adjustment - _TC",
		"conversion_factor": 1.0,
		"cost_center": "_Test Cost Center - _TC",
		"serial_no": args.serial_no
	})
	if not args.do_not_save:
		entry.insert()
		if not args.do_not_submit:
			entry.submit()
	return entry
def get_qty_after_transaction(**args):
	"""Return ``qty_after_transaction`` from the most recent Stock Ledger
	Entry matching the given (or default test) item/warehouse/posting time."""
	params = frappe._dict(args)
	last_sle = get_previous_sle({
		"item_code": params.item_code or "_Test Item",
		"warehouse": params.warehouse or "_Test Warehouse - _TC",
		"posting_date": params.posting_date or nowdate(),
		"posting_time": params.posting_time or nowtime()
	})
	return flt(last_sle.get("qty_after_transaction"))
# Shared fixture documents for these tests, loaded from the doctype's test records.
test_records = frappe.get_test_records('Stock Entry')
|
wolfram74/numerical_methods_iserles_notes | refs/heads/master | venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py | 23 | from __future__ import division, absolute_import, print_function
import tempfile
import sys
import os
import shutil
import warnings
import operator
import io
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.core import *
from numpy.compat import asbytes, getexception, strchar, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
# Expected representation of empty shape/strides/suboffsets on this Python
# version; presumably compared against memoryview attributes in buffer tests
# later in the file -- confirm against the full module.
if sys.version_info[:2] > (3, 2):
    # In Python 3.3 the representation of empty shape, strides and suboffsets
    # is an empty tuple instead of None.
    # http://docs.python.org/dev/whatsnew/3.3.html#api-changes
    EMPTY = ()
else:
    EMPTY = None
class TestFlags(TestCase):
    """Tests for the ndarray.flags attribute."""
    def setUp(self):
        # Fresh 1-d contiguous array for every test.
        self.a = arange(10)
    def test_writeable(self):
        mydict = locals()
        # With writeable cleared, both direct item assignment and itemset
        # on a view must raise.
        self.a.flags.writeable = False
        self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
        self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
        # Restoring writeable re-enables assignment.
        self.a.flags.writeable = True
        self.a[0] = 5
        self.a[0] = 0
    def test_otherflags(self):
        # A fresh arange is C-contiguous, well-behaved and owns its data;
        # it is not Fortran-ordered-but-not-C (fnc).
        assert_equal(self.a.flags.carray, True)
        assert_equal(self.a.flags.farray, False)
        assert_equal(self.a.flags.behaved, True)
        assert_equal(self.a.flags.fnc, False)
        assert_equal(self.a.flags.forc, True)
        assert_equal(self.a.flags.owndata, True)
        assert_equal(self.a.flags.writeable, True)
        assert_equal(self.a.flags.aligned, True)
        assert_equal(self.a.flags.updateifcopy, False)
    def test_string_align(self):
        a = np.zeros(4, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
        # not power of two are accessed bytewise and thus considered aligned
        a = np.zeros(5, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
    def test_void_align(self):
        # Structured (void) dtypes should also report as aligned.
        a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
        assert_(a.flags.aligned)
class TestHash(TestCase):
    # see gh-3793: numpy integer scalars must hash the same as the Python
    # ints they compare equal to, for every signed/unsigned width.
    def _check(self, typ, value, err_msg):
        # Single scalar-vs-int hash comparison; err_msg pinpoints the case.
        assert_equal(hash(typ(value)), hash(value), err_msg=err_msg)
    def test_int(self):
        for st, ut, s in [(np.int8, np.uint8, 8),
                          (np.int16, np.uint16, 16),
                          (np.int32, np.uint32, 32),
                          (np.int64, np.uint64, 64)]:
            for i in range(1, s):
                # Signed: negative power of two, positive power of two,
                # and the largest i-bit value.
                self._check(st, -2**i, "%r: -2**%d" % (st, i))
                self._check(st, 2**(i - 1), "%r: 2**%d" % (st, i - 1))
                self._check(st, 2**i - 1, "%r: 2**%d - 1" % (st, i))
                # Unsigned: clamp the exponent so the values stay in range.
                i = max(i - 1, 1)
                self._check(ut, 2**(i - 1), "%r: 2**%d" % (ut, i - 1))
                self._check(ut, 2**i - 1, "%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
    """Tests for basic ndarray attributes: shape, strides, dtype, fill."""
    def setUp(self):
        self.one = arange(10)
        self.two = arange(20).reshape(4, 5)
        self.three = arange(60, dtype=float64).reshape(2, 5, 6)
    def test_attributes(self):
        assert_equal(self.one.shape, (10,))
        assert_equal(self.two.shape, (4, 5))
        assert_equal(self.three.shape, (2, 5, 6))
        # shape is assignable in place as long as the total size matches
        self.three.shape = (10, 3, 2)
        assert_equal(self.three.shape, (10, 3, 2))
        self.three.shape = (2, 5, 6)
        # C-contiguous strides: last axis advances by one itemsize
        assert_equal(self.one.strides, (self.one.itemsize,))
        num = self.two.itemsize
        assert_equal(self.two.strides, (5*num, num))
        num = self.three.itemsize
        assert_equal(self.three.strides, (30*num, 6*num, num))
        assert_equal(self.one.ndim, 1)
        assert_equal(self.two.ndim, 2)
        assert_equal(self.three.ndim, 3)
        num = self.two.itemsize
        assert_equal(self.two.size, 20)
        assert_equal(self.two.nbytes, 20*num)
        assert_equal(self.two.itemsize, self.two.dtype.itemsize)
        # .two is a reshape view, so its base compares equal to the arange
        assert_equal(self.two.base, arange(20))
    def test_dtypeattr(self):
        assert_equal(self.one.dtype, dtype(int_))
        assert_equal(self.three.dtype, dtype(float_))
        # NOTE(review): 'l' assumes the default integer is a C long --
        # platform-dependent (e.g. differs on 64-bit Windows); confirm the
        # platforms this suite targets.
        assert_equal(self.one.dtype.char, 'l')
        assert_equal(self.three.dtype.char, 'd')
        self.assertTrue(self.three.dtype.str[0] in '<>')
        assert_equal(self.one.dtype.str[1], 'i')
        assert_equal(self.three.dtype.str[1], 'f')
    def test_int_subclassing(self):
        # Regression test for https://github.com/numpy/numpy/pull/3526
        numpy_int = np.int_(0)
        if sys.version_info[0] >= 3:
            # On Py3k int_ should not inherit from int, because it's not fixed-width anymore
            assert_equal(isinstance(numpy_int, int), False)
        else:
            # Otherwise, it should inherit from int...
            assert_equal(isinstance(numpy_int, int), True)
            # ... and fast-path checks on C-API level should also work
            from numpy.core.multiarray_tests import test_int_subclass
            assert_equal(test_int_subclass(numpy_int), True)
    def test_stridesattr(self):
        x = self.one
        def make_array(size, offset, strides):
            # Build an array over x's buffer with explicit offset/strides
            # (both given in elements, scaled to bytes here).
            return ndarray(size, buffer=x, dtype=int,
                offset=offset*x.itemsize,
                strides=strides*x.itemsize)
        assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
        # Combinations that would run outside the buffer must be rejected.
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(ValueError, make_array, 8, 3, 1)
        assert_equal(make_array(8, 3, 0), np.array([3]*8))
        # Check behavior reported in gh-2503:
        self.assertRaises(ValueError, make_array, (2, 3), 5, array([-2, -3]))
        make_array(0, 0, 10)
    def test_set_stridesattr(self):
        x = self.one
        def make_array(size, offset, strides):
            try:
                r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
            except:
                raise RuntimeError(getexception())
            # Assign strides after construction (the attribute under test).
            r.strides = strides=strides*x.itemsize
            return r
        assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
        assert_equal(make_array(7, 3, 1), array([3, 4, 5, 6, 7, 8, 9]))
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(RuntimeError, make_array, 8, 3, 1)
        # Check that the true extent of the array is used.
        # Test relies on as_strided base not exposing a buffer.
        x = np.lib.stride_tricks.as_strided(arange(1), (10, 10), (0, 0))
        def set_strides(arr, strides):
            arr.strides = strides
        self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
        # Test for offset calculations:
        x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
            shape=(10,), strides=(-1,))
        self.assertRaises(ValueError, set_strides, x[::-1], -1)
        a = x[::-1]
        a.strides = 1
        a[::2].strides = 2
    def test_fill(self):
        # fill() must agree with broadcast assignment for every scalar kind.
        for t in "?bhilqpBHILQPfdgFDGO":
            x = empty((3, 2, 1), t)
            y = empty((3, 2, 1), t)
            x.fill(1)
            y[...] = 1
            assert_equal(x, y)
    def test_fill_max_uint64(self):
        # The largest uint64 must survive fill() without truncation.
        x = empty((3, 2, 1), dtype=uint64)
        y = empty((3, 2, 1), dtype=uint64)
        value = 2**64 - 1
        y[...] = value
        x.fill(value)
        assert_array_equal(x, y)
    def test_fill_struct_array(self):
        # Filling from a scalar
        x = array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
        x.fill(x[0])
        assert_equal(x['f1'][1], x['f1'][0])
        # Filling from a tuple that can be converted
        # to a scalar
        x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
        x.fill((3.5, -2))
        assert_array_equal(x['a'], [3.5, 3.5])
        assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
    """Construction of arrays from nested sequences of arrays."""
    def test_array(self):
        # Stacking equal-shaped 1-d arrays yields a 2-d array.
        d = np.ones(6)
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6)))
        d = np.ones(6)
        tgt = np.ones((2, 6))
        r = np.array([d, d])
        assert_equal(r, tgt)
        tgt[1] = 2
        r = np.array([d, d + 1])
        assert_equal(r, tgt)
        d = np.ones(6)
        r = np.array([[d, d]])
        assert_equal(r, np.ones((1, 2, 6)))
        d = np.ones(6)
        r = np.array([[d, d], [d, d]])
        assert_equal(r, np.ones((2, 2, 6)))
        d = np.ones((6, 6))
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6, 6)))
        # Ragged nesting degrades to a length-2 object-like result.
        d = np.ones((6, ))
        r = np.array([[d, d + 1], d + 2])
        assert_equal(len(r), 2)
        assert_equal(r[0], [d, d + 1])
        assert_equal(r[1], d + 2)
        # Boolean construction. Bug fix: np.bool is a deprecated alias
        # (removed in numpy 1.24); np.bool_ is dtype-equivalent and keeps
        # the test importable on modern numpy.
        tgt = np.ones((2, 3), dtype=np.bool_)
        tgt[0, 2] = False
        tgt[1, 0:2] = False
        r = np.array([[True, True, False], [False, False, True]])
        assert_equal(r, tgt)
        r = np.array([[True, False], [True, False], [False, True]])
        assert_equal(r, tgt.T)
class TestAssignment(TestCase):
    """Assignment through a[...] = b, including legacy output-broadcasting."""
    def test_assignment_broadcasting(self):
        a = np.arange(6).reshape(2, 3)
        # Broadcasting the input to the output
        a[...] = np.arange(3)
        assert_equal(a, [[0, 1, 2], [0, 1, 2]])
        a[...] = np.arange(2).reshape(2, 1)
        assert_equal(a, [[0, 0, 0], [1, 1, 1]])
        # For compatibility with <= 1.5, a limited version of broadcasting
        # the output to the input.
        #
        # This behavior is inconsistent with NumPy broadcasting
        # in general, because it only uses one of the two broadcasting
        # rules (adding a new "1" dimension to the left of the shape),
        # applied to the output instead of an input. In NumPy 2.0, this kind
        # of broadcasting assignment will likely be disallowed.
        a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
        # The other type of broadcasting would require a reduction operation.
        def assign(a, b):
            a[...] = b
        assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
    def test_assignment_errors(self):
        # Address issue #2276
        class C:
            pass
        a = np.zeros(1)
        def assign(v):
            a[0] = v
        # An arbitrary object cannot be converted to a float element.
        assert_raises((AttributeError, TypeError), assign, C())
        # A sequence of length != 1 cannot fill a single element.
        assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
    """Sanity checks for dtype construction and byte-order sensitivity."""
    def test_construction(self):
        # Character codes map onto the corresponding sized scalar types.
        assert_equal(np.dtype('i4'), np.dtype(np.int32))
        assert_equal(np.dtype('f8'), np.dtype(np.float64))
    def test_byteorders(self):
        # Explicit little- and big-endian dtypes never compare equal,
        # including when nested inside structured dtypes.
        self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
        self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
    """Indexing and assignment on zero-rank (0-d) arrays."""
    def setUp(self):
        # One numeric and one object-dtype 0-d array.
        self.d = array(0), array('x', object)
    def test_ellipsis_subscript(self):
        a, b = self.d
        self.assertEqual(a[...], 0)
        self.assertEqual(b[...], 'x')
        self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
        self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
    def test_empty_subscript(self):
        a, b = self.d
        # The empty tuple extracts the scalar element.
        self.assertEqual(a[()], 0)
        self.assertEqual(b[()], 'x')
        self.assertTrue(type(a[()]) is a.dtype.type)
        self.assertTrue(type(b[()]) is str)
    def test_invalid_subscript(self):
        a, b = self.d
        # 0-d arrays reject integer and integer-array indices.
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[0], b)
        self.assertRaises(IndexError, lambda x: x[array([], int)], a)
        self.assertRaises(IndexError, lambda x: x[array([], int)], b)
    def test_ellipsis_subscript_assignment(self):
        a, b = self.d
        a[...] = 42
        self.assertEqual(a, 42)
        b[...] = ''
        self.assertEqual(b.item(), '')
    def test_empty_subscript_assignment(self):
        a, b = self.d
        a[()] = 42
        self.assertEqual(a, 42)
        b[()] = ''
        self.assertEqual(b.item(), '')
    def test_invalid_subscript_assignment(self):
        a, b = self.d
        def assign(x, i, v):
            x[i] = v
        self.assertRaises(IndexError, assign, a, 0, 42)
        self.assertRaises(IndexError, assign, b, 0, '')
        # A string cannot be converted into the numeric array's dtype.
        self.assertRaises(ValueError, assign, a, (), '')
    def test_newaxis(self):
        a, b = self.d
        # Each newaxis inserts one length-1 dimension.
        self.assertEqual(a[newaxis].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ...].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1))
        self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1))
        self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1))
        self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
    def test_invalid_newaxis(self):
        a, b = self.d
        def subscript(x, i): x[i]
        # newaxis cannot be combined with an integer index on a 0-d array,
        # and the number of axes is capped.
        self.assertRaises(IndexError, subscript, a, (newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
    def test_constructor(self):
        # ndarray(()) builds a writable 0-d array; a second 0-d array may
        # share its buffer.
        x = ndarray(())
        x[()] = 5
        self.assertEqual(x[()], 5)
        y = ndarray((), buffer=x)
        y[()] = 6
        self.assertEqual(x[()], 6)
    def test_output(self):
        # A 0-d output cannot receive a broadcast 1-d result.
        x = array(2)
        self.assertRaises(ValueError, add, x, [1], x)
class TestScalarIndexing(TestCase):
    """Indexing behaviour of numpy scalars, plus overlapping slice assignment."""
    def setUp(self):
        # A numpy scalar obtained by indexing an array.
        self.d = array([0, 1])[0]
    def test_ellipsis_subscript(self):
        a = self.d
        self.assertEqual(a[...], 0)
        self.assertEqual(a[...].shape, ())
    def test_empty_subscript(self):
        a = self.d
        self.assertEqual(a[()], 0)
        self.assertEqual(a[()].shape, ())
    def test_invalid_subscript(self):
        a = self.d
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[array([], int)], a)
    def test_invalid_subscript_assignment(self):
        a = self.d
        def assign(x, i, v):
            x[i] = v
        # numpy scalars are immutable.
        self.assertRaises(TypeError, assign, a, 0, 42)
    def test_newaxis(self):
        a = self.d
        # Each newaxis inserts one length-1 dimension.
        self.assertEqual(a[newaxis].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ...].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1))
        self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1))
        self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1))
        self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
    def test_invalid_newaxis(self):
        a = self.d
        def subscript(x, i): x[i]
        self.assertRaises(IndexError, subscript, a, (newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
    def test_overlapping_assignment(self):
        # Assignment between overlapping views must behave as if the
        # right-hand side were fully evaluated first.
        # With positive strides
        a = np.arange(4)
        a[:-1] = a[1:]
        assert_equal(a, [1, 2, 3, 3])
        a = np.arange(4)
        a[1:] = a[:-1]
        assert_equal(a, [0, 0, 1, 2])
        # With positive and negative strides
        a = np.arange(4)
        a[:] = a[::-1]
        assert_equal(a, [3, 2, 1, 0])
        a = np.arange(6).reshape(2, 3)
        a[::-1,:] = a[:, ::-1]
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
        a = np.arange(6).reshape(2, 3)
        a[::-1, ::-1] = a[:, ::-1]
        assert_equal(a, [[3, 4, 5], [0, 1, 2]])
        # With just one element overlapping
        a = np.arange(5)
        a[:3] = a[2:]
        assert_equal(a, [2, 3, 4, 3, 4])
        a = np.arange(5)
        a[2:] = a[:3]
        assert_equal(a, [0, 1, 0, 1, 2])
        a = np.arange(5)
        a[2::-1] = a[2:]
        assert_equal(a, [4, 3, 2, 3, 4])
        a = np.arange(5)
        a[2:] = a[2::-1]
        assert_equal(a, [0, 1, 2, 1, 0])
        a = np.arange(5)
        a[2::-1] = a[:1:-1]
        assert_equal(a, [2, 3, 4, 3, 4])
        a = np.arange(5)
        a[:1:-1] = a[2::-1]
        assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
    """Array creation from objects, strings, zeros and mixed sequences.

    NOTE(review): uses the legacy aliases np.bool/np.object/np.complex and
    the Python 2 ``long`` type; this suite targets the older numpy/python
    versions this file was written for.
    """
    def test_from_attribute(self):
        class x(object):
            def __array__(self, dtype=None):
                pass
        # __array__ returning None is invalid input to array().
        self.assertRaises(ValueError, array, x())
    def test_from_string(self) :
        # Numeric strings convert for every integer and float typecode.
        types = np.typecodes['AllInteger'] + np.typecodes['Float']
        nstr = ['123', '123']
        result = array([123, 123], dtype=int)
        for type in types :
            msg = 'String conversion for %s' % type
            assert_equal(array(nstr, dtype=type), result, err_msg=msg)
    def test_void(self):
        arr = np.array([], dtype='V')
        assert_equal(arr.dtype.kind, 'V')
    def test_zeros(self):
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((13,), dtype=dt)
            assert_equal(np.count_nonzero(d), 0)
            # true for ieee floats
            assert_equal(d.sum(), 0)
            assert_(not d.any())
        # Sub-array and structured dtypes must zero-initialize too.
        d = np.zeros(2, dtype='(2,4)i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())
        d = np.zeros(2, dtype='4i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())
        d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
        assert_equal(np.count_nonzero(d), 0)
    @dec.slow
    def test_zeros_big(self):
        # test big array as they might be allocated different by the sytem
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((30 * 1024**2,), dtype=dt)
            assert_(not d.any())
    def test_zeros_obj(self):
        # test initialization from PyLong(0)
        d = np.zeros((13,), dtype=object)
        assert_array_equal(d, [0] * 13)
        assert_equal(np.count_nonzero(d), 0)
    def test_zeros_obj_obj(self):
        # Object fields with a sub-array shape also initialize to 0.
        d = zeros(10, dtype=[('k', object, 2)])
        assert_array_equal(d['k'], 0)
    def test_zeros_like_like_zeros(self):
        # test zeros_like returns the same as zeros
        for c in np.typecodes['All']:
            if c == 'V':
                continue
            d = zeros((3,3), dtype=c)
            assert_array_equal(zeros_like(d), d)
            assert_equal(zeros_like(d).dtype, d.dtype)
        # explicitly check some special cases
        d = zeros((3,3), dtype='S5')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='U5')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='<i4')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='>i4')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='<M8[s]')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='>M8[s]')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
        d = zeros((3,3), dtype='f4,f4')
        assert_array_equal(zeros_like(d), d)
        assert_equal(zeros_like(d).dtype, d.dtype)
    def test_empty_unicode(self):
        # don't throw decode errors on garbage memory
        for i in range(5, 100, 5):
            d = np.empty(i, dtype='U')
            str(d)
    def test_sequence_non_homogenous(self):
        # Ints too large for int64 promote the whole array to object;
        # any complex element promotes to complex.
        assert_equal(np.array([4, 2**80]).dtype, np.object)
        assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80] * 3).dtype, np.object)
        assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
        assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
        assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
    @dec.skipif(sys.version_info[0] >= 3)
    def test_sequence_long(self):
        # Python 2 only: long values promote like ints.
        assert_equal(np.array([long(4), long(4)]).dtype, np.long)
        assert_equal(np.array([long(4), 2**80]).dtype, np.object)
        assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
        assert_equal(np.array([2**80, long(4)]).dtype, np.object)
    def test_non_sequence_sequence(self):
        """Should not segfault.

        Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a ValueError. At some point we may raise a warning instead
        of an error in the Fail case.

        """
        class Fail(object):
            def __len__(self):
                return 1
            def __getitem__(self, index):
                raise ValueError()
        class Map(object):
            def __len__(self):
                return 1
            def __getitem__(self, index):
                raise KeyError()
        a = np.array([Map()])
        assert_(a.shape == (1,))
        assert_(a.dtype == np.dtype(object))
        assert_raises(ValueError, np.array, [Fail()])
    def test_no_len_object_type(self):
        # gh-5100, want object array from iterable object without len()
        class Point2:
            def __init__(self):
                pass
            def __getitem__(self, ind):
                if ind in [0, 1]:
                    return ind
                else:
                    raise IndexError()
        d = np.array([Point2(), Point2(), Point2()])
        assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
    """Structured (record) arrays: sub-array fields, comparison, casting."""
    def test_subarray_field_access(self):
        a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
        a['a'] = np.arange(60).reshape(3, 5, 2, 2)
        # Since the subarray is always in C-order, a transpose
        # does not swap the subarray:
        assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
        # In Fortran order, the subarray gets appended
        # like in all other cases, not prepended as a special case
        b = a.copy(order='F')
        assert_equal(a['a'].shape, b['a'].shape)
        assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
    def test_subarray_comparison(self):
        # Check that comparisons between record arrays with
        # multi-dimensional field types work properly
        a = np.rec.fromrecords(
            [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
            dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
        b = a.copy()
        assert_equal(a==b, [True, True])
        assert_equal(a!=b, [False, False])
        b[1].b = 'c'
        assert_equal(a==b, [True, False])
        assert_equal(a!=b, [False, True])
        for i in range(3):
            # Perturb one element of the 'a' sub-array field at a time.
            b[0].a = a[0].a
            b[0].a[i] = 5
            assert_equal(a==b, [False, False])
            assert_equal(a!=b, [True, True])
        for i in range(2):
            for j in range(2):
                # Perturb one element of the 2x2 'c' field at a time.
                b = a.copy()
                b[0].c[i, j] = 10
                assert_equal(a==b, [False, True])
                assert_equal(a!=b, [True, False])
        # Check that broadcasting with a subarray works
        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
        assert_equal(a==b, [[True, True, False], [False, False, True]])
        assert_equal(b==a, [[True, True, False], [False, False, True]])
        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
        assert_equal(a==b, [[True, True, False], [False, False, True]])
        assert_equal(b==a, [[True, True, False], [False, False, True]])
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a==b, [[True, False, False], [False, False, True]])
        assert_equal(b==a, [[True, False, False], [False, False, True]])
        # Check that broadcasting Fortran-style arrays with a subarray work
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a==b, [[True, False, False], [False, False, True]])
        assert_equal(b==a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result to broadcasting
        x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        assert_equal(x == y, False)
        x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        assert_equal(x == y, False)
        # Check that structured arrays that are different only in
        # byte-order work
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
        assert_equal(a == b, [False, True])
    def test_casting(self):
        # Check that casting a structured array to change its byte order
        # works
        a = np.array([(1,)], dtype=[('a', '<i4')])
        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
        b = a.astype([('a', '>i4')])
        assert_equal(b, a.byteswap().newbyteorder())
        assert_equal(a['a'][0], b['a'][0])
        # Check that equality comparison works on structured arrays if
        # they are 'equiv'-castable
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
        b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        assert_equal(a == b, [True, True])
        # Check that 'equiv' casting can reorder fields and change byte
        # order
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        c = a.astype(b.dtype, casting='equiv')
        assert_equal(a == c, [True, True])
        # Check that 'safe' casting can change byte order and up-cast
        # fields
        t = [('a', '<i8'), ('b', '>f8')]
        assert_(np.can_cast(a.dtype, t, casting='safe'))
        c = a.astype(t, casting='safe')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
            [True, True])
        # Check that 'same_kind' casting can change byte order and
        # change field widths within a "kind"
        t = [('a', '<i4'), ('b', '>f4')]
        assert_(np.can_cast(a.dtype, t, casting='same_kind'))
        c = a.astype(t, casting='same_kind')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
            [True, True])
        # Check that casting fails if the casting rule should fail on
        # any of the fields
        t = [('a', '>i8'), ('b', '<f4')]
        assert_(not np.can_cast(a.dtype, t, casting='safe'))
        assert_raises(TypeError, a.astype, t, casting='safe')
        t = [('a', '>i2'), ('b', '<f8')]
        assert_(not np.can_cast(a.dtype, t, casting='equiv'))
        assert_raises(TypeError, a.astype, t, casting='equiv')
        t = [('a', '>i8'), ('b', '<i2')]
        assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
        assert_raises(TypeError, a.astype, t, casting='same_kind')
        assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
        assert_raises(TypeError, a.astype, b.dtype, casting='no')
        # Check that non-'unsafe' casting can't change the set of field names
        for casting in ['no', 'safe', 'equiv', 'same_kind']:
            t = [('a', '>i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))
            t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))
class TestBool(TestCase):
    """bool_ interning, and sum/count_nonzero over boolean arrays."""
    def test_test_interning(self):
        # bool_(0)/bool_(1) are interned singletons.
        a0 = bool_(0)
        b0 = bool_(False)
        self.assertTrue(a0 is b0)
        a1 = bool_(1)
        b1 = bool_(True)
        self.assertTrue(a1 is b1)
        # Scalars extracted from arrays reuse the interned singletons.
        self.assertTrue(array([True])[0] is a1)
        self.assertTrue(array(True)[()] is a1)
    def test_sum(self):
        d = np.ones(101, dtype=np.bool);
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)
        # Non-0/1 byte patterns must still count as True.
        d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)
    def check_count_nonzero(self, power, length):
        # Exhaustively check count_nonzero for all `power`-bit patterns laid
        # out in a length-`length` bool array, including after rewriting the
        # underlying bytes to non-0/1 values via a uint8 view.
        powers = [2 ** i for i in range(length)]
        for i in range(2**power):
            l = [(i & x) != 0 for x in powers]
            a = np.array(l, dtype=np.bool)
            c = builtins.sum(l)
            self.assertEqual(np.count_nonzero(a), c)
            av = a.view(np.uint8)
            av *= 3
            self.assertEqual(np.count_nonzero(a), c)
            av *= 4
            self.assertEqual(np.count_nonzero(a), c)
            av[av != 0] = 0xFF
            self.assertEqual(np.count_nonzero(a), c)
    def test_count_nonzero(self):
        # check all 12 bit combinations in a length 17 array
        # covers most cases of the 16 byte unrolled code
        self.check_count_nonzero(12, 17)
    @dec.slow
    def test_count_nonzero_all(self):
        # check all combinations in a length 17 array
        # covers all cases of the 16 byte unrolled code
        self.check_count_nonzero(17, 17)
    def test_count_nonzero_unaligned(self):
        # prevent mistakes as e.g. gh-4060
        for o in range(7):
            a = np.zeros((18,), dtype=np.bool)[o+1:]
            a[:o] = True
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
            a = np.ones((18,), dtype=np.bool)[o+1:]
            a[:o] = False
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_test_round(self):
assert_equal(array([1.2, 1.5]).round(), [1, 2])
assert_equal(array(1.5).round(), 2)
assert_equal(array([12.2, 15.5]).round(-1), [10, 20])
assert_equal(array([12.15, 15.51]).round(1), [12.2, 15.5])
def test_transpose(self):
a = array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the lessthan comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "scalar sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare fuction differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h'] :
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h'] :
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "string sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "unicode sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = array([(i, i) for i in range(101)], dtype = dt)
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "datetime64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
    def test_sort_order(self):
        """In-place sort of a structured/record array via the ``order`` kwarg."""
        # Test sorting an array with fields
        x1=np.array([21, 32, 14])
        x2=np.array(['my', 'first', 'name'])
        x3=np.array([3.1, 4.5, 6.2])
        r=np.rec.fromarrays([x1, x2, x3], names='id,word,number')
        r.sort(order=['id'])
        assert_equal(r.id, array([14, 21, 32]))
        assert_equal(r.word, array(['name', 'my', 'first']))
        assert_equal(r.number, array([6.2, 3.1, 4.5]))
        r.sort(order=['word'])
        assert_equal(r.id, array([32, 21, 14]))
        assert_equal(r.word, array(['first', 'my', 'name']))
        assert_equal(r.number, array([4.5, 3.1, 6.2]))
        r.sort(order=['number'])
        assert_equal(r.id, array([21, 32, 14]))
        assert_equal(r.word, array(['my', 'first', 'name']))
        assert_equal(r.number, array([3.1, 4.5, 6.2]))
        # Deliberately pick the *non-native* byte order for the int field so
        # that sorting on a byte-swapped column is exercised.
        if sys.byteorder == 'little':
            strtype = '>i2'
        else:
            strtype = '<i2'
        # NOTE(review): `strchar` is a module-level name defined elsewhere in
        # this file — presumably the string dtype character ('S' or 'U');
        # confirm against the file header.
        mydtype = [('name', strchar + '5'), ('col2', strtype)]
        r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
                     dtype= mydtype)
        r.sort(order='col2')
        # values > 255 check that multi-byte swapped ints compare correctly
        assert_equal(r['col2'], [1, 3, 255, 258])
        assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
                                 dtype=mydtype))
    def test_argsort(self):
        """argsort across dtypes, sort kinds, axes, and stability of 'm'."""
        # all c scalar argsorts use the same code with different types
        # so it suffices to run a quick check with one type. The number
        # of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall over to insertion
        # sort for small arrays.
        a = np.arange(101)
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h'] :
            msg = "scalar argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), a, msg)
            assert_equal(b.copy().argsort(kind=kind), b, msg)
        # test complex argsorts. These use the same code as the scalars
        # but the compare fuction differs.
        ai = a*1j + 1
        bi = b*1j + 1
        for kind in ['q', 'm', 'h'] :
            msg = "complex argsort, kind=%s" % kind
            assert_equal(ai.copy().argsort(kind=kind), a, msg)
            assert_equal(bi.copy().argsort(kind=kind), b, msg)
        ai = a + 1j
        bi = b + 1j
        for kind in ['q', 'm', 'h'] :
            msg = "complex argsort, kind=%s" % kind
            assert_equal(ai.copy().argsort(kind=kind), a, msg)
            assert_equal(bi.copy().argsort(kind=kind), b, msg)
        # test string argsorts.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)])
        b = a[::-1].copy()
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h'] :
            msg = "string argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)
        # test unicode argsorts.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h'] :
            msg = "unicode argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)
        # test object array argsorts.
        a = np.empty((101,), dtype=np.object)
        a[:] = list(range(101))
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h'] :
            msg = "object argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)
        # test structured array argsorts.
        dt = np.dtype([('f', float), ('i', int)])
        a = array([(i, i) for i in range(101)], dtype = dt)
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h'] :
            msg = "structured array argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)
        # test datetime64 argsorts.
        a = np.arange(0, 101, dtype='datetime64[D]')
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "datetime64 argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)
        # test timedelta64 argsorts.
        a = np.arange(0, 101, dtype='timedelta64[D]')
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "timedelta64 argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)
        # check axis handling. This should be the same for all type
        # specific argsorts, so we only check it for one type and one kind
        a = np.array([[3, 2], [1, 0]])
        b = np.array([[1, 1], [0, 0]])
        c = np.array([[1, 0], [1, 0]])
        assert_equal(a.copy().argsort(axis=0), b)
        assert_equal(a.copy().argsort(axis=1), c)
        assert_equal(a.copy().argsort(), c)
        # using None is known fail at this point
        #assert_equal(a.copy().argsort(axis=None, c)
        # check that stable argsorts are stable: all-equal input must give
        # the identity permutation for kind='m' (mergesort)
        r = np.arange(100)
        # scalars
        a = np.zeros(100)
        assert_equal(a.argsort(kind='m'), r)
        # complex
        a = np.zeros(100, dtype=np.complex)
        assert_equal(a.argsort(kind='m'), r)
        # string
        a = np.array(['aaaaaaaaa' for i in range(100)])
        assert_equal(a.argsort(kind='m'), r)
        # unicode
        a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
        assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
    def test_searchsorted(self):
        """searchsorted consistency with sort order: nans, endianness, tiny
        arrays, unaligned data and the type-specific binary searches."""
        # test for floats and complex containing nans. The logic is the
        # same for all float types so only test double types for now.
        # The search sorted routines use the compare functions for the
        # array type, so this checks if that is consistent with the sort
        # order.
        # check double
        a = np.array([0, 1, np.nan])
        msg = "Test real searchsorted with nans, side='l'"
        b = a.searchsorted(a, side='l')
        assert_equal(b, np.arange(3), msg)
        msg = "Test real searchsorted with nans, side='r'"
        b = a.searchsorted(a, side='r')
        assert_equal(b, np.arange(1, 4), msg)
        # check double complex
        a = np.zeros(9, dtype=np.complex128)
        a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
        a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
        msg = "Test complex searchsorted with nans, side='l'"
        b = a.searchsorted(a, side='l')
        assert_equal(b, np.arange(9), msg)
        msg = "Test complex searchsorted with nans, side='r'"
        b = a.searchsorted(a, side='r')
        assert_equal(b, np.arange(1, 10), msg)
        # byte-swapped (non-native) dtypes must compare correctly too
        msg = "Test searchsorted with little endian, side='l'"
        a = np.array([0, 128], dtype='<i4')
        b = a.searchsorted(np.array(128, dtype='<i4'))
        assert_equal(b, 1, msg)
        msg = "Test searchsorted with big endian, side='l'"
        a = np.array([0, 128], dtype='>i4')
        b = a.searchsorted(np.array(128, dtype='>i4'))
        assert_equal(b, 1, msg)
        # Check 0 elements
        a = np.ones(0)
        b = a.searchsorted([0, 1, 2], 'l')
        assert_equal(b, [0, 0, 0])
        b = a.searchsorted([0, 1, 2], 'r')
        assert_equal(b, [0, 0, 0])
        a = np.ones(1)
        # Check 1 element
        b = a.searchsorted([0, 1, 2], 'l')
        assert_equal(b, [0, 0, 1])
        b = a.searchsorted([0, 1, 2], 'r')
        assert_equal(b, [0, 1, 1])
        # Check all elements equal
        a = np.ones(2)
        b = a.searchsorted([0, 1, 2], 'l')
        assert_equal(b, [0, 0, 2])
        b = a.searchsorted([0, 1, 2], 'r')
        assert_equal(b, [0, 2, 2])
        # Test searching unaligned array
        a = np.arange(10)
        # viewing one byte past the start of a uint8 buffer misaligns the data
        aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
        unaligned = aligned[1:].view(a.dtype)
        unaligned[:] = a
        # Test searching unaligned array
        b = unaligned.searchsorted(a, 'l')
        assert_equal(b, a)
        b = unaligned.searchsorted(a, 'r')
        assert_equal(b, a + 1)
        # Test searching for unaligned keys
        b = a.searchsorted(unaligned, 'l')
        assert_equal(b, a)
        b = a.searchsorted(unaligned, 'r')
        assert_equal(b, a + 1)
        # Test smart resetting of binsearch indices
        a = np.arange(5)
        b = a.searchsorted([6, 5, 4], 'l')
        assert_equal(b, [5, 5, 4])
        b = a.searchsorted([6, 5, 4], 'r')
        assert_equal(b, [5, 5, 5])
        # Test all type specific binary search functions
        types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
                         np.typecodes['Datetime'], '?O'))
        for dt in types:
            if dt == 'M':
                dt = 'M8[D]'
            if dt == '?':
                a = np.arange(2, dtype=dt)
                out = np.arange(2)
            else:
                a = np.arange(0, 5, dtype=dt)
                out = np.arange(5)
            b = a.searchsorted(a, 'l')
            assert_equal(b, out)
            b = a.searchsorted(a, 'r')
            assert_equal(b, out + 1)
    def test_searchsorted_unicode(self):
        """searchsorted must give consistent results on unicode arrays."""
        # Test searchsorted on unicode strings.
        # 1.6.1 contained a string length miscalculation in
        # arraytypes.c.src:UNICODE_compare() which manifested as
        # incorrect/inconsistent results from searchsorted.
        # The strings below are kept verbatim from the original report.
        a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
                      'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
                     dtype=np.unicode)
        ind = np.arange(len(a))
        # 'left' finds each element's own index, 'right' is one past it
        assert_equal([a.searchsorted(v, 'left') for v in a], ind)
        assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
        assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
        assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(s)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10); d[1] = 4;
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
map(np.random.shuffle, d1)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
map(np.random.shuffle, d1)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
    @dec.skipif(True) # ufunc override disabled for 1.9
    def test_dot_override(self):
        """np.dot and ndarray.dot defer to __numpy_ufunc__ overrides."""
        class A(object):
            # always claims the operation, returning the sentinel "A"
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"
        class B(object):
            # declines every operation, forcing a TypeError upstream
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented
        a = A()
        b = B()
        c = np.array([[1]])
        assert_equal(np.dot(a, b), "A")
        assert_equal(c.dot(a), "A")
        assert_raises(TypeError, np.dot, b, c)
        assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1,:]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
    def test_conjugate(self):
        """conj(), conjugate() and np.conjugate agree for complex, real
        and object arrays; object elements lacking a .conjugate() method
        raise AttributeError."""
        a = np.array([1-1j, 1+1j, 23+23.0j])
        ac = a.conj()
        assert_equal(a.real, ac.real)
        assert_equal(a.imag, -ac.imag)
        assert_equal(ac, a.conjugate())
        assert_equal(ac, np.conjugate(a))
        # NOTE(review): 'F' is the second positional argument of np.array,
        # i.e. the dtype character for csingle (complex64), not an order flag.
        a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
        ac = a.conj()
        assert_equal(a.real, ac.real)
        assert_equal(a.imag, -ac.imag)
        assert_equal(ac, a.conjugate())
        assert_equal(ac, np.conjugate(a))
        # Real arrays: conjugation is the identity.
        a = np.array([1, 2, 3])
        ac = a.conj()
        assert_equal(a, ac)
        assert_equal(ac, a.conjugate())
        assert_equal(ac, np.conjugate(a))
        a = np.array([1.0, 2.0, 3.0])
        ac = a.conj()
        assert_equal(a, ac)
        assert_equal(ac, a.conjugate())
        assert_equal(ac, np.conjugate(a))
        # Object arrays delegate to each element's .conjugate() method.
        a = np.array([1-1j, 1+1j, 1, 2.0], object)
        ac = a.conj()
        assert_equal(ac, [k.conjugate() for k in a])
        assert_equal(ac, a.conjugate())
        assert_equal(ac, np.conjugate(a))
        # A str element has no .conjugate(), so conjugation must fail.
        a = np.array([1-1j, 1, 2.0, 'f'], object)
        assert_raises(AttributeError, lambda: a.conj())
        assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
    """Tests for the experimental __numpy_ufunc__ binary-operator override
    protocol.  Both tests are skipped: the override machinery was disabled
    for the 1.9 release (see the skipif decorators)."""

    @dec.skipif(True) # ufunc override disabled for 1.9
    def test_ufunc_override_rop_precedence(self):
        # Check that __rmul__ and other right-hand operations have
        # precedence over __numpy_ufunc__

        # Maps each forward op to (reflected op, matching ufunc, whether an
        # in-place variant exists).  Comparisons map to their mirrored
        # counterpart (e.g. __ge__ -> __le__) and have no in-place form.
        ops = {
            '__add__': ('__radd__', np.add, True),
            '__sub__': ('__rsub__', np.subtract, True),
            '__mul__': ('__rmul__', np.multiply, True),
            '__truediv__': ('__rtruediv__', np.true_divide, True),
            '__floordiv__': ('__rfloordiv__', np.floor_divide, True),
            '__mod__': ('__rmod__', np.remainder, True),
            '__divmod__': ('__rdivmod__', None, False),
            '__pow__': ('__rpow__', np.power, True),
            '__lshift__': ('__rlshift__', np.left_shift, True),
            '__rshift__': ('__rrshift__', np.right_shift, True),
            '__and__': ('__rand__', np.bitwise_and, True),
            '__xor__': ('__rxor__', np.bitwise_xor, True),
            '__or__': ('__ror__', np.bitwise_or, True),
            '__ge__': ('__le__', np.less_equal, False),
            '__gt__': ('__lt__', np.less, False),
            '__le__': ('__ge__', np.greater_equal, False),
            '__lt__': ('__gt__', np.greater, False),
            '__eq__': ('__eq__', np.equal, False),
            '__ne__': ('__ne__', np.not_equal, False),
        }

        class OtherNdarraySubclass(ndarray):
            pass

        class OtherNdarraySubclassWithOverride(ndarray):
            # Any ufunc dispatch reaching this override is a test failure.
            def __numpy_ufunc__(self, *a, **kw):
                raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
                                      "been called!") % (a, kw))

        def check(op_name, ndsubclass):
            """Build a dummy class defining op_name/rop_name (optionally an
            ndarray subclass) and verify its Python-level operators win over
            the ufunc path."""
            rop_name, np_op, has_iop = ops[op_name]
            if has_iop:
                iop_name = '__i' + op_name[2:]
                iop = getattr(operator, iop_name)
            if op_name == "__divmod__":
                # operator has no divmod function; use the builtin.
                op = divmod
            else:
                op = getattr(operator, op_name)
            # Dummy class
            def __init__(self, *a, **kw):
                pass

            def __numpy_ufunc__(self, *a, **kw):
                raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
                                      "been called!") % (a, kw))

            def __op__(self, *other):
                return "op"

            def __rop__(self, *other):
                return "rop"

            if ndsubclass:
                bases = (ndarray,)
            else:
                bases = (object,)

            dct = {'__init__': __init__,
                   '__numpy_ufunc__': __numpy_ufunc__,
                   op_name: __op__}
            if op_name != rop_name:
                dct[rop_name] = __rop__
            cls = type("Rop" + rop_name, bases, dct)

            # Check behavior against both bare ndarray objects and a
            # ndarray subclasses with and without their own override
            obj = cls((1,), buffer=np.ones(1,))

            arr_objs = [np.array([1]),
                        np.array([2]).view(OtherNdarraySubclass),
                        np.array([3]).view(OtherNdarraySubclassWithOverride),
                        ]

            for arr in arr_objs:
                err_msg = "%r %r" % (op_name, arr,)

                # Check that ndarray op gives up if it sees a non-subclass
                if not isinstance(obj, arr.__class__):
                    assert_equal(getattr(arr, op_name)(obj),
                                 NotImplemented, err_msg=err_msg)

                # Check that the Python binops have priority
                assert_equal(op(obj, arr), "op", err_msg=err_msg)
                if op_name == rop_name:
                    assert_equal(op(arr, obj), "op", err_msg=err_msg)
                else:
                    assert_equal(op(arr, obj), "rop", err_msg=err_msg)

                # Check that Python binops have priority also for in-place ops
                if has_iop:
                    assert_equal(getattr(arr, iop_name)(obj),
                                 NotImplemented, err_msg=err_msg)
                    if op_name != "__pow__":
                        # inplace pow requires the other object to be
                        # integer-like?
                        assert_equal(iop(arr, obj), "rop", err_msg=err_msg)

                # Check that ufunc call __numpy_ufunc__ normally
                if np_op is not None:
                    assert_raises(AssertionError, np_op, arr, obj,
                                  err_msg=err_msg)
                    assert_raises(AssertionError, np_op, obj, arr,
                                  err_msg=err_msg)

        # Check all binary operations
        for op_name in sorted(ops.keys()):
            yield check, op_name, True
            yield check, op_name, False

    @dec.skipif(True) # ufunc override disabled for 1.9
    def test_ufunc_override_rop_simple(self):
        # Check parts of the binary op overriding behavior in an
        # explicit test case that is easier to understand.
        class SomeClass(object):
            def __numpy_ufunc__(self, *a, **kw):
                return "ufunc"

            def __mul__(self, other):
                return 123

            def __rmul__(self, other):
                return 321

            def __gt__(self, other):
                return "yep"

            def __lt__(self, other):
                return "nope"

        class SomeClass2(SomeClass, ndarray):
            # Intercepts np.multiply; for any other ufunc, substitutes a
            # plain ndarray view of itself and re-dispatches to the real
            # ufunc, re-wrapping the result unless out= was given.
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                if ufunc is np.multiply:
                    return "ufunc"
                else:
                    inputs = list(inputs)
                    inputs[i] = np.asarray(self)
                    func = getattr(ufunc, method)
                    r = func(*inputs, **kw)
                    if 'out' in kw:
                        return r
                    else:
                        x = SomeClass2(r.shape, dtype=r.dtype)
                        x[...] = r
                        return x

        arr = np.array([0])
        obj = SomeClass()
        obj2 = SomeClass2((1,), dtype=np.int_)
        obj2[0] = 9

        # Python-level operator overrides win over the ufunc path...
        assert_equal(obj * arr, 123)
        assert_equal(arr * obj, 321)
        assert_equal(arr > obj, "nope")
        assert_equal(arr < obj, "yep")
        # ...but a direct ufunc call still goes through __numpy_ufunc__.
        assert_equal(np.multiply(arr, obj), "ufunc")
        arr *= obj
        assert_equal(arr, 321)

        assert_equal(obj2 * arr, 123)
        assert_equal(arr * obj2, 321)
        assert_equal(arr > obj2, "nope")
        assert_equal(arr < obj2, "yep")
        assert_equal(np.multiply(arr, obj2), "ufunc")
        arr *= obj2
        assert_equal(arr, 321)

        # Non-multiply in-place op goes through the re-dispatch branch.
        obj2 += 33
        assert_equal(obj2[0], 42)
        assert_equal(obj2.sum(), 42)
        assert_(isinstance(obj2, SomeClass2))
class TestSubscripting(TestCase):
    """Types produced by indexing a 1-d integer array."""

    def test_test_zero_rank(self):
        """Plain integer indexing yields a numpy scalar, while indexing
        with a trailing Ellipsis yields a zero-rank ndarray."""
        vec = array([1, 2, 3])
        scalar = vec[0]
        self.assertTrue(isinstance(scalar, np.int_))
        # On Python 2, the numpy integer scalar also subclasses builtin int.
        if sys.version_info[0] < 3:
            self.assertTrue(isinstance(scalar, int))
        zero_rank = vec[0, ...]
        self.assertTrue(type(zero_rank) is ndarray)
class TestPickling(TestCase):
    """Pickle round-trips, including unpickling of historical numpy pickle
    formats.  The long `s` strings below are protocol-2 pickles captured
    from older numpy versions — they are byte-exact fixtures and must not
    be edited or reformatted."""

    def test_roundtrip(self):
        """dumps()/pickle.loads round-trips plain, transposed and
        structured arrays."""
        import pickle
        carray = array([[2, 9], [7, 0], [3, 8]])
        DATA = [
            carray,
            transpose(carray),
            array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
                ('c', float)])
        ]

        for a in DATA:
            assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)

    def _loads(self, obj):
        """Unpickle obj; on Python 3 the old str-pickled data needs
        latin1 decoding."""
        if sys.version_info[0] >= 3:
            return loads(obj, encoding='latin1')
        else:
            return loads(obj)

    # version 0 pickles, using protocol=2 to pickle
    # version 0 doesn't have a version field
    def test_version0_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = array([1, 2, 3, 4], dtype=int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version0_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version0_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a':1}, {'b':2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    # version 1 pickles, using protocol=2 to pickle
    def test_version1_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = array([1, 2, 3, 4], dtype=int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version1_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version1_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = array([{'a':1}, {'b':2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_subarray_int_shape(self):
        """Structured dtype with subarray fields pickled by an old numpy
        (protocol 0) must still unpickle correctly."""
        s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
        a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
        p = self._loads(asbytes(s))
        assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, array([[[2.0]]]))
def test_tuple(self):
x = ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, array([[[2.0]]]))
def test_mask(self):
x = array([1, 2, 3, 4])
m = array([0, 1], bool)
assert_array_equal(x[m], array([2]))
def test_mask2(self):
x = array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = array([0, 1], bool)
m2 = array([[0, 1], [1, 0]], bool)
m3 = array([[0, 1]], bool)
assert_array_equal(x[m], array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], array([2, 5]))
assert_array_equal(x[m3], array([2]))
def test_assign_mask(self):
x = array([1, 2, 3, 4])
m = array([0, 1], bool)
x[m] = 5
assert_array_equal(x, array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = array([0, 1], bool)
m2 = array([[0, 1], [1, 0]], bool)
m3 = array([[0, 1]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = array(["This", "is", "example"])
g2 = array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = array([sixu("This"), sixu("is"), sixu("example")])
g2 = array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
    """np.argmax over floats/complex containing nans, datetimes,
    timedeltas and booleans."""

    # Each entry pairs an input with the expected argmax index.  The
    # expected values show that nan entries (including complex values with
    # a nan in either component) win, with the first nan's position
    # returned.
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),

        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 5),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2015-11-20T12:20:59'),
          np.datetime64('1932-09-23T10:10:13'),
          np.datetime64('2014-10-10T03:50:30')], 3),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 0),

        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 0),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 1),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 2),

        ([False, False, False, False, True], 4),
        ([False, False, False, True, False], 3),
        ([True, False, False, False, False], 0),
        ([True, False, True, False, False], 0),

        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 3),
        #(['zz', 'a', 'aa', 'a'], 0),
        #(['aa', 'z', 'zz', 'a'], 2),
    ]

    def test_all(self):
        """argmax along every axis selects the position of max along it."""
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amax = a.max(i)
            aargmax = a.argmax(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(all(amax == aargmax.choose(*a.transpose(i,*axes))))

    def test_combinations(self):
        """Every nan_arr entry yields the expected index, and that index
        recovers the array's max value."""
        for arr, pos in self.nan_arr:
            assert_equal(np.argmax(arr), pos, err_msg="%r"%arr)
            assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r"%arr)

    def test_output_shape(self):
        """The out= argument must have exactly the result's shape."""
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)

        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)

        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))

        out = np.ones(10, dtype=np.int_)
        a.argmax(-1, out=out)
        assert_equal(out, a.argmax(-1))

    def test_argmax_unicode(self):
        """argmax works on large unicode arrays."""
        d = np.zeros(6031, dtype='<U9')
        d[5942] = "as"
        assert_equal(d.argmax(), 5942)
class TestArgmin(TestCase):
    """np.argmin over floats/complex containing nans, datetimes,
    timedeltas, booleans and the most-negative signed integers."""

    # Each entry pairs an input with the expected argmin index.  As with
    # argmax, the expected values show that nan entries (including complex
    # values with a nan component) win, with the first nan's position
    # returned.
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 1),

        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 0),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2014-11-20T12:20:59'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 5),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 4),

        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 2),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 0),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 1),

        ([True, True, True, True, False], 4),
        ([True, True, True, False, True], 3),
        ([False, True, True, True, True], 0),
        ([False, True, False, True, True], 0),

        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 0),
        #(['zz', 'a', 'aa', 'a'], 1),
        #(['aa', 'z', 'zz', 'a'], 3),
    ]

    def test_all(self):
        """argmin along every axis selects the position of min along it."""
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amin = a.min(i)
            aargmin = a.argmin(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(all(amin == aargmin.choose(*a.transpose(i,*axes))))

    def test_combinations(self):
        """Every nan_arr entry yields the expected index, and that index
        recovers the array's min value."""
        for arr, pos in self.nan_arr:
            assert_equal(np.argmin(arr), pos, err_msg="%r"%arr)
            assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r"%arr)

    def test_minimum_signed_integers(self):
        """The most-negative value of each signed width is found (guards
        against overflow in the comparison loop)."""
        a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
        assert_equal(np.argmin(a), 1)

    def test_output_shape(self):
        """The out= argument must have exactly the result's shape."""
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)

        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)

        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))

        out = np.ones(10, dtype=np.int_)
        a.argmin(-1, out=out)
        assert_equal(out, a.argmin(-1))

    def test_argmin_unicode(self):
        """argmin works on large unicode arrays."""
        d = np.ones(6031, dtype='<U9')
        d[6001] = "0"
        assert_equal(d.argmin(), 6001)
class TestMinMax(TestCase):
    """np.amax/np.amin for scalar inputs and out-of-range axes."""

    def test_scalar(self):
        """A scalar reduces along axis 0 or None, but any other axis is
        out of bounds and raises ValueError."""
        for reducer in (np.amax, np.amin):
            assert_raises(ValueError, reducer, 1, 1)
            assert_equal(reducer(1, axis=0), 1)
            assert_equal(reducer(1, axis=None), 1)

    def test_axis(self):
        """An axis beyond ndim raises; a valid axis reduces that axis."""
        assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
        assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
class TestNewaxis(TestCase):
    """Broadcasting a scalar against a newaxis-expanded 1-d array."""

    def test_basic(self):
        """Scaling a column view gives the same values as scaling the
        original flat array."""
        base = array([0, -0.1, 0.1])
        column_scaled = 250 * base[:, newaxis]
        assert_almost_equal(column_scaled.ravel(), 250 * base)
class TestClip(TestCase):
    """ndarray.clip across dtype groups, byte orders, in-place and
    out-of-place operation, and the single-bound min=/max= forms."""

    def _check_range(self, x, cmin, cmax):
        """Assert every element of x lies within [cmin, cmax]."""
        assert_(np.all(x >= cmin))
        assert_(np.all(x <= cmax))

    def _clip_type(self,type_group,array_max,
                   clip_min,clip_max,inplace=False,
                   expected_min=None,expected_max=None):
        """Clip random data for every dtype in np.sctypes[type_group], in
        native and byte-swapped order, checking the result range and the
        result's byte order.  expected_min/expected_max override the range
        check (e.g. unsigned types clamp a negative clip_min to 0).
        Returns the last clipped array."""
        if expected_min is None:
            expected_min = clip_min
        if expected_max is None:
            expected_max = clip_max

        for T in np.sctypes[type_group]:
            if sys.byteorder == 'little':
                byte_orders = ['=', '>']
            else:
                byte_orders = ['<', '=']

            for byteorder in byte_orders:
                dtype = np.dtype(T).newbyteorder(byteorder)

                x = (np.random.random(1000) * array_max).astype(dtype)
                if inplace:
                    x.clip(clip_min, clip_max, x)
                else:
                    x = x.clip(clip_min, clip_max)
                    # Out-of-place clip returns a native-byte-order result.
                    byteorder = '='

                # Single-byte dtypes report byteorder '|' regardless.
                if x.dtype.byteorder == '|': byteorder = '|'
                assert_equal(x.dtype.byteorder, byteorder)
                self._check_range(x, expected_min, expected_max)
        return x

    def test_basic(self):
        """Exercise float/int/uint groups, in-place and out-of-place."""
        for inplace in [False, True]:
            self._clip_type('float', 1024, -12.8, 100.2, inplace=inplace)
            self._clip_type('float', 1024, 0, 0, inplace=inplace)

            self._clip_type('int', 1024, -120, 100.5, inplace=inplace)
            self._clip_type('int', 1024, 0, 0, inplace=inplace)

            # Unsigned: a negative lower bound clamps to 0.
            x = self._clip_type('uint', 1024, -120, 100, expected_min=0,
                                inplace=inplace)
            x = self._clip_type('uint', 1024, 0, 0, inplace=inplace)

    def test_record_array(self):
        """clip works on a single field of a structured array."""
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
        y = rec['x'].clip(-0.3, 0.5)
        self._check_range(y, -0.3, 0.5)

    def test_max_or_min(self):
        """clip accepts a single positional bound, or only min=/max=."""
        val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
        x = val.clip(3)
        assert_(np.all(x >= 3))
        x = val.clip(min=3)
        assert_(np.all(x >= 3))
        x = val.clip(max=4)
        assert_(np.all(x <= 4))
class TestPutmask(object):
    """np.putmask: dtype coverage, mask-size validation, byte order and
    structured arrays.  The test_ip_* methods are nose-style generator
    tests driving the tst_* helpers."""

    def tst_basic(self, x, T, mask, val):
        """putmask writes val (cast to dtype T) exactly where mask is
        True, without changing the dtype."""
        np.putmask(x, mask, val)
        assert_(np.all(x[mask] == T(val)))
        assert_(x.dtype == T)

    def test_ip_types(self):
        """Run tst_basic for several fill values over every dtype except
        the flexible/object ones."""
        unchecked_types = [str, unicode, np.void, object]

        x = np.random.random(1000)*100
        mask = x < 40

        for val in [-100, 0, 15]:
            for types in np.sctypes.values():
                for T in types:
                    if T not in unchecked_types:
                        yield self.tst_basic, x.copy().astype(T), T, mask, val

    def test_mask_size(self):
        """A mask smaller than the target array raises ValueError."""
        assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)

    def tst_byteorder(self, dtype):
        """putmask works regardless of the array's byte order."""
        x = np.array([1, 2, 3], dtype)
        np.putmask(x, [True, False, True], -1)
        assert_array_equal(x, [-1, 2, -1])

    def test_ip_byteorder(self):
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype

    def test_record_array(self):
        """putmask on one field of a structured array must not disturb
        the other fields."""
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                      dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        np.putmask(rec['x'], [True, False], 10)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [2, 4])
        assert_array_equal(rec['z'], [3, 3])
        np.putmask(rec['y'], [True, False], 11)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [11, 4])
        assert_array_equal(rec['z'], [3, 3])

    def test_masked_array(self):
        # putmask on masked arrays is not currently exercised.
        ## x = np.array([1,2,3])
        ## z = np.ma.array(x,mask=[True,False,False])
        ## np.putmask(z,[True,True,True],3)
        pass
class TestTake(object):
    """ndarray.take: dtype coverage, the 'raise'/'clip'/'wrap' index
    modes, byte order and structured arrays.  The test_ip_* methods are
    nose-style generator tests driving the tst_* helpers."""

    def tst_basic(self, x):
        """Taking every index along axis 0 reproduces the array."""
        ind = list(range(x.shape[0]))
        assert_array_equal(x.take(ind, axis=0), x)

    def test_ip_types(self):
        """Run tst_basic for every dtype except the flexible/object ones."""
        unchecked_types = [str, unicode, np.void, object]

        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        for types in np.sctypes.values():
            for T in types:
                if T not in unchecked_types:
                    yield self.tst_basic, x.copy().astype(T)

    def test_raise(self):
        """Default mode raises IndexError on out-of-bounds indices but
        still accepts valid negative indices."""
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
        assert_raises(IndexError, x.take, [-3], axis=0)
        assert_array_equal(x.take([-1], axis=0)[0], x[1])

    def test_clip(self):
        """mode='clip' clamps indices into the valid range."""
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
        assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])

    def test_wrap(self):
        """mode='wrap' wraps indices modulo the axis length."""
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
        assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
        assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])

    def tst_byteorder(self, dtype):
        """take works regardless of the array's byte order."""
        x = np.array([1, 2, 3], dtype)
        assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])

    def test_ip_byteorder(self):
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype

    def test_record_array(self):
        """take preserves all fields of a structured array."""
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                      dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        rec1 = rec.take([1])
        assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
    """np.lexsort with list keys, a stacked key matrix, and
    datetime64/timedelta64 keys."""

    def test_basic(self):
        """lexsort sorts by the last key first; a vstacked key matrix
        gives the same ordering as the equivalent tuple of keys."""
        major = [1, 2, 1, 3, 1, 5]
        minor = [0, 4, 5, 6, 2, 3]
        expected = np.array([0, 4, 2, 1, 3, 5])

        order = np.lexsort((minor, major))
        assert_array_equal(order, expected)

        # A 2-d key array sorts by its last row, matching the tuple form.
        stacked = np.vstack((minor, major))
        order = np.lexsort(stacked)
        assert_array_equal(order, expected)
        assert_array_equal(stacked[1][order], np.sort(stacked[1]))

    def test_datetime(self):
        """With equal primary keys, ordering follows the secondary
        datetime64/timedelta64 key."""
        for kind in ('datetime64[D]', 'timedelta64[D]'):
            major = np.array([0, 0, 0], dtype=kind)
            minor = np.array([2, 1, 0], dtype=kind)
            order = np.lexsort((minor, major))
            assert_array_equal(order, np.array([2, 1, 0]))
class TestIO(object):
    """Test tofile, fromfile, tobytes, and fromstring"""

    def setUp(self):
        """Create complex test data (with nan/inf entries) and a temp
        file path for the round-trip tests."""
        shape = (2, 4, 3)
        rand = np.random.random
        self.x = rand(shape) + rand(shape).astype(np.complex)*1j
        self.x[0,:, 1] = [nan, inf, -inf, nan]
        self.dtype = self.x.dtype
        self.tempdir = tempfile.mkdtemp()
        self.filename = tempfile.mktemp(dir=self.tempdir)

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def test_bool_fromstring(self):
        """Any nonzero numeric token parses as True with a bool dtype."""
        v = np.array([True, False, True, False], dtype=np.bool_)
        y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
        assert_array_equal(v, y)

    def test_uint64_fromstring(self):
        """uint64 values beyond the int64 range parse correctly."""
        d = np.fromstring("9923372036854775807 104783749223640",
                          dtype=np.uint64, sep=' ');
        e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
        assert_array_equal(d, e)

    def test_int64_fromstring(self):
        """Large negative/positive int64 values parse correctly."""
        d = np.fromstring("-25041670086757 104783749223640",
                          dtype=np.int64, sep=' ');
        e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
        assert_array_equal(d, e)

    def test_empty_files_binary(self):
        """fromfile on an empty file yields an empty array (binary mode)."""
        f = open(self.filename, 'w')
        f.close()
        y = fromfile(self.filename)
        assert_(y.size == 0, "Array not empty")

    def test_empty_files_text(self):
        """fromfile on an empty file yields an empty array (text mode)."""
        f = open(self.filename, 'w')
        f.close()
        y = fromfile(self.filename, sep=" ")
        assert_(y.size == 0, "Array not empty")

    def test_roundtrip_file(self):
        """tofile/fromfile round-trip through an open file object."""
        f = open(self.filename, 'wb')
        self.x.tofile(f)
        f.close()
        # NB. doesn't work with flush+seek, due to use of C stdio
        f = open(self.filename, 'rb')
        y = np.fromfile(f, dtype=self.dtype)
        f.close()
        assert_array_equal(y, self.x.flat)

    def test_roundtrip_filename(self):
        """tofile/fromfile round-trip when given a filename string."""
        self.x.tofile(self.filename)
        y = np.fromfile(self.filename, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)

    def test_roundtrip_binary_str(self):
        """tobytes/fromstring round-trip, in both C and Fortran order."""
        s = self.x.tobytes()
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)

        s = self.x.tobytes('F')
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flatten('F'))

    def test_roundtrip_str(self):
        """str()-formatted text round-trips with reduced precision;
        non-finite entries must match exactly."""
        x = self.x.real.ravel()
        s = "@".join(map(str, x))
        y = np.fromstring(s, sep="@")
        # NB. str imbues less precision
        nan_mask = ~np.isfinite(x)
        assert_array_equal(x[nan_mask], y[nan_mask])
        assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)

    def test_roundtrip_repr(self):
        """repr()-formatted text round-trips exactly."""
        x = self.x.real.ravel()
        s = "@".join(map(repr, x))
        y = np.fromstring(s, sep="@")
        assert_array_equal(x, y)

    def test_file_position_after_fromfile(self):
        """fromfile must leave the file position just past the bytes it
        consumed, for files around io.DEFAULT_BUFFER_SIZE in size."""
        # gh-4118
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]

        for size in sizes:
            f = open(self.filename, 'wb')
            f.seek(size-1)
            f.write(b'\0')
            f.close()

            for mode in ['rb', 'r+b']:
                err_msg = "%d %s" % (size, mode)

                f = open(self.filename, mode)
                f.read(2)
                np.fromfile(f, dtype=np.float64, count=1)
                pos = f.tell()
                f.close()
                # 2 bytes read + one float64 = position 10.
                assert_equal(pos, 10, err_msg=err_msg)

    def test_file_position_after_tofile(self):
        """tofile must leave the file position just past the bytes it
        wrote, including after an intervening seek/read."""
        # gh-4118
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]

        for size in sizes:
            err_msg = "%d" % (size,)

            f = open(self.filename, 'wb')
            f.seek(size-1)
            f.write(b'\0')
            f.seek(10)
            f.write(b'12')
            np.array([0], dtype=np.float64).tofile(f)
            pos = f.tell()
            f.close()
            assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)

            f = open(self.filename, 'r+b')
            f.read(2)
            f.seek(0, 1)  # seek between read&write required by ANSI C
            np.array([0], dtype=np.float64).tofile(f)
            pos = f.tell()
            f.close()
            assert_equal(pos, 10, err_msg=err_msg)

    def _check_from(self, s, value, **kw):
        """Parse s with both fromstring and fromfile and compare the
        results against value."""
        y = np.fromstring(asbytes(s), **kw)
        assert_array_equal(y, value)

        f = open(self.filename, 'wb')
        f.write(asbytes(s))
        f.close()
        y = np.fromfile(self.filename, **kw)
        assert_array_equal(y, value)

    def test_nan(self):
        """Every textual nan spelling (with optional sign/payload) parses
        to nan."""
        self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
                         [nan, nan, nan, nan, nan, nan, nan],
                         sep=' ')

    def test_inf(self):
        """Every textual infinity spelling parses with the right sign."""
        self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF",
                         [inf, inf, -inf, inf, -inf, inf, -inf], sep=' ')

    def test_numbers(self):
        """Decimal and exponent notations parse correctly."""
        self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
                         [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')

    def test_binary(self):
        """Raw little-endian float32 bytes parse in binary mode."""
        self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
                         array([1, 2, 3, 4]),
                         dtype='<f4')

    @dec.slow # takes > 1 minute on mechanical hard drive
    def test_big_binary(self):
        """Test workarounds for 32-bit limited fwrite, fseek, and ftell
        calls in windows. These normally would hang doing something like this.
        See http://projects.scipy.org/numpy/ticket/1660"""
        if sys.platform != 'win32':
            return
        try:
            # before workarounds, only up to 2**32-1 worked
            fourgbplus = 2**32 + 2**16
            testbytes = np.arange(8, dtype=np.int8)
            n = len(testbytes)
            flike = tempfile.NamedTemporaryFile()
            f = flike.file
            np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
            flike.seek(0)
            a = np.fromfile(f, dtype=np.int8)
            flike.close()
            assert_(len(a) == fourgbplus)
            # check only start and end for speed:
            assert_((a[:n] == testbytes).all())
            assert_((a[-n:] == testbytes).all())
        except (MemoryError, ValueError):
            pass

    def test_string(self):
        """Comma-separated values parse as floats by default."""
        self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')

    def test_counted_string(self):
        """count= limits parsing; count=-1 reads everything."""
        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
        self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')

    def test_string_with_ws(self):
        """Trailing whitespace after the last token is tolerated."""
        self._check_from('1 2  3     4   ', [1, 2, 3, 4], dtype=int, sep=' ')

    def test_counted_string_with_ws(self):
        """count= and surplus whitespace interact correctly."""
        self._check_from('1 2  3     4   ', [1, 2, 3], count=3, dtype=int,
                         sep=' ')

    def test_ascii(self):
        """Whitespace around the separator is tolerated."""
        self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
        self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')

    def test_malformed(self):
        """A malformed token parses as its leading numeric prefix (the
        trailing ',234' is dropped)."""
        self._check_from('1.234 1,234', [1.234, 1.], sep=' ')

    def test_long_sep(self):
        """Multi-character separators are supported."""
        self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')

    def test_dtype(self):
        """An explicit integer dtype is honored in text mode."""
        v = np.array([1, 2, 3, 4], dtype=np.int_)
        self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)

    def test_dtype_bool(self):
        # can't use _check_from because fromstring can't handle True/False
        v = np.array([True, False, True, False], dtype=np.bool_)
        s = '1,0,-2.3,0'
        f = open(self.filename, 'wb')
        f.write(asbytes(s))
        f.close()
        y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
        assert_(y.dtype == '?')
        assert_array_equal(y, v)

    def test_tofile_sep(self):
        """tofile with sep= writes repr-style text values."""
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        f = open(self.filename, 'w')
        x.tofile(f, sep=',')
        f.close()
        f = open(self.filename, 'r')
        s = f.read()
        f.close()
        assert_equal(s, '1.51,2.0,3.51,4.0')

    def test_tofile_format(self):
        """tofile honors an explicit format= string."""
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        f = open(self.filename, 'w')
        x.tofile(f, sep=',', format='%.2f')
        f.close()
        f = open(self.filename, 'r')
        s = f.read()
        f.close()
        assert_equal(s, '1.51,2.00,3.51,4.00')

    def test_locale(self):
        """Numeric parsing/formatting must be locale-independent."""
        in_foreign_locale(self.test_numbers)()
        in_foreign_locale(self.test_nan)()
        in_foreign_locale(self.test_inf)()
        in_foreign_locale(self.test_counted_string)()
        in_foreign_locale(self.test_ascii)()
        in_foreign_locale(self.test_malformed)()
        in_foreign_locale(self.test_tofile_sep)()
        in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
    """np.frombuffer round-trips for both byte orders (nose-style
    generator tests)."""

    def tst_basic(self, buffer, expected, kwargs):
        """frombuffer on the raw bytes reproduces the original data."""
        assert_array_equal(np.frombuffer(buffer,**kwargs), expected)

    def test_ip_basic(self):
        """Round-trip random float/int/complex data through tobytes for
        little- and big-endian dtypes."""
        for byteorder in ['<', '>']:
            for dtype in [float, int, np.complex]:
                dt = np.dtype(dtype).newbyteorder(byteorder)
                x = (np.random.random((4, 7))*5).astype(dt)
                buf = x.tobytes()
                yield self.tst_basic, buf, x.flat, {'dtype':dt}

    def test_empty(self):
        """An empty buffer yields an empty array."""
        yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
    """In-place ndarray.resize: shape forms, zero-fill of grown storage,
    reference checking, and argument validation."""
    def test_basic(self):
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        x.resize((5, 5))
        # Original data is preserved at the front of the buffer...
        assert_array_equal(x.flat[:9],
                np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
        # ...and the newly allocated tail is zero-filled.
        assert_array_equal(x[9:].flat, 0)
    def test_check_reference(self):
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        y = x  # deliberate extra reference: refcheck must reject the resize
        self.assertRaises(ValueError, x.resize, (5, 1))
    def test_int_shape(self):
        x = np.eye(3)
        x.resize(3)  # a bare int is accepted as a 1-d shape
        assert_array_equal(x, np.eye(3)[0,:])
    def test_none_shape(self):
        # None and no argument both mean "keep the current shape".
        x = np.eye(3)
        x.resize(None)
        assert_array_equal(x, np.eye(3))
        x.resize()
        assert_array_equal(x, np.eye(3))
    def test_invalid_arguements(self):
        # NOTE(review): method name has a typo ("arguements"); kept as-is
        # to avoid churn in test identifiers.
        self.assertRaises(TypeError, np.eye(3).resize, 'hi')
        self.assertRaises(ValueError, np.eye(3).resize, -1)
        self.assertRaises(TypeError, np.eye(3).resize, order=1)
        self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
    def test_freeform_shape(self):
        x = np.eye(3)
        x.resize(3, 2, 1)  # shape may also be given as separate ints
        assert_(x.shape == (3, 2, 1))
    def test_zeros_appended(self):
        x = np.eye(3)
        x.resize(2, 3, 3)
        assert_array_equal(x[0], np.eye(3))
        assert_array_equal(x[1], np.zeros((3, 3)))
    def test_obj_obj(self):
        # check memory is initialized on resize, gh-4857
        a = ones(10, dtype=[('k', object, 2)])
        a.resize(15,)
        assert_equal(a.shape, (15,))
        assert_array_equal(a['k'][-5:], 0)
        assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
    """Structured (record) dtypes: field renaming, bytes vs. unicode field
    names across Python 2/3, multi-field indexing plus its FutureWarning,
    and hashing of record scalars."""
    def test_field_rename(self):
        # dtype.names is assignable and renames fields in place.
        dt = np.dtype([('f', float), ('i', int)])
        dt.names = ['p', 'q']
        assert_equal(dt.names, ['p', 'q'])
    if sys.version_info[0] >= 3:
        def test_bytes_fields(self):
            # Bytes are not allowed in field names and not recognized in titles
            # on Py3
            assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
            assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
            # A bytes *title* is silently ignored, so lookup by it fails.
            dt = np.dtype([((asbytes('a'), 'b'), int)])
            assert_raises(ValueError, dt.__getitem__, asbytes('a'))
            x = np.array([(1,), (2,), (3,)], dtype=dt)
            assert_raises(ValueError, x.__getitem__, asbytes('a'))
            y = x[0]
            assert_raises(IndexError, y.__getitem__, asbytes('a'))
    else:
        def test_unicode_field_titles(self):
            # Unicode field titles are added to field dict on Py2
            title = unicode('b')
            dt = np.dtype([((title, 'a'), int)])
            # Lookup by title and by name must both succeed (no exception).
            dt[title]
            dt['a']
            x = np.array([(1,), (2,), (3,)], dtype=dt)
            x[title]
            x['a']
            y = x[0]
            y[title]
            y['a']
        def test_unicode_field_names(self):
            # Unicode field names are not allowed on Py2
            title = unicode('b')
            assert_raises(TypeError, np.dtype, [(title, int)])
            assert_raises(TypeError, np.dtype, [(('a', title), int)])
    def test_field_names(self):
        # Test unicode and 8-bit / byte strings can be used
        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        is_py3 = sys.version_info[0] >= 3
        if is_py3:
            funcs = (str,)
            # byte string indexing fails gracefully
            assert_raises(ValueError, a.__setitem__, asbytes('f1'), 1)
            assert_raises(ValueError, a.__getitem__, asbytes('f1'))
            assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
            assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
        else:
            funcs = (str, unicode)
        for func in funcs:
            b = a.copy()
            fn1 = func('f1')
            b[fn1] = 1
            assert_equal(b[fn1], 1)
            # Unknown field names raise ValueError at the array level...
            fnn = func('not at all')
            assert_raises(ValueError, b.__setitem__, fnn, 1)
            assert_raises(ValueError, b.__getitem__, fnn)
            b[0][fn1] = 2
            assert_equal(b[fn1], 2)
            # Subfield
            # ...and IndexError at the record-scalar level.
            assert_raises(IndexError, b[0].__setitem__, fnn, 1)
            assert_raises(IndexError, b[0].__getitem__, fnn)
            # Subfield
            fn3 = func('f3')
            sfn1 = func('sf1')
            b[fn3][sfn1] = 1
            assert_equal(b[fn3][sfn1], 1)
            assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
            assert_raises(ValueError, b[fn3].__getitem__, fnn)
            # multiple Subfields
            fn2 = func('f2')
            b[fn2] = 3
            # Multi-field indexing preserves the requested field order.
            assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
            assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
            assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
            # view of subfield view/copy
            assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
            assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
            view_dtype=[('f1', 'i4'), ('f3', [('', 'i4')])]
            assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
        # non-ascii unicode field indexing is well behaved
        if not is_py3:
            raise SkipTest('non ascii unicode field indexing skipped; '
                           'raises segfault on python 2.x')
        else:
            assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
            assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
    def test_field_names_deprecation(self):
        def collect_warning_types(f, *args, **kwargs):
            # Run f while recording every warning; return just the categories.
            with warnings.catch_warnings(record=True) as log:
                warnings.simplefilter("always")
                f(*args, **kwargs)
            return [w.category for w in log]
        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        a['f1'][0] = 1
        a['f2'][0] = 2
        a['f3'][0] = (3,)
        b = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        b['f1'][0] = 1
        b['f2'][0] = 2
        b['f3'][0] = (3,)
        # All the different functions raise a warning, but not an error, and
        # 'a' is not modified:
        assert_equal(collect_warning_types(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
                     [FutureWarning])
        assert_equal(a, b)
        # Views also warn
        subset = a[['f1', 'f2']]
        subset_view = subset.view()
        assert_equal(collect_warning_types(subset_view['f1'].__setitem__, 0, 10),
                     [FutureWarning])
        # But the write goes through:
        assert_equal(subset['f1'][0], 10)
        # Only one warning per multiple field indexing, though (even if there are
        # multiple views involved):
        assert_equal(collect_warning_types(subset['f1'].__setitem__, 0, 10),
                     [])
    def test_record_hash(self):
        # Read-only record scalars are hashable; equal records (even across
        # differently-named but layout-identical dtypes) hash equal.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        a.flags.writeable = False
        b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
        b.flags.writeable = False
        c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
        c.flags.writeable = False
        self.assertTrue(hash(a[0]) == hash(a[1]))
        self.assertTrue(hash(a[0]) == hash(b[0]))
        self.assertTrue(hash(a[0]) != hash(b[1]))
        self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
    def test_record_no_hash(self):
        # Records from a writeable array are unhashable.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        self.assertRaises(TypeError, hash, a[0])
class TestView(TestCase):
    """Viewing a 4-byte structured array as a plain 4-byte integer must
    reinterpret the packed bytes, via both the dtype= keyword and the
    positional form."""
    def test_basic(self):
        rgba = [('r', np.int8), ('g', np.int8),
                ('b', np.int8), ('a', np.int8)]
        x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=rgba)
        # We must be specific about the endianness here:
        y = x.view(dtype='<i4')
        # ... and again without the keyword.
        z = x.view('<i4')
        assert_array_equal(y, z)
        # 0x04030201 == 67305985, 0x08070605 == 134678021 (little-endian).
        assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
    """mean/var/std: keepdims, out=, result dtypes, ddof scaling, empty
    inputs, and preservation of ndarray subclasses."""

    # Module-level wrappers so every test can loop over all three methods.
    funcs = [_mean, _var, _std]

    def setUp(self):
        np.random.seed(range(3))
        self.rmat = np.random.random((4, 5))
        self.cmat = self.rmat + 1j * self.rmat
        # Object (Decimal) matrix exercises the pure-object code paths.
        self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
        self.omat = self.omat.reshape(4, 5)

    def test_keepdims(self):
        # keepdims=True keeps the reduced axis with length 1.
        mat = np.eye(3)
        for f in self.funcs:
            for axis in [0, 1]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.ndim == mat.ndim)
                assert_(res.shape[axis] == 1)
            for axis in [None]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.shape == (1, 1))

    def test_out(self):
        # out= must receive the result and be the returned object; a
        # mis-shaped out raises.
        mat = np.eye(3)
        for f in self.funcs:
            out = np.zeros(3)
            tgt = f(mat, axis=1)
            res = f(mat, axis=1, out=out)
            assert_almost_equal(res, out)
            assert_almost_equal(res, tgt)
            out = np.empty(2)
            assert_raises(ValueError, f, mat, axis=1, out=out)
            out = np.empty((2, 2))
            assert_raises(ValueError, f, mat, axis=1, out=out)

    def test_dtype_from_input(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        # object type: result keeps the object dtype / scalar type.
        for f in self.funcs:
            mat = np.array([[Decimal(1)]*3]*3)
            tgt = mat.dtype.type
            res = f(mat, axis=1).dtype.type
            assert_(res is tgt)
            # scalar case
            res = type(f(mat, axis=None))
            assert_(res is Decimal)
        # integer types promote to float64.
        for f in self.funcs:
            for c in icodes:
                mat = np.eye(3, dtype=c)
                tgt = np.float64
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)
        # mean for float types: dtype is preserved.
        for f in [_mean]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                tgt = mat.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)
        # var, std for float types: complex inputs give the real dtype.
        for f in [_var, _std]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                # deal with complex types
                tgt = mat.real.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

    def test_dtype_from_dtype(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        mat = np.eye(3)
        # stats for integer types
        # fixme:
        # this needs definition as there are lots places along the line
        # where type casting may take place.
        #for f in self.funcs:
            #for c in icodes:
                #tgt = np.dtype(c).type
                #res = f(mat, axis=1, dtype=c).dtype.type
                #assert_(res is tgt)
                ## scalar case
                #res = f(mat, axis=None, dtype=c).dtype.type
                #assert_(res is tgt)
        # stats for float types: explicit dtype= wins.
        for f in self.funcs:
            for c in fcodes:
                tgt = np.dtype(c).type
                res = f(mat, axis=1, dtype=c).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None, dtype=c).dtype.type
                assert_(res is tgt)

    def test_ddof(self):
        # var scales as 1/(n - ddof), so res * (n - ddof) == tgt * n.
        # BUG FIX: the original computed res/tgt in the _var loop without
        # asserting anything, and duplicated the assert in the _std loop;
        # now each inner loop performs exactly one comparison.
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        # std scales as 1/sqrt(n - ddof).
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)

    def test_ddof_too_big(self):
        # ddof >= n yields a RuntimeWarning and a non-negative result.
        dim = self.rmat.shape[1]
        for f in [_var, _std]:
            for ddof in range(dim, dim + 2):
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    res = f(self.rmat, axis=1, ddof=ddof)
                    assert_(not (res < 0).any())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        # Reducing an empty axis warns and yields NaN; reducing the
        # non-empty axis of a (0, 3) array yields an empty result.
        A = np.zeros((0, 3))
        for f in self.funcs:
            for axis in [0, None]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_(np.isnan(f(A, axis=axis)).all())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))
            for axis in [1]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_equal(f(A, axis=axis), np.zeros([]))

    def test_mean_values(self):
        # mean * count == sum, for real, complex and object matrices.
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * mat.shape[axis]
                assert_almost_equal(res, tgt)
            for axis in [None]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * np.prod(mat.shape)
                assert_almost_equal(res, tgt)

    def test_var_values(self):
        # var == E[x x*] - E[x] E[x]*.
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                msqr = _mean(mat * mat.conj(), axis=axis)
                mean = _mean(mat, axis=axis)
                tgt = msqr - mean * mean.conjugate()
                res = _var(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_std_values(self):
        # std == sqrt(var).
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                tgt = np.sqrt(_var(mat, axis=axis))
                res = _std(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_subclass(self):
        # Reductions must preserve ndarray subclasses and their attributes.
        class TestArray(np.ndarray):
            def __new__(cls, data, info):
                result = np.array(data)
                result = result.view(cls)
                result.info = info
                return result
            def __array_finalize__(self, obj):
                self.info = getattr(obj, "info", '')
        dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
        res = dat.mean(1)
        assert_(res.info == dat.info)
        res = dat.std(1)
        assert_(res.info == dat.info)
        res = dat.var(1)
        assert_(res.info == dat.info)
class TestDot(TestCase):
    """Direct tests of multiarray.dot: the 2- and 3-argument (out=) forms,
    out= validation, and object-dtype operands."""
    def test_dot_2args(self):
        from numpy.core.multiarray import dot
        a = np.array([[1, 2], [3, 4]], dtype=float)
        b = np.array([[1, 0], [1, 1]], dtype=float)
        c = np.array([[3, 2], [7, 4]], dtype=float)
        d = dot(a, b)
        assert_allclose(c, d)
    def test_dot_3args(self):
        from numpy.core.multiarray import dot
        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))
        r = np.empty((1024, 32))
        for i in range(12):
            dot(f, v, r)
        # Repeated use of the same out= must not leak references to it
        # (CPython-specific refcount check: local + getrefcount argument).
        assert_equal(sys.getrefcount(r), 2)
        r2 = dot(f, v, out=None)
        assert_array_equal(r2, r)
        # When out= is supplied, the very same object is returned.
        assert_(r is dot(f, v, out=r))
        v = v[:, 0].copy() # v.shape == (16,)
        r = r[:, 0].copy() # r.shape == (1024,)
        r2 = dot(f, v)
        assert_(r is dot(f, v, r))
        assert_array_equal(r2, r)
    def test_dot_3args_errors(self):
        # out= must match the result's shape, contiguity and dtype exactly.
        from numpy.core.multiarray import dot
        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))
        r = np.empty((1024, 31))
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((1024,))
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((32,))
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((32, 1024))
        assert_raises(ValueError, dot, f, v, r)
        assert_raises(ValueError, dot, f, v, r.T)
        r = np.empty((1024, 64))
        assert_raises(ValueError, dot, f, v, r[:, ::2])
        assert_raises(ValueError, dot, f, v, r[:, :32])
        r = np.empty((1024, 32), dtype=np.float32)
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((1024, 32), dtype=int)
        assert_raises(ValueError, dot, f, v, r)
    def test_dot_scalar_and_matrix_of_objects(self):
        # Ticket #2469
        arr = np.matrix([1, 2], dtype=object)
        desired = np.matrix([[3, 6]], dtype=object)
        assert_equal(np.dot(arr, 3), desired)
        assert_equal(np.dot(3, arr), desired)
class TestInner(TestCase):
    """np.inner must accept a scalar on either side of an object-dtype
    matrix operand."""
    def test_inner_scalar_and_matrix_of_objects(self):
        # Ticket #4482
        mat = np.matrix([1, 2], dtype=object)
        expected = np.matrix([[3, 6]], dtype=object)
        for product in (np.inner(mat, 3), np.inner(3, mat)):
            assert_equal(product, expected)
class TestSummarization(TestCase):
    """str()/repr() of arrays longer than the print threshold are
    summarized with '...'.  NOTE: the expected strings pin this numpy
    era's (legacy, pre-1.14) repr formatting."""
    def test_1d(self):
        A = np.arange(1001)
        strA = '[   0    1    2 ...,  998  999 1000]'
        assert_(str(A) == strA)
        reprA = 'array([   0,    1,    2, ...,  998,  999, 1000])'
        assert_(repr(A) == reprA)
    def test_2d(self):
        A = np.arange(1002).reshape(2, 501)
        strA = '[[   0    1    2 ...,  498  499  500]\n' \
               ' [ 501  502  503 ...,  999 1000 1001]]'
        assert_(str(A) == strA)
        reprA = 'array([[   0,    1,    2, ...,  498,  499,  500],\n' \
                '       [ 501,  502,  503, ...,  999, 1000, 1001]])'
        assert_(repr(A) == reprA)
class TestChoose(TestCase):
    """np.choose with same-shape and broadcasting choice arrays."""
    def setUp(self):
        self.x = 2*ones((3,), dtype=int)
        self.y = 3*ones((3,), dtype=int)
        self.x2 = 2*ones((2, 3), dtype=int)
        self.y2 = 3*ones((2, 3), dtype=int)
        self.ind = [0, 0, 1]

    def test_basic(self):
        # Index array and choices all 1-d of the same length.
        chosen = np.choose(self.ind, (self.x, self.y))
        assert_equal(chosen, [2, 2, 3])

    def test_broadcast1(self):
        # Both choices 2-d: the result takes their shape.
        chosen = np.choose(self.ind, (self.x2, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])

    def test_broadcast2(self):
        # Mixed 1-d and 2-d choices broadcast against each other.
        chosen = np.choose(self.ind, (self.x, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])

    # TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
    """Neighborhood iterator padding modes (zero/one/constant/mirror/
    circular), driven through the C test helper test_neighborhood_iterator;
    each expected list `r` is the sequence of neighborhoods visited."""
    # Simple, 2d tests
    def _test_simple2d(self, dt):
        # Test zero and one padding for simple data type
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
             np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
             np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
             np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
             np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                NEIGH_MODE['one'])
        assert_array_equal(l, r)
        # Constant mode pads with the supplied fill value (4 here).
        r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
             np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
             np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
                NEIGH_MODE['constant'])
        assert_array_equal(l, r)
    def test_simple2d(self):
        self._test_simple2d(np.float)
    def test_simple2d_object(self):
        # Same traversal with an object (Decimal) dtype.
        self._test_simple2d(Decimal)
    def _test_mirror2d(self, dt):
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
             np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                NEIGH_MODE['mirror'])
        assert_array_equal(l, r)
    def test_mirror2d(self):
        self._test_mirror2d(np.float)
    def test_mirror2d_object(self):
        self._test_mirror2d(Decimal)
    # Simple, 1d tests
    def _test_simple(self, dt):
        # Test padding with constant values
        x = np.linspace(1, 5, 5).astype(dt)
        r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
        assert_array_equal(l, r)
        r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
        l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
        assert_array_equal(l, r)
    def test_simple_float(self):
        self._test_simple(np.float)
    def test_simple_object(self):
        self._test_simple(Decimal)
    # Test mirror modes
    def _test_mirror(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
        self.assertTrue([i.dtype == dt for i in l])
        assert_array_equal(l, r)
    def test_mirror(self):
        self._test_mirror(np.float)
    def test_mirror_object(self):
        self._test_mirror(Decimal)
    # Circular mode
    def _test_circular(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
    def test_circular(self):
        self._test_circular(np.float)
    def test_circular_object(self):
        self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
    """Two neighborhood iterators stacked: the second iterates over the
    (padded) view produced by the first.  Driven through the C helper
    test_neighborhood_iterator_oob(x, bounds1, mode1, bounds2, mode2)."""
    # Simple, 1d test: stacking 2 constant-padded neigh iterators
    def test_simple_const(self):
        dt = np.float64
        # Test zero and one padding for simple data type
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0], dtype=dt),
             np.array([0], dtype=dt),
             np.array([1], dtype=dt),
             np.array([2], dtype=dt),
             np.array([3], dtype=dt),
             np.array([0], dtype=dt),
             np.array([0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
                [0, 0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        r = [np.array([1, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-1, 1], NEIGH_MODE['one'])
        assert_array_equal(l, r)
    # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # mirror padding
    def test_simple_mirror(self):
        dt = np.float64
        # Stacking zero on top of mirror
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 1], dtype=dt),
             np.array([1, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 3], dtype=dt),
             np.array([3, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
                [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-2, 0], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [0, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-2, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)
    # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # circular padding
    def test_simple_circular(self):
        dt = np.float64
        # Stacking zero on top of mirror
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 3, 1], dtype=dt),
             np.array([3, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 1], dtype=dt),
             np.array([3, 1, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
                [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-2, 0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [0, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                [-2, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
    # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
    # being strictly within the array
    def test_simple_strict_within(self):
        dt = np.float64
        # Stacking zero on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                [-1, 2], NEIGH_MODE['zero'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                [-1, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)
        # Stacking mirror on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                [-1, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
class TestWarnings(object):
    """Assigning complex values into a non-complex array must emit
    ComplexWarning (promoted to an error here) and leave the data intact."""
    def test_complex_warning(self):
        x = np.array([1, 2])
        y = np.array([1-2j, 1+2j])
        with warnings.catch_warnings():
            warnings.simplefilter("error", np.ComplexWarning)
            # The discarded-imaginary-part warning is raised as an error...
            assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
            # ...so x must be unchanged.
            assert_equal(x, [1, 2])
class TestMinScalarType(object):
    """np.min_scalar_type must pick the smallest dtype able to represent a
    given Python integer, falling back to object past the uint64 range."""

    def _check(self, value, typestr):
        # Expected-first argument order, as in the original assertions.
        assert_equal(np.dtype(typestr), np.min_scalar_type(value))

    def test_usigned_shortshort(self):
        self._check(2**8 - 1, 'uint8')

    def test_usigned_short(self):
        self._check(2**16 - 1, 'uint16')

    def test_usigned_int(self):
        self._check(2**32 - 1, 'uint32')

    def test_usigned_longlong(self):
        self._check(2**63 - 1, 'uint64')

    def test_object(self):
        # Beyond 64 bits only the object dtype fits.
        self._check(2**64, 'O')
# Python 2.6's builtin memoryview is incomplete, so the buffer-protocol
# tests below use numpy's shim on that version.
if sys.version_info[:2] == (2, 6):
    from numpy.core.multiarray import memorysimpleview as memoryview
# Private helper that converts a PEP 3118 format string into a numpy dtype.
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
    """Conversion of PEP 3118 buffer format strings to numpy dtypes,
    covering native ('@'/'^') vs. standard ('=') alignment and padding."""
    def _check(self, spec, wanted):
        dt = np.dtype(wanted)
        if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
            if wanted[-1][0] == '':
                # Trailing padding field: blank out its name to match what
                # the PEP 3118 parser produces.
                names = list(dt.names)
                names[-1] = ''
                dt.names = tuple(names)
        assert_equal(_dtype_from_pep3118(spec), dt,
                     err_msg="spec %r != dtype %r" % (spec, wanted))
    def test_native_padding(self):
        # Native ('@') mode inserts alignment padding; standard ('=') packs.
        align = np.dtype('i').alignment
        for j in range(8):
            if j == 0:
                s = 'bi'
            else:
                s = 'b%dxi' % j
            self._check('@'+s, {'f0': ('i1', 0),
                                'f1': ('i', align*(1 + j//align))})
            self._check('='+s, {'f0': ('i1', 0),
                                'f1': ('i', 1+j)})
    def test_native_padding_2(self):
        # Native padding should work also for structs and sub-arrays
        self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
        self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
    def test_trailing_padding(self):
        # Trailing padding should be included, *and*, the item size
        # should match the alignment if in aligned mode
        align = np.dtype('i').alignment
        def VV(n):
            # Smallest alignment-rounded void size holding n padding bytes.
            return 'V%d' % (align*(1 + (n-1)//align))
        self._check('ix', [('f0', 'i'), ('', VV(1))])
        self._check('ixx', [('f0', 'i'), ('', VV(2))])
        self._check('ixxx', [('f0', 'i'), ('', VV(3))])
        self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
        self._check('i7x', [('f0', 'i'), ('', VV(7))])
        # Unaligned ('^') mode keeps the padding size exact.
        self._check('^ix', [('f0', 'i'), ('', 'V1')])
        self._check('^ixx', [('f0', 'i'), ('', 'V2')])
        self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
        self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
        self._check('^i7x', [('f0', 'i'), ('', 'V7')])
    def test_native_padding_3(self):
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'),
                 ('sub', np.dtype('b,i')), ('c', 'i')],
                align=True)
        self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                 ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
        self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
    def test_padding_with_array_inside_struct(self):
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
                 ('d', 'i')],
                align=True)
        self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
    def test_byteorder_inside_struct(self):
        # The byte order after @T{=i} should be '=', not '@'.
        # Check this by noting the absence of native alignment.
        self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
                                 'f1': ('i', 5)})
    def test_intra_padding(self):
        # Natively aligned sub-arrays may require some internal padding
        align = np.dtype('i').alignment
        def VV(n):
            return 'V%d' % (align*(1 + (n-1)//align))
        self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([dtype(b).itemsize for a, b in dt])
if dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
    def test_export_endian(self):
        """Non-native byte order is spelled out in the format; native order is not."""
        x = np.array([1, 2, 3], dtype='>i')
        y = memoryview(x)
        if sys.byteorder == 'little':
            # big-endian data on a little-endian host keeps the explicit '>'
            assert_equal(y.format, '>i')
        else:
            assert_equal(y.format, 'i')
        x = np.array([1, 2, 3], dtype='<i')
        y = memoryview(x)
        if sys.byteorder == 'little':
            assert_equal(y.format, 'i')
        else:
            assert_equal(y.format, '<i')
    def test_export_flags(self):
        """Requesting a SIMPLE buffer from a strided array must fail."""
        # Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
        assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
    def test_padding(self):
        """Structured dtypes with 0..7 bytes of trailing padding round-trip."""
        for j in range(8):
            # {'f0': (int, j)} places the int field at offset j, padding before it.
            x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
            self._check_roundtrip(x)
    def test_reference_leak(self):
        """Round-tripping through memoryview must not leak _internal references."""
        # np.core._internal is used by the buffer-protocol format parser; its
        # refcount should be unchanged after an export/import cycle.
        count_1 = sys.getrefcount(np.core._internal)
        a = np.zeros(4)
        b = memoryview(a)
        c = np.asarray(b)
        count_2 = sys.getrefcount(np.core._internal)
        assert_equal(count_1, count_2)
    def test_padded_struct_array(self):
        """Aligned (padded) structured dtypes survive a buffer round-trip."""
        # aligned struct with an aligned sub-struct
        dt1 = np.dtype(
                [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
                align=True)
        x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
        self._check_roundtrip(x1)
        # aligned struct containing a subarray field
        dt2 = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
                align=True)
        x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
        self._check_roundtrip(x2)
        # unaligned struct embedding an aligned sub-struct
        dt3 = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                    ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
        x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
        self._check_roundtrip(x3)
class TestArrayAttributeDeletion(object):
    """Deleting ndarray / flagsobj attributes must raise, never crash."""

    def test_multiarray_writable_attributes_deletion(self):
        """ticket #2046: deletion should not segfault; it raises AttributeError"""
        a = np.ones(2)
        attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
        for s in attr:
            assert_raises(AttributeError, delattr, a, s)

    def test_multiarray_not_writable_attributes_deletion(self):
        # read-only attributes must also refuse deletion
        a = np.ones(2)
        attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
                "ctypes", "T", "__array_interface__", "__array_struct__",
                "__array_priority__", "__array_finalize__"]
        for s in attr:
            assert_raises(AttributeError, delattr, a, s)

    def test_multiarray_flags_writable_attribute_deletion(self):
        a = np.ones(2).flags
        attr = ['updateifcopy', 'aligned', 'writeable']
        for s in attr:
            assert_raises(AttributeError, delattr, a, s)

    def test_multiarray_flags_not_writable_attribute_deletion(self):
        a = np.ones(2).flags
        attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
                "owndata", "fnc", "forc", "behaved", "carray", "farray",
                "num"]
        for s in attr:
            assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
    """Objects exposing __array_interface__ coerce correctly via np.array."""
    # Test scalar coercion within the array interface
    class Foo(object):
        def __init__(self, value):
            self.value = value
            self.iface = {'typestr' : '=f8'}
        def __float__(self):
            return float(self.value)
        @property
        def __array_interface__(self):
            return self.iface
    f = Foo(0.5)
    assert_equal(np.array(f), 0.5)
    assert_equal(np.array([f]), [0.5])
    assert_equal(np.array([f, f]), [0.5, 0.5])
    assert_equal(np.array(f).dtype, np.dtype('=f8'))
    # Test various shape definitions
    f.iface['shape'] = ()
    assert_equal(np.array(f), 0.5)
    # shape=None is invalid and must be rejected
    f.iface['shape'] = None
    assert_raises(TypeError, np.array, f)
    f.iface['shape'] = (1, 1)
    assert_equal(np.array(f), [[0.5]])
    # declared shape that doesn't match the data must fail
    f.iface['shape'] = (2,)
    assert_raises(ValueError, np.array, f)

    # test scalar with no shape
    class ArrayLike(object):
        array = np.array(1)
        __array_interface__ = array.__array_interface__
    assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
    """Deleting from a flatiter is unsupported and must raise TypeError."""
    it = np.ones(3).flat
    try:
        del it[1]
        del it[1:2]
    except TypeError:
        # expected: flatiter does not support element deletion
        pass
    except:
        # any other exception type is a failure
        raise AssertionError
    else:
        # BUG FIX: originally a silent fall-through — a deletion that
        # (wrongly) succeeded without raising would not have failed the test.
        raise AssertionError
def test_scalar_element_deletion():
    """Fields of a structured (void) scalar cannot be deleted."""
    a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
    assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
    """Exercises the PyDataMem event hook; assertions live in the C test module."""

    def test_mem_seteventhook(self):
        # The actual tests are within the C code in
        # multiarray/multiarray_tests.c.src
        test_pydatamem_seteventhook_start()
        # force an allocation and free of a numpy array
        # needs to be larger then limit of small memory cacher in ctors.c
        a = np.zeros(1000)
        del a
        test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
    """Drives the C-level map-iterator in-place increment helper."""

    def test_mapiter(self):
        # The actual tests are within the C code in
        # multiarray/multiarray_tests.c.src
        a = arange(12).reshape((3, 4)).astype(float)
        # duplicate index (1, 0) appears twice: both increments must apply
        index = ([1, 1, 2, 0],
                 [0, 0, 2, 3])
        vals = [50, 50, 30, 16]
        test_inplace_increment(a, index, vals)
        assert_equal(a, [[ 0., 1., 2., 19.,],
                         [ 104., 5., 6., 7.,],
                         [ 8., 9., 40., 11.,]])

        b = arange(6).astype(float)
        index = (array([1, 2, 0]),)
        vals = [50, 4, 100.1]
        test_inplace_increment(b, index, vals)
        assert_equal(b, [ 100.1, 51., 6., 3., 4., 5. ])
class TestAsCArray(TestCase):
    """PyArray_AsCArray (via the C test helper) returns the element at an index."""

    def test_1darray(self):
        array = np.arange(24, dtype=np.double)
        from_c = test_as_c_array(array, 3)
        assert_equal(array[3], from_c)

    def test_2darray(self):
        array = np.arange(24, dtype=np.double).reshape(3, 8)
        from_c = test_as_c_array(array, 2, 4)
        assert_equal(array[2, 4], from_c)

    def test_3darray(self):
        array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
        from_c = test_as_c_array(array, 1, 2, 3)
        assert_equal(array[1, 2, 3], from_c)
class PriorityNdarray():
    """Wrapper with a high ``__array_priority__`` so ndarray defers to it in
    mixed comparisons; every rich comparison returns a wrapped result."""
    __array_priority__ = 1000

    def __init__(self, array):
        self.array = array

    def _unwrap(self, other):
        # Accept either a raw operand or another PriorityNdarray.
        return other.array if isinstance(other, PriorityNdarray) else other

    def __lt__(self, other):
        return PriorityNdarray(self.array < self._unwrap(other))

    def __gt__(self, other):
        return PriorityNdarray(self.array > self._unwrap(other))

    def __le__(self, other):
        return PriorityNdarray(self.array <= self._unwrap(other))

    def __ge__(self, other):
        return PriorityNdarray(self.array >= self._unwrap(other))

    def __eq__(self, other):
        return PriorityNdarray(self.array == self._unwrap(other))

    def __ne__(self, other):
        return PriorityNdarray(self.array != self._unwrap(other))
class TestArrayPriority(TestCase):
    """Comparisons between ndarray and a high-__array_priority__ wrapper must
    dispatch to the wrapper whenever it appears on either side.

    NOTE(review): ``dtype`` below resolves to a module-level name not visible
    in this chunk — presumably the dtype under test; confirm against the file.
    """

    def test_lt(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l < r
        res2 = l < rp
        res3 = lp < r
        res4 = lp < rp
        # values agree regardless of which operand is wrapped...
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        # ...and any wrapped operand forces a wrapped result
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))

    def test_gt(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l > r
        res2 = l > rp
        res3 = lp > r
        res4 = lp > rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))

    def test_le(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l <= r
        res2 = l <= rp
        res3 = lp <= r
        res4 = lp <= rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))

    def test_ge(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l >= r
        res2 = l >= rp
        res3 = lp >= r
        res4 = lp >= rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))

    def test_eq(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l == r
        res2 = l == rp
        res3 = lp == r
        res4 = lp == rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))

    def test_ne(self):
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = l != r
        res2 = l != rp
        res3 = lp != r
        res4 = lp != rp
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))
class TestConversion(TestCase):
    """Relational operators between Python scalars and 0-d arrays of every kind."""

    def test_array_scalar_relational_operation(self):
        #All integer
        for dt1 in np.typecodes['AllInteger']:
            assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))

            for dt2 in np.typecodes['AllInteger']:
                assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))

        #Unsigned integers
        for dt1 in 'BHILQP':
            # -1 must not wrap around when compared against unsigned values
            assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))

            #unsigned vs signed
            for dt2 in 'bhilqp':
                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))

        #Signed integers and floats
        for dt1 in 'bhlqp' + np.typecodes['Float']:
            assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))

            for dt2 in 'bhlqp' + np.typecodes['Float']:
                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
    """np.where: dtype coverage, broadcasting, mixed dtypes and string filling."""

    def test_basic(self):
        dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
               np.longdouble, np.clongdouble]
        for dt in dts:
            c = np.ones(53, dtype=np.bool)
            assert_equal(np.where( c, dt(0), dt(1)), dt(0))
            assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
            assert_equal(np.where(True, dt(0), dt(1)), dt(0))
            assert_equal(np.where(False, dt(0), dt(1)), dt(1))
            d = np.ones_like(c).astype(dt)
            e = np.zeros_like(d)
            r = d.astype(dt)
            c[7] = False
            r[7] = e[7]
            assert_equal(np.where(c, e, e), e)
            assert_equal(np.where(c, d, e), r)
            # scalar/array mixes and strided (including negative-stride) views
            assert_equal(np.where(c, d, e[0]), r)
            assert_equal(np.where(c, d[0], e), r)
            assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
            assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
            assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
            assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
            assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
            assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
            assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])

    def test_exotic(self):
        # object
        assert_array_equal(np.where(True, None, None), np.array(None))
        # zero sized
        m = np.array([], dtype=bool).reshape(0, 3)
        b = np.array([], dtype=np.float64).reshape(0, 3)
        assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))

        # object cast
        d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
                      0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
                      1.267, 0.229, -1.39, 0.487])
        nan = float('NaN')
        e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
                      'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
                     dtype=object);
        m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)

        r = e[:]
        r[np.where(m)] = d[np.where(m)]
        assert_array_equal(np.where(m, d, e), r)

        r = e[:]
        r[np.where(~m)] = d[np.where(~m)]
        assert_array_equal(np.where(m, e, d), r)

        assert_array_equal(np.where(m, e, e), e)

        # minimal dtype result with NaN scalar (e.g required by pandas)
        d = np.array([1., 2.], dtype=np.float32)
        e = float('NaN')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('-Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        # also check upcast
        e = float(1e150)
        assert_equal(np.where(True, d, e).dtype, np.float64)

    def test_ndim(self):
        # condition broadcasts across the second axis
        c = [True, False]
        a = np.zeros((2, 25))
        b = np.ones((2, 25))
        r = np.where(np.array(c)[:,np.newaxis], a, b)
        assert_array_equal(r[0], a[0])
        assert_array_equal(r[1], b[0])

        a = a.T
        b = b.T
        r = np.where(c, a, b)
        assert_array_equal(r[:,0], a[:,0])
        assert_array_equal(r[:,1], b[:,0])

    def test_dtype_mix(self):
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        a = np.uint32(1)
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)

        a = a.astype(np.float32)
        b = b.astype(np.int64)
        assert_equal(np.where(c, a, b), r)

        # non bool mask
        c = c.astype(np.int)
        c[c != 0] = 34242324
        assert_equal(np.where(c, a, b), r)
        # invert
        tmpmask = c != 0
        c[c == 0] = 41247212
        c[tmpmask] = 0
        assert_equal(np.where(c, b, a), r)

    def test_foreign(self):
        # non-native byte orders for condition and operands
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        a = np.ones(1, dtype='>i4')
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)

        b = b.astype('>f8')
        assert_equal(np.where(c, a, b), r)

        a = a.astype('<i4')
        assert_equal(np.where(c, a, b), r)

        c = c.astype('>i4')
        assert_equal(np.where(c, a, b), r)

    def test_error(self):
        # shape mismatches must raise
        c = [True, True]
        a = np.ones((4, 5))
        b = np.ones((5, 5))
        assert_raises(ValueError, np.where, c, a, a)
        assert_raises(ValueError, np.where, c[0], a, b)

    def test_string(self):
        # gh-4778 check strings are properly filled with nulls
        a = np.array("abc")
        b = np.array("x" * 753)
        assert_equal(np.where(True, a, b), "abc")
        assert_equal(np.where(False, b, a), "abc")

        # check native datatype sized strings
        a = np.array("abcd")
        b = np.array("x" * 8)
        assert_equal(np.where(True, a, b), "abcd")
        assert_equal(np.where(False, b, a), "abcd")
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_module_suite()
|
40223101/w16b_test | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/importlib/machinery.py | 635 | """The machinery of importlib: finders, loaders, hooks, etc."""
import _imp
from ._bootstrap import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES,
OPTIMIZED_BYTECODE_SUFFIXES, #BYTECODE_SUFFIXES,
EXTENSION_SUFFIXES)
from ._bootstrap import BuiltinImporter
from ._bootstrap import FrozenImporter
from ._bootstrap import WindowsRegistryFinder
from ._bootstrap import PathFinder
from ._bootstrap import FileFinder
from ._bootstrap import SourceFileLoader
from ._bootstrap import SourcelessFileLoader
from ._bootstrap import ExtensionFileLoader
#def all_suffixes():
# """Returns a list of all recognized module suffixes for this process"""
# return SOURCE_SUFFIXES + BYTECODE_SUFFIXES + EXTENSION_SUFFIXES
|
bryanph/OIPA | refs/heads/develop | OIPA/iati/management/commands/total_budget_updater.py | 3 | import datetime
# Django specific
from django.core.management.base import BaseCommand
from django.db import connection
from iati.models import Activity, Budget, Currency
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command: recompute cached Activity.total_budget values."""
    option_list = BaseCommand.option_list
    counter = 0

    def handle(self, *args, **options):
        parser = TotalBudgetUpdater()
        # BUG FIX: TotalBudgetUpdater defines update(), not updateTotal();
        # the original call raised AttributeError at runtime.
        parser.update()
class TotalBudgetUpdater():
    """Recomputes the cached total budget (and its currency) on activities."""

    def get_fields(self, cursor):
        """Convert raw cursor rows into a list of {column_name: value} dicts."""
        desc = cursor.description
        results = [
            dict(zip([col[0] for col in desc], row))
            for row in cursor.fetchall()
        ]
        return results

    def update(self):
        """Recompute total_budget for every activity that has budget rows."""
        cursor = connection.cursor()
        cursor.execute('SELECT activity_id, sum(value) as total_value FROM IATI_budget b GROUP BY activity_id')
        results = self.get_fields(cursor=cursor)
        for r in results:
            cur_act = Activity.objects.get(id=r['activity_id'])
            cur_act.total_budget = r['total_value']
            cur_act.total_budget_currency = self.get_budget_currency(cur_act)
            cur_act.save()
        return True

    def get_budget_currency(self, activity):
        """Return the single Currency shared by all of an activity's budgets,
        or None when currencies are mixed, absent, or an error occurs."""
        try:
            current_currency = None
            for b in Budget.objects.filter(activity_id=activity.id):
                if not current_currency:
                    # first row
                    current_currency = b.currency_id
                elif current_currency == b.currency_id:
                    # currency matches previously found currency
                    continue
                else:
                    # multiple currencies detected, return None
                    return None
            if current_currency:
                return Currency.objects.get(code=current_currency)
            else:
                return None
        except Exception as e:
            logger.info("error in " + activity.id + ", def: get_budget_currency")
            if e.args:
                logger.info(e.args[0])
                if e.args.__len__() > 1:
                    logger.info(e.args[1])
            # e.message exists on Python 2 only; guard so logging the error
            # can never itself raise.
            if getattr(e, 'message', None):
                logger.info(e.message)
            return None

    def update_single_activity(self, id):
        """Recompute total_budget for one activity id."""
        try:
            cursor = connection.cursor()
            # SECURITY FIX: parameterized query instead of string-built SQL —
            # interpolating `id` directly allowed SQL injection.
            cursor.execute(
                "SELECT activity_id, sum(value) as total_value "
                "FROM iati_budget b WHERE activity_id = %s GROUP BY activity_id",
                [id])
            results = self.get_fields(cursor=cursor)
            for r in results:
                cur_act = Activity.objects.get(id=r['activity_id'])
                cur_act.total_budget = r['total_value']
                cur_act.total_budget_currency = self.get_budget_currency(cur_act)
                cur_act.save()
        except Exception as e:
            logger.info("error in " + id + ", def: update_single_activity")
            if e.args:
                logger.info(e.args[0])
                if e.args.__len__() > 1:
                    logger.info(e.args[1])
            if getattr(e, 'message', None):
                logger.info(e.message)
rmanoni/mi-instrument | refs/heads/master | mi/core/config.py | 2 | #!/usr/bin/env python
__author__ = 'Adam R. Smith'
import yaml
from mi.core.containers import DotDict, dict_merge
from mi.core.exceptions import ConfigNotFound
class Config(object):
    """
    YAML-based config loader that supports multiple paths.
    Later paths get deep-merged over earlier ones.
    """
    def __init__(self, paths=(), dict_class=DotDict, ignore_not_found=False):
        """Collect the (truthy) paths and eagerly load them."""
        self.paths = [path for path in paths if path] if paths is not None else []
        self.paths_loaded = set()  # paths already merged into self.data
        self.dict_class = dict_class
        self.data = self.dict_class()
        if paths:
            self.load(ignore_not_found)

    def add_path(self, path, ignore_not_found=False):
        """ Add this path at the end of the list and load/merge its contents. """
        self.paths.append(path)
        self.load(ignore_not_found)

    def load(self, ignore_not_found=False):
        """ Load each path in order. Remember paths already loaded and only load new ones. """
        # NOTE(review): starting from an empty dict means a later call (via
        # add_path) replaces self.data with only the newly loaded content,
        # dropping previously merged data — confirm whether that is intended.
        data = self.dict_class()
        for path in self.paths:
            if path in self.paths_loaded:
                continue
            try:
                # SECURITY FIX: yaml.safe_load instead of full yaml.load —
                # the latter can construct arbitrary Python objects from a
                # config file (code execution risk).
                with open(path, 'r') as fp:
                    path_data = yaml.safe_load(fp)
                if path_data is not None:
                    data = dict_merge(data, path_data)
                self.paths_loaded.add(path)
            except IOError:
                if not ignore_not_found:
                    raise ConfigNotFound("Config URL '%s' not found" % path)
        self.data = data

    def reload(self):
        """Forget which paths were loaded and re-read everything."""
        self.paths_loaded.clear()
        self.load()
kenorb/BitTorrent | refs/heads/master | twisted/test/test_ident.py | 3 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.protocols.ident module.
"""
import struct
from twisted.protocols import ident
from twisted.python import failure
from twisted.internet import error
from twisted.internet import defer
from twisted.python import log
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
class ClassParserTestCase(unittest.TestCase):
    """Client-side ident response parsing: error codes and lost connections."""

    def testErrors(self):
        # Each pending query's Deferred must errback with the exception type
        # matching the ERROR token in the response line.
        p = ident.IdentClient()

        L = []
        d = defer.Deferred()
        d.addErrback(L.append)
        p.queries.append((d, 123, 456))
        p.lineReceived('123, 456 : ERROR : UNKNOWN-ERROR')
        self.failUnless(L[0].value.__class__ is ident.IdentError, "%s is the wrong exception" % (L[0],))

        L = []
        d = defer.Deferred()
        d.addErrback(L.append)
        p.queries.append((d, 234, 456))
        p.lineReceived('234, 456 : ERROR : NO-USER')
        self.failUnless(L[0].value.__class__ is ident.NoUser, "%s is the wrong exception" % (L[0],))

        L = []
        d = defer.Deferred()
        d.addErrback(L.append)
        p.queries.append((d, 345, 567))
        p.lineReceived('345, 567 : ERROR : INVALID-PORT')
        self.failUnless(L[0].value.__class__ is ident.InvalidPort, "%s is the wrong exception" % (L[0],))

        L = []
        d = defer.Deferred()
        d.addErrback(L.append)
        p.queries.append((d, 567, 789))
        p.lineReceived('567, 789 : ERROR : HIDDEN-USER')
        self.failUnless(L[0].value.__class__ is ident.HiddenUser, "%s is the wrong exception" % (L[0],))

    def testLostConnection(self):
        # A dropped connection errbacks outstanding queries with IdentError.
        p = ident.IdentClient()

        L = []
        d = defer.Deferred()
        d.addErrback(L.append)
        p.queries.append((d, 765, 432))
        p.connectionLost(failure.Failure(error.ConnectionLost()))
        self.failUnless(L[0].value.__class__ is ident.IdentError)
class TestIdentServer(ident.IdentServer):
    # Test double: lookup() returns the canned (system, user) in resultValue.
    def lookup(self, serverAddress, clientAddress):
        return self.resultValue
class TestErrorIdentServer(ident.IdentServer):
    # Test double: lookup() raises the configured exceptionType.
    def lookup(self, serverAddress, clientAddress):
        raise self.exceptionType()
class NewException(RuntimeError):
    # An exception type the ident server does not know about; it must be
    # reported as UNKNOWN-ERROR and logged.
    pass
class ServerParserTestCase(unittest.TestCase):
    """Server-side ident request handling: error mapping and success replies."""

    def testErrors(self):
        p = TestErrorIdentServer()
        p.makeConnection(StringTransport())
        L = []
        p.sendLine = L.append

        # each known exception maps to its protocol ERROR token
        p.exceptionType = ident.IdentError
        p.lineReceived('123, 345')
        self.assertEquals(L[0], '123, 345 : ERROR : UNKNOWN-ERROR')

        p.exceptionType = ident.NoUser
        p.lineReceived('432, 210')
        self.assertEquals(L[1], '432, 210 : ERROR : NO-USER')

        p.exceptionType = ident.InvalidPort
        p.lineReceived('987, 654')
        self.assertEquals(L[2], '987, 654 : ERROR : INVALID-PORT')

        p.exceptionType = ident.HiddenUser
        p.lineReceived('756, 827')
        self.assertEquals(L[3], '756, 827 : ERROR : HIDDEN-USER')

        # unknown exception types fall back to UNKNOWN-ERROR and are logged
        p.exceptionType = NewException
        p.lineReceived('987, 789')
        self.assertEquals(L[4], '987, 789 : ERROR : UNKNOWN-ERROR')
        errs = log.flushErrors(NewException)
        self.assertEquals(len(errs), 1)

        # out-of-range ports on either side are rejected before lookup
        for port in -1, 0, 65536, 65537:
            del L[:]
            p.lineReceived('%d, 5' % (port,))
            p.lineReceived('5, %d' % (port,))
            self.assertEquals(
                L, ['%d, 5 : ERROR : INVALID-PORT' % (port,),
                    '5, %d : ERROR : INVALID-PORT' % (port,)])

    def testSuccess(self):
        p = TestIdentServer()
        p.makeConnection(StringTransport())
        L = []
        p.sendLine = L.append

        p.resultValue = ('SYS', 'USER')
        p.lineReceived('123, 456')
        self.assertEquals(L[0], '123, 456 : USERID : SYS : USER')
# Hex-encoded addresses for 127.0.0.1 and 1.2.3.4 as they appear in
# /proc/net/tcp, which uses host byte order (little- vs big-endian).
if struct.pack('=L', 1)[0] == '\x01':
    _addr1 = '0100007F'
    _addr2 = '04030201'
else:
    _addr1 = '7F000001'
    _addr2 = '01020304'
class ProcMixinTestCase(unittest.TestCase):
    """ProcServerMixin: parsing /proc/net/tcp lines and resolving users."""

    # a canned /proc/net/tcp entry: local 127.0.0.1:25, remote 1.2.3.4:762, uid 0
    line = ('4: %s:0019 %s:02FA 0A 00000000:00000000 '
            '00:00000000 00000000     0        0 10927 1 f72a5b80 '
            '3000 0 0 2 -1') % (_addr1, _addr2)

    def testDottedQuadFromHexString(self):
        p = ident.ProcServerMixin()
        self.assertEquals(p.dottedQuadFromHexString(_addr1), '127.0.0.1')

    def testUnpackAddress(self):
        # port part is hex: 0x0277 == 631
        p = ident.ProcServerMixin()
        self.assertEquals(p.unpackAddress(_addr1 + ':0277'), ('127.0.0.1', 631))

    def testLineParser(self):
        p = ident.ProcServerMixin()
        self.assertEquals(
            p.parseLine(self.line),
            (('127.0.0.1', 25), ('1.2.3.4', 762), 0))

    def testExistingAddress(self):
        # a matching connection resolves the uid to a username
        username = []
        p = ident.ProcServerMixin()
        p.entries = lambda: iter([self.line])
        p.getUsername = lambda uid: (username.append(uid), 'root')[1]
        self.assertEquals(
            p.lookup(('127.0.0.1', 25), ('1.2.3.4', 762)),
            (p.SYSTEM_NAME, 'root'))
        self.assertEquals(username, [0])

    def testNonExistingAddress(self):
        # any mismatch (port or host, either side) yields NoUser
        p = ident.ProcServerMixin()
        p.entries = lambda: iter([self.line])
        self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 26), ('1.2.3.4', 762))
        self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 25), ('1.2.3.5', 762))
        self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 25), ('1.2.3.4', 763))
|
erkanay/django | refs/heads/master | django/contrib/gis/db/backends/mysql/__init__.py | 12133432 | |
neoareslinux/neutron | refs/heads/master | neutron/plugins/ml2/drivers/arista/__init__.py | 12133432 | |
indictranstech/ebuy-now-frappe | refs/heads/develop | frappe/core/doctype/version/__init__.py | 12133432 | |
3liz/QgisQuickOSMPlugin | refs/heads/master | definitions/__init__.py | 12133432 | |
BlackHole/obh-core | refs/heads/master | src/ui.py | 1 | # for localized messages
from os import listdir, path, mkdir
from . import _
from Screens.Screen import Screen
from Components.ActionMap import NumberActionMap
from Components.Sources.StaticText import StaticText
from Components.Sources.List import List
from Screens.ParentalControlSetup import ProtectedScreen
from Components.config import config
from Components.SystemInfo import SystemInfo
class VIXMenu(Screen, ProtectedScreen):
    """Top-level OBH utilities menu: lists the maintenance tools (backup,
    image, mount, script, swap managers...) and opens the selected screen.

    Optionally PIN-protected via parental-control configuration.
    """
    skin = """
	<screen name="VIXMenu" position="center,center" size="610,410">
		<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on"/>
		<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1"/>
		<widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand">
			<convert type="TemplatedMultiContent">
				{"template": [
						MultiContentEntryText(pos = (2,2), size = (330,24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText,
					],
					"fonts": [gFont("Regular",22)],
					"itemHeight":25
				}
			</convert>
		</widget>
		<widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1">
			<convert type="TemplatedMultiContent">
				{"template": [
						MultiContentEntryText(pos = (2,2), size = (240,300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description,
					],
					"fonts": [gFont("Regular",22)],
					"itemHeight":300
				}
			</convert>
		</widget>
		<widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
	</screen>"""

    def __init__(self, session, args=0):
        Screen.__init__(self, session)
        ProtectedScreen.__init__(self)
        self.setTitle(_("OBH"))
        self.menu = args
        # each entry: (internal id, title, description, unused)
        self.list = []
        if self.menu == 0:
            self.list.append(("backup-manager", _("Backup manager"), _("Manage the backups of your settings."), None))
            self.list.append(("image-manager", _("Image manager"), _("Create and flash complete images of your system."), None))
            self.list.append(("ipkg-install", _("Install local extension"), _("Install IPK's from your tmp folder."), None))
            self.list.append(("mount-manager", _("Mount manager"), _("Manage your devices mount points."), None))
            self.list.append(("script-runner", _("Script runner"), _("Run your shell scripts."), None))
            self.list.append(("swap-manager", _("SWAP manager"), _("Create and Manage your SWAP files."), None))
            if SystemInfo["HasH9SD"]:
                self.list.append(("H9SDcard manager", _("H9SDcard Manager"), _("Move Nand root to SD card"), None))
        self["menu"] = List(self.list)
        self["key_red"] = StaticText(_("Close"))

        # number keys jump straight to the corresponding entry
        self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "InfobarEPGActions", "MenuActions", "NumberActions"],
            {
                "ok": self.go,
                "back": self.close,
                "red": self.close,
                "menu": self.closeRecursive,
                "1": self.go,
                "2": self.go,
                "3": self.go,
                "4": self.go,
                "5": self.go,
                "6": self.go,
                "7": self.go,
                "8": self.go,
                "9": self.go,
            }, -1)
        self.onLayoutFinish.append(self.layoutFinished)
        self.onChangedEntry = []
        self["menu"].onSelectionChanged.append(self.selectionChanged)

    def isProtected(self):
        # PIN-protect this menu when the parental-control section enables it
        return config.ParentalControl.setuppinactive.value and config.ParentalControl.config_sections.vixmenu.value

    def createSummary(self):
        from Screens.PluginBrowser import PluginBrowserSummary
        return PluginBrowserSummary

    def selectionChanged(self):
        # push (title, description) of the highlighted entry to LCD summaries
        item = self["menu"].getCurrent()
        if item:
            name = item[1]
            desc = item[2]
        else:
            name = "-"
            desc = ""
        for cb in self.onChangedEntry:
            cb(name, desc)

    def layoutFinished(self):
        idx = 0
        self["menu"].index = idx

    def go(self, num=None):
        """Open the screen for the selected (or number-keyed) entry."""
        if num is not None:
            num -= 1
            if not num < self["menu"].count():
                return
            self["menu"].setIndex(num)
        current = self["menu"].getCurrent()
        if current:
            currentEntry = current[0]
            if self.menu == 0:
                # imports are deferred so unused managers are never loaded
                if currentEntry == "backup-manager":
                    from BackupManager import VIXBackupManager
                    self.session.open(VIXBackupManager)
                elif currentEntry == "image-manager":
                    from ImageManager import VIXImageManager
                    self.session.open(VIXImageManager)
                elif currentEntry == "H9SDcard manager":
                    from H9SDmanager import H9SDmanager
                    self.session.open(H9SDmanager)
                elif currentEntry == "ipkg-install":
                    from IPKInstaller import VIXIPKInstaller
                    self.session.open(VIXIPKInstaller)
                elif currentEntry == "mount-manager":
                    from MountManager import VIXDevicesPanel
                    self.session.open(VIXDevicesPanel)
                elif currentEntry == "script-runner":
                    from ScriptRunner import VIXScriptRunner
                    self.session.open(VIXScriptRunner, None)
                elif currentEntry == "swap-manager":
                    from SwapManager import VIXSwap
                    self.session.open(VIXSwap)

    def closeRecursive(self):
        # close the whole screen stack, not just this menu
        self.close(True)
|
lazaronixon/enigma2 | refs/heads/master | lib/python/Screens/InfoBarGenerics.py | 4 | from ChannelSelection import ChannelSelection, BouquetSelector, SilentBouquetSelector
from Components.ActionMap import ActionMap, HelpableActionMap
from Components.ActionMap import NumberActionMap
from Components.Harddisk import harddiskmanager
from Components.Input import Input
from Components.Label import Label
from Components.MovieList import AUDIO_EXTENSIONS, MOVIE_EXTENSIONS, DVD_EXTENSIONS
from Components.PluginComponent import plugins
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Sources.Boolean import Boolean
from Components.config import config, ConfigBoolean, ConfigClock, ConfigText
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import preferredInstantRecordPath, defaultMoviePath, ConfigSelection
from Components.VolumeControl import VolumeControl
from Components.Sources.StaticText import StaticText
from EpgSelection import EPGSelection
from Plugins.Plugin import PluginDescriptor
from Screen import Screen
from Screens import ScreenSaver
from Screens import Standby
from Screens.ChoiceBox import ChoiceBox
from Screens.Dish import Dish
from Screens.EventView import EventViewEPGSelect, EventViewSimple
from Screens.InputBox import InputBox
from Screens.MessageBox import MessageBox
from Screens.MinuteInput import MinuteInput
from Screens.TimerSelection import TimerSelection
from Screens.PictureInPicture import PictureInPicture
import Screens.Standby
from Screens.SubtitleDisplay import SubtitleDisplay
from Screens.RdsDisplay import RdsInfoDisplay, RassInteractive
from Screens.TimeDateInput import TimeDateInput
from Screens.UnhandledKey import UnhandledKey
from ServiceReference import ServiceReference, isPlayableForCur
from Tools import Notifications, ASCIItranslit
from Tools.Directories import fileExists, getRecordingFilename, moveFiles
from enigma import eTimer, eServiceCenter, eDVBServicePMTHandler, iServiceInformation, \
iPlayableService, eServiceReference, eEPGCache, eActionMap
from time import time, localtime, strftime
import os
from bisect import insort
from sys import maxint
from RecordTimer import RecordTimerEntry, RecordTimer, findSafeRecordPath
# hack alert!
from Menu import MainMenu, mdom
def isStandardInfoBar(self):
    """Return True only for the plain InfoBar screen itself (by class name),
    not for subclasses such as the movie player."""
    name = self.__class__.__name__
    return name == "InfoBar"
def setResumePoint(session):
    """Remember the current play position of the running service.

    Stores [lru_timestamp, position_pts, length_pts_or_None] keyed by the
    service reference string; evicts the least-recently-used entry when the
    cache exceeds 50 and persists to disk at most once per hour.
    """
    global resumePointCache, resumePointCacheLast
    service = session.nav.getCurrentService()
    ref = session.nav.getCurrentlyPlayingServiceOrGroup()
    if (service is not None) and (ref is not None): # and (ref.type != 1):
        # ref type 1 has its own memory...
        seek = service.seek()
        if seek:
            pos = seek.getPlayPosition()
            if not pos[0]:
                key = ref.toString()
                lru = int(time())
                l = seek.getLength()
                if l:
                    l = l[1]
                else:
                    l = None
                resumePointCache[key] = [lru, pos[1], l]
                if len(resumePointCache) > 50:
                    # BUG FIX: evict the entry with the oldest LRU timestamp.
                    # The original loop kept whichever old key happened to be
                    # iterated last, evicting an arbitrary entry, not the LRU.
                    candidate = min(resumePointCache, key=lambda k: resumePointCache[k][0])
                    del resumePointCache[candidate]
                if lru - resumePointCacheLast > 3600:
                    saveResumePoints()
def delResumePoint(ref):
	"""Forget any stored resume position for service *ref*; occasionally
	flush the cache to disk (at most once an hour)."""
	global resumePointCache, resumePointCacheLast
	resumePointCache.pop(ref.toString(), None)
	if int(time()) - resumePointCacheLast > 3600:
		saveResumePoints()
def getResumePoint(session):
	"""Return the remembered playback position of the currently playing
	service, or None when nothing is stored (or the ref is a group,
	type 1). Refreshes the entry's LRU timestamp on a hit."""
	global resumePointCache
	ref = session.nav.getCurrentlyPlayingServiceOrGroup()
	if ref is None or ref.type == 1:
		return None
	entry = resumePointCache.get(ref.toString())
	if entry is None:
		return None
	entry[0] = int(time()) # update LRU timestamp
	return entry[1]
def saveResumePoints():
global resumePointCache, resumePointCacheLast
import cPickle
try:
f = open('/home/root/resumepoints.pkl', 'wb')
cPickle.dump(resumePointCache, f, cPickle.HIGHEST_PROTOCOL)
except Exception, ex:
print "[InfoBar] Failed to write resumepoints:", ex
resumePointCacheLast = int(time())
def loadResumePoints():
import cPickle
try:
return cPickle.load(open('/home/root/resumepoints.pkl', 'rb'))
except Exception, ex:
print "[InfoBar] Failed to load resumepoints:", ex
return {}
# Module-level resume point cache, loaded once at import time.
# saveResumePoints() flushes it back to disk; resumePointCacheLast throttles
# those flushes to at most one per hour.
resumePointCache = loadResumePoints()
resumePointCacheLast = int(time())
class InfoBarDish:
	# Mixin owning the Dish dialog; the dialog itself decides when to
	# show/hide (it is only instantiated here, never shown explicitly).
	def __init__(self):
		self.dishDialog = self.session.instantiateDialog(Dish)
class InfoBarUnhandledKey:
	"""Mixin that briefly flashes the "unhandled key" symbol whenever a key
	press was consumed by no action map at all."""
	def __init__(self):
		self.unhandledKeyDialog = self.session.instantiateDialog(UnhandledKey)
		self.hideUnhandledKeySymbolTimer = eTimer()
		self.hideUnhandledKeySymbolTimer.callback.append(self.unhandledKeyDialog.hide)
		self.checkUnusedTimer = eTimer()
		self.checkUnusedTimer.callback.append(self.checkUnused)
		self.onLayoutFinish.append(self.unhandledKeyDialog.hide)
		actionmap = eActionMap.getInstance()
		actionmap.bindAction('', -maxint - 1, self.actionA) # highest prio
		actionmap.bindAction('', maxint, self.actionB) # lowest prio
		self.flags = (1 << 1)
		self.uflags = 0
	def actionA(self, key, flag):
		"""Called on every keypress (highest priority binding). Records which
		make/break/repeat flags were seen for the current key."""
		self.unhandledKeyDialog.hide()
		if flag == 4:
			return 0
		if self.flags & (1 << 1):
			# previous key cycle finished -> start collecting afresh
			self.flags = 0
			self.uflags = 0
		self.flags |= (1 << flag)
		if flag == 1: # break
			self.checkUnusedTimer.start(0, True)
		return 0
	def actionB(self, key, flag):
		"""Called only when no other action map handled the key (lowest
		priority binding)."""
		if flag != 4:
			self.uflags |= (1 << flag)
	def checkUnused(self):
		# Every flag that reached the lowest-priority handler means the key
		# was handled nowhere: flash the symbol for two seconds.
		if self.flags == self.uflags:
			self.unhandledKeyDialog.show()
			self.hideUnhandledKeySymbolTimer.start(2000, True)
class InfoBarScreenSaver:
	"""Mixin that brings up the screensaver after a configurable idle time
	while no video is on screen (paused playback or an audio-only service)."""
	def __init__(self):
		self.onExecBegin.append(self.__onExecBegin)
		self.onExecEnd.append(self.__onExecEnd)
		self.screenSaverTimer = eTimer()
		self.screenSaverTimer.callback.append(self.screensaverTimeout)
		self.screensaver = self.session.instantiateDialog(ScreenSaver.Screensaver)
		self.onLayoutFinish.append(self.__layoutFinished)
	def __layoutFinished(self):
		self.screensaver.hide()
	def __onExecBegin(self):
		self.ScreenSaverTimerStart()
	def __onExecEnd(self):
		if self.screensaver.shown:
			self.screensaver.hide()
			eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
		self.screenSaverTimer.stop()
	def ScreenSaverTimerStart(self):
		# Arm the timer when playback is paused (seekstate[0]) or the running
		# service looks audio-only (service type "2" or a known audio file
		# extension) — presumably radio/music; confirm against seekstate docs.
		timeout = int(config.usage.screen_saver.value)
		flag = self.seekstate[0]
		if not flag:
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			if ref and not (hasattr(self.session, "pipshown") and self.session.pipshown):
				fields = ref.toString().split(":")
				flag = fields[2] == "2" or os.path.splitext(fields[10])[1].lower() in AUDIO_EXTENSIONS
		if timeout and flag:
			self.screenSaverTimer.startLongTimer(timeout)
		else:
			self.screenSaverTimer.stop()
	def screensaverTimeout(self):
		if not self.execing or Standby.inStandby or Standby.inTryQuitMainloop:
			return
		self.hide()
		if hasattr(self, "pvrStateDialog"):
			self.pvrStateDialog.hide()
		self.screensaver.show()
		# grab every key at highest priority so any press wakes us up
		eActionMap.getInstance().bindAction('', -maxint - 1, self.keypressScreenSaver)
	def keypressScreenSaver(self, key, flag):
		if flag:
			self.screensaver.hide()
			self.show()
			self.ScreenSaverTimerStart()
			eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
class SecondInfoBar(Screen):
	# Extended ("second") infobar dialog, instantiated by InfoBarShowHide.
	# self.skin is left unset — presumably resolved by screen name from the
	# active skin; confirm against the skin loader.
	def __init__(self, session):
		Screen.__init__(self, session)
		self.skin = None
class InfoBarShowHide(InfoBarScreenSaver):
	""" InfoBar show/hide control, accepts toggleShow and hide actions, might start
	fancy animations. """
	# Visibility state machine. Only HIDDEN and SHOWN are ever assigned in
	# this class; HIDING/SHOWING exist for subclasses/animations.
	STATE_HIDDEN = 0
	STATE_HIDING = 1
	STATE_SHOWING = 2
	STATE_SHOWN = 3
	def __init__(self):
		self["ShowHideActions"] = ActionMap( ["InfobarShowHideActions"] ,
			{
				"toggleShow": self.okButtonCheck,
				"hide": self.keyHide,
			}, 1) # lower prio to make it possible to override ok and cancel..
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evStart: self.serviceStarted,
			})
		InfoBarScreenSaver.__init__(self)
		self.__state = self.STATE_SHOWN
		self.__locked = 0  # lockShow()/unlockShow() nesting counter
		self.hideTimer = eTimer()
		self.hideTimer.callback.append(self.doTimerHide)
		self.hideTimer.start(5000, True)
		self.onShow.append(self.__onShow)
		self.onHide.append(self.__onHide)
		self.onShowHideNotifiers = []
		# Only the standard InfoBar owns a SecondInfoBar dialog; for derived
		# screens this stays "" (falsy), which all checks below rely on.
		self.secondInfoBarScreen = ""
		if isStandardInfoBar(self):
			self.secondInfoBarScreen = self.session.instantiateDialog(SecondInfoBar)
			self.secondInfoBarScreen.show()
		self.onLayoutFinish.append(self.__layoutFinished)
	def __layoutFinished(self):
		if self.secondInfoBarScreen:
			self.secondInfoBarScreen.hide()
	def __onShow(self):
		# Keep state in sync and inform interested parties (True = shown).
		self.__state = self.STATE_SHOWN
		for x in self.onShowHideNotifiers:
			x(True)
		self.startHideTimer()
	def __onHide(self):
		self.__state = self.STATE_HIDDEN
		if self.secondInfoBarScreen:
			self.secondInfoBarScreen.hide()
		for x in self.onShowHideNotifiers:
			x(False)
	def keyHide(self):
		# EXIT key: optionally offer to disable PiP first, depending on the
		# pip_hideOnExit setting ("popup" asks, other values act directly).
		if self.__state == self.STATE_HIDDEN and self.session.pipshown and "popup" in config.usage.pip_hideOnExit.value:
			if config.usage.pip_hideOnExit.value == "popup":
				self.session.openWithCallback(self.hidePipOnExitCallback, MessageBox, _("Disable Picture in Picture"), simple=True)
			else:
				self.hidePipOnExitCallback(True)
		elif config.usage.ok_is_channelselection.value and hasattr(self, "openServiceList"):
			self.toggleShow()
		elif self.__state == self.STATE_SHOWN:
			self.hide()
	def hidePipOnExitCallback(self, answer):
		if answer == True:
			# NOTE(review): PiP is active here, so this call is expected to
			# toggle it off — confirm against InfoBarPiP.showPiP.
			self.showPiP()
	def connectShowHideNotifier(self, fnc):
		# Register fnc(shown_bool) to be called on every show/hide.
		if not fnc in self.onShowHideNotifiers:
			self.onShowHideNotifiers.append(fnc)
	def disconnectShowHideNotifier(self, fnc):
		if fnc in self.onShowHideNotifiers:
			self.onShowHideNotifiers.remove(fnc)
	def serviceStarted(self):
		if self.execing:
			if config.usage.show_infobar_on_zap.value:
				self.doShow()
	def startHideTimer(self):
		# (Re)arm the auto-hide timer unless a lockShow() is pending.
		if self.__state == self.STATE_SHOWN and not self.__locked:
			self.hideTimer.stop()
			if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
				idx = config.usage.show_second_infobar.index - 1
			else:
				idx = config.usage.infobar_timeout.index
			# NOTE(review): the config *index* is passed as the timeout in
			# seconds — looks intentional upstream, but worth confirming.
			if idx:
				self.hideTimer.startLongTimer(idx)
	def doShow(self):
		self.show()
		self.startHideTimer()
	def doTimerHide(self):
		self.hideTimer.stop()
		if self.__state == self.STATE_SHOWN:
			self.hide()
	def okButtonCheck(self):
		# OK key: either open the channel list or toggle the infobar,
		# depending on the ok_is_channelselection setting.
		if config.usage.ok_is_channelselection.value and hasattr(self, "openServiceList"):
			self.openServiceList()
		else:
			self.toggleShow()
	def toggleShow(self):
		if self.__state == self.STATE_HIDDEN:
			self.showFirstInfoBar()
		else:
			self.showSecondInfoBar()
	def showSecondInfoBar(self):
		# Second press: show EPG, the second infobar, or hide — per config.
		if isStandardInfoBar(self) and config.usage.show_second_infobar.value == "EPG":
			if not(hasattr(self, "hotkeyGlobal") and self.hotkeyGlobal("info") != 0):
				self.showDefaultEPG()
		elif self.secondInfoBarScreen and config.usage.show_second_infobar.value and not self.secondInfoBarScreen.shown:
			self.show()
			self.secondInfoBarScreen.show()
			self.startHideTimer()
		else:
			self.hide()
			self.hideTimer.stop()
	def showFirstInfoBar(self):
		if self.__state == self.STATE_HIDDEN or self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
			self.secondInfoBarScreen and self.secondInfoBarScreen.hide()
			self.show()
		else:
			self.hide()
			self.hideTimer.stop()
	def lockShow(self):
		# Keep the infobar visible until a matching unlockShow().
		self.__locked = self.__locked + 1
		if self.execing:
			self.show()
			self.hideTimer.stop()
	def unlockShow(self):
		self.__locked = self.__locked - 1
		if self.execing:
			self.startHideTimer()
class BufferIndicator(Screen):
	"""Small overlay showing "Buffering NN%" while a streamed service fills
	its buffer; suppressed once gstreamer reports playback has started."""
	def __init__(self, session):
		Screen.__init__(self, session)
		self["status"] = Label()
		self.mayShow = False
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evBuffering: self.bufferChanged,
				iPlayableService.evStart: self.__evStart,
				iPlayableService.evGstreamerPlayStarted: self.__evGstreamerPlayStarted,
			})
	def bufferChanged(self):
		if not self.mayShow:
			return
		service = self.session.nav.getCurrentService()
		info = service and service.info()
		if not info:
			return
		value = info.getInfo(iServiceInformation.sBuffer)
		if value and value != 100:
			self["status"].setText(_("Buffering %d%%") % value)
			if not self.shown:
				self.show()
	def __evStart(self):
		# new service: allow the indicator again until playback starts
		self.mayShow = True
		self.hide()
	def __evGstreamerPlayStarted(self):
		self.mayShow = False
		self.hide()
class InfoBarBuffer():
	"""Mixin that instantiates the (initially hidden) BufferIndicator
	overlay for this session."""
	def __init__(self):
		indicator = self.session.instantiateDialog(BufferIndicator)
		indicator.hide()
		self.bufferScreen = indicator
class NumberZap(Screen):
	"""Dialog that collects typed digits and resolves them to a channel;
	auto-confirms after a short timeout or as soon as five digits are in."""
	def quit(self):
		self.Timer.stop()
		self.close()
	def keyOK(self):
		self.Timer.stop()
		self.close(self.service, self.bouquet)
	def handleServiceName(self):
		"""Resolve the currently typed number and update the name labels."""
		if not self.searchNumber:
			return
		self.service, self.bouquet = self.searchNumber(int(self["number"].getText()))
		name = ServiceReference(self.service).getServiceName()
		self["servicename"].text = name
		self["servicename_summary"].text = name
		if not self.startBouquet:
			self.startBouquet = self.bouquet
	def keyBlue(self):
		# Blue toggles between "first bouquet only" and a global search.
		self.Timer.start(3000, True)
		if self.searchNumber:
			number = int(self["number"].getText())
			if self.startBouquet == self.bouquet:
				self.service, self.bouquet = self.searchNumber(number, firstBouquetOnly = True)
			else:
				self.service, self.bouquet = self.searchNumber(number)
			name = ServiceReference(self.service).getServiceName()
			self["servicename"].text = name
			self["servicename_summary"].text = name
	def keyNumberGlobal(self, number):
		self.Timer.start(1000, True)
		self.numberString += str(number)
		self["number"].text = self["number_summary"].text = self.numberString
		self.handleServiceName()
		if len(self.numberString) >= 5:
			# channel numbers have at most five digits -> confirm right away
			self.keyOK()
	def __init__(self, session, number, searchNumberFunction = None):
		Screen.__init__(self, session)
		self.numberString = str(number)
		self.searchNumber = searchNumberFunction
		self.startBouquet = None
		self["channel"] = Label(_("Channel:"))
		self["number"] = Label(self.numberString)
		self["servicename"] = Label()
		self["channel_summary"] = StaticText(_("Channel:"))
		self["number_summary"] = StaticText(self.numberString)
		self["servicename_summary"] = StaticText()
		self.handleServiceName()
		keymap = {
			"cancel": self.quit,
			"ok": self.keyOK,
			"blue": self.keyBlue,
		}
		for digit in "1234567890":
			keymap[digit] = self.keyNumberGlobal
		self["actions"] = NumberActionMap( [ "SetupActions", "ShortcutActions" ], keymap)
		self.Timer = eTimer()
		self.Timer.callback.append(self.keyOK)
		self.Timer.start(3000, True)
class InfoBarNumberZap:
	""" Handles an initial number for NumberZapping """
	def __init__(self):
		mapping = {}
		for digit in range(10):
			mapping[str(digit)] = self.keyNumberGlobal
		self["NumberActions"] = NumberActionMap( [ "NumberActions"], mapping)
	def keyNumberGlobal(self, number):
		if number == 0:
			# 0 either hands over to PiP or recalls the previous service.
			if isinstance(self, InfoBarPiP) and self.pipHandles0Action():
				self.pipDoHandle0Action()
			elif len(self.servicelist.history) > 1:
				self.checkTimeshiftRunning(self.recallPrevService)
			return
		if self.has_key("TimeshiftActions") and self.timeshiftEnabled():
			ts = self.getTimeshift()
			if ts and ts.isTimeshiftActive():
				# never zap away while timeshift is active
				return
		self.session.openWithCallback(self.numberEntered, NumberZap, number, self.searchNumber)
	def recallPrevService(self, reply):
		if reply:
			self.servicelist.recallPrevService()
	def numberEntered(self, service = None, bouquet = None):
		if service:
			self.selectAndStartService(service, bouquet)
	def searchNumberHelper(self, serviceHandler, num, bouquet):
		"""Walk *bouquet* and return the service with channel number *num*,
		or None when it is not in there."""
		candidates = serviceHandler.list(bouquet)
		if candidates:
			item = candidates.getNext()
			while item.valid():
				if item.getChannelNum() == num:
					return item
				item = candidates.getNext()
		return None
	def searchNumber(self, number, firstBouquetOnly=False, bouquet=None):
		"""Resolve *number* to (service, bouquet), optionally searching all
		bouquets below the bouquet root."""
		bouquet = bouquet or self.servicelist.getRoot()
		service = None
		serviceHandler = eServiceCenter.getInstance()
		if not firstBouquetOnly:
			service = self.searchNumberHelper(serviceHandler, number, bouquet)
		if config.usage.multibouquet.value and not service:
			# not found in the current bouquet: scan the bouquet root
			bouquet = self.servicelist.bouquet_root
			bouquetlist = serviceHandler.list(bouquet)
			if bouquetlist:
				bouquet = bouquetlist.getNext()
				while bouquet.valid():
					if bouquet.flags & eServiceReference.isDirectory:
						service = self.searchNumberHelper(serviceHandler, number, bouquet)
						if service:
							playable = not (service.flags & (eServiceReference.isMarker|eServiceReference.isDirectory)) or (service.flags & eServiceReference.isNumberedMarker)
							if not playable:
								service = None
							break
						if config.usage.alternative_number_mode.value or firstBouquetOnly:
							break
					bouquet = bouquetlist.getNext()
		return service, bouquet
	def selectAndStartService(self, service, bouquet):
		if service and not service.flags & eServiceReference.isMarker:
			if self.servicelist.getRoot() != bouquet: # already in correct bouquet?
				self.servicelist.clearPath()
				if self.servicelist.bouquet_root != bouquet:
					self.servicelist.enterPath(self.servicelist.bouquet_root)
				self.servicelist.enterPath(bouquet)
			self.servicelist.setCurrentSelection(service) # select the service in servicelist
			self.servicelist.zap(enable_pipzap = True)
			self.servicelist.correctChannelNumber()
			self.servicelist.startRoot = None
	def zapToNumber(self, number):
		service, bouquet = self.searchNumber(number)
		self.selectAndStartService(service, bouquet)
# First-run marker: cleared and saved by InfoBarChannelSelection.firstRun
# after the channel list has been opened once.
config.misc.initialchannelselection = ConfigBoolean(default = True)
class InfoBarChannelSelection:
	""" ChannelSelection - handles the channelSelection dialog and the initial
	channelChange actions which open the channelSelection dialog """
	def __init__(self):
		#instantiate forever
		self.servicelist = self.session.instantiateDialog(ChannelSelection)
		if config.misc.initialchannelselection.value:
			self.onShown.append(self.firstRun)
		self["ChannelSelectActions"] = HelpableActionMap(self, "InfobarChannelSelection",
			{
				"keyUp": (self.keyUpCheck, self.getKeyUpHelptext),
				"keyDown": (self.keyDownCheck, self.getKeyDownHelpText),
				"keyLeft": (self.keyLeftCheck, self.getKeyLeftHelptext),
				"keyRight": (self.keyRightCheck, self.getKeyRightHelptext),
				"historyBack": (self.historyBack, _("Switch to previous channel in history")),
				"historyNext": (self.historyNext, _("Switch to next channel in history")),
				"keyChannelUp": (self.keyChannelUpCheck, self.getKeyChannelUpHelptext),
				"keyChannelDown": (self.keyChannelDownCheck, self.getKeyChannelDownHelptext),
			})
	def showTvChannelList(self, zap=False):
		self.servicelist.setModeTv()
		if zap:
			self.servicelist.zap()
	def showRadioChannelList(self, zap=False):
		self.servicelist.setModeRadio()
		if zap:
			self.servicelist.zap()
	def firstRun(self):
		# One-shot hook (armed via config.misc.initialchannelselection):
		# opens the channel list once on first boot, then clears the flag.
		self.onShown.remove(self.firstRun)
		config.misc.initialchannelselection.value = False
		config.misc.initialchannelselection.save()
		self.switchChannelDown()
	def historyBack(self):
		self.checkTimeshiftRunning(self.historyBackCheckTimeshiftCallback)
	def historyBackCheckTimeshiftCallback(self, answer):
		if answer:
			self.servicelist.historyBack()
	def historyNext(self):
		self.checkTimeshiftRunning(self.historyNextCheckTimeshiftCallback)
	def historyNextCheckTimeshiftCallback(self, answer):
		if answer:
			self.servicelist.historyNext()
	# The key*Check methods below route the arrow/channel keys according to
	# the "oldstyle zap controls" and "volume instead of channel selection"
	# user settings; the matching getKey*Helptext methods mirror the same
	# decision tree for the help screen.
	def keyUpCheck(self):
		if config.usage.oldstyle_zap_controls.value:
			self.zapDown()
		elif config.usage.volume_instead_of_channelselection.value:
			VolumeControl.instance and VolumeControl.instance.volUp()
		else:
			self.switchChannelUp()
	def keyDownCheck(self):
		if config.usage.oldstyle_zap_controls.value:
			self.zapUp()
		elif config.usage.volume_instead_of_channelselection.value:
			VolumeControl.instance and VolumeControl.instance.volDown()
		else:
			self.switchChannelDown()
	def keyLeftCheck(self):
		if config.usage.oldstyle_zap_controls.value:
			if config.usage.volume_instead_of_channelselection.value:
				VolumeControl.instance and VolumeControl.instance.volDown()
			else:
				self.switchChannelUp()
		else:
			self.zapUp()
	def keyRightCheck(self):
		if config.usage.oldstyle_zap_controls.value:
			if config.usage.volume_instead_of_channelselection.value:
				VolumeControl.instance and VolumeControl.instance.volUp()
			else:
				self.switchChannelDown()
		else:
			self.zapDown()
	def keyChannelUpCheck(self):
		if config.usage.zap_with_ch_buttons.value:
			self.zapDown()
		else:
			self.openServiceList()
	def keyChannelDownCheck(self):
		if config.usage.zap_with_ch_buttons.value:
			self.zapUp()
		else:
			self.openServiceList()
	def getKeyUpHelptext(self):
		if config.usage.oldstyle_zap_controls.value:
			value = _("Switch to next channel")
		else:
			if config.usage.volume_instead_of_channelselection.value:
				value = _("Volume up")
			else:
				value = _("Open service list")
				if not "keep" in config.usage.servicelist_cursor_behavior.value:
					value += " " + _("and select previous channel")
		return value
	def getKeyDownHelpText(self):
		if config.usage.oldstyle_zap_controls.value:
			value = _("Switch to previous channel")
		else:
			if config.usage.volume_instead_of_channelselection.value:
				value = _("Volume down")
			else:
				value = _("Open service list")
				if not "keep" in config.usage.servicelist_cursor_behavior.value:
					value += " " + _("and select next channel")
		return value
	def getKeyLeftHelptext(self):
		if config.usage.oldstyle_zap_controls.value:
			if config.usage.volume_instead_of_channelselection.value:
				value = _("Volume down")
			else:
				value = _("Open service list")
				if not "keep" in config.usage.servicelist_cursor_behavior.value:
					value += " " + _("and select previous channel")
		else:
			value = _("Switch to previous channel")
		return value
	def getKeyRightHelptext(self):
		if config.usage.oldstyle_zap_controls.value:
			if config.usage.volume_instead_of_channelselection.value:
				value = _("Volume up")
			else:
				value = _("Open service list")
				if not "keep" in config.usage.servicelist_cursor_behavior.value:
					value += " " + _("and select next channel")
		else:
			value = _("Switch to next channel")
		return value
	def getKeyChannelUpHelptext(self):
		return config.usage.zap_with_ch_buttons.value and _("Switch to next channel") or _("Open service list")
	def getKeyChannelDownHelptext(self):
		return config.usage.zap_with_ch_buttons.value and _("Switch to previous channel") or _("Open service list")
	def switchChannelUp(self):
		if "keep" not in config.usage.servicelist_cursor_behavior.value:
			self.servicelist.moveUp()
		self.session.execDialog(self.servicelist)
	def switchChannelDown(self):
		if "keep" not in config.usage.servicelist_cursor_behavior.value:
			self.servicelist.moveDown()
		self.session.execDialog(self.servicelist)
	def zapUp(self):
		# Move up through the bouquet until a playable service (or the one
		# we started from) is found, optionally wrapping into the previous
		# bouquet; then zap to it.
		if self.servicelist.inBouquet():
			prev = self.servicelist.getCurrentSelection()
			if prev:
				prev = prev.toString()
				while True:
					if config.usage.quickzap_bouquet_change.value:
						if self.servicelist.atBegin():
							self.servicelist.prevBouquet()
					self.servicelist.moveUp()
					cur = self.servicelist.getCurrentSelection()
					if cur:
						if self.servicelist.dopipzap:
							isPlayable = self.session.pip.isPlayableForPipService(cur)
						else:
							isPlayable = isPlayableForCur(cur)
					if cur and (cur.toString() == prev or isPlayable):
						break
		else:
			self.servicelist.moveUp()
		self.servicelist.zap(enable_pipzap = True)
	def zapDown(self):
		# Mirror of zapUp in the other direction, wrapping into the next
		# bouquet when quickzap_bouquet_change is enabled.
		if self.servicelist.inBouquet():
			prev = self.servicelist.getCurrentSelection()
			if prev:
				prev = prev.toString()
				while True:
					if config.usage.quickzap_bouquet_change.value and self.servicelist.atEnd():
						self.servicelist.nextBouquet()
					else:
						self.servicelist.moveDown()
					cur = self.servicelist.getCurrentSelection()
					if cur:
						if self.servicelist.dopipzap:
							isPlayable = self.session.pip.isPlayableForPipService(cur)
						else:
							isPlayable = isPlayableForCur(cur)
					if cur and (cur.toString() == prev or isPlayable):
						break
		else:
			self.servicelist.moveDown()
		self.servicelist.zap(enable_pipzap = True)
	def openFavouritesList(self):
		self.servicelist.showFavourites()
		self.openServiceList()
	def openServiceList(self):
		self.session.execDialog(self.servicelist)
class InfoBarMenu:
""" Handles a menu action, to open the (main) menu """
def __init__(self):
self["MenuActions"] = HelpableActionMap(self, "InfobarMenuActions",
{
"mainMenu": (self.mainMenu, _("Enter main menu...")),
})
self.session.infobar = None
def mainMenu(self):
print "loading mainmenu XML..."
menu = mdom.getroot()
assert menu.tag == "menu", "root element in menu must be 'menu'!"
self.session.infobar = self
# so we can access the currently active infobar from screens opened from within the mainmenu
# at the moment used from the SubserviceSelection
self.session.openWithCallback(self.mainMenuClosed, MainMenu, menu)
def mainMenuClosed(self, *val):
self.session.infobar = None
class InfoBarSimpleEventView:
	""" Opens the Eventview for now/next """
	def __init__(self):
		self["EPGActions"] = HelpableActionMap(self, "InfobarEPGActions",
			{
				"showEventInfo": (self.openEventView, _("Show event details")),
				"showEventInfoSingleEPG": (self.openEventView, _("Show event details")),
				"showInfobarOrEpgWhenInfobarAlreadyVisible": self.showEventInfoWhenNotVisible,
			})
	def showEventInfoWhenNotVisible(self):
		# With the infobar hidden, the first press only brings it up;
		# a second press opens the event view.
		if self.shown:
			self.openEventView()
		else:
			self.toggleShow()
			return 1
	def openEventView(self):
		"""Collect the now/next events of the running service and show them
		in a simple event view (no-op when there is no event info)."""
		epglist = [ ]
		self.epglist = epglist
		service = self.session.nav.getCurrentService()
		ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		# Guard against a missing service/info (e.g. right after zapping to a
		# dead service) the same way InfoBarEPG.getNowNext does, instead of
		# crashing on service.info().
		info = service and service.info()
		ptr = info and info.getEvent(0)
		if ptr:
			epglist.append(ptr)
		ptr = info and info.getEvent(1)
		if ptr:
			epglist.append(ptr)
		if epglist:
			self.session.open(EventViewSimple, epglist[0], ServiceReference(ref), self.eventViewCallback)
	def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
		# swap "now" and "next" and redisplay on each INFO press
		epglist = self.epglist
		if len(epglist) > 1:
			tmp = epglist[0]
			epglist[0] = epglist[1]
			epglist[1] = tmp
			setEvent(epglist[0])
class SimpleServicelist:
	"""Minimal cursor over a list of ServiceReference-like objects with
	wrap-around next/prev navigation."""
	def __init__(self, services):
		self.services = services
		self.length = len(services)
		self.current = 0
	def selectService(self, service):
		"""Move the cursor to *service*; returns False when the list is
		empty or the service is not present."""
		if not self.length:
			self.current = -1
			return False
		for index in range(self.length):
			if self.services[index].ref == service:
				self.current = index
				return True
		# not found: leave the cursor past the end (currentService -> None)
		self.current = self.length
		return False
	def nextService(self):
		if not self.length:
			return
		succ = self.current + 1
		self.current = succ if succ < self.length else 0
	def prevService(self):
		if not self.length:
			return
		pred = self.current - 1
		self.current = pred if pred > -1 else self.length - 1
	def currentService(self):
		if not self.length or self.current >= self.length:
			return None
		return self.services[self.current]
class InfoBarEPG:
	""" EPG - Opens an EPG list when the showEPGList action fires """
	def __init__(self):
		# is_now_next: True while the event view shows live now/next data
		# that __evEventInfoChanged may refresh in place.
		self.is_now_next = False
		# dlg_stack tracks the chain of EPG dialogs so closed() can cascade.
		self.dlg_stack = [ ]
		self.bouquetSel = None
		self.eventView = None
		self.epglist = []
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evUpdatedEventInfo: self.__evEventInfoChanged,
			})
		self["EPGActions"] = HelpableActionMap(self, "InfobarEPGActions",
			{
				"showEventInfo": (self.showDefaultEPG, _("Show EPG...")),
				"showEventInfoSingleEPG": (self.showSingleEPG, _("Show single service EPG")),
				"showEventInfoMultiEPG": (self.showMultiEPG, _("Show multi channel EPG")),
				"showInfobarOrEpgWhenInfobarAlreadyVisible": self.showEventInfoWhenNotVisible,
			})
	def getEPGPluginList(self, getAll=False):
		# Event-info plugins that do not expect a 'selectedevent' argument,
		# plus the built-in EPG entries when only one infobar exists.
		pluginlist = [(p.name, boundFunction(self.runPlugin, p), p.path) for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EVENTINFO) \
				if 'selectedevent' not in p.__call__.func_code.co_varnames] or []
		from Components.ServiceEventTracker import InfoBarCount
		if getAll or InfoBarCount == 1:
			pluginlist.append((_("Show EPG for current channel..."), self.openSingleServiceEPG, "current_channel"))
		pluginlist.append((_("Multi EPG"), self.openMultiServiceEPG, "multi_epg"))
		pluginlist.append((_("Current event EPG"), self.openEventView, "event_epg"))
		return pluginlist
	def showEventInfoWhenNotVisible(self):
		# First press only brings up the infobar; second press opens EPG.
		if self.shown:
			self.openEventView()
		else:
			self.toggleShow()
			return 1
	def zapToService(self, service, preview = False, zapback = False):
		# Zap callback handed to EPGSelection; navigates the channel list to
		# the target bouquet/service, honouring preview and zapback modes.
		if self.servicelist.startServiceRef is None:
			self.servicelist.startServiceRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		if service is not None:
			if self.servicelist.getRoot() != self.epg_bouquet: #already in correct bouquet?
				self.servicelist.clearPath()
				if self.servicelist.bouquet_root != self.epg_bouquet:
					self.servicelist.enterPath(self.servicelist.bouquet_root)
				self.servicelist.enterPath(self.epg_bouquet)
			self.servicelist.setCurrentSelection(service) #select the service in servicelist
		if not zapback or preview:
			self.servicelist.zap(enable_pipzap = True)
		if (self.servicelist.dopipzap or zapback) and not preview:
			self.servicelist.zapBack()
		if not preview:
			self.servicelist.startServiceRef = None
			self.servicelist.startRoot = None
	def getBouquetServices(self, bouquet):
		"""Return the playable services of *bouquet* as ServiceReference
		objects (directories and markers are skipped)."""
		services = [ ]
		servicelist = eServiceCenter.getInstance().list(bouquet)
		if not servicelist is None:
			while True:
				service = servicelist.getNext()
				if not service.valid(): #check if end of list
					break
				if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker): #ignore non playable services
					continue
				services.append(ServiceReference(service))
		return services
	def openBouquetEPG(self, bouquet, withCallback=True):
		services = self.getBouquetServices(bouquet)
		if services:
			self.epg_bouquet = bouquet
			if withCallback:
				self.dlg_stack.append(self.session.openWithCallback(self.closed, EPGSelection, services, self.zapToService, None, self.changeBouquetCB))
			else:
				self.session.open(EPGSelection, services, self.zapToService, None, self.changeBouquetCB)
	def changeBouquetCB(self, direction, epg):
		# Bouquet +/- from within an open EPG list.
		if self.bouquetSel:
			if direction > 0:
				self.bouquetSel.down()
			else:
				self.bouquetSel.up()
			bouquet = self.bouquetSel.getCurrent()
			services = self.getBouquetServices(bouquet)
			if services:
				self.epg_bouquet = bouquet
				epg.setServices(services)
	def closed(self, ret=False):
		# Dialog-close callback: unwind dlg_stack; a truthy ret cascades the
		# close to the dialog underneath.
		closedScreen = self.dlg_stack.pop()
		if self.bouquetSel and closedScreen == self.bouquetSel:
			self.bouquetSel = None
		elif self.eventView and closedScreen == self.eventView:
			self.eventView = None
		if ret:
			dlgs=len(self.dlg_stack)
			if dlgs > 0:
				self.dlg_stack[dlgs-1].close(dlgs > 1)
	def openMultiServiceEPG(self, withCallback=True):
		bouquets = self.servicelist.getBouquetList()
		if bouquets is None:
			cnt = 0
		else:
			cnt = len(bouquets)
		if config.usage.multiepg_ask_bouquet.value:
			self.openMultiServiceEPGAskBouquet(bouquets, cnt, withCallback)
		else:
			self.openMultiServiceEPGSilent(bouquets, cnt, withCallback)
	def openMultiServiceEPGAskBouquet(self, bouquets, cnt, withCallback):
		if cnt > 1: # show bouquet list
			if withCallback:
				self.bouquetSel = self.session.openWithCallback(self.closed, BouquetSelector, bouquets, self.openBouquetEPG, enableWrapAround=True)
				self.dlg_stack.append(self.bouquetSel)
			else:
				self.bouquetSel = self.session.open(BouquetSelector, bouquets, self.openBouquetEPG, enableWrapAround=True)
		elif cnt == 1:
			self.openBouquetEPG(bouquets[0][1], withCallback)
	def openMultiServiceEPGSilent(self, bouquets, cnt, withCallback):
		# Open the EPG for the current bouquet without asking; a silent
		# bouquet selector still backs the bouquet +/- keys.
		root = self.servicelist.getRoot()
		rootstr = root.toCompareString()
		current = 0
		for bouquet in bouquets:
			if bouquet[1].toCompareString() == rootstr:
				break
			current += 1
		if current >= cnt:
			current = 0
		if cnt > 1: # create bouquet list for bouq+/-
			self.bouquetSel = SilentBouquetSelector(bouquets, True, self.servicelist.getBouquetNumOffset(root))
		if cnt >= 1:
			self.openBouquetEPG(root, withCallback)
	def changeServiceCB(self, direction, epg):
		if self.serviceSel:
			if direction > 0:
				self.serviceSel.nextService()
			else:
				self.serviceSel.prevService()
			epg.setService(self.serviceSel.currentService())
	def SingleServiceEPGClosed(self, ret=False):
		self.serviceSel = None
	def openSingleServiceEPG(self):
		ref = self.servicelist.getCurrentSelection()
		if ref:
			if self.servicelist.getMutableList(): # bouquet in channellist
				current_path = self.servicelist.getRoot()
				services = self.getBouquetServices(current_path)
				self.serviceSel = SimpleServicelist(services)
				if self.serviceSel.selectService(ref):
					self.epg_bouquet = current_path
					self.session.openWithCallback(self.SingleServiceEPGClosed, EPGSelection, ref, self.zapToService, serviceChangeCB=self.changeServiceCB)
				else:
					self.session.openWithCallback(self.SingleServiceEPGClosed, EPGSelection, ref)
			else:
				self.session.open(EPGSelection, ref)
	def runPlugin(self, plugin):
		plugin(session = self.session, servicelist = self.servicelist)
	def showEventInfoPlugins(self):
		pluginlist = self.getEPGPluginList()
		if pluginlist:
			self.session.openWithCallback(self.EventInfoPluginChosen, ChoiceBox, title=_("Please choose an extension..."), list=pluginlist, skin_name="EPGExtensionsList", reorderConfig="eventinfo_order")
		else:
			self.openSingleServiceEPG()
	def EventInfoPluginChosen(self, answer):
		if answer is not None:
			answer[1]()
	def openSimilarList(self, eventid, refstr):
		self.session.open(EPGSelection, refstr, None, eventid)
	def getNowNext(self):
		"""Fill self.epglist with the now/next events of the running
		service (0, 1 or 2 entries)."""
		epglist = [ ]
		service = self.session.nav.getCurrentService()
		info = service and service.info()
		ptr = info and info.getEvent(0)
		if ptr:
			epglist.append(ptr)
		ptr = info and info.getEvent(1)
		if ptr:
			epglist.append(ptr)
		self.epglist = epglist
	def __evEventInfoChanged(self):
		# Live refresh of an open now/next event view.
		if self.is_now_next and len(self.dlg_stack) == 1:
			self.getNowNext()
			if self.eventView and self.epglist:
				self.eventView.setEvent(self.epglist[0])
	def showDefaultEPG(self):
		self.openEventView()
	def showSingleEPG(self):
		self.openSingleServiceEPG()
	def showMultiEPG(self):
		self.openMultiServiceEPG()
	def openEventView(self):
		from Components.ServiceEventTracker import InfoBarCount
		if InfoBarCount > 1:
			# Secondary infobar (e.g. movie player): show service events only.
			epglist = [ ]
			self.epglist = epglist
			service = self.session.nav.getCurrentService()
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			info = service.info()
			ptr=info.getEvent(0)
			if ptr:
				epglist.append(ptr)
			ptr=info.getEvent(1)
			if ptr:
				epglist.append(ptr)
			if epglist:
				self.session.open(EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
		else:
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			self.getNowNext()
			epglist = self.epglist
			if not epglist:
				# no live now/next: fall back to cached EPG data
				self.is_now_next = False
				epg = eEPGCache.getInstance()
				ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
				if ptr:
					epglist.append(ptr)
					ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
					if ptr:
						epglist.append(ptr)
			else:
				self.is_now_next = True
			if epglist:
				self.eventView = self.session.openWithCallback(self.closed, EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
				self.dlg_stack.append(self.eventView)
		if not epglist:
			print "no epg for the service avail.. so we show multiepg instead of eventinfo"
			self.openMultiServiceEPG(False)
	def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
		# swap "now" and "next" and redisplay on each INFO press
		epglist = self.epglist
		if len(epglist) > 1:
			tmp = epglist[0]
			epglist[0]=epglist[1]
			epglist[1]=tmp
			setEvent(epglist[0])
class InfoBarRdsDecoder:
	"""provides RDS and Rass support/display"""
	def __init__(self):
		self.rds_display = self.session.instantiateDialog(RdsInfoDisplay)
		self.session.instantiateSummaryDialog(self.rds_display)
		self.rass_interactive = None
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evEnd: self.__serviceStopped,
				iPlayableService.evUpdatedRassSlidePic: self.RassSlidePicChanged
			})
		self["RdsActions"] = ActionMap(["InfobarRdsActions"],
			{
				"startRassInteractive": self.startRassInteractive
			},-1)
		self["RdsActions"].setEnabled(False)
		self.onLayoutFinish.append(self.rds_display.show)
		self.rds_display.onRassInteractivePossibilityChanged.append(self.RassInteractivePossibilityChanged)
	def RassInteractivePossibilityChanged(self, state):
		# Only offer the "start Rass interactive" key while the service
		# actually provides an interactive Rass.
		self["RdsActions"].setEnabled(state)
	def RassSlidePicChanged(self):
		# While the interactive viewer is closed, let the decoder render the
		# updated slide picture directly.
		if self.rass_interactive:
			return
		service = self.session.nav.getCurrentService()
		decoder = service and service.rdsDecoder()
		if decoder:
			decoder.showRassSlidePicture()
	def __serviceStopped(self):
		interactive = self.rass_interactive
		if interactive is not None:
			self.rass_interactive = None
			interactive.close()
	def startRassInteractive(self):
		self.rds_display.hide()
		self.rass_interactive = self.session.openWithCallback(self.RassInteractiveClosed, RassInteractive)
	def RassInteractiveClosed(self, *val):
		if self.rass_interactive is not None:
			self.rass_interactive = None
			self.RassSlidePicChanged()
		self.rds_display.show()
class InfoBarSeek:
	"""handles actions like seeking, pause"""
	# A seek state is a tuple: (pause flag, fast winding speed, slow motion divisor, OSD label).
	SEEK_STATE_PLAY = (0, 0, 0, ">")
	SEEK_STATE_PAUSE = (1, 0, 0, "||")
	SEEK_STATE_EOF = (1, 0, 0, "END")
	def __init__(self, actionmap = "InfobarSeekActions"):
		# follow service events so the seek state tracks the played service
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evSeekableStatusChanged: self.__seekableStatusChanged,
				iPlayableService.evStart: self.__serviceStarted,
				iPlayableService.evEOF: self.__evEOF,
				iPlayableService.evSOF: self.__evSOF,
			})
		self.fast_winding_hint_message_showed = False
		class InfoBarSeekActionMap(HelpableActionMap):
			"""Action map that additionally understands dynamic "seek:<seconds>" and "seekdef:<key>" actions."""
			def __init__(self, screen, *args, **kwargs):
				HelpableActionMap.__init__(self, screen, *args, **kwargs)
				self.screen = screen
			def action(self, contexts, action):
				print "action:", action
				if action[:5] == "seek:":
					time = int(action[5:])
					self.screen.doSeekRelative(time * 90000)
					return 1
				elif action[:8] == "seekdef:":
					key = int(action[8:])
					# keys 1..9 index this tuple; keys 2/5/8 yield False (offset 0),
					# the others the user-defined skip times (negative = backward)
					time = (-config.seek.selfdefined_13.value, False, config.seek.selfdefined_13.value,
						-config.seek.selfdefined_46.value, False, config.seek.selfdefined_46.value,
						-config.seek.selfdefined_79.value, False, config.seek.selfdefined_79.value)[key-1]
					self.screen.doSeekRelative(time * 90000)
					return 1
				else:
					return HelpableActionMap.action(self, contexts, action)
		self["SeekActions"] = InfoBarSeekActionMap(self, actionmap,
			{
				"playpauseService": (self.playpauseService, _("Pauze/Continue playback")),
				"pauseService": (self.pauseService, _("Pause playback")),
				"unPauseService": (self.unPauseService, _("Continue playback")),
				"okButton": (self.okButton, _("Continue playback")),
				"seekFwd": (self.seekFwd, _("Seek forward")),
				"seekFwdManual": (self.seekFwdManual, _("Seek forward (enter time)")),
				"seekBack": (self.seekBack, _("Seek backward")),
				"seekBackManual": (self.seekBackManual, _("Seek backward (enter time)")),
				"jumpPreviousMark": (self.seekPreviousMark, _("Jump to previous marked position")),
				"jumpNextMark": (self.seekNextMark, _("Jump to next marked position")),
			}, prio=-1)
			# give them a little more priority to win over color buttons
		self["SeekActions"].setEnabled(False)
		self.seekstate = self.SEEK_STATE_PLAY
		self.lastseekstate = self.SEEK_STATE_PLAY
		self.onPlayStateChanged = [ ]
		self.lockedBecauseOfSkipping = False
		self.__seekableStatusChanged()
	def makeStateForward(self, n):
		"""Return the seek state tuple for fast forward at speed n."""
		return (0, n, 0, ">> %dx" % n)
	def makeStateBackward(self, n):
		"""Return the seek state tuple for rewind at speed n."""
		return (0, -n, 0, "<< %dx" % n)
	def makeStateSlowMotion(self, n):
		"""Return the seek state tuple for slow motion with divisor n."""
		return (0, 0, n, "/%d" % n)
	def isStateForward(self, state):
		# fast forward = winding speed greater than 1
		return state[1] > 1
	def isStateBackward(self, state):
		return state[1] < 0
	def isStateSlowMotion(self, state):
		return state[1] == 0 and state[2] > 1
	def getHigher(self, n, lst):
		"""Return the first entry of lst greater than n, or False (assumes lst sorted ascending)."""
		for x in lst:
			if x > n:
				return x
		return False
	def getLower(self, n, lst):
		"""Return the largest entry of lst smaller than n, or False (assumes lst sorted ascending)."""
		lst = lst[:]
		lst.reverse()
		for x in lst:
			if x < n:
				return x
		return False
	def showAfterSeek(self):
		# pop up the infobar after a skip when mixed in with InfoBarShowHide
		if isinstance(self, InfoBarShowHide):
			self.doShow()
	def up(self):
		pass
	def down(self):
		pass
	def getSeek(self):
		"""Return the current service's seek interface, or None if not currently seekable."""
		service = self.session.nav.getCurrentService()
		if service is None:
			return None
		seek = service.seek()
		if seek is None or not seek.isCurrentlySeekable():
			return None
		return seek
	def isSeekable(self):
		# for the standard infobar, seeking is only allowed while timeshift is enabled
		if self.getSeek() is None or (isStandardInfoBar(self) and not self.timeshiftEnabled()):
			return False
		return True
	def __seekableStatusChanged(self):
		# print "seekable status changed!"
		if not self.isSeekable():
			self["SeekActions"].setEnabled(False)
			# print "not seekable, return to play"
			self.setSeekState(self.SEEK_STATE_PLAY)
		else:
			self["SeekActions"].setEnabled(True)
			# print "seekable"
	def __serviceStarted(self):
		# a new service always starts in plain play state
		self.fast_winding_hint_message_showed = False
		self.setSeekState(self.SEEK_STATE_PLAY)
		self.__seekableStatusChanged()
	def setSeekState(self, state):
		"""Apply a seek state tuple to the current service, downgrading the
		request when the service cannot pause/wind. Notifies onPlayStateChanged
		listeners. Returns False when there is no service, True otherwise."""
		service = self.session.nav.getCurrentService()
		if service is None:
			return False
		if not self.isSeekable():
			if state not in (self.SEEK_STATE_PLAY, self.SEEK_STATE_PAUSE):
				state = self.SEEK_STATE_PLAY
		pauseable = service.pause()
		if pauseable is None:
			print "not pauseable."
			state = self.SEEK_STATE_PLAY
		self.seekstate = state
		if pauseable is not None:
			if self.seekstate[0]:
				print "resolved to PAUSE"
				pauseable.pause()
			elif self.seekstate[1]:
				if not pauseable.setFastForward(self.seekstate[1]):
					print "resolved to FAST FORWARD"
				else:
					self.seekstate = self.SEEK_STATE_PLAY
					print "FAST FORWARD not possible: resolved to PLAY"
			elif self.seekstate[2]:
				if not pauseable.setSlowMotion(self.seekstate[2]):
					print "resolved to SLOW MOTION"
				else:
					self.seekstate = self.SEEK_STATE_PAUSE
					print "SLOW MOTION not possible: resolved to PAUSE"
			else:
				print "resolved to PLAY"
				pauseable.unpause()
		for c in self.onPlayStateChanged:
			c(self.seekstate)
		self.checkSkipShowHideLock()
		if hasattr(self, "ScreenSaverTimerStart"):
			self.ScreenSaverTimerStart()
		return True
	def playpauseService(self):
		"""Toggle between pause and play (the play/pause key)."""
		if self.seekstate != self.SEEK_STATE_PLAY:
			self.unPauseService()
		else:
			self.pauseService()
	def okButton(self):
		"""OK key: unhandled while playing, toggle pause otherwise."""
		if self.seekstate == self.SEEK_STATE_PLAY:
			return 0
		elif self.seekstate == self.SEEK_STATE_PAUSE:
			self.pauseService()
		else:
			self.unPauseService()
	def pauseService(self):
		"""Pause playback; when already paused, act per config.seek.on_pause
		(resume, single frame step, or restore the last winding state)."""
		if self.seekstate == self.SEEK_STATE_PAUSE:
			if config.seek.on_pause.value == "play":
				self.unPauseService()
			elif config.seek.on_pause.value == "step":
				self.doSeekRelative(1)
			elif config.seek.on_pause.value == "last":
				self.setSeekState(self.lastseekstate)
				self.lastseekstate = self.SEEK_STATE_PLAY
		else:
			# remember the winding state so "last" can restore it (EOF is not worth remembering)
			if self.seekstate != self.SEEK_STATE_EOF:
				self.lastseekstate = self.seekstate
			self.setSeekState(self.SEEK_STATE_PAUSE)
	def unPauseService(self):
		"""Resume normal playback; unhandled (0) when already playing."""
		print "unpause"
		if self.seekstate == self.SEEK_STATE_PLAY:
			return 0
		self.setSeekState(self.SEEK_STATE_PLAY)
	def doSeek(self, pts):
		"""Seek to an absolute position given in PTS (90000 units per second)."""
		seekable = self.getSeek()
		if seekable is None:
			return
		seekable.seekTo(pts)
	def doSeekRelative(self, pts):
		"""Seek relative by pts (negative = backwards); leaves EOF state first."""
		seekable = self.getSeek()
		if seekable is None:
			return
		prevstate = self.seekstate
		if self.seekstate == self.SEEK_STATE_EOF:
			# NOTE(review): prevstate equals self.seekstate (EOF) here and
			# EOF != PAUSE, so the PAUSE branch can never trigger — confirm intent.
			if prevstate == self.SEEK_STATE_PAUSE:
				self.setSeekState(self.SEEK_STATE_PAUSE)
			else:
				self.setSeekState(self.SEEK_STATE_PLAY)
		seekable.seekRelative(pts<0 and -1 or 1, abs(pts))
		if abs(pts) > 100 and config.usage.show_infobar_on_skip.value:
			self.showAfterSeek()
	def seekFwd(self):
		"""Step one notch forward: start FF from play, slow motion from pause,
		or advance to the next configured speed."""
		seek = self.getSeek()
		if seek and not (seek.isCurrentlySeekable() & 2):
			if not self.fast_winding_hint_message_showed and (seek.isCurrentlySeekable() & 1):
				self.session.open(MessageBox, _("No fast winding possible yet.. but you can use the number buttons to skip forward/backward!"), MessageBox.TYPE_INFO, timeout=10)
				self.fast_winding_hint_message_showed = True
				return
			return 0 # trade as unhandled action
		if self.seekstate == self.SEEK_STATE_PLAY:
			self.setSeekState(self.makeStateForward(int(config.seek.enter_forward.value)))
		elif self.seekstate == self.SEEK_STATE_PAUSE:
			if len(config.seek.speeds_slowmotion.value):
				self.setSeekState(self.makeStateSlowMotion(config.seek.speeds_slowmotion.value[-1]))
			else:
				self.setSeekState(self.makeStateForward(int(config.seek.enter_forward.value)))
		elif self.seekstate == self.SEEK_STATE_EOF:
			pass
		elif self.isStateForward(self.seekstate):
			speed = self.seekstate[1]
			if self.seekstate[2]:
				speed /= self.seekstate[2]
			speed = self.getHigher(speed, config.seek.speeds_forward.value) or config.seek.speeds_forward.value[-1]
			self.setSeekState(self.makeStateForward(speed))
		elif self.isStateBackward(self.seekstate):
			speed = -self.seekstate[1]
			if self.seekstate[2]:
				speed /= self.seekstate[2]
			speed = self.getLower(speed, config.seek.speeds_backward.value)
			if speed:
				self.setSeekState(self.makeStateBackward(speed))
			else:
				self.setSeekState(self.SEEK_STATE_PLAY)
		elif self.isStateSlowMotion(self.seekstate):
			speed = self.getLower(self.seekstate[2], config.seek.speeds_slowmotion.value) or config.seek.speeds_slowmotion.value[0]
			self.setSeekState(self.makeStateSlowMotion(speed))
	def seekBack(self):
		"""Step one notch backward: start rewind from play, frame-step back from
		pause, or move to the next configured speed."""
		seek = self.getSeek()
		if seek and not (seek.isCurrentlySeekable() & 2):
			if not self.fast_winding_hint_message_showed and (seek.isCurrentlySeekable() & 1):
				self.session.open(MessageBox, _("No fast winding possible yet.. but you can use the number buttons to skip forward/backward!"), MessageBox.TYPE_INFO, timeout=10)
				self.fast_winding_hint_message_showed = True
				return
			return 0 # trade as unhandled action
		seekstate = self.seekstate
		if seekstate == self.SEEK_STATE_PLAY:
			self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
		elif seekstate == self.SEEK_STATE_EOF:
			self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
			self.doSeekRelative(-6)
		elif seekstate == self.SEEK_STATE_PAUSE:
			self.doSeekRelative(-1)
		elif self.isStateForward(seekstate):
			speed = seekstate[1]
			if seekstate[2]:
				speed /= seekstate[2]
			speed = self.getLower(speed, config.seek.speeds_forward.value)
			if speed:
				self.setSeekState(self.makeStateForward(speed))
			else:
				self.setSeekState(self.SEEK_STATE_PLAY)
		elif self.isStateBackward(seekstate):
			speed = -seekstate[1]
			if seekstate[2]:
				speed /= seekstate[2]
			speed = self.getHigher(speed, config.seek.speeds_backward.value) or config.seek.speeds_backward.value[-1]
			self.setSeekState(self.makeStateBackward(speed))
		elif self.isStateSlowMotion(seekstate):
			speed = self.getHigher(seekstate[2], config.seek.speeds_slowmotion.value)
			if speed:
				self.setSeekState(self.makeStateSlowMotion(speed))
			else:
				self.setSeekState(self.SEEK_STATE_PAUSE)
	def seekFwdManual(self):
		# ask the user for a number of minutes, then skip forward
		self.session.openWithCallback(self.fwdSeekTo, MinuteInput)
	def fwdSeekTo(self, minutes):
		print "Seek", minutes, "minutes forward"
		self.doSeekRelative(minutes * 60 * 90000)
	def seekBackManual(self):
		# ask the user for a number of minutes, then skip backward
		self.session.openWithCallback(self.rwdSeekTo, MinuteInput)
	def rwdSeekTo(self, minutes):
		print "rwdSeekTo"
		self.doSeekRelative(-minutes * 60 * 90000)
	def checkSkipShowHideLock(self):
		# keep the infobar locked visible while winding, when configured
		wantlock = self.seekstate != self.SEEK_STATE_PLAY
		if config.usage.show_infobar_on_skip.value:
			if self.lockedBecauseOfSkipping and not wantlock:
				self.unlockShow()
				self.lockedBecauseOfSkipping = False
			if wantlock and not self.lockedBecauseOfSkipping:
				self.lockShow()
				self.lockedBecauseOfSkipping = True
	def calcRemainingTime(self):
		"""Return the remaining time (PTS/90, i.e. milliseconds, scaled by the
		current winding speed), 0 when already past the end, or False if unknown."""
		seekable = self.getSeek()
		if seekable is not None:
			len = seekable.getLength()	# NOTE(review): shadows the builtin "len"
			try:
				# honour an end cut mark, if the cuesheet mixin provides one
				tmp = self.cueGetEndCutPosition()
				if tmp:
					len = (False, tmp)
			except:
				pass
			pos = seekable.getPlayPosition()
			speednom = self.seekstate[1] or 1
			speedden = self.seekstate[2] or 1
			if not len[0] and not pos[0]:
				if len[1] <= pos[1]:
					return 0
				time = (len[1] - pos[1])*speedden/(90*speednom)
				return time
		return False
	def __evEOF(self):
		"""Service EOF event: switch into the EOF state and dispatch doEofInternal."""
		if self.seekstate == self.SEEK_STATE_EOF:
			return
		# if we are seeking forward, we try to end up ~1s before the end, and pause there.
		seekstate = self.seekstate
		if self.seekstate != self.SEEK_STATE_PAUSE:
			self.setSeekState(self.SEEK_STATE_EOF)
		if seekstate not in (self.SEEK_STATE_PLAY, self.SEEK_STATE_PAUSE): # if we are seeking
			seekable = self.getSeek()
			if seekable is not None:
				seekable.seekTo(-1)
		if seekstate == self.SEEK_STATE_PLAY: # regular EOF
			self.doEofInternal(True)
		else:
			self.doEofInternal(False)
	def doEofInternal(self, playing):
		pass # Defined in subclasses
	def __evSOF(self):
		# start of file reached (e.g. while rewinding): return to normal play at position 0
		self.setSeekState(self.SEEK_STATE_PLAY)
		self.doSeek(0)
	# This is needed, because some Mediaplayer use InfoBarSeek but not InfoBarCueSheetSupport
	def seekPreviousMark(self):
		if isinstance(self, InfoBarCueSheetSupport):
			self.jumpPreviousMark()
	def seekNextMark(self):
		if isinstance(self, InfoBarCueSheetSupport):
			self.jumpNextMark()
from Screens.PVRState import PVRState, TimeshiftState
class InfoBarPVRState:
	"""Shows a small PVR state dialog (the seek state label) whenever the
	infobar is visible and playback is not in plain play state."""
	def __init__(self, screen=PVRState, force_show = False):
		self.force_show = force_show
		self.pvrStateDialog = self.session.instantiateDialog(screen)
		self.onPlayStateChanged.append(self.__playStateChanged)
		self.onShow.append(self._mayShow)
		self.onHide.append(self.pvrStateDialog.hide)
	def _mayShow(self):
		# only visible together with the infobar and outside of plain play
		if not self.shown:
			return
		if self.seekstate != self.SEEK_STATE_PLAY:
			self.pvrStateDialog.show()
	def __playStateChanged(self, state):
		# state[3] is the human readable label of the new seek state
		self.pvrStateDialog["state"].setText(state[3])
		hide_on_play = (self.seekstate == self.SEEK_STATE_PLAY
			and not config.usage.show_infobar_on_skip.value
			and not self.force_show)
		if hide_on_play:
			# back in "PLAY" with no infobar shown on skip: drop the dialog
			self.pvrStateDialog.hide()
		else:
			self._mayShow()
class TimeshiftLive(Screen):
	# Empty helper screen used by InfoBarTimeshiftState as the "timeshift live"
	# indicator (shown while the timeshift buffer records but is not played back).
	def __init__(self, session):
		Screen.__init__(self, session)
class InfoBarTimeshiftState(InfoBarPVRState):
	"""PVR state display specialised for timeshift, plus a "timeshift live"
	hint screen while the buffer records without being played back."""
	def __init__(self):
		InfoBarPVRState.__init__(self, screen=TimeshiftState, force_show = True)
		self.timeshiftLiveScreen = self.session.instantiateDialog(TimeshiftLive)
		self.onHide.append(self.timeshiftLiveScreen.hide)
		if self.secondInfoBarScreen:
			self.secondInfoBarScreen.onShow.append(self.timeshiftLiveScreen.hide)
		self.timeshiftLiveScreen.hide()
		self.__hideTimer = eTimer()
		self.__hideTimer.callback.append(self.__hideTimeshiftState)
		self.onFirstExecBegin.append(self.pvrStateDialog.show)
	def _mayShow(self):
		if not self.timeshiftEnabled():
			self.__hideTimeshiftState()
			return
		if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
			self.secondInfoBarScreen.hide()
		if self.timeshiftActivated():
			# playing from the buffer: show the PVR state, not the live hint
			self.pvrStateDialog.show()
			self.timeshiftLiveScreen.hide()
		elif self.showTimeshiftState:
			# buffer recording while watching live: show the hint once
			self.pvrStateDialog.hide()
			self.timeshiftLiveScreen.show()
			self.showTimeshiftState = False
		timeout = config.usage.infobar_timeout.index
		if timeout and self.seekstate == self.SEEK_STATE_PLAY and (self.pvrStateDialog.shown or self.timeshiftLiveScreen.shown):
			self.__hideTimer.startLongTimer(timeout)
	def __hideTimeshiftState(self):
		self.pvrStateDialog.hide()
		self.timeshiftLiveScreen.hide()
class InfoBarShowMovies:
	"""Binds the movie-list key (and up/down) to self.showMovies/up/down.

	The actual "movie list" behaviour is provided elsewhere; this class is
	little more than an action map.
	"""
	def __init__(self):
		helptext = _("Open the movie list")
		actions = {
			"movieList": (self.showMovies, helptext),
			"up": (self.up, helptext),
			"down": (self.down, helptext),
		}
		self["MovieListActions"] = HelpableActionMap(self, "InfobarMovieListActions", actions)
# InfoBarTimeshift requires InfoBarSeek, instantiated BEFORE!
# Hrmf.
#
# Timeshift works the following way:
# demux0 demux1 "TimeshiftActions" "TimeshiftActivateActions" "SeekActions"
# - normal playback TUNER unused PLAY enable disable disable
# - user presses "yellow" button. FILE record PAUSE enable disable enable
# - user presses pause again      FILE record      PLAY               enable                disable              enable
# - user fast forwards FILE record FF enable disable enable
# - end of timeshift buffer reached TUNER record PLAY enable enable disable
# - user backwards FILE record BACK # !! enable disable enable
#
# in other words:
# - when a service is playing, pressing the "timeshiftStart" button ("yellow") enables recording ("enables timeshift"),
# freezes the picture (to indicate timeshift), sets timeshiftMode ("activates timeshift")
# now, the service becomes seekable, so "SeekActions" are enabled, "TimeshiftEnableActions" are disabled.
# - the user can now PVR around
# - if it hits the end, the service goes into live mode ("deactivates timeshift", it's of course still "enabled")
# the service loses its "seekable" state. It can still be paused, but just to activate timeshift right
# after!
# the seek actions will be disabled, but the timeshiftActivateActions will be enabled
# - if the user rewinds, or press pause, timeshift will be activated again
# note that a timeshift can be enabled ("recording") and
# activated (currently time-shifting).
class InfoBarTimeshift:
	"""Timeshift control: enabling/disabling the timeshift buffer recording,
	activating playback from the buffer, and optionally saving the buffer
	to a movie file (whole buffer or only the current event)."""
	def __init__(self):
		self["TimeshiftActions"] = HelpableActionMap(self, "InfobarTimeshiftActions",
			{
				"timeshiftStart": (self.startTimeshift, _("Start timeshift")), # the "yellow key"
				"timeshiftStop": (self.stopTimeshift, _("Stop timeshift")) # currently undefined :), probably 'TV'
			}, prio=1)
		self["TimeshiftActivateActions"] = ActionMap(["InfobarTimeshiftActivateActions"],
			{
				"timeshiftActivateEnd": self.activateTimeshiftEnd, # something like "rewind key"
				"timeshiftActivateEndAndPause": self.activateTimeshiftEndAndPause # something like "pause key"
			}, prio=-1) # priority over record
		self["TimeshiftActivateActions"].setEnabled(False)
		# timers: delayed rewind after activation, optional auto-start after zap,
		# and end-of-current-event handling for "save only current event"
		self.ts_rewind_timer = eTimer()
		self.ts_rewind_timer.callback.append(self.rewindService)
		self.ts_start_delay_timer = eTimer()
		self.ts_start_delay_timer.callback.append(self.startTimeshiftWithoutPause)
		self.ts_current_event_timer = eTimer()
		self.ts_current_event_timer.callback.append(self.saveTimeshiftFileForEvent)
		self.save_timeshift_file = False
		self.timeshift_was_activated = False
		self.showTimeshiftState = False
		self.save_timeshift_only_current_event = False
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evStart: self.__serviceStarted,
				iPlayableService.evSeekableStatusChanged: self.__seekableStatusChanged,
				iPlayableService.evEnd: self.__serviceEnd
			})
	def getTimeshift(self):
		"""Return the current service's timeshift interface, or None."""
		service = self.session.nav.getCurrentService()
		return service and service.timeshift()
	def timeshiftEnabled(self):
		# "enabled" = the timeshift buffer is being recorded
		ts = self.getTimeshift()
		return ts and ts.isTimeshiftEnabled()
	def timeshiftActivated(self):
		# "activated" = we are currently playing back from the buffer
		ts = self.getTimeshift()
		return ts and ts.isTimeshiftActive()
	def startTimeshift(self, pauseService = True):
		"""Enable the timeshift buffer; when pauseService is set, also freeze
		playback (the classic "yellow button" behaviour)."""
		print "enable timeshift"
		ts = self.getTimeshift()
		if ts is None:
			if not pauseService and not int(config.usage.timeshift_start_delay.value):
				self.session.open(MessageBox, _("Timeshift not possible!"), MessageBox.TYPE_ERROR, simple = True)
			print "no ts interface"
			return 0
		if ts.isTimeshiftEnabled():
			print "hu, timeshift already enabled?"
		else:
			if not ts.startTimeshift():
				# we remove the "relative time" for now.
				#self.pvrStateDialog["timeshift"].setRelative(time.time())
				if pauseService:
					# PAUSE.
					#self.setSeekState(self.SEEK_STATE_PAUSE)
					self.activateTimeshiftEnd(False)
					self.showTimeshiftState = True
				else:
					self.showTimeshiftState = False
				# enable the "TimeshiftEnableActions", which will override
				# the startTimeshift actions
				self.__seekableStatusChanged()
				# get current timeshift filename and calculate new
				self.save_timeshift_file = False
				self.save_timeshift_in_movie_dir = False
				self.setCurrentEventTimer()
				self.current_timeshift_filename = ts.getTimeshiftFilename()
				self.new_timeshift_filename = self.generateNewTimeshiftFileName()
			else:
				print "timeshift failed"
	def startTimeshiftWithoutPause(self):
		# used by the auto-start delay timer
		self.startTimeshift(False)
	def stopTimeshift(self):
		"""Stop timeshift, going through the save/confirm dialog when needed."""
		ts = self.getTimeshift()
		if ts and ts.isTimeshiftEnabled():
			if int(config.usage.timeshift_start_delay.value):
				ts.switchToLive()
			else:
				self.checkTimeshiftRunning(self.stopTimeshiftcheckTimeshiftRunningCallback)
		else:
			return 0
	def stopTimeshiftcheckTimeshiftRunningCallback(self, answer):
		"""Confirmation callback: actually stop the timeshift when answered yes."""
		ts = self.getTimeshift()
		if answer and ts:
			ts.stopTimeshift()
			self.pvrStateDialog.hide()
			self.setCurrentEventTimer()
			# disable actions
			self.__seekableStatusChanged()
	# activates timeshift, and seeks to (almost) the end
	def activateTimeshiftEnd(self, back = True):
		"""Start playing from the timeshift buffer near its end; with back=True
		schedule an immediate rewind shortly afterwards."""
		self.showTimeshiftState = True
		ts = self.getTimeshift()
		print "activateTimeshiftEnd"
		if ts is None:
			return
		if ts.isTimeshiftActive():
			print "!! activate timeshift called - but shouldn't this be a normal pause?"
			self.pauseService()
		else:
			print "play, ..."
			ts.activateTimeshift() # activate timeshift will automatically pause
			self.setSeekState(self.SEEK_STATE_PAUSE)
			seekable = self.getSeek()
			if seekable is not None:
				seekable.seekTo(-90000) # seek approx. 1 sec before end
			self.timeshift_was_activated = True
		if back:
			self.ts_rewind_timer.start(200, 1)
	def rewindService(self):
		# timer callback: begin rewinding at the configured entry speed
		self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
	# generates only filename without path
	def generateNewTimeshiftFileName(self):
		"""Build the target movie filename (without path) used when this
		timeshift buffer gets saved."""
		name = "timeshift record"
		info = { }
		self.getProgramInfoAndEvent(info, name)
		serviceref = info["serviceref"]
		service_name = ""
		if isinstance(serviceref, eServiceReference):
			service_name = ServiceReference(serviceref).getServiceName()
		begin_date = strftime("%Y%m%d %H%M", localtime(time()))
		filename = begin_date + " - " + service_name
		if config.recording.filename_composition.value == "short":
			filename = strftime("%Y%m%d", localtime(time())) + " - " + info["name"]
		elif config.recording.filename_composition.value == "long":
			filename += " - " + info["name"] + " - " + info["description"]
		else:
			filename += " - " + info["name"] # standard
		if config.recording.ascii_filenames.value:
			filename = ASCIItranslit.legacyEncode(filename)
		print "New timeshift filename: ", filename
		return filename
	# same as activateTimeshiftEnd, but pauses afterwards.
	def activateTimeshiftEndAndPause(self):
		print "activateTimeshiftEndAndPause"
		#state = self.seekstate
		self.activateTimeshiftEnd(False)
	def callServiceStarted(self):
		# public wrapper around the (name-mangled) service-start handler
		self.__serviceStarted()
	def __seekableStatusChanged(self):
		# activation keys only make sense while the buffer records but we are not seeking in it
		self["TimeshiftActivateActions"].setEnabled(not self.isSeekable() and self.timeshiftEnabled())
		state = self.getSeek() is not None and self.timeshiftEnabled()
		self["SeekActions"].setEnabled(state)
		if not state:
			self.setSeekState(self.SEEK_STATE_PLAY)
		self.restartSubtitle()
	def __serviceStarted(self):
		self.pvrStateDialog.hide()
		self.__seekableStatusChanged()
		if self.ts_start_delay_timer.isActive():
			self.ts_start_delay_timer.stop()
		if int(config.usage.timeshift_start_delay.value):
			# auto-start timeshift shortly after a zap, when configured
			self.ts_start_delay_timer.start(int(config.usage.timeshift_start_delay.value) * 1000, True)
	def checkTimeshiftRunning(self, returnFunction):
		"""Ask the user (when needed) whether to stop/save a running timeshift,
		then invoke returnFunction(True/False)."""
		if self.timeshiftEnabled() and config.usage.check_timeshift.value and self.timeshift_was_activated:
			message = _("Stop timeshift?")
			if not self.save_timeshift_file:
				choice = [(_("Yes"), "stop"), (_("No"), "continue"), (_("Yes and save"), "save"), (_("Yes and save in movie dir"), "save_movie")]
			else:
				choice = [(_("Yes"), "stop"), (_("No"), "continue")]
				message += "\n" + _("Reminder, you have chosen to save timeshift file.")
			if self.save_timeshift_only_current_event:
				remaining = self.currentEventTime()
				if remaining > 0:
					message += "\n" + _("The %d min remaining before the end of the event.") % abs(remaining / 60)
			self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice)
		else:
			returnFunction(True)
	def checkTimeshiftRunningCallback(self, returnFunction, answer):
		"""MessageBox callback; answer is one of the choice tags or falsy."""
		if answer:
			if "movie" in answer:
				self.save_timeshift_in_movie_dir = True
			if "save" in answer:
				self.save_timeshift_file = True
				ts = self.getTimeshift()
				if ts:
					ts.saveTimeshiftFile()
					del ts
			if "continue" not in answer:
				self.saveTimeshiftFiles()
		returnFunction(answer and answer != "continue")
	# renames/moves timeshift files if requested
	def __serviceEnd(self):
		self.saveTimeshiftFiles()
		self.setCurrentEventTimer()
		self.timeshift_was_activated = False
	def saveTimeshiftFiles(self):
		"""Move the buffer files (.ts plus .sc/.cuts side files) to the movie
		location, when saving was requested."""
		if self.save_timeshift_file and self.current_timeshift_filename and self.new_timeshift_filename:
			if config.usage.timeshift_path.value and not self.save_timeshift_in_movie_dir:
				dirname = config.usage.timeshift_path.value
			else:
				dirname = defaultMoviePath()
			filename = getRecordingFilename(self.new_timeshift_filename, dirname) + ".ts"
			fileList = []
			fileList.append((self.current_timeshift_filename, filename))
			if fileExists(self.current_timeshift_filename + ".sc"):
				fileList.append((self.current_timeshift_filename + ".sc", filename + ".sc"))
			if fileExists(self.current_timeshift_filename + ".cuts"):
				fileList.append((self.current_timeshift_filename + ".cuts", filename + ".cuts"))
			moveFiles(fileList)
			self.save_timeshift_file = False
			self.setCurrentEventTimer()
	def currentEventTime(self):
		"""Return seconds remaining in the current EPG event (0 if unknown)."""
		remaining = 0
		ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
		if ref:
			epg = eEPGCache.getInstance()
			event = epg.lookupEventTime(ref, -1, 0)
			if event:
				now = int(time())
				start = event.getBeginTime()
				duration = event.getDuration()
				end = start + duration
				remaining = end - now
		return remaining
	def saveTimeshiftFileForEvent(self):
		# fired by ts_current_event_timer when the event being saved ends
		if self.timeshiftEnabled() and self.save_timeshift_only_current_event and self.timeshift_was_activated and self.save_timeshift_file:
			message = _("Current event is over.\nSelect an option to save the timeshift file.")
			choice = [(_("Save and stop timeshift"), "save"), (_("Save and restart timeshift"), "restart"), (_("Don't save and stop timeshift"), "stop"), (_("Do nothing"), "continue")]
			self.session.openWithCallback(self.saveTimeshiftFileForEventCallback, MessageBox, message, simple = True, list = choice, timeout=15)
	def saveTimeshiftFileForEventCallback(self, answer):
		"""MessageBox callback for the end-of-event dialog."""
		self.save_timeshift_only_current_event = False
		if answer:
			ts = self.getTimeshift()
			if ts and answer in ("save", "restart", "stop"):
				self.stopTimeshiftcheckTimeshiftRunningCallback(True)
				if answer in ("save", "restart"):
					ts.saveTimeshiftFile()
					del ts
					self.saveTimeshiftFiles()
				if answer == "restart":
					self.ts_start_delay_timer.start(1000, True)
				self.save_timeshift_file = False
				self.save_timeshift_in_movie_dir = False
	def setCurrentEventTimer(self, duration=0):
		"""(Re)arm the end-of-event timer; duration 0 just cancels it."""
		self.ts_current_event_timer.stop()
		self.save_timeshift_only_current_event = False
		if duration > 0:
			self.save_timeshift_only_current_event = True
			self.ts_current_event_timer.startLongTimer(duration)
from Screens.PiPSetup import PiPSetup
class InfoBarExtensions:
	"""Collects "extension" entries (single entries or list providers) and
	offers them in a ChoiceBox, assigning number/color keys on demand."""
	EXTENSION_SINGLE = 0
	EXTENSION_LIST = 1
	def __init__(self):
		self.list = []
		self["InstantExtensionsActions"] = HelpableActionMap(self, "InfobarExtensions",
			{
				"extensions": (self.showExtensionSelection, _("Show extensions...")),
			}, 1) # lower priority
	def addExtension(self, extension, key = None, type = EXTENSION_SINGLE):
		"""Register an extension entry; type is EXTENSION_SINGLE or EXTENSION_LIST."""
		self.list.append((type, extension, key))
	def updateExtension(self, extension, key = None):
		"""Append one extension, binding it to the requested key or the next free one."""
		self.extensionsList.append(extension)
		if key is not None:
			# "in" replaces the deprecated dict.has_key (removed in Python 3)
			if key in self.extensionKeys:
				# requested key already taken - fall back to auto assignment
				key = None
		if key is None:
			for x in self.availableKeys:
				if x not in self.extensionKeys:
					key = x
					break
		if key is not None:
			self.extensionKeys[key] = len(self.extensionsList) - 1
	def updateExtensions(self):
		"""Rebuild extensionsList/extensionKeys from the registered entries."""
		self.extensionsList = []
		self.availableKeys = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "red", "green", "yellow", "blue" ]
		self.extensionKeys = {}
		for x in self.list:
			if x[0] == self.EXTENSION_SINGLE:
				self.updateExtension(x[1], x[2])
			else:
				# EXTENSION_LIST: x[1] is a callable returning (extension, key) pairs
				for y in x[1]():
					self.updateExtension(y[0], y[1])
	def showExtensionSelection(self):
		"""Open a ChoiceBox listing all currently visible extensions."""
		self.updateExtensions()
		extensionsList = self.extensionsList[:]
		keys = []
		choices = []	# renamed from "list" to avoid shadowing the builtin
		for x in self.availableKeys:
			if x in self.extensionKeys:
				extension = self.extensionsList[self.extensionKeys[x]]
				# extension = (name func, action func, visibility func)
				if extension[2]():
					choices.append((extension[0](), extension))
					keys.append(x)
				# entries with an assigned key are consumed either way
				extensionsList.remove(extension)
		# remaining (key-less) entries go to the end without a key
		choices.extend([(x[0](), x) for x in extensionsList])
		keys += [""] * len(extensionsList)
		self.session.openWithCallback(self.extensionCallback, ChoiceBox, title=_("Please choose an extension..."), list=choices, keys=keys, skin_name="ExtensionsList", reorderConfig="extension_order")
	def extensionCallback(self, answer):
		"""ChoiceBox callback: run the selected extension's action function."""
		if answer is not None:
			answer[1][1]()
from Tools.BoundFunction import boundFunction
import inspect
# depends on InfoBarExtensions
class InfoBarPlugins:
	"""Publishes EXTENSIONSMENU plugins as entries of the extensions list."""
	def __init__(self):
		self.addExtension(extension = self.getPluginList, type = InfoBarExtensions.EXTENSION_LIST)
	def getPluginName(self, name):
		# identity hook, kept so entries can carry a callable name provider
		return name
	def getPluginList(self):
		entries = []
		for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EXTENSIONSMENU):
			argcount = len(inspect.getargspec(p.__call__)[0])
			# plugins taking (session) always fit; (session, servicelist) only
			# when a channel selection is available
			if argcount == 1 or (argcount == 2 and isinstance(self, InfoBarChannelSelection)):
				entries.append(((boundFunction(self.getPluginName, p.name), boundFunction(self.runPlugin, p), lambda: True), None, p.name))
		entries.sort(key = lambda item: item[2]) # sort by name
		return entries
	def runPlugin(self, plugin):
		if isinstance(self, InfoBarChannelSelection):
			plugin(session = self.session, servicelist = self.servicelist)
		else:
			plugin(session = self.session)
from Components.Task import job_manager
class InfoBarJobman:
	"""Lists pending background jobs in the extensions menu and opens a JobView for them."""
	def __init__(self):
		self.addExtension(extension = self.getJobList, type = InfoBarExtensions.EXTENSION_LIST)
	def getJobList(self):
		entries = []
		for job in job_manager.getPendingJobs():
			entries.append(((boundFunction(self.getJobName, job), boundFunction(self.showJobView, job), lambda: True), None))
		return entries
	def getJobName(self, job):
		# "<status>: <name> (<percent>%)"
		percentage = int(100*job.progress/float(job.end))
		return "%s: %s (%d%%)" % (job.getStatustext(), job.name, percentage)
	def showJobView(self, job):
		from Screens.TaskView import JobView
		job_manager.in_background = False
		self.session.openWithCallback(self.JobViewCB, JobView, job)
	def JobViewCB(self, in_background):
		job_manager.in_background = in_background
# depends on InfoBarExtensions
class InfoBarPiP:
def __init__(self):
try:
self.session.pipshown
except:
self.session.pipshown = False
self.lastPiPService = None
if SystemInfo["PIPAvailable"]:
self["PiPActions"] = HelpableActionMap(self, "InfobarPiPActions",
{
"activatePiP": (self.activePiP, self.activePiPName),
})
if (self.allowPiP):
self.addExtension((self.getShowHideName, self.showPiP, lambda: True), "blue")
self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
self.addExtension((self.getSwapName, self.swapPiP, self.pipShown), "yellow")
self.addExtension((self.getTogglePipzapName, self.togglePipzap, lambda: True), "red")
else:
self.addExtension((self.getShowHideName, self.showPiP, self.pipShown), "blue")
self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
self.lastPiPServiceTimeoutTimer = eTimer()
self.lastPiPServiceTimeoutTimer.callback.append(self.clearLastPiPService)
def pipShown(self):
return self.session.pipshown
def pipHandles0Action(self):
return self.pipShown() and config.usage.pip_zero_button.value != "standard"
def getShowHideName(self):
if self.session.pipshown:
return _("Disable Picture in Picture")
else:
return _("Activate Picture in Picture")
def getSwapName(self):
return _("Swap services")
def getMoveName(self):
return _("Move Picture in Picture")
def getTogglePipzapName(self):
slist = self.servicelist
if slist and slist.dopipzap:
return _("Zap focus to main screen")
return _("Zap focus to Picture in Picture")
def togglePipzap(self):
if not self.session.pipshown:
self.showPiP()
slist = self.servicelist
if slist and self.session.pipshown:
slist.togglePipzap()
if slist.dopipzap:
currentServicePath = slist.getCurrentServicePath()
self.servicelist.setCurrentServicePath(self.session.pip.servicePath, doZap=False)
self.session.pip.servicePath = currentServicePath
def showPiP(self):
self.lastPiPServiceTimeoutTimer.stop()
if self.session.pipshown:
slist = self.servicelist
if slist and slist.dopipzap:
self.togglePipzap()
if self.session.pipshown:
lastPiPServiceTimeout = int(config.usage.pip_last_service_timeout.value)
if lastPiPServiceTimeout >= 0:
self.lastPiPService = self.session.pip.getCurrentServiceReference()
if lastPiPServiceTimeout:
self.lastPiPServiceTimeoutTimer.startLongTimer(lastPiPServiceTimeout)
del self.session.pip
self.session.pipshown = False
if hasattr(self, "ScreenSaverTimerStart"):
self.ScreenSaverTimerStart()
else:
self.session.pip = self.session.instantiateDialog(PictureInPicture)
self.session.pip.show()
newservice = self.lastPiPService or self.session.nav.getCurrentlyPlayingServiceReference() or self.servicelist.servicelist.getCurrent()
if self.session.pip.playService(newservice):
self.session.pipshown = True
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
else:
newservice = self.session.nav.getCurrentlyPlayingServiceReference() or self.servicelist.servicelist.getCurrent()
if self.session.pip.playService(newservice):
self.session.pipshown = True
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
else:
self.session.pipshown = False
del self.session.pip
if self.session.pipshown and hasattr(self, "screenSaverTimer"):
self.screenSaverTimer.stop()
self.lastPiPService = None
def clearLastPiPService(self):
self.lastPiPService = None
def activePiP(self):
if self.servicelist and self.servicelist.dopipzap or not self.session.pipshown:
self.showPiP()
else:
self.togglePipzap()
def activePiPName(self):
if self.servicelist and self.servicelist.dopipzap:
return _("Disable Picture in Picture")
if self.session.pipshown:
return _("Zap focus to Picture in Picture")
else:
return _("Activate Picture in Picture")
	def swapPiP(self):
		"""Exchange the services shown on the main screen and in the PiP.

		No-op when no PiP is shown or both show the same service.  The PiP's
		stored service path and bouquet are updated so pipzap keeps working
		after the swap.
		"""
		if self.pipShown():
			swapservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			pipref = self.session.pip.getCurrentService()
			if swapservice and pipref and pipref.toString() != swapservice.toString():
				currentServicePath = self.servicelist.getCurrentServicePath()
				currentBouquet = self.servicelist and self.servicelist.getRoot()
				# Re-point the channel list at the old PiP service without zapping.
				self.servicelist.setCurrentServicePath(self.session.pip.servicePath, doZap=False)
				self.session.pip.playService(swapservice)
				self.session.nav.playService(pipref, checkParentalControl=False, adjust=False)
				self.session.pip.servicePath = currentServicePath
				self.session.pip.servicePath[1] = currentBouquet
				if self.servicelist.dopipzap:
					# This unfortunately won't work with subservices
					self.servicelist.setCurrentSelection(self.session.pip.getCurrentService())
def movePiP(self):
if self.pipShown():
self.session.open(PiPSetup, pip = self.session.pip)
def pipDoHandle0Action(self):
use = config.usage.pip_zero_button.value
if "swap" == use:
self.swapPiP()
elif "swapstop" == use:
self.swapPiP()
self.showPiP()
elif "stop" == use:
self.showPiP()
from RecordTimer import parseEvent, RecordTimerEntry
class InfoBarInstantRecord:
	"""Instant Record - handles the instantRecord action in order to
	start/stop instant records"""
	def __init__(self):
		self["InstantRecordActions"] = HelpableActionMap(self, "InfobarInstantRecord",
			{
				"instantRecord": (self.instantRecord, _("Instant recording...")),
			})
		self.SelectedInstantServiceRef = None
		if isStandardInfoBar(self):
			self.recording = []
		else:
			# Non-standard infobars (e.g. the movie player) share the main
			# InfoBar's recording list so every screen sees the same state.
			from Screens.InfoBar import InfoBar
			InfoBarInstance = InfoBar.instance
			if InfoBarInstance:
				self.recording = InfoBarInstance.recording
	def moveToTrash(self, entry):
		"""Move a stopped recording's files into the trash folder."""
		print "instantRecord stop and delete recording: ", entry.name
		import Tools.Trashcan
		trash = Tools.Trashcan.createTrashFolder(entry.Filename)
		from MovieSelection import moveServiceFiles
		moveServiceFiles(entry.Filename, trash, entry.name, allowCopy=False)
	def stopCurrentRecording(self, entry = -1):
		"""Confirm, then stop (and optionally trash) one instant recording.

		entry: index into self.recording; None or -1 means do nothing.
		self.deleteRecording decides whether the files are trashed too.
		"""
		def confirm(answer=False):
			if answer:
				self.session.nav.RecordTimer.removeEntry(self.recording[entry])
				if self.deleteRecording:
					self.moveToTrash(self.recording[entry])
				self.recording.remove(self.recording[entry])
		if entry is not None and entry != -1:
			msg = _("Stop recording:")
			if self.deleteRecording:
				msg = _("Stop and delete recording:")
			msg += "\n"
			msg += " - " + self.recording[entry].name + "\n"
			self.session.openWithCallback(confirm, MessageBox, msg, MessageBox.TYPE_YESNO)
	def stopAllCurrentRecordings(self, list):
		"""Confirm, then stop (and optionally trash) all listed recordings.

		list: sequence of (RecordTimerEntry, selected) tuples as built in
		recordQuestionCallback.
		"""
		def confirm(answer=False):
			if answer:
				for entry in list:
					self.session.nav.RecordTimer.removeEntry(entry[0])
					self.recording.remove(entry[0])
					if self.deleteRecording:
						self.moveToTrash(entry[0])
		msg = _("Stop recordings:")
		if self.deleteRecording:
			msg = _("Stop and delete recordings:")
		msg += "\n"
		for entry in list:
			msg += " - " + entry[0].name + "\n"
		self.session.openWithCallback(confirm, MessageBox, msg, MessageBox.TYPE_YESNO)
	def getProgramInfoAndEvent(self, info, name):
		"""Fill dict *info* with serviceref/event/name/description/eventid
		(plus "end" when EPG data is found) for the service to record."""
		info["serviceref"] = hasattr(self, "SelectedInstantServiceRef") and self.SelectedInstantServiceRef or self.session.nav.getCurrentlyPlayingServiceOrGroup()
		# try to get event info
		event = None
		try:
			epg = eEPGCache.getInstance()
			event = epg.lookupEventTime(info["serviceref"], -1, 0)
			if event is None:
				if hasattr(self, "SelectedInstantServiceRef") and self.SelectedInstantServiceRef:
					service_info = eServiceCenter.getInstance().info(self.SelectedInstantServiceRef)
					event = service_info and service_info.getEvent(self.SelectedInstantServiceRef)
				else:
					service = self.session.nav.getCurrentService()
					event = service and service.info().getEvent(0)
		except:
			# best effort: missing/broken EPG data simply leaves event as None
			pass
		info["event"] = event
		info["name"] = name
		info["description"] = ""
		info["eventid"] = None
		if event is not None:
			curEvent = parseEvent(event)
			info["name"] = curEvent[2]
			info["description"] = curEvent[3]
			info["eventid"] = curEvent[4]
			info["end"] = curEvent[1]
	def startInstantRecording(self, limitEvent = False):
		"""Create and register a RecordTimerEntry starting now.

		limitEvent: stop at the end of the current EPG event; otherwise the
		recording auto-increases its end time.  Timer conflicts are handled
		by shortening the recording or giving up with a message box.
		"""
		begin = int(time())
		end = begin + 3600 # dummy
		name = "instant record"
		info = { }
		self.getProgramInfoAndEvent(info, name)
		serviceref = info["serviceref"]
		event = info["event"]
		if event is not None:
			if limitEvent:
				end = info["end"]
		else:
			if limitEvent:
				self.session.open(MessageBox, _("No event info found, recording indefinitely."), MessageBox.TYPE_INFO)
		if isinstance(serviceref, eServiceReference):
			serviceref = ServiceReference(serviceref)
		recording = RecordTimerEntry(serviceref, begin, end, info["name"], info["description"], info["eventid"], dirname = preferredInstantRecordPath())
		recording.dontSave = True
		if event is None or limitEvent == False:
			recording.autoincrease = True
			recording.setAutoincreaseEnd()
		simulTimerList = self.session.nav.RecordTimer.record(recording)
		if simulTimerList is None: # no conflict
			recording.autoincrease = False
			self.recording.append(recording)
		else:
			if len(simulTimerList) > 1: # with other recording
				name = simulTimerList[1].name
				name_date = ' '.join((name, strftime('%F %T', localtime(simulTimerList[1].begin))))
				print "[TIMER] conflicts with", name_date
				recording.autoincrease = True # start with max available length, then increment
				if recording.setAutoincreaseEnd():
					self.session.nav.RecordTimer.record(recording)
					self.recording.append(recording)
					self.session.open(MessageBox, _("Record time limited due to conflicting timer %s") % name_date, MessageBox.TYPE_INFO)
				else:
					self.session.open(MessageBox, _("Could not record due to conflicting timer %s") % name, MessageBox.TYPE_INFO)
			else:
				self.session.open(MessageBox, _("Could not record due to invalid service %s") % serviceref, MessageBox.TYPE_INFO)
				recording.autoincrease = False
	def isInstantRecordRunning(self):
		"""Return True when any entry in self.recording is currently running."""
		print "self.recording:", self.recording
		if self.recording:
			for x in self.recording:
				if x.isRunning():
					return True
		return False
	def recordQuestionCallback(self, answer):
		"""Dispatch the user's choice from the instantRecord ChoiceBox.

		answer: (text, key) tuple from the ChoiceBox, or None on cancel.
		"""
		print "pre:\n", self.recording
		if answer is None or answer[1] == "no":
			return
		list = []
		recording = self.recording[:]
		for x in recording:
			if not x in self.session.nav.RecordTimer.timer_list:
				# prune entries the record timer no longer knows about
				self.recording.remove(x)
			elif x.dontSave and x.isRunning():
				list.append((x, False))
		self.deleteRecording = False
		if answer[1] == "changeduration":
			if len(self.recording) == 1:
				self.changeDuration(0)
			else:
				self.session.openWithCallback(self.changeDuration, TimerSelection, list)
		elif answer[1] == "addrecordingtime":
			if len(self.recording) == 1:
				self.addRecordingTime(0)
			else:
				self.session.openWithCallback(self.addRecordingTime, TimerSelection, list)
		elif answer[1] == "changeendtime":
			if len(self.recording) == 1:
				self.setEndtime(0)
			else:
				self.session.openWithCallback(self.setEndtime, TimerSelection, list)
		elif answer[1] == "timer":
			import TimerEdit
			self.session.open(TimerEdit.TimerEditList)
		elif answer[1] == "stop":
			if len(self.recording) == 1:
				self.stopCurrentRecording(0)
			else:
				self.session.openWithCallback(self.stopCurrentRecording, TimerSelection, list)
		elif answer[1] == "stopdelete":
			self.deleteRecording = True
			if len(self.recording) == 1:
				self.stopCurrentRecording(0)
			else:
				self.session.openWithCallback(self.stopCurrentRecording, TimerSelection, list)
		elif answer[1] == "stopall":
			self.stopAllCurrentRecordings(list)
		elif answer[1] == "stopdeleteall":
			self.deleteRecording = True
			self.stopAllCurrentRecordings(list)
		elif answer[1] in ( "indefinitely" , "manualduration", "manualendtime", "event"):
			self.startInstantRecording(limitEvent = answer[1] in ("event", "manualendtime") or False)
			if answer[1] == "manualduration":
				self.changeDuration(len(self.recording)-1)
			elif answer[1] == "manualendtime":
				self.setEndtime(len(self.recording)-1)
		elif "timeshift" in answer[1]:
			ts = self.getTimeshift()
			if ts:
				ts.saveTimeshiftFile()
				self.save_timeshift_file = True
				if "movie" in answer[1]:
					self.save_timeshift_in_movie_dir = True
				if "event" in answer[1]:
					remaining = self.currentEventTime()
					if remaining > 0:
						self.setCurrentEventTimer(remaining-15)
		print "after:\n", self.recording
	def setEndtime(self, entry):
		"""Open a time-input dialog to change the end time of recording *entry*."""
		if entry is not None and entry >= 0:
			self.selectedEntry = entry
			self.endtime=ConfigClock(default = self.recording[self.selectedEntry].end)
			dlg = self.session.openWithCallback(self.TimeDateInputClosed, TimeDateInput, self.endtime)
			dlg.setTitle(_("Please change recording endtime"))
	def TimeDateInputClosed(self, ret):
		"""Apply the end time chosen in the TimeDateInput dialog."""
		if len(ret) > 1:
			if ret[0]:
				print "stopping recording at", strftime("%F %T", localtime(ret[1]))
				if self.recording[self.selectedEntry].end != ret[1]:
					# a manual end time overrides auto-increase
					self.recording[self.selectedEntry].autoincrease = False
				self.recording[self.selectedEntry].end = ret[1]
				self.session.nav.RecordTimer.timeChanged(self.recording[self.selectedEntry])
	def changeDuration(self, entry):
		"""Ask for a recording duration (minutes) for recording *entry*."""
		if entry is not None and entry >= 0:
			self.selectedEntry = entry
			self.session.openWithCallback(self.inputCallback, InputBox, title=_("How many minutes do you want to record?"), text="5", maxSize=False, type=Input.NUMBER)
	def addRecordingTime(self, entry):
		"""Ask how many minutes to add to recording *entry*."""
		if entry is not None and entry >= 0:
			self.selectedEntry = entry
			self.session.openWithCallback(self.inputAddRecordingTime, InputBox, title=_("How many minutes do you want add to record?"), text="5", maxSize=False, type=Input.NUMBER)
	def inputAddRecordingTime(self, value):
		"""InputBox callback: extend the selected recording by *value* minutes."""
		if value:
			print "added", int(value), "minutes for recording."
			entry = self.recording[self.selectedEntry]
			if int(value) != 0:
				entry.autoincrease = False
			entry.end += 60 * int(value)
			self.session.nav.RecordTimer.timeChanged(entry)
	def inputCallback(self, value):
		"""InputBox callback: stop the selected recording *value* minutes from now."""
		if value:
			print "stopping recording after", int(value), "minutes."
			entry = self.recording[self.selectedEntry]
			if int(value) != 0:
				entry.autoincrease = False
			entry.end = int(time()) + 60 * int(value)
			self.session.nav.RecordTimer.timeChanged(entry)
	def isTimerRecordRunning(self):
		"""Return True when a scheduled (non-instant) timer recording runs.

		Counts running record timers and subtracts those that are also in
		self.recording (i.e. instant recordings).
		"""
		identical = timers = 0
		for timer in self.session.nav.RecordTimer.timer_list:
			if timer.isRunning() and not timer.justplay:
				timers += 1
				if self.recording:
					for x in self.recording:
						if x.isRunning() and x == timer:
							identical += 1
		return timers > identical
	def instantRecord(self, serviceRef=None):
		"""Entry point for the instantRecord action.

		Builds and opens a context-dependent ChoiceBox: start options when
		nothing records, stop/change options while recordings run, plus
		timeshift-save options on the standard infobar.
		"""
		self.SelectedInstantServiceRef = serviceRef
		pirr = preferredInstantRecordPath()
		if not findSafeRecordPath(pirr) and not findSafeRecordPath(defaultMoviePath()):
			if not pirr:
				pirr = ""
			self.session.open(MessageBox, _("Missing ") + "\n" + pirr +
				"\n" + _("No HDD found or HDD not initialized!"), MessageBox.TYPE_ERROR)
			return
		if isStandardInfoBar(self):
			common = ((_("Add recording (stop after current event)"), "event"),
				(_("Add recording (indefinitely)"), "indefinitely"),
				(_("Add recording (enter recording duration)"), "manualduration"),
				(_("Add recording (enter recording endtime)"), "manualendtime"),)
		else:
			common = ()
		if self.isInstantRecordRunning():
			title =_("A recording is currently running.\nWhat do you want to do?")
			list = common + \
				((_("Change recording (duration)"), "changeduration"),
				(_("Change recording (add time)"), "addrecordingtime"),
				(_("Change recording (endtime)"), "changeendtime"),)
			list += ((_("Stop recording"), "stop"),)
			if config.usage.movielist_trashcan.value:
				list += ((_("Stop and delete recording"), "stopdelete"),)
			if len(self.recording) > 1:
				list += ((_("Stop all current recordings"), "stopall"),)
				if config.usage.movielist_trashcan.value:
					list += ((_("Stop and delete all current recordings"), "stopdeleteall"),)
			if self.isTimerRecordRunning():
				list += ((_("Stop timer recording"), "timer"),)
			list += ((_("Do nothing"), "no"),)
		else:
			title=_("Start recording?")
			list = common
			if self.isTimerRecordRunning():
				list += ((_("Stop timer recording"), "timer"),)
			if isStandardInfoBar(self):
				list += ((_("Do not record"), "no"),)
		if isStandardInfoBar(self) and self.timeshiftEnabled():
			list = list + ((_("Save timeshift file"), "timeshift"),
				(_("Save timeshift file in movie directory"), "timeshift_movie"))
			if self.currentEventTime() > 0:
				list += ((_("Save timeshift only for current event"), "timeshift_event"),)
		if list:
			self.session.openWithCallback(self.recordQuestionCallback, ChoiceBox, title=title, list=list)
		else:
			return 0
from Tools.ISO639 import LanguageCodes
class InfoBarAudioSelection:
def __init__(self):
self["AudioSelectionAction"] = HelpableActionMap(self, "InfobarAudioSelectionActions",
{
"audioSelection": (self.audioSelection, _("Audio options...")),
})
def audioSelection(self):
from Screens.AudioSelection import AudioSelection
self.session.openWithCallback(self.audioSelected, AudioSelection, infobar=self)
def audioSelected(self, ret=None):
print "[infobar::audioSelected]", ret
class InfoBarSubserviceSelection:
	"""Lets the user list, quick-zap and bookmark a service's subservices."""
	def __init__(self):
		self["SubserviceSelectionAction"] = HelpableActionMap(self, "InfobarSubserviceSelectionActions",
			{
				"subserviceSelection": (self.subserviceSelection, _("Subservice list...")),
			})
		self["SubserviceQuickzapAction"] = HelpableActionMap(self, "InfobarSubserviceQuickzapActions",
			{
				"nextSubservice": (self.nextSubservice, _("Switch to next sub service")),
				"prevSubservice": (self.prevSubservice, _("Switch to previous sub service"))
			}, -1)
		# Quickzap stays disabled until a subservice is actually selected.
		self["SubserviceQuickzapAction"].setEnabled(False)
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evUpdatedEventInfo: self.checkSubservicesAvail
			})
		self.onClose.append(self.__removeNotifications)
		self.bsel = None
	def __removeNotifications(self):
		self.session.nav.event.remove(self.checkSubservicesAvail)
	def checkSubservicesAvail(self):
		"""Disable quickzap when the current service has no subservices."""
		service = self.session.nav.getCurrentService()
		subservices = service and service.subServices()
		if not subservices or subservices.getNumberOfSubservices() == 0:
			self["SubserviceQuickzapAction"].setEnabled(False)
	def nextSubservice(self):
		self.changeSubservice(+1)
	def prevSubservice(self):
		self.changeSubservice(-1)
	def changeSubservice(self, direction):
		"""Zap to the next/previous subservice relative to the playing one.

		direction: +1 or -1; the index wraps around at both ends.
		"""
		service = self.session.nav.getCurrentService()
		subservices = service and service.subServices()
		n = subservices and subservices.getNumberOfSubservices()
		if n and n > 0:
			selection = -1
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			idx = 0
			while idx < n:
				if subservices.getSubservice(idx).toString() == ref.toString():
					selection = idx
					break
				idx += 1
			if selection != -1:
				selection += direction
				if selection >= n:
					selection=0
				elif selection < 0:
					selection=n-1
				newservice = subservices.getSubservice(selection)
				if newservice.valid():
					del subservices
					del service
					self.session.nav.playService(newservice, False)
	def subserviceSelection(self):
		"""Open a ChoiceBox listing the current subservices plus quick-zap
		and add-to-bouquet helper entries."""
		service = self.session.nav.getCurrentService()
		subservices = service and service.subServices()
		self.bouquets = self.servicelist.getBouquetList()
		n = subservices and subservices.getNumberOfSubservices()
		selection = 0
		if n and n > 0:
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			tlist = []
			idx = 0
			while idx < n:
				i = subservices.getSubservice(idx)
				if i.toString() == ref.toString():
					selection = idx
				tlist.append((i.getName(), i))
				idx += 1
			if self.bouquets and len(self.bouquets):
				keys = ["red", "blue", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ] + [""] * n
				if config.usage.multibouquet.value:
					tlist = [(_("Quick zap"), "quickzap", service.subServices()), (_("Add to bouquet"), "CALLFUNC", self.addSubserviceToBouquetCallback), ("--", "")] + tlist
				else:
					tlist = [(_("Quick zap"), "quickzap", service.subServices()), (_("Add to favourites"), "CALLFUNC", self.addSubserviceToBouquetCallback), ("--", "")] + tlist
				# three helper rows were prepended, so shift the preselection
				selection += 3
			else:
				tlist = [(_("Quick zap"), "quickzap", service.subServices()), ("--", "")] + tlist
				keys = ["red", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ] + [""] * n
				selection += 2
			self.session.openWithCallback(self.subserviceSelected, ChoiceBox, title=_("Please select a sub service..."), list = tlist, selection = selection, keys = keys, skin_name = "SubserviceSelection")
	def subserviceSelected(self, service):
		"""ChoiceBox callback: start quickzap or play the chosen subservice."""
		del self.bouquets
		if not service is None:
			if isinstance(service[1], str):
				if service[1] == "quickzap":
					from Screens.SubservicesQuickzap import SubservicesQuickzap
					self.session.open(SubservicesQuickzap, service[2])
			else:
				self["SubserviceQuickzapAction"].setEnabled(True)
				self.session.nav.playService(service[1], False)
	def addSubserviceToBouquetCallback(self, service):
		"""CALLFUNC handler: add the highlighted subservice to a bouquet.

		With several bouquets a selector is shown first; with exactly one
		the subservice is added directly.
		"""
		if len(service) > 1 and isinstance(service[1], eServiceReference):
			self.selectedSubservice = service
			if self.bouquets is None:
				cnt = 0
			else:
				cnt = len(self.bouquets)
			if cnt > 1: # show bouquet list
				self.bsel = self.session.openWithCallback(self.bouquetSelClosed, BouquetSelector, self.bouquets, self.addSubserviceToBouquet)
			elif cnt == 1: # add to only one existing bouquet
				self.addSubserviceToBouquet(self.bouquets[0][1])
				self.session.open(MessageBox, _("Service has been added to the favourites."), MessageBox.TYPE_INFO)
	def bouquetSelClosed(self, confirmed):
		"""BouquetSelector callback: clean up and confirm the addition."""
		self.bsel = None
		del self.selectedSubservice
		if confirmed:
			self.session.open(MessageBox, _("Service has been added to the selected bouquet."), MessageBox.TYPE_INFO)
	def addSubserviceToBouquet(self, dest):
		"""Add the remembered subservice to bouquet *dest*."""
		self.servicelist.addServiceToBouquet(dest, self.selectedSubservice[1])
		if self.bsel:
			self.bsel.close(True)
		else:
			del self.selectedSubservice
class InfoBarRedButton:
	"""Wires the red button to HbbTV activation callbacks."""
	def __init__(self):
		self["RedButtonActions"] = HelpableActionMap(self, "InfobarRedButtonActions",
			{
				"activateRedButton": (self.activateRedButton, _("Red button...")),
			})
		# Callbacks fired when the red button starts an HbbTV application.
		self.onHBBTVActivation = [ ]
		# Callbacks for other red-button services; currently never fired
		# (see the dead "elif False" branch below).
		self.onRedButtonActivation = [ ]
	def activateRedButton(self):
		"""Fire the HbbTV callbacks if the service advertises an HbbTV URL."""
		service = self.session.nav.getCurrentService()
		info = service and service.info()
		if info and info.getInfoString(iServiceInformation.sHBBTVUrl) != "":
			for x in self.onHBBTVActivation:
				x()
		elif False: # TODO: other red button services
			for x in self.onRedButtonActivation:
				x()
class InfoBarTimerButton:
	"""Adds the timer-list shortcut action to the infobar."""
	def __init__(self):
		actions = {
			"timerSelection": (self.timerSelection, _("Timer selection...")),
		}
		self["TimerButtonActions"] = HelpableActionMap(self, "InfobarTimerButtonActions", actions)
	def timerSelection(self):
		"""Open the timer overview screen."""
		from Screens.TimerEdit import TimerEditList
		self.session.open(TimerEditList)
class InfoBarVmodeButton:
	"""Binds the video-mode button to the letterbox-zoom selector."""
	def __init__(self):
		actions = {
			"vmodeSelection": (self.vmodeSelection, _("Letterbox zoom")),
		}
		self["VmodeButtonActions"] = HelpableActionMap(self, "InfobarVmodeButtonActions", actions)
	def vmodeSelection(self):
		"""Open the cycling video-policy overlay."""
		self.session.open(VideoMode)
class VideoMode(Screen):
	"""Overlay that cycles the aspect policy on each video-mode key press
	and closes itself one second after the last press."""
	def __init__(self, session):
		Screen.__init__(self, session)
		self["videomode"] = Label()
		self["actions"] = NumberActionMap( [ "InfobarVmodeButtonActions" ],
			{
				"vmodeSelection": self.selectVMode
			})
		# Auto-close timer; restarted on every key press in selectVMode().
		self.Timer = eTimer()
		self.Timer.callback.append(self.quit)
		self.selectVMode()
	def selectVMode(self):
		"""Advance to the next aspect policy and display its name."""
		policy = config.av.policy_43
		if self.isWideScreen():
			policy = config.av.policy_169
		idx = policy.choices.index(policy.value)
		idx = (idx + 1) % len(policy.choices)
		policy.value = policy.choices[idx]
		self["videomode"].setText(policy.value)
		self.Timer.start(1000, True)
	def isWideScreen(self):
		"""Return True if the current service reports a widescreen aspect.

		Fix: `service and service.info()` can yield None/False (no service
		playing); the old code then raised AttributeError on getInfo().
		Treat that case as not widescreen instead of crashing.
		"""
		from Components.Converter.ServiceInfo import WIDESCREEN
		service = self.session.nav.getCurrentService()
		info = service and service.info()
		return bool(info) and info.getInfo(iServiceInformation.sAspect) in WIDESCREEN
	def quit(self):
		"""Timer callback: stop the auto-close timer and dismiss the overlay."""
		self.Timer.stop()
		self.close()
class InfoBarAdditionalInfo:
	"""Publishes fixed capability flags that skins can render."""
	def __init__(self):
		has_hdd = harddiskmanager.HDDCount() > 0
		self["RecordingPossible"] = Boolean(fixed=has_hdd)
		self["TimeshiftPossible"] = self["RecordingPossible"]
		self["ExtensionsAvailable"] = Boolean(fixed=1)
		# TODO: these properties should be queried from the input device keymap
		self["ShowTimeshiftOnYellow"] = Boolean(fixed=0)
		self["ShowAudioOnYellow"] = Boolean(fixed=0)
		self["ShowRecordOnRed"] = Boolean(fixed=0)
class InfoBarNotifications:
	"""Pops pending Notifications entries as dialogs while the screen executes.

	A pending notification is a tuple of
	(callback, screen class, args, kwargs, id) — grounded in how n[0]..n[4]
	are used below.
	"""
	def __init__(self):
		self.onExecBegin.append(self.checkNotifications)
		Notifications.notificationAdded.append(self.checkNotificationsIfExecing)
		self.onClose.append(self.__removeNotification)
	def __removeNotification(self):
		Notifications.notificationAdded.remove(self.checkNotificationsIfExecing)
	def checkNotificationsIfExecing(self):
		"""Only process notifications while this screen is actually executing."""
		if self.execing:
			self.checkNotifications()
	def checkNotifications(self):
		"""Pop the oldest pending notification and open it as a dialog."""
		notifications = Notifications.notifications
		if notifications:
			n = notifications[0]
			del notifications[0]
			cb = n[0]
			if n[3].has_key("onSessionOpenCallback"):
				n[3]["onSessionOpenCallback"]()
				del n[3]["onSessionOpenCallback"]
			if cb:
				dlg = self.session.openWithCallback(cb, n[1], *n[2], **n[3])
			elif not Notifications.current_notifications and n[4] == "ZapError":
				# ZapError is shown as a passive dialog: no input, dismissed
				# by the next key press (see keypressNotification).
				if n[3].has_key("timeout"):
					del n[3]["timeout"]
				n[3]["enable_input"] = False
				dlg = self.session.instantiateDialog(n[1], *n[2], **n[3])
				self.hide()
				dlg.show()
				self.notificationDialog = dlg
				# Bind at the highest priority so any key closes the dialog.
				eActionMap.getInstance().bindAction('', -maxint - 1, self.keypressNotification)
			else:
				dlg = self.session.open(n[1], *n[2], **n[3])
			# remember that this notification is currently active
			d = (n[4], dlg)
			Notifications.current_notifications.append(d)
			dlg.onClose.append(boundFunction(self.__notificationClosed, d))
	def closeNotificationInstantiateDialog(self):
		"""Tear down a passive (ZapError-style) notification dialog, if any."""
		if hasattr(self, "notificationDialog"):
			self.session.deleteDialog(self.notificationDialog)
			del self.notificationDialog
			eActionMap.getInstance().unbindAction('', self.keypressNotification)
	def keypressNotification(self, key, flag):
		# flag is truthy on key press; any key dismisses the passive dialog.
		if flag:
			self.closeNotificationInstantiateDialog()
	def __notificationClosed(self, d):
		Notifications.current_notifications.remove(d)
class InfoBarServiceNotifications:
def __init__(self):
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEnd: self.serviceHasEnded
})
def serviceHasEnded(self):
print "service end!"
try:
self.setSeekState(self.SEEK_STATE_PLAY)
except:
pass
class InfoBarCueSheetSupport:
	"""Mix-in providing cut-mark handling (jump to / toggle marks) and
	optional resume-from-last-position support via the service cue sheet.

	Positions in cut_list are compared/offset with multiples of 90000,
	i.e. presumably 90 kHz PTS ticks — consistent with the /90000-to-seconds
	conversion in __serviceStarted.
	"""
	# Cut-list entry types as used throughout this class.
	CUT_TYPE_IN = 0
	CUT_TYPE_OUT = 1
	CUT_TYPE_MARK = 2
	CUT_TYPE_LAST = 3
	# Subclasses set this True to offer "resume from last position".
	ENABLE_RESUME_SUPPORT = False
	def __init__(self, actionmap = "InfobarCueSheetActions"):
		self["CueSheetActions"] = HelpableActionMap(self, actionmap,
			{
				"jumpPreviousMark": (self.jumpPreviousMark, _("Jump to previous marked position")),
				"jumpNextMark": (self.jumpNextMark, _("Jump to next marked position")),
				"toggleMark": (self.toggleMark, _("Toggle a cut mark at the current position"))
			}, prio=1)
		self.cut_list = [ ]
		self.is_closing = False
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evStart: self.__serviceStarted,
				iPlayableService.evCuesheetChanged: self.downloadCuesheet,
			})
	def __serviceStarted(self):
		"""On service start: load the cut list and optionally offer resume.

		Resume uses the CUT_TYPE_LAST entry if present, else the stored
		resume point; it is only offered when the position is >10 s in and
		>10 s before the known end.
		"""
		if self.is_closing:
			return
		print "new service started! trying to download cuts!"
		self.downloadCuesheet()
		if self.ENABLE_RESUME_SUPPORT:
			for (pts, what) in self.cut_list:
				if what == self.CUT_TYPE_LAST:
					last = pts
					break
			else:
				# no LAST mark in the cut list -> fall back to stored point
				last = getResumePoint(self.session)
			if last is None:
				return
			# only resume if at least 10 seconds ahead, or <10 seconds before the end.
			seekable = self.__getSeekable()
			if seekable is None:
				return # Should not happen?
			length = seekable.getLength() or (None,0)
			print "seekable.getLength() returns:", length
			# Hmm, this implies we don't resume if the length is unknown...
			if (last > 900000) and (not length[1] or (last < length[1] - 900000)):
				self.resume_point = last
				l = last / 90000
				if "ask" in config.usage.on_movie_start.value or not length[1]:
					Notifications.AddNotificationWithCallback(self.playLastCB, MessageBox, _("Do you want to resume this playback?") + "\n" + (_("Resume position at %s") % ("%d:%02d:%02d" % (l/3600, l%3600/60, l%60))), timeout=10, default="yes" in config.usage.on_movie_start.value)
				elif config.usage.on_movie_start.value == "resume":
					# TRANSLATORS: The string "Resuming playback" flashes for a moment
					# TRANSLATORS: at the start of a movie, when the user has selected
					# TRANSLATORS: "Resume from last position" as start behavior.
					# TRANSLATORS: The purpose is to notify the user that the movie starts
					# TRANSLATORS: in the middle somewhere and not from the beginning.
					# TRANSLATORS: (Some translators seem to have interpreted it as a
					# TRANSLATORS: question or a choice, but it is a statement.)
					Notifications.AddNotificationWithCallback(self.playLastCB, MessageBox, _("Resuming playback"), timeout=2, type=MessageBox.TYPE_INFO)
	def playLastCB(self, answer):
		"""Resume-notification callback: seek to the stored resume point."""
		if answer == True:
			self.doSeek(self.resume_point)
		self.hideAfterResume()
	def hideAfterResume(self):
		if isinstance(self, InfoBarShowHide):
			self.hide()
	def __getSeekable(self):
		"""Return the current service's seek interface, or None."""
		service = self.session.nav.getCurrentService()
		if service is None:
			return None
		return service.seek()
	def cueGetCurrentPosition(self):
		"""Return the current play position, or None when not seekable."""
		seek = self.__getSeekable()
		if seek is None:
			return None
		r = seek.getPlayPosition()
		if r[0]:
			# non-zero first element signals an error from getPlayPosition
			return None
		return long(r[1])
	def cueGetEndCutPosition(self):
		"""Return the position of the last unmatched OUT cut, or False."""
		ret = False
		isin = True
		for cp in self.cut_list:
			if cp[1] == self.CUT_TYPE_OUT:
				if isin:
					isin = False
					ret = cp[0]
			elif cp[1] == self.CUT_TYPE_IN:
				isin = True
		return ret
	def jumpPreviousNextMark(self, cmp, start=False):
		"""Seek to the nearest mark chosen by *cmp*; return True on success."""
		current_pos = self.cueGetCurrentPosition()
		if current_pos is None:
			return False
		mark = self.getNearestCutPoint(current_pos, cmp=cmp, start=start)
		if mark is not None:
			pts = mark[0]
		else:
			return False
		self.doSeek(pts)
		return True
	def jumpPreviousMark(self):
		# we add 5 seconds, so if the play position is <5s after
		# the mark, the mark before will be used
		self.jumpPreviousNextMark(lambda x: -x-5*90000, start=True)
	def jumpNextMark(self):
		# No next mark: jump (almost) to the end instead.
		if not self.jumpPreviousNextMark(lambda x: x-90000):
			self.doSeek(-1)
	def getNearestCutPoint(self, pts, cmp=abs, start=False):
		"""Return the cut-list entry nearest to *pts* according to *cmp*.

		cmp maps a signed distance to a score; negative scores are
		rejected.  start=True additionally considers position 0 and lets
		IN cuts reset the search (marks before the start are disregarded).
		"""
		# can be optimized
		beforecut = True
		nearest = None
		bestdiff = -1
		instate = True
		if start:
			bestdiff = cmp(0 - pts)
			if bestdiff >= 0:
				nearest = [0, False]
		for cp in self.cut_list:
			if beforecut and cp[1] in (self.CUT_TYPE_IN, self.CUT_TYPE_OUT):
				beforecut = False
				if cp[1] == self.CUT_TYPE_IN:  # Start is here, disregard previous marks
					diff = cmp(cp[0] - pts)
					if start and diff >= 0:
						nearest = cp
						bestdiff = diff
					else:
						nearest = None
						bestdiff = -1
			if cp[1] == self.CUT_TYPE_IN:
				instate = True
			elif cp[1] == self.CUT_TYPE_OUT:
				instate = False
			elif cp[1] in (self.CUT_TYPE_MARK, self.CUT_TYPE_LAST):
				diff = cmp(cp[0] - pts)
				if instate and diff >= 0 and (nearest is None or bestdiff > diff):
					nearest = cp
					bestdiff = diff
		return nearest
	def toggleMark(self, onlyremove=False, onlyadd=False, tolerance=5*90000, onlyreturn=False):
		"""Toggle a MARK at the current position.

		A mark within *tolerance* (default 5 s) of the position is removed
		(or returned when onlyreturn); otherwise a new mark is added unless
		onlyremove/onlyreturn forbid it.
		"""
		current_pos = self.cueGetCurrentPosition()
		if current_pos is None:
			print "not seekable"
			return
		nearest_cutpoint = self.getNearestCutPoint(current_pos)
		if nearest_cutpoint is not None and abs(nearest_cutpoint[0] - current_pos) < tolerance:
			if onlyreturn:
				return nearest_cutpoint
			if not onlyadd:
				self.removeMark(nearest_cutpoint)
		elif not onlyremove and not onlyreturn:
			self.addMark((current_pos, self.CUT_TYPE_MARK))
		if onlyreturn:
			return None
	def addMark(self, point):
		"""Insert *point* keeping cut_list sorted, then push it to the service."""
		insort(self.cut_list, point)
		self.uploadCuesheet()
		self.showAfterCuesheetOperation()
	def removeMark(self, point):
		"""Remove *point* from the cut list and push the change to the service."""
		self.cut_list.remove(point)
		self.uploadCuesheet()
		self.showAfterCuesheetOperation()
	def showAfterCuesheetOperation(self):
		if isinstance(self, InfoBarShowHide):
			self.doShow()
	def __getCuesheet(self):
		"""Return the current service's cue sheet interface, or None."""
		service = self.session.nav.getCurrentService()
		if service is None:
			return None
		return service.cueSheet()
	def uploadCuesheet(self):
		"""Write self.cut_list to the service's cue sheet (if available)."""
		cue = self.__getCuesheet()
		if cue is None:
			print "upload failed, no cuesheet interface"
			return
		cue.setCutList(self.cut_list)
	def downloadCuesheet(self):
		"""Replace self.cut_list with the service's cue sheet (or empty)."""
		cue = self.__getCuesheet()
		if cue is None:
			print "download failed, no cuesheet interface"
			self.cut_list = [ ]
		else:
			self.cut_list = cue.getCutList()
class InfoBarSummary(Screen):
	"""LCD summary for the infobar: clock, service name, event progress,
	and a blinking clock overlay while recording."""
	skin = """
	<screen position="0,0" size="132,64">
		<widget source="global.CurrentTime" render="Label" position="62,46" size="82,18" font="Regular;16" >
			<convert type="ClockToText">WithSeconds</convert>
		</widget>
		<widget source="session.RecordState" render="FixedLabel" text=" " position="62,46" size="82,18" zPosition="1" >
			<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
			<convert type="ConditionalShowHide">Blink</convert>
		</widget>
		<widget source="session.CurrentService" render="Label" position="6,4" size="120,42" font="Regular;18" >
			<convert type="ServiceName">Name</convert>
		</widget>
		<widget source="session.Event_Now" render="Progress" position="6,46" size="46,18" borderWidth="1" >
			<convert type="EventTime">Progress</convert>
		</widget>
	</screen>"""
# for picon: (path="piconlcd" will use LCD picons)
#		<widget source="session.CurrentService" render="Picon" position="6,0" size="120,64" path="piconlcd" >
#			<convert type="ServiceName">Reference</convert>
#		</widget>
class InfoBarSummarySupport:
	"""Supplies InfoBarSummary as this screen's LCD summary."""
	def __init__(self):
		pass
	def createSummary(self):
		"""Return the summary screen class used for the LCD display."""
		return InfoBarSummary
class InfoBarMoviePlayerSummary(Screen):
	"""LCD summary for the movie player: clock, service name and playback
	position, plus a blinking clock overlay while recording."""
	skin = """
	<screen position="0,0" size="132,64">
		<widget source="global.CurrentTime" render="Label" position="62,46" size="64,18" font="Regular;16" halign="right" >
			<convert type="ClockToText">WithSeconds</convert>
		</widget>
		<widget source="session.RecordState" render="FixedLabel" text=" " position="62,46" size="64,18" zPosition="1" >
			<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
			<convert type="ConditionalShowHide">Blink</convert>
		</widget>
		<widget source="session.CurrentService" render="Label" position="6,4" size="120,42" font="Regular;18" >
			<convert type="ServiceName">Name</convert>
		</widget>
		<widget source="session.CurrentService" render="Progress" position="6,46" size="56,18" borderWidth="1" >
			<convert type="ServicePosition">Position</convert>
		</widget>
	</screen>"""
class InfoBarMoviePlayerSummarySupport:
	"""Supplies InfoBarMoviePlayerSummary as this screen's LCD summary."""
	def __init__(self):
		pass
	def createSummary(self):
		"""Return the summary screen class used for the LCD display."""
		return InfoBarMoviePlayerSummary
class InfoBarTeletextPlugin:
	"""Hooks an installed teletext plugin into the infobar, if any."""
	def __init__(self):
		self.teletext_plugin = None
		# If several teletext plugins are installed, the last one returned
		# by the plugin component wins.
		for p in plugins.getPlugins(PluginDescriptor.WHERE_TELETEXT):
			self.teletext_plugin = p
		if self.teletext_plugin is not None:
			self["TeletextActions"] = HelpableActionMap(self, "InfobarTeletextActions",
				{
					"startTeletext": (self.startTeletext, _("View teletext..."))
				})
		else:
			print "no teletext plugin found!"
	def startTeletext(self):
		"""Invoke the teletext plugin on the currently playing service."""
		self.teletext_plugin and self.teletext_plugin(session=self.session, service=self.session.nav.getCurrentService())
class InfoBarSubtitleSupport(object):
	"""Mix-in adding subtitle selection and display to an infobar screen.

	The standard infobar owns the single SubtitleDisplay dialog; other
	screens (e.g. the movie player) reuse that instance.
	"""
	def __init__(self):
		object.__init__(self)
		self["SubtitleSelectionAction"] = HelpableActionMap(self, "InfobarSubtitleSelectionActions",
			{
				"subtitleSelection": (self.subtitleSelection, _("Subtitle selection...")),
			})
		self.selected_subtitle = None
		if isStandardInfoBar(self):
			self.subtitle_window = self.session.instantiateDialog(SubtitleDisplay)
		else:
			from Screens.InfoBar import InfoBar
			self.subtitle_window = InfoBar.instance.subtitle_window
		self.subtitle_window.hide()
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evStart: self.__serviceChanged,
				iPlayableService.evEnd: self.__serviceChanged,
				iPlayableService.evUpdatedInfo: self.__updatedInfo
			})
	def getCurrentServiceSubtitle(self):
		"""Return the current service's subtitle interface, or None/False."""
		service = self.session.nav.getCurrentService()
		return service and service.subtitle()
	def subtitleSelection(self):
		"""Open the subtitle selection screen when subtitles are available."""
		subtitle = self.getCurrentServiceSubtitle()
		subtitlelist = subtitle and subtitle.getSubtitleList()
		if self.selected_subtitle or subtitlelist and len(subtitlelist)>0:
			from Screens.AudioSelection import SubtitleSelection
			self.session.open(SubtitleSelection, self)
		else:
			return 0
	def __serviceChanged(self):
		# New/ended service: drop the selection and hide the window.
		if self.selected_subtitle:
			self.selected_subtitle = None
			self.subtitle_window.hide()
	def __updatedInfo(self):
		"""On updated service info, re-enable a cached subtitle selection."""
		if not self.selected_subtitle:
			subtitle = self.getCurrentServiceSubtitle()
			# Fix: getCurrentServiceSubtitle() can return None/False (no
			# service or no subtitle interface); the old code then raised
			# AttributeError on getCachedSubtitle().
			cachedsubtitle = subtitle and subtitle.getCachedSubtitle()
			if cachedsubtitle:
				self.enableSubtitle(cachedsubtitle)
	def enableSubtitle(self, selectedSubtitle):
		"""Enable *selectedSubtitle* on the current service; a falsy value
		disables subtitles and hides the window."""
		subtitle = self.getCurrentServiceSubtitle()
		self.selected_subtitle = selectedSubtitle
		if subtitle and self.selected_subtitle:
			subtitle.enableSubtitles(self.subtitle_window.instance, self.selected_subtitle)
			self.subtitle_window.show()
		else:
			if subtitle:
				subtitle.disableSubtitles(self.subtitle_window.instance)
			self.subtitle_window.hide()
	def restartSubtitle(self):
		"""Re-apply the currently selected subtitle (e.g. after a seek)."""
		if self.selected_subtitle:
			self.enableSubtitle(self.selected_subtitle)
class InfoBarServiceErrorPopupSupport:
	"""Shows (and clears) a popup notification when zapping/tuning fails."""
	def __init__(self):
		# Listen for tune results; a successful tune/start clears the popup.
		self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
			{
				iPlayableService.evTuneFailed: self.__tuneFailed,
				iPlayableService.evTunedIn: self.__serviceStarted,
				iPlayableService.evStart: self.__serviceStarted
			})
		self.__serviceStarted()
	def __serviceStarted(self):
		# Service is (re)starting: reset error state and remove any popup.
		self.closeNotificationInstantiateDialog()
		self.last_error = None
		Notifications.RemovePopup(id = "ZapError")
	def __tuneFailed(self):
		# Map the DVB error state of the current service to a user-visible
		# popup message; repeated occurrences of the same error are shown
		# only once (see last_error below).
		if not config.usage.hide_zap_errors.value or not config.usage.remote_fallback_enabled.value:
			service = self.session.nav.getCurrentService()
			info = service and service.info()
			error = info and info.getInfo(iServiceInformation.sDVBState)
			if not config.usage.remote_fallback_enabled.value and (error == eDVBServicePMTHandler.eventMisconfiguration or error == eDVBServicePMTHandler.eventNoResources):
				# Unrecoverable local error: forget the playing service.
				self.session.nav.currentlyPlayingServiceReference = None
				self.session.nav.currentlyPlayingServiceOrGroup = None
			if error == self.last_error:
				# Same error as last time: suppress a duplicate popup.
				error = None
			else:
				self.last_error = error
			error = {
				eDVBServicePMTHandler.eventNoResources: _("No free tuner!"),
				eDVBServicePMTHandler.eventTuneFailed: _("Tune failed!"),
				eDVBServicePMTHandler.eventNoPAT: _("No data on transponder!\n(Timeout reading PAT)"),
				eDVBServicePMTHandler.eventNoPATEntry: _("Service not found!\n(SID not found in PAT)"),
				eDVBServicePMTHandler.eventNoPMT: _("Service invalid!\n(Timeout reading PMT)"),
				eDVBServicePMTHandler.eventNewProgramInfo: None,
				eDVBServicePMTHandler.eventTuned: None,
				eDVBServicePMTHandler.eventSOF: None,
				eDVBServicePMTHandler.eventEOF: None,
				eDVBServicePMTHandler.eventMisconfiguration: _("Service unavailable!\nCheck tuner configuration!"),
			}.get(error) # .get() returns None when the error code is not in the dict
			if error and not config.usage.hide_zap_errors.value:
				self.closeNotificationInstantiateDialog()
				# Do not stack on top of the dish-movement dialog.
				if hasattr(self, "dishDialog") and not self.dishDialog.dishState():
					Notifications.AddPopup(text = error, type = MessageBox.TYPE_ERROR, timeout = 5, id = "ZapError")
class InfoBarPowersaver:
	"""Inactivity and sleep timer handling (automatic standby)."""
	def __init__(self):
		# Timer that fires after a configured period without key presses.
		self.inactivityTimer = eTimer()
		self.inactivityTimer.callback.append(self.inactivityTimeout)
		self.restartInactiveTimer()
		# One-shot sleep timer set explicitly by the user.
		self.sleepTimer = eTimer()
		self.sleepStartTime = 0
		self.sleepTimer.callback.append(self.sleepTimerTimeout)
		# Lowest-priority binding: observe every key press to reset inactivity.
		eActionMap.getInstance().bindAction('', -maxint - 1, self.keypress)
	def keypress(self, key, flag):
		# Any key activity (flag truthy) restarts the inactivity timer.
		if flag:
			self.restartInactiveTimer()
	def restartInactiveTimer(self):
		# 0 disables the inactivity timeout entirely.
		time = abs(int(config.usage.inactivity_timer.value))
		if time:
			self.inactivityTimer.startLongTimer(time)
		else:
			self.inactivityTimer.stop()
	def inactivityTimeout(self):
		# Inactivity timer expired.  If we are inside a configured "block
		# time" window, postpone the standby until the window ends.
		if config.usage.inactivity_timer_blocktime.value:
			curtime = localtime(time())
			if curtime.tm_year > 1970: #check if the current time is valid
				curtime = (curtime.tm_hour, curtime.tm_min, curtime.tm_sec)
				begintime = tuple(config.usage.inactivity_timer_blocktime_begin.value)
				endtime = tuple(config.usage.inactivity_timer_blocktime_end.value)
				begintime_extra = tuple(config.usage.inactivity_timer_blocktime_extra_begin.value)
				endtime_extra = tuple(config.usage.inactivity_timer_blocktime_extra_end.value)
				# Both windows may wrap around midnight (begin > end), hence
				# the two-sided comparisons in each half of the condition.
				if begintime <= endtime and (curtime >= begintime and curtime < endtime) or begintime > endtime and (curtime >= begintime or curtime < endtime) or config.usage.inactivity_timer_blocktime_extra.value and\
					(begintime_extra <= endtime_extra and (curtime >= begintime_extra and curtime < endtime_extra) or begintime_extra > endtime_extra and (curtime >= begintime_extra or curtime < endtime_extra)):
					# Seconds until the (primary) block window ends.
					duration = (endtime[0]*3600 + endtime[1]*60) - (curtime[0]*3600 + curtime[1]*60 + curtime[2])
					if duration:
						if duration < 0:
							# Window end is tomorrow.
							duration += 24*3600
						self.inactivityTimer.startLongTimer(duration)
						return
		if Screens.Standby.inStandby:
			# Already in standby: nothing to ask, proceed directly.
			self.inactivityTimeoutCallback(True)
		else:
			# NOTE(review): message text has a grammar slip ("will got to");
			# it is a translation msgid, so it is left unchanged here.
			message = _("Your receiver will got to standby due to inactivity.") + "\n" + _("Do you want this?")
			self.session.openWithCallback(self.inactivityTimeoutCallback, MessageBox, message, timeout=60, simple=True, default=False, timeout_default=True)
	def inactivityTimeoutCallback(self, answer):
		# answer is True when the user confirmed (or the dialog timed out).
		if answer:
			self.goStandby()
		else:
			print "[InfoBarPowersaver] abort"
	def sleepTimerState(self):
		"""Return remaining sleep time in minutes, or 0 when inactive."""
		if self.sleepTimer.isActive():
			return (self.sleepStartTime - time()) / 60
		return 0
	def setSleepTimer(self, sleepTime):
		"""Arm the sleep timer for sleepTime seconds; 0/None disables it."""
		print "[InfoBarPowersaver] set sleeptimer", sleepTime
		if sleepTime:
			m = abs(sleepTime / 60)
			message = _("The sleep timer has been activated.") + "\n" + _("And will put your receiver in standby over ") + ngettext("%d minute", "%d minutes", m) % m
			self.sleepTimer.startLongTimer(sleepTime)
			self.sleepStartTime = time() + sleepTime
		else:
			message = _("The sleep timer has been disabled.")
			self.sleepTimer.stop()
		Notifications.AddPopup(message, type = MessageBox.TYPE_INFO, timeout = 5)
	def sleepTimerTimeout(self):
		# Ask for confirmation before going to standby; offer an extension.
		if not Screens.Standby.inStandby:
			list = [ (_("Yes"), True), (_("Extend sleeptimer 15 minutes"), "extend"), (_("No"), False) ]
			message = _("Your receiver will got to stand by due to the sleeptimer.")
			message += "\n" + _("Do you want this?")
			self.session.openWithCallback(self.sleepTimerTimeoutCallback, MessageBox, message, timeout=60, simple=True, list=list, default=False, timeout_default=True)
	def sleepTimerTimeoutCallback(self, answer):
		# "extend" re-arms for 15 minutes; True goes to standby; anything
		# else cancels the sleep timer.
		if answer == "extend":
			print "[InfoBarPowersaver] extend sleeptimer"
			self.setSleepTimer(900)
		elif answer:
			self.goStandby()
		else:
			print "[InfoBarPowersaver] abort"
			self.setSleepTimer(0)
	def goStandby(self):
		# Open the standby screen unless we are already in standby.
		if not Screens.Standby.inStandby:
			print "[InfoBarPowersaver] goto standby"
			self.session.open(Screens.Standby.Standby)
class InfoBarHDMI:
	"""Toggle the main screen or PiP window to/from the HDMI-input service."""
	def HDMIIn(self):
		slist = self.servicelist
		if slist.dopipzap:
			# PiP has the zap focus: toggle HDMI-in inside the PiP window.
			current = self.session.pip.getCurrentService()
			if current and current.type != 8192:
				self.session.pip.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
			else:
				self.session.pip.playService(slist.servicelist.getCurrent())
		else:
			current = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			if current and current.type != 8192:
				# Leaving media playback: remember the resume position first.
				if current.type != -1 and os.path.splitext(current.toString().split(":")[10])[1].lower() in AUDIO_EXTENSIONS.union(MOVIE_EXTENSIONS, DVD_EXTENSIONS):
					setResumePoint(self.session)
				self.session.nav.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
			elif isStandardInfoBar(self):
				self.session.nav.playService(slist.servicelist.getCurrent())
			else:
				self.session.nav.playService(self.cur_service)
|
xavierwu/scikit-learn | refs/heads/master | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
    """Check the _jaccard helper on small hand-built boolean masks."""
    a1 = np.array([True, True, False, False])
    a2 = np.array([True, True, True, True])
    a3 = np.array([False, True, True, False])
    a4 = np.array([False, False, True, True])
    cases = [
        ((a1, a1, a1, a1), 1),
        ((a1, a1, a2, a2), 0.25),
        ((a1, a1, a3, a3), 1.0 / 7),
        ((a1, a1, a4, a4), 0),
    ]
    for args, expected in cases:
        assert_equal(_jaccard(*args), expected)
def test_consensus_score():
    """Perfect matchings score 1; fully mismatched matchings score 0."""
    a = [[True, True, False, False],
         [False, False, True, True]]
    b = a[::-1]
    # Biclusterings that agree (possibly after consistent relabeling).
    for left, right in (((a, a), (a, a)), ((a, a), (b, b)),
                        ((a, b), (a, b)), ((a, b), (b, a))):
        assert_equal(consensus_score(left, right), 1)
    # Row/column assignments swapped between the two biclusterings.
    for left, right in (((a, a), (b, a)), ((a, a), (a, b)),
                        ((b, b), (a, b)), ((b, b), (b, a))):
        assert_equal(consensus_score(left, right), 0)
def test_consensus_score_issue2445():
    ''' Different number of biclusters in A and B'''
    rows = np.array([[True, True, False, False],
                     [False, False, True, True],
                     [False, False, False, True]])
    cols = rows.copy()
    subset = [0, 2]
    score = consensus_score((rows, cols), (rows[subset], cols[subset]))
    # B contains 2 of the 3 biclusters in A, so score should be 2/3
    assert_almost_equal(score, 2.0 / 3.0)
|
stephen144/odoo | refs/heads/9.0 | addons/website_slides/models/__init__.py | 77 | # -*- coding: utf-8 -*-
import res_config
import slides
|
mdaniel/intellij-community | refs/heads/master | python/testData/refactoring/move/function/before/src/b.py | 83 | def g():
return None |
WebDerekh/blackfriday-horze | refs/heads/master | node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
  """Visual Studio tool."""

  def __init__(self, name, attrs=None):
    """Initializes the tool.

    Args:
      name: Tool name.
      attrs: Dict of tool attributes; may be None.
    """
    # Note: when a dict is supplied it is used (and mutated) directly;
    # the tool name is stored alongside the other attributes.
    self._attrs = attrs if attrs else {}
    self._attrs['Name'] = name

  def _GetSpecification(self):
    """Creates an element for the tool.

    Returns:
      A new xml.dom.Element for the tool.
    """
    return ['Tool', self._attrs]
class Filter(object):
  """Visual Studio filter - that is, a virtual folder."""

  def __init__(self, name, contents=None):
    """Initializes the folder.

    Args:
      name: Filter (folder) name.
      contents: List of filenames and/or Filter objects contained.
    """
    self.name = name
    # Take a shallow copy so later mutation of the argument has no effect.
    self.contents = [] if not contents else list(contents)
#------------------------------------------------------------------------------
class Writer(object):
  """Visual Studio XML project writer."""

  def __init__(self, project_path, version, name, guid=None, platforms=None):
    """Initializes the project.

    Args:
      project_path: Path to the project file.
      version: Format version to emit.
      name: Name of the project.
      guid: GUID to use for project, if not None.
      platforms: Array of string, the supported platforms. If null, ['Win32']
    """
    self.project_path = project_path
    self.version = version
    self.name = name
    self.guid = guid

    # Default to Win32 for platforms.
    platforms = platforms or ['Win32']

    # Initialize the specifications of the various sections.
    self.platform_section = ['Platforms'] + [
        ['Platform', {'Name': platform}] for platform in platforms]
    self.tool_files_section = ['ToolFiles']
    self.configurations_section = ['Configurations']
    self.files_section = ['Files']

    # Keep a dict keyed on filename to speed up access.
    self.files_dict = {}

  def AddToolFile(self, path):
    """Adds a tool file to the project.

    Args:
      path: Relative path from project to tool file.
    """
    self.tool_files_section.append(['ToolFile', {'RelativePath': path}])

  def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
    """Returns the specification for a configuration.

    Args:
      config_type: Type of configuration node.
      config_name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    Returns:
      The configuration node specification (a nested list).
    """
    # Copy the attributes (never mutate the caller's dict) and add the name.
    node_attrs = dict(attrs or {})
    node_attrs['Name'] = config_name
    specification = [config_type, node_attrs]

    # Append one node per tool, wrapping plain tool names in Tool objects.
    for tool in tools or []:
      if not isinstance(tool, Tool):
        tool = Tool(tool)
      specification.append(tool._GetSpecification())
    return specification

  def AddConfig(self, name, attrs=None, tools=None):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    """
    self.configurations_section.append(
        self._GetSpecForConfiguration('Configuration', name, attrs, tools))

  def _AddFilesToNode(self, parent, files):
    """Adds files and/or filters to the parent node.

    Args:
      parent: Destination node
      files: A list of Filter objects and/or relative paths to files.

    Will call itself recursively, if the files list contains Filter objects.
    """
    for entry in files:
      if isinstance(entry, Filter):
        node = ['Filter', {'Name': entry.name}]
        self._AddFilesToNode(node, entry.contents)
      else:
        node = ['File', {'RelativePath': entry}]
        self.files_dict[entry] = node
      parent.append(node)

  def AddFiles(self, files):
    """Adds files to the project.

    Args:
      files: A list of Filter objects and/or relative paths to files.

    This makes a copy of the file/filter tree at the time of this call.  If
    you later add files to a Filter object which was passed into a previous
    call to AddFiles(), it will not be reflected in this project.
    """
    # TODO(rspangler) This also doesn't handle adding files to an existing
    # filter.  That is, it doesn't merge the trees.
    self._AddFilesToNode(self.files_section, files)

  def AddFileConfig(self, path, config, attrs=None, tools=None):
    """Adds a configuration to a file.

    Args:
      path: Relative path to the file.
      config: Name of configuration to add.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.

    Raises:
      ValueError: Relative path does not match any file added via AddFiles().
    """
    # Find the file node with the right relative path.
    parent = self.files_dict.get(path)
    if parent is None:
      raise ValueError('AddFileConfig: file "%s" not in project.' % path)

    # Add the config to the file node.
    parent.append(
        self._GetSpecForConfiguration('FileConfiguration', config, attrs,
                                      tools))

  def WriteIfChanged(self):
    """Writes the project file."""
    # First create XML content definition
    content = [
        'VisualStudioProject',
        {'ProjectType': 'Visual C++',
         'Version': self.version.ProjectVersion(),
         'Name': self.name,
         'ProjectGUID': self.guid,
         'RootNamespace': self.name,
         'Keyword': 'Win32Proj'
        },
        self.platform_section,
        self.tool_files_section,
        self.configurations_section,
        ['References'],  # empty section
        self.files_section,
        ['Globals']  # empty section
    ]
    easy_xml.WriteXmlIfChanged(content, self.project_path,
                               encoding="Windows-1252")
|
pmacosta/putil | refs/heads/master | tests/test_pinspect.py | 1 | # test_pinspect.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,E0611,F0401,R0201,R0903,R0913,R0915,W0104,W0212,W0232,W0612,W0613,W0621
# Standard library imports
from __future__ import print_function
from functools import partial
import copy
import os
import sys
import time
import types
# PyPI imports
import pytest
# Python 3 compatibility shim.  NOTE: the check must be ">=": sys.hexversion
# encodes the release level/serial in its low bytes (3.0.0 final is
# 0x030000f0), so "== 0x03000000" never matches a real interpreter and the
# import would never happen on Python 3.
if sys.hexversion >= 0x03000000:
    from putil.compat3 import _readlines
# Putil imports
import putil.pinspect
from putil.test import AE, AI, CS, GET_EXMSG, RE
###
# Helper functions
###
modfile = lambda x: sys.modules[x].__file__
###
# Tests for module functions
###
def test_private_props():
    """ Test private_props function behavior """
    expected = [
        '_callables_db',
        '_class_names',
        '_fnames',
        '_module_names',
        '_modules_dict',
        '_reverse_callables_db'
    ]
    actual = sorted(putil.pinspect.private_props(putil.pinspect.Callables()))
    assert actual == expected
# BUGFIX: use ">=" — sys.hexversion for 3.0.0 final is 0x030000f0 (release
# level/serial live in the low bytes), so "== 0x03000000" was never true and
# this test was never collected on Python 3.
if sys.hexversion >= 0x03000000:
    def test_readlines():
        """ Test _readlines function behavior """
        def mopen1(fname, mode):
            # Stand-in open() that always fails with a recognizable error.
            raise RuntimeError('Mock mopen1 function')
        def mopen2(fname, mode):
            text = chr(40960) + 'abcd' + chr(1972)
            # Next line raises UnicodeDecodeError
            b'\x80abc'.decode("utf-8", "strict")
        class MockOpenCls(object):
            # Minimal context manager standing in for the fallback opener.
            def __init__(self, fname, mode, encoding):
                pass
            def __enter__(self):
                return self
            def __exit__(self, exc_type, exc_value, exc_tb):
                if exc_type is not None:
                    return False
            def readlines(self):
                return 'MockOpenCls'
        pkg_dir = os.path.abspath(os.path.dirname(__file__))
        fname = os.path.join(pkg_dir, 'test_misc.py')
        # This should not trigger an exception (functionality checked
        # by other unit tests)
        _readlines(fname)
        # Trigger unrelated exception exception
        obj = _readlines
        with pytest.raises(RuntimeError) as excinfo:
            _readlines(fname, mopen1)
        assert GET_EXMSG(excinfo) == 'Mock mopen1 function'
        # Trigger UnicodeDecodeError exception
        assert _readlines(fname, mopen2, MockOpenCls) == 'MockOpenCls'
def test_object_is_module():
    """ Test object_is_module() function """
    checks = [(5, False), (sys.modules['putil.pinspect'], True)]
    for candidate, expected in checks:
        assert bool(putil.pinspect.is_object_module(candidate)) == expected
def test_get_module_name():
    """ Test get_module_name() function """
    func = putil.pinspect.get_module_name
    # Wrong argument type
    AI(func, 'module_obj', module_obj=5)
    # A module object that is not in sys.modules
    fake_module = types.ModuleType('mock_module_obj', 'Mock module')
    AE(
        func,
        RE,
        'Module object `mock_module_obj` could not be found'
        ' in loaded modules',
        module_obj=fake_module
    )
    # Normal operation on loaded modules
    assert func(sys.modules['putil.pinspect']) == 'putil.pinspect'
    assert func(sys.modules['putil']) == 'putil'
def test_get_module_name_from_fname():
    """ Test _get_module_name_from_fname() function """
    func = putil.pinspect._get_module_name_from_fname
    AE(func, RE, 'Module could not be found', fname='_not_a_module')
    assert func(modfile('putil.pinspect')) == 'putil.pinspect'
def test_is_special_method():
    """ Test is_special_method() function """
    cases = {
        'func_name': False,
        '_func_name_': False,
        '__func_name__': True,
    }
    for fname, expected in cases.items():
        assert bool(putil.pinspect.is_special_method(fname)) == expected
###
# Test for classes
###
class TestCallables(object):
""" Test for Callables """
def test_check_intersection(self):
""" Test _check_intersection method behavior """
obj1 = putil.pinspect.Callables()
obj1._callables_db = {'call1':1, 'call2':2}
obj2 = putil.pinspect.Callables()
obj2._callables_db = {'call1':1, 'call2':'a'}
exmsg = 'Conflicting information between objects'
obj = obj1._check_intersection
AE(obj, RE, exmsg, other=obj2)
obj1._callables_db = {'call1':1, 'call2':['a', 'c']}
obj2._callables_db = {'call1':1, 'call2':['a', 'b']}
AE(obj, RE, exmsg, other=obj2)
obj1._callables_db = {'call1':1, 'call2':{'a':'b'}}
obj2._callables_db = {'call1':1, 'call2':{'a':'c'}}
AE(obj, RE, exmsg, other=obj2)
obj1._callables_db = {'call1':1, 'call2':'a'}
obj2._callables_db = {'call1':1, 'call2':'c'}
AE(obj, RE, exmsg, other=obj2)
obj1._callables_db = {'call1':1, 'call2':'a'}
obj2._callables_db = {'call1':1, 'call2':'a'}
assert obj1._check_intersection(obj2) is None
def test_init_exceptions(self):
""" Test constructor exceptions """
obj = putil.pinspect.Callables
for item in [5, [5]]:
AI(obj, 'fnames', fnames=item)
exmsg = 'File _not_a_file_ could not be found'
AE(obj, OSError, exmsg, fnames=['_not_a_file_'])
    def test_add(self):
        """ Test __add__ __radd__ method behavior """
        # First object: hand-crafted internal databases
        obj1 = putil.pinspect.Callables()
        obj1._callables_db = {'call1':{'a':5, 'b':6}, 'call2':{'a':7, 'b':8}}
        obj1._reverse_callables_db = {'rc1':'5', 'rc2':'7'}
        obj1._modules_dict = {
            'key1':{'entry':'alpha'}, 'key2':{'entry':'beta'}
        }
        obj1._fnames = {'hello':0}
        obj1._module_names = ['this', 'is']
        obj1._class_names = ['once', 'upon']
        # Second object: disjoint databases so addition merges cleanly
        obj2 = putil.pinspect.Callables()
        obj2._callables_db = {
            'call3':{'a':10, 'b':100}, 'call4':{'a':200, 'b':300}
        }
        obj2._reverse_callables_db = {'rc3':'0', 'rc4':'1'}
        obj2._modules_dict = {'key3':{'entry':'pi'}, 'key4':{'entry':'gamma'}}
        obj2._fnames = {'world':1}
        obj2._module_names = ['a', 'test']
        obj2._class_names = ['a', 'time']
        # Conflict in _callables_db (same key, different value) must raise
        obj1._callables_db = {'call3':{'a':5, 'b':6}, 'call2':{'a':7, 'b':8}}
        with pytest.raises(RuntimeError) as excinfo:
            obj1+obj2
        assert GET_EXMSG(excinfo) == 'Conflicting information between objects'
        obj1._callables_db = {'call1':{'a':5, 'b':6}, 'call2':{'a':7, 'b':8}}
        # Conflict in _reverse_callables_db must raise
        obj2._reverse_callables_db = {'rc3':'5', 'rc2':'-1'}
        with pytest.raises(RuntimeError) as excinfo:
            obj1+obj2
        assert GET_EXMSG(excinfo) == 'Conflicting information between objects'
        obj2._reverse_callables_db = {'rc3':'0', 'rc4':'-1'}
        # Conflict in _modules_dict must raise
        obj2._modules_dict = {'key1':{'entry':'pi'}, 'key4':{'entry':'gamma'}}
        with pytest.raises(RuntimeError) as excinfo:
            obj1+obj2
        assert GET_EXMSG(excinfo) == 'Conflicting information between objects'
        obj2._modules_dict = {'key3':{'entry':'pi'}, 'key4':{'entry':'gamma'}}
        # Test when intersection is the same
        obj2._modules_dict = {
            'key1':{'entry':'alpha'}, 'key4':{'entry':'gamma'}
        }
        obj1+obj2
        obj2._modules_dict = {'key3':{'entry':'pi'}, 'key4':{'entry':'gamma'}}
        # Check the merged object produced by __add__
        sobj = obj1+obj2
        scomp = lambda x, y: sorted(x) == sorted(y)
        ref = {
            'call1':{'a':5, 'b':6},
            'call2':{'a':7, 'b':8},
            'call3':{'a':10, 'b':100},
            'call4':{'a':200, 'b':300}
        }
        assert scomp(sobj._callables_db, ref)
        ref = {'rc1':'5', 'rc2':'7', 'rc3':'0', 'rc4':'-1'}
        assert scomp(sobj._reverse_callables_db, ref)
        ref = {
            'key1':{'entry':'alpha'},
            'key2':{'entry':'beta'},
            'key3':{'entry':'pi'},
            'key4':{'entry':'gamma'}
        }
        assert scomp(sobj._modules_dict, ref)
        assert scomp(sobj._fnames, {'hello':0, 'world':1})
        assert scomp(sobj._module_names, ['this', 'is', 'a', 'test'])
        assert scomp(sobj._class_names, ['once', 'upon', 'a', 'time'])
        # Check in-place merge via __iadd__ gives the same result
        obj1 += obj2
        ref = {
            'call1':{'a':5, 'b':6},
            'call2':{'a':7, 'b':8},
            'call3':{'a':10, 'b':100},
            'call4':{'a':200, 'b':300}
        }
        assert scomp(obj1._callables_db, ref)
        ref = {'rc1':'5', 'rc2':'7', 'rc3':'0', 'rc4':'-1'}
        assert scomp(obj1._reverse_callables_db, ref)
        ref = {
            'key1':{'entry':'alpha'},
            'key2':{'entry':'beta'},
            'key3':{'entry':'pi'},
            'key4':{'entry':'gamma'}
        }
        assert scomp(obj1._modules_dict, ref)
        assert scomp(obj1._fnames, {'hello':0, 'world':1})
        assert scomp(obj1._module_names, ['this', 'is', 'a', 'test'])
        assert scomp(obj1._class_names, ['once', 'upon', 'a', 'time'])
def test_copy(self):
""" Test __copy__ method behavior """
sobj = putil.pinspect.Callables()
import tests.support.pinspect_support_module_1
sobj.trace([modfile('tests.support.pinspect_support_module_1')])
dobj = copy.copy(sobj)
assert sobj._module_names == dobj._module_names
assert id(sobj._module_names) != id(dobj._module_names)
assert sobj._class_names == dobj._class_names
assert id(sobj._class_names) != id(dobj._class_names)
assert sobj._callables_db == dobj._callables_db
assert id(sobj._callables_db) != id(dobj._callables_db)
assert sobj._reverse_callables_db == dobj._reverse_callables_db
assert id(sobj._reverse_callables_db) != id(dobj._reverse_callables_db)
def test_eq(self):
""" Test __eq__ method behavior """
obj1 = putil.pinspect.Callables()
obj2 = putil.pinspect.Callables()
obj3 = putil.pinspect.Callables()
import tests.support.pinspect_support_module_1
import tests.support.pinspect_support_module_2
mname = 'tests.support.pinspect_support_module_1'
obj1.trace([modfile(mname)])
obj2.trace([modfile(mname)])
obj3.trace([modfile('putil.test')])
assert (obj1 == obj2) and (obj1 != obj3)
assert obj1 != 5
def test_repr(self):
""" Test __repr__ method behavior """
get_name = lambda x: modfile(x).replace('.pyc', '.py')
import tests.support.exdoc_support_module_1
file1 = get_name('tests.support.exdoc_support_module_1')
file2 = get_name('tests.support.exdoc_support_module_2')
xobj = putil.pinspect.Callables([file2])
xobj.trace([file1])
ref = "putil.pinspect.Callables([{0}, {1}])".format(
repr(file1), repr(file2)
)
assert repr(xobj) == ref
def test_str_empty(self):
""" Test __str__ magic method when object is empty """
obj = putil.pinspect.Callables()
assert str(obj) == ''
    def test_refresh(self):
        """ Test refresh method behavior """
        # Create a throw-away module (pit.py) next to putil.test
        ref = modfile('putil.test')
        src = os.path.join(os.path.dirname(ref), 'pit.py')
        with open(src, 'w') as fobj:
            fobj.write(
                'class MyClass(object):\n'
                '    pass\n'
                'def func1():\n'
                '    pass\n'
            )
        import putil.pit
        obj = putil.pinspect.Callables([ref, src])
        tmod = obj._fnames[src]
        # Re-tracing an unchanged file must not invalidate the cached entry
        obj.trace([src])
        assert obj._fnames[src] == tmod
        rtext = (
            'Modules:\n'
            '   putil.pit\n'
            '   putil.test\n'
            'Classes:\n'
            '   putil.pit.MyClass\n'
            'putil.pit.MyClass: class (1-2)\n'
            'putil.pit.func1: func (3-4)\n'
            'putil.test._get_fargs: func (32-67)\n'
            'putil.test._pcolor: func (68-82)\n'
            'putil.test.assert_arg_invalid: func (83-115)\n'
            'putil.test.assert_exception: func (116-199)\n'
            'putil.test._invalid_frame: func (200-206)\n'
            'putil.test.assert_prop: func (207-246)\n'
            'putil.test.assert_ro_prop: func (247-266)\n'
            'putil.test.compare_strings: func (267-356)\n'
            'putil.test.compare_strings.colorize_lines: func (296-307)\n'
            'putil.test.compare_strings.print_non_diff: func (308-312)\n'
            'putil.test.compare_strings.print_diff: func (313-321)\n'
            'putil.test.comp_list_of_dicts: func (357-371)\n'
            'putil.test.exception_type_str: func (372-389)\n'
            'putil.test.get_exmsg: func (390-404)'
        )
        CS(str(obj), rtext)
        # Wait until the file mtime can actually change (1-second
        # granularity), then rewrite the module with new contents
        ftime = int(os.path.getmtime(src))
        while int(time.time()) <= ftime:
            time.sleep(0.1)
        os.remove(src)
        content = 'def my_func():\n    pass'
        with open(src, 'w') as fobj:
            fobj.write(content)
        # refresh() must pick up the changed file
        obj.refresh()
        assert obj._fnames[src] != tmod
        rtext = (
            'Modules:\n'
            '   putil.pit\n'
            '   putil.test\n'
            'putil.pit.my_func: func (1-2)\n'
            'putil.test._get_fargs: func (32-67)\n'
            'putil.test._pcolor: func (68-82)\n'
            'putil.test.assert_arg_invalid: func (83-115)\n'
            'putil.test.assert_exception: func (116-199)\n'
            'putil.test._invalid_frame: func (200-206)\n'
            'putil.test.assert_prop: func (207-246)\n'
            'putil.test.assert_ro_prop: func (247-266)\n'
            'putil.test.compare_strings: func (267-356)\n'
            'putil.test.compare_strings.colorize_lines: func (296-307)\n'
            'putil.test.compare_strings.print_non_diff: func (308-312)\n'
            'putil.test.compare_strings.print_diff: func (313-321)\n'
            'putil.test.comp_list_of_dicts: func (357-371)\n'
            'putil.test.exception_type_str: func (372-389)\n'
            'putil.test.get_exmsg: func (390-404)'
        )
        CS(str(obj), rtext)
        # Test loading a JSON file whose reverse-callables keys are stale
        # (two different line numbers map to the same callable)
        obj = putil.pinspect.Callables()
        json_src = os.path.join(os.path.dirname(ref), 'pit.json')
        json_txt = (
            '{{\n'
            '    "_callables_db": {{\n'
            '        "putil.pit.my_func": {{\n'
            '            "code_id": [\n'
            '                "{pyfile}",\n'
            '                1\n'
            '            ],\n'
            '            "last_lineno": 2,\n'
            '            "name": "putil.pit.my_func",\n'
            '            "type": "func"\n'
            '        }}\n'
            '    }},\n'
            '    "_class_names": [],\n'
            '    "_fnames": {{\n'
            '        "{pyfile}": {{\n'
            '            "classes": [],\n'
            '            "date": 1,\n'
            '            "name": "putil.pit"\n'
            '        }}\n'
            '    }},\n'
            '    "_module_names": [\n'
            '        "putil.pit"\n'
            '    ],\n'
            '    "_modules_dict": {{\n'
            '        "putil.pit": [\n'
            '            {{\n'
            '                "code_id": [\n'
            '                    "{pyfile}",\n'
            '                    1\n'
            '                ],\n'
            '                "last_lineno": 2,\n'
            '                "name": "putil.pit.my_func",\n'
            '                "type": "func"\n'
            '            }}\n'
            '        ]\n'
            '    }},\n'
            '    "_reverse_callables_db": {{\n'
            '        "(\'{pyfile}\', 1)": "putil.pit.my_func",\n'
            '        "(\'{pyfile}\', 10)": "putil.pit.my_func"\n'
            '    }}\n'
            '}}\n'
        )
        with open(json_src, 'w') as fobj:
            fobj.write(json_txt.format(pyfile=src.replace('\\', '/')))
        obj.load(json_src)
        obj.refresh()
        # Clean up the temporary module and JSON files
        os.remove(json_src)
        os.remove(src)
    def test_load_save(self):
        """ Test load and save methods behavior """
        # pylint: disable=R0914
        import putil.pcsv
        import tests.support.exdoc_support_module_1
        # Round-trip of an empty object
        obj1 = putil.pinspect.Callables()
        with putil.misc.TmpFile() as fname:
            obj1.save(fname)
        obj2 = putil.pinspect.Callables()
        obj2.load(fname)
        assert obj1 == obj2
        # Round-trip of a single traced module
        mname = 'putil.pcsv.csv_file'
        cname = '{0}.CsvFile'.format(mname)
        obj1 = putil.pinspect.Callables([modfile(mname)])
        with putil.misc.TmpFile() as fname:
            obj1.save(fname)
        obj2 = putil.pinspect.Callables()
        assert not bool(obj2)
        obj2.load(fname)
        assert obj1 == obj2
        # Test merging of traced and file-based module information
        mname1 = 'putil.pcsv.csv_file'
        obj1 = putil.pinspect.Callables([modfile(mname1)])
        mname2 = 'tests.support.exdoc_support_module_1'
        obj2 = putil.pinspect.Callables([modfile(mname2)])
        with putil.misc.TmpFile() as fname1:
            with putil.misc.TmpFile() as fname2:
                obj1.save(fname1)
                obj2.save(fname2)
                # Tracing both modules at once must equal loading the two
                # separately-saved databases into one object
                obj3 = putil.pinspect.Callables(
                    [modfile(mname1), modfile(mname2)]
                )
                obj4 = putil.pinspect.Callables()
                obj4.load(fname2)
                obj4.load(fname1)
                assert obj3 == obj4
def test_load_exceptions(self):
""" Test load method exceptions """
obj = putil.pinspect.Callables()
for item in [True, 5]:
AI(obj.load, 'callables_fname', callables_fname=item)
exmsg = 'File _not_a_file_ could not be found'
AE(obj.load, OSError, exmsg, callables_fname='_not_a_file_')
def test_save_exceptions(self):
""" Test save method exceptions """
obj = putil.pinspect.Callables()
for item in [True, 5]:
AI(obj.save, 'callables_fname', callables_fname=item)
def test_trace(self):
""" Test trace method behavior """
import putil.pcsv
mname = 'putil.pcsv.csv_file'
cname = '{0}.CsvFile'.format(mname)
xobj = putil.pinspect.Callables([modfile(mname)])
ref = []
ref.append('Modules:')
ref.append(' {0}'.format(mname))
ref.append('Classes:')
ref.append(' {0}'.format(cname))
ref.append('{0}._homogenize_data_filter: func (44-66)'.format(mname))
ref.append('{0}._tofloat: func (67-82)'.format(mname))
ref.append('{0}: class (83-958)'.format(cname))
ref.append('{0}.__init__: meth (134-207)'.format(cname))
ref.append('{0}.__eq__: meth (208-242)'.format(cname))
ref.append('{0}.__repr__: meth (243-276)'.format(cname))
ref.append('{0}.__str__: meth (277-321)'.format(cname))
ref.append('{0}._format_rfilter: meth (322-338)'.format(cname))
ref.append('{0}._gen_col_index: meth (339-351)'.format(cname))
ref.append('{0}._get_cfilter: meth (352-354)'.format(cname))
ref.append('{0}._get_dfilter: meth (355-357)'.format(cname))
ref.append('{0}._get_rfilter: meth (358-360)'.format(cname))
ref.append('{0}._reset_dfilter_int: meth (361-366)'.format(cname))
ref.append('{0}._in_header: meth (367-401)'.format(cname))
ref.append('{0}._set_cfilter: meth (402-406)'.format(cname))
ref.append('{0}._set_dfilter: meth (407-412)'.format(cname))
ref.append('{0}._set_rfilter: meth (413-417)'.format(cname))
ref.append('{0}._add_dfilter_int: meth (418-460)'.format(cname))
ref.append('{0}._apply_filter: meth (461-493)'.format(cname))
ref.append('{0}._set_has_header: meth (494-497)'.format(cname))
ref.append('{0}._validate_frow: meth (498-503)'.format(cname))
ref.append('{0}._validate_rfilter: meth (504-537)'.format(cname))
ref.append('{0}.add_dfilter: meth (538-561)'.format(cname))
ref.append('{0}.cols: meth (562-581)'.format(cname))
ref.append('{0}.data: meth (582-610)'.format(cname))
ref.append('{0}.dsort: meth (611-663)'.format(cname))
ref.append('{0}.header: meth (664-695)'.format(cname))
ref.append('{0}.replace: meth (696-766)'.format(cname))
ref.append('{0}.reset_dfilter: meth (767-784)'.format(cname))
ref.append('{0}.rows: meth (785-804)'.format(cname))
ref.append('{0}.write: meth (805-887)'.format(cname))
ref.append('{0}.cfilter: prop (888-910)'.format(cname))
ref.append('{0}.dfilter: prop (911-934)'.format(cname))
ref.append('{0}.rfilter: prop (935-958)'.format(cname))
ref_txt = '\n'.join(ref)
actual_txt = str(xobj)
CS(actual_txt, ref_txt)
#
import tests.support.exdoc_support_module_1
mname = 'tests.support.exdoc_support_module_1'
xobj = putil.pinspect.Callables([modfile(mname)])
ref = []
cname = '{0}.ExceptionAutoDocClass'.format(mname)
ref.append('Modules:')
ref.append(' {0}'.format(mname))
ref.append('Classes:')
ref.append(' {0}'.format(cname))
ref.append(' {0}.MyClass'.format(mname))
ref.append('{0}._validate_arguments: func (17-31)'.format(mname))
ref.append('{0}._write: func (32-36)'.format(mname))
ref.append('{0}.write: func (37-50)'.format(mname))
ref.append('{0}.read: func (51-62)'.format(mname))
ref.append('{0}.probe: func (63-74)'.format(mname))
ref.append('{0}.dummy_decorator1: func (75-79)'.format(mname))
ref.append('{0}.dummy_decorator2: func (80-91)'.format(mname))
ref.append('{0}.dummy_decorator2.wrapper: func (86-88)'.format(mname))
ref.append('{0}.mlmdfunc: func (92-108)'.format(mname))
ref.append('{0}: class (109-251)'.format(cname))
ref.append('{0}.__init__: meth (112-124)'.format(cname))
ref.append('{0}._del_value3: meth (125-132)'.format(cname))
ref.append('{0}._get_value3: meth (133-141)'.format(cname))
ref.append('{0}._set_value1: meth (142-152)'.format(cname))
ref.append('{0}._set_value2: meth (153-166)'.format(cname))
ref.append('{0}._set_value3: meth (167-177)'.format(cname))
ref.append('{0}.add: meth (178-184)'.format(cname))
ref.append('{0}.subtract: meth (185-191)'.format(cname))
ref.append('{0}.multiply: meth (192-204)'.format(cname))
ref.append('{0}.divide: meth (205-214)'.format(cname))
ref.append('{0}.temp(getter): meth (215-219)'.format(cname))
ref.append('{0}.temp(setter): meth (220-225)'.format(cname))
ref.append('{0}.temp(deleter): meth (226-231)'.format(cname))
ref.append('{0}.value1: prop (232-240)'.format(cname))
ref.append('{0}.value2: prop (241-246)'.format(cname))
ref.append('{0}.value3: prop (247-248)'.format(cname))
ref.append('{0}.value4: prop (249-251)'.format(cname))
ref.append('{0}.my_func: func (252-254)'.format(mname))
ref.append('{0}.MyClass: class (255-259)'.format(mname))
ref.append('{0}.MyClass.value: prop (259)'.format(mname))
ref_txt = '\n'.join(ref)
actual_txt = str(xobj)
CS(actual_txt, ref_txt)
#
import tests.test_exdoc
mname = 'tests.test_exdoc'
xobj = putil.pinspect.Callables([modfile(mname)])
cname1 = '{0}.TestExDocCxt'.format(mname)
cname2 = '{0}.TestExDoc'.format(mname)
mename1 = '{0}.test_multiple'.format(cname1)
mename2 = '{0}.test_build_ex_tree'.format(cname2)
meroot = '{0}.test_get_sphinx'.format(cname2)
ref = []
ref.append('Modules:')
ref.append(' tests.test_exdoc')
ref.append('Classes:')
ref.append(' tests.test_exdoc.MockFCode')
ref.append(' tests.test_exdoc.MockGetFrame')
ref.append(' tests.test_exdoc.TestExDoc')
ref.append(' tests.test_exdoc.TestExDocCxt')
ref.append('tests.test_exdoc.exdocobj: func (50-83)')
ref.append('tests.test_exdoc.exdocobj.multi_level_write: func (55-60)')
ref.append('tests.test_exdoc.exdocobj_raised: func (84-97)')
ref.append('tests.test_exdoc.exdocobj_single: func (98-107)')
ref.append('tests.test_exdoc.simple_exobj: func (108-123)')
ref.append('tests.test_exdoc.simple_exobj.func1: func (113-116)')
ref.append('tests.test_exdoc.mock_getframe: func (124-127)')
ref.append('tests.test_exdoc.trace_error_class: func (128-139)')
ref.append('tests.test_exdoc.MockFCode: class (140-145)')
ref.append('tests.test_exdoc.MockFCode.__init__: meth (141-145)')
ref.append('tests.test_exdoc.MockGetFrame: class (146-153)')
ref.append('tests.test_exdoc.MockGetFrame.__init__: meth (147-153)')
ref.append('{0}: class (154-263)'.format(cname1))
ref.append('{0}.test_init: meth (156-208)'.format(cname1))
ref.append('{0}.test_init.check_ctx1: func (159-164)'.format(cname1))
ref.append('{0}.test_init.check_ctx2: func (165-171)'.format(cname1))
ref.append('{0}.test_init.func0: func (172-178)'.format(cname1))
ref.append('{0}: meth (209-245)'.format(mename1))
ref.append('{0}.func1: func (211-217)'.format(mename1))
ref.append('{0}.test_trace: func (218-234)'.format(mename1))
ref.append('{0}.test_save_callables: meth (246-263)'.format(cname1))
ref.append('{0}: class (264-698)'.format(cname2))
ref.append('{0}.test_init: meth (266-282)'.format(cname2))
ref.append('{0}.test_copy: meth (283-296)'.format(cname2))
ref.append('{0}: meth (297-395)'.format(mename2))
ref.append('{0}.func1: func (304-307)'.format(mename2))
ref.append('{0}.mock_add_nodes1: func (309-310)'.format(mename2))
ref.append('{0}.mock_add_nodes2: func (311-312)'.format(mename2))
ref.append('{0}.mock_add_nodes3: func (313-314)'.format(mename2))
ref.append('{0}.test_depth: meth (396-403)'.format(cname2))
ref.append('{0}.test_exclude: meth (404-411)'.format(cname2))
ref.append('{0}_autodoc: meth (412-439)'.format(meroot))
ref.append('{0}_doc: meth (440-698)'.format(meroot))
ref_txt = '\n'.join(ref)
actual_txt = str(xobj)
CS(actual_txt, ref_txt)
#
import tests.support.pinspect_support_module_4
mname = 'tests.support.pinspect_support_module_4'
xobj = putil.pinspect.Callables([modfile(mname)])
ref = []
fname = '{0}.another_property_action_enclosing_function'.format(mname)
ref.append('Modules:')
ref.append(' {0}'.format(mname))
ref.append('{0}: func (16-24)'.format(fname))
ref.append('{0}.fget: func (21-23)'.format(fname))
ref_txt = '\n'.join(ref)
actual_txt = str(xobj)
CS(actual_txt, ref_txt)
# Test re-tries, should produce no action and raise no exception
xobj = putil.pinspect.Callables([modfile(mname)])
import tests.support.pinspect_support_module_10
mname = 'tests.support.pinspect_support_module_10'
xobj = putil.pinspect.Callables([modfile(mname)])
ref = []
cname = '{0}.AClass'.format(mname)
ref.append('Modules:')
ref.append(' {0}'.format(mname))
ref.append('Classes:')
ref.append(' {0}'.format(cname))
ref.append(' {0}.method1.SubClass'.format(cname))
ref.append('{0}: class (6-28)'.format(cname))
ref.append('{0}.method1: meth (12-25)'.format(cname))
ref.append('{0}.method1.func1: func (15-18)'.format(cname))
ref.append('{0}.method1.SubClass: class (20-23)'.format(cname))
ref.append('{0}.method1.SubClass.__init__: meth (22-23)'.format(cname))
ref.append('{0}.method2: meth (26-28)'.format(cname))
ref_txt = '\n'.join(ref)
actual_txt = str(xobj)
CS(actual_txt, ref_txt)
def test_callables_db(self):
""" Test callables_db property """
import tests.support.pinspect_support_module_4
mname = 'tests.support.pinspect_support_module_4'
xobj = putil.pinspect.Callables([modfile(mname)])
pkg_dir = os.path.dirname(__file__)
ref = {
'tests.support.pinspect_support_module_4.'
'another_property_action_enclosing_function': {
'code_id': (
os.path.join(
pkg_dir,
'support',
'pinspect_support_module_4.py'
), 16
),
'last_lineno': 21,
'name': 'pinspect_support_module_4.'
'another_property_action_enclosing_function',
'type': 'func'
},
'tests.support.pinspect_support_module_4.'
'another_property_action_enclosing_function.fget': {
'code_id': (
os.path.join(
pkg_dir,
'support',
'pinspect_support_module_4.py'
), 18
),
'last_lineno': 20,
'name': 'pinspect_support_module_4.'
'another_property_action_enclosing_function.fget',
'type': 'func'
}
}
assert sorted(xobj.callables_db) == sorted(ref)
ref = {
(
os.path.join(
pkg_dir,
'support',
'pinspect_support_module_4.py'
),
16
): (
'pinspect_support_module_4.'
'another_property_action_enclosing_function'
),
(
os.path.join(
pkg_dir,
'support',
'pinspect_support_module_4.py'
),
21
): (
'pinspect_support_module_4.'
'another_property_action_enclosing_function.fget'
)
}
assert sorted(xobj.reverse_callables_db) == sorted(ref)
def test_get_callable_from_line(self):
""" Test get_callable_from_line() function """
xobj = putil.pinspect.Callables()
import tests.support.pinspect_support_module_4
fname = modfile('tests.support.pinspect_support_module_4')
ref = ('tests.support.pinspect_support_module_4.'
'another_property_action_enclosing_function')
assert xobj.get_callable_from_line(fname, 16) == ref
xobj = putil.pinspect.Callables([fname])
ref = ('tests.support.pinspect_support_module_4.'
'another_property_action_enclosing_function')
assert xobj.get_callable_from_line(fname, 16) == ref
ref = ('tests.support.pinspect_support_module_4.'
'another_property_action_enclosing_function')
assert xobj.get_callable_from_line(fname, 17) == ref
ref = ('tests.support.pinspect_support_module_4.'
'another_property_action_enclosing_function')
assert xobj.get_callable_from_line(fname, 24) == ref
ref = ('tests.support.pinspect_support_module_4.'
'another_property_action_enclosing_function.fget')
assert xobj.get_callable_from_line(fname, 21) == ref
ref = ('tests.support.pinspect_support_module_4.'
'another_property_action_enclosing_function.fget')
assert xobj.get_callable_from_line(fname, 22) == ref
ref = ('tests.support.pinspect_support_module_4.'
'another_property_action_enclosing_function.fget')
assert xobj.get_callable_from_line(fname, 23) == ref
ref = 'tests.support.pinspect_support_module_4'
assert xobj.get_callable_from_line(fname, 100) == ref
###
# Tests for get_function_args()
###
class TestGetFunctionArgs(object):
    """ Tests for get_function_args function """
    def test_all_positional_arguments(self):
        """
        Test that function behaves properly when all arguments are positional
        arguments
        """
        def func(ppar1, ppar2, ppar3):
            pass
        assert putil.pinspect.get_function_args(func) == (
            'ppar1', 'ppar2', 'ppar3'
        )
    def test_all_keyword_arguments(self):
        """
        Test that function behaves properly when all arguments are keywords
        arguments
        """
        def func(kpar1=1, kpar2=2, kpar3=3):
            pass
        assert putil.pinspect.get_function_args(func) == (
            'kpar1', 'kpar2', 'kpar3'
        )
    def test_positional_and_keyword_arguments(self):
        """
        Test that function behaves properly when arguments are a mix of
        positional and keywords arguments
        """
        def func(ppar1, ppar2, ppar3, kpar1=1, kpar2=2, kpar3=3, **kwargs):
            pass
        fut = putil.pinspect.get_function_args
        assert fut(func) == (
            'ppar1', 'ppar2', 'ppar3', 'kpar1', 'kpar2', 'kpar3', '**kwargs'
        )
        assert fut(func, no_varargs=True) == (
            'ppar1', 'ppar2', 'ppar3', 'kpar1', 'kpar2', 'kpar3'
        )
    def test_no_arguments(self):
        """
        Test that function behaves properly when there are no arguments
        passed
        """
        def func():
            pass
        assert putil.pinspect.get_function_args(func) == ()
    def test_no_self(self):
        """
        Test that function behaves properly when there are no arguments
        passed
        """
        class MyClass(object):
            def __init__(self, value, **kwargs):
                pass
        fut = putil.pinspect.get_function_args
        assert fut(MyClass.__init__) == ('self', 'value', '**kwargs')
        assert fut(MyClass.__init__, no_self=True) == ('value', '**kwargs')
        assert fut(MyClass.__init__, no_self=True, no_varargs=True) == (
            'value',
        )
        assert fut(MyClass.__init__, no_varargs=True) == ('self', 'value')
    def test_nonzero(self):
        """ Test __nonzero__() function """
        cobj = putil.pinspect.Callables()
        assert not cobj
        cobj.trace([modfile('putil.test')])
        assert cobj
|
codepantry/django | refs/heads/master | tests/admin_scripts/custom_templates/project_template/project_name/settings.py | 738 | # Django settings for {{ project_name }} test project.
|
freedomtan/tensorflow | refs/heads/master | tensorflow/python/training/optimizer.py | 5 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for optimizers."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def get_filtered_grad_fn(grad_fn):
  """Wraps `grad_fn` so pairs whose gradient is `None` are dropped.

  `distributed_context.join()` requires that its arguments are parallel
  across threads, and in particular that `grads_and_vars` has the same
  variables in the same order.

  When computing gradients in eager mode with multiple threads, you
  can get extra variables with a gradient of `None`. This happens when
  those variables are accessed in another thread during the gradient
  computation. To get a consistent set of variables, we filter out
  those with `None` gradients.

  Args:
    grad_fn: A callable returning a list of (gradient, variable) pairs.

  Returns:
    A callable with the same signature as `grad_fn` whose result contains
    only the pairs with a non-`None` gradient.
  """
  def wrapped(*args, **kwargs):
    kept = []
    for g, v in grad_fn(*args, **kwargs):
      if g is not None:
        kept.append((g, v))
    return kept
  return wrapped
def _deduplicate_indexed_slices(values, indices):
  """Sums `values` associated with any non-unique `indices`.

  Args:
    values: A `Tensor` with rank >= 1.
    indices: A one-dimensional integer `Tensor`, indexing into the first
      dimension of `values` (as in an IndexedSlices object).

  Returns:
    A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
    de-duplicated version of `indices` and `summed_values` contains the sum of
    `values` slices associated with each unique index.
  """
  # Collapse duplicates in `indices`, then segment-sum the corresponding
  # `values` rows so each unique index appears exactly once.
  uniques, positions = array_ops.unique(indices)
  num_uniques = array_ops.shape(uniques)[0]
  summed = math_ops.unsorted_segment_sum(values, positions, num_uniques)
  return (summed, uniques)
def _var_key(var):
# TODO(ashankar): Consolidate handling for eager and graph
if hasattr(var, "op"):
return (var.op.graph, var.op.name)
return var._unique_id # pylint: disable=protected-access
class _OptimizableVariable(six.with_metaclass(abc.ABCMeta, object)):
  """Abstract interface over the kinds of variables an optimizer can update."""

  @abc.abstractmethod
  def target(self):
    """Returns the optimization target for this variable."""
    raise NotImplementedError("Calling an abstract method.")

  @abc.abstractmethod
  def update_op(self, optimizer, g):
    """Returns the update ops for updating the variable."""
    raise NotImplementedError("Calling an abstract method.")
class _RefVariableProcessor(_OptimizableVariable):
  """Processor for Variable."""

  def __init__(self, v):
    self._v = v

  def __str__(self):
    return "<_RefVariableProcessor(%s)>" % self._v

  def target(self):
    return self._v._ref()  # pylint: disable=protected-access

  def update_op(self, optimizer, g):
    # Sparse gradients arrive as IndexedSlices; anything else must be a
    # dense Tensor.
    if not isinstance(g, ops.Tensor):
      assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
                                                "tensor nor IndexedSlices.")
      if self._v.constraint is not None:
        raise RuntimeError(
            "Cannot use a constraint function on a sparse variable.")
      # pylint: disable=protected-access
      return optimizer._apply_sparse_duplicate_indices(g, self._v)
    update_op = optimizer._apply_dense(g, self._v)  # pylint: disable=protected-access
    if self._v.constraint is None:
      return update_op
    # Re-project onto the constraint set after the dense update is applied.
    with ops.control_dependencies([update_op]):
      return self._v.assign(self._v.constraint(self._v))
class _DenseReadResourceVariableProcessor(_OptimizableVariable):
  """Processor for dense ResourceVariables."""

  def __init__(self, v):
    self._v = v

  def target(self):
    return self._v

  def update_op(self, optimizer, g):
    # pylint: disable=protected-access
    # The update is applied to the variable's underlying resource handle
    # (the op's first input).
    update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0])
    if self._v.constraint is None:
      return update_op
    # Re-project onto the constraint set after the update is applied.
    with ops.control_dependencies([update_op]):
      return self._v.assign(self._v.constraint(self._v))
class _DenseResourceVariableProcessor(_OptimizableVariable):
  """Processor for dense ResourceVariables."""

  def __init__(self, v):
    self._v = v

  def target(self):
    return self._v

  def update_op(self, optimizer, g):
    # pylint: disable=protected-access
    if isinstance(g, ops.IndexedSlices):
      # Sparse path: constraints are unsupported here.
      if self._v.constraint is not None:
        raise RuntimeError(
            "Cannot use a constraint function on a sparse variable.")
      return optimizer._resource_apply_sparse_duplicate_indices(
          g.values, self._v, g.indices)
    update_op = optimizer._resource_apply_dense(g, self._v)
    if self._v.constraint is None:
      return update_op
    # Re-project onto the constraint set after the dense update is applied.
    with ops.control_dependencies([update_op]):
      return self._v.assign(self._v.constraint(self._v))
class _TensorProcessor(_OptimizableVariable):
  """Processor for ordinary Tensors.

  Even though a Tensor can't really be updated, sometimes it is useful to
  compute the gradients with respect to a Tensor using the optimizer. Updating
  the Tensor is, of course, unsupported.
  """

  def __init__(self, v):
    # The wrapped Tensor; only gradient computation is meaningful for it.
    self._v = v

  def target(self):
    # The Tensor itself is the differentiation target.
    return self._v

  def update_op(self, optimizer, g):
    # Tensors are read-only from the optimizer's point of view, so any
    # attempt to apply an update is rejected.
    raise NotImplementedError("Trying to update a Tensor ", self._v)
def _get_processor(v):
  """Returns the `_OptimizableVariable` processor matching `v`."""
  if context.executing_eagerly():
    # Under eager execution, anything that is not a plain Tensor is handled
    # as a dense resource variable.
    if isinstance(v, ops.Tensor):
      return _TensorProcessor(v)
    return _DenseResourceVariableProcessor(v)
  # pylint: disable=protected-access
  # True if and only if `v` was initialized eagerly.
  eager_initialized = (
      resource_variable_ops.is_resource_variable(v) and not v._in_graph_mode)
  # pylint: enable=protected-access
  if eager_initialized or v.op.type == "VarHandleOp":
    return _DenseResourceVariableProcessor(v)
  if isinstance(v, variables.Variable):
    return _RefVariableProcessor(v)
  if isinstance(v, ops.Tensor):
    return _TensorProcessor(v)
  raise NotImplementedError("Trying to optimize unsupported type ", v)
@tf_export(v1=["train.Optimizer"])
class Optimizer(
# Optimizers inherit from Trackable rather than AutoTrackable
# since they do most of their dependency management themselves (slot
# variables are special-cased, and non-slot variables are keyed to graphs).
trackable.Trackable):
"""Base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Add Ops to the graph to minimize a cost by updating a list of variables.
# "cost" is a Tensor, and the list of variables contains tf.Variable
# objects.
opt_op = opt.minimize(cost, var_list=<list of variables>)
```
In the training program you will just have to run the returned Op.
```python
# Execute opt_op to do one step of training:
opt_op.run()
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `compute_gradients()`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Compute the gradients for a list of variables.
grads_and_vars = opt.compute_gradients(loss, <list of variables>)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Gating Gradients
Both `minimize()` and `compute_gradients()` accept a `gate_gradients`
argument that controls the degree of parallelism during the application of
the gradients.
The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.
<b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides
the maximum parallelism in execution, at the cost of some non-reproducibility
in the results. For example the two gradients of `matmul` depend on the input
values: With `GATE_NONE` one of the gradients could be applied to one of the
inputs _before_ the other gradient is computed resulting in non-reproducible
results.
<b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
they are used. This prevents race conditions for Ops that generate gradients
for multiple inputs where the gradients depend on the inputs.
<b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
before any one of them is used. This provides the least parallelism but can
be useful if you want to process all gradients before applying any of them.
### Slots
Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`
allocate and manage additional variables associated with the variables to
train. These are called <i>Slots</i>. Slots have names and you can ask the
optimizer for the names of the slots that it uses. Once you have a slot name
you can ask the optimizer for the variable it created to hold the slot value.
This can be useful if you want to log debug a training algorithm, report stats
about the slots, etc.
"""
# Values for gate_gradients.
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, use_locking, name):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Args:
use_locking: Bool. If True apply use locks to prevent concurrent updates
to variables.
name: A non-empty string. The name to use for accumulators created
for the optimizer.
Raises:
ValueError: If name is malformed.
"""
if not name:
raise ValueError("Must specify the optimizer name")
self._use_locking = use_locking
self._name = name
# Dictionary of slots.
# {slot_name :
# {_var_key(variable_to_train): slot_for_the_variable, ... },
# ... }
self._slots = {}
self._non_slot_dict = {}
# For implementing Trackable. Stores information about how to restore
# slot variables which have not yet been created
# (trackable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
# TODO(isaprykin): When using a DistributionStrategy, and when an
# optimizer is created in each replica, it might be dangerous to
# rely on some Optimizer methods. When such methods are called on a
# per-replica optimizer, an exception needs to be thrown. We do
# allow creation per-replica optimizers however, because the
# compute_gradients()->apply_gradients() sequence is safe.
  def get_name(self):
    """Returns the (non-empty) name this optimizer was constructed with."""
    return self._name
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=GATE_OP, aggregation_method=None,
colocate_gradients_with_ops=False, name=None,
grad_loss=None):
"""Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before applying
them call `compute_gradients()` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in
the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes no arguments and computes the value to be minimized. Minimization (and
gradient computation) is done with respect to the elements of `var_list` if
not None, else with respect to any trainable variables created during the
execution of the `loss` function. `gate_gradients`, `aggregation_method`,
`colocate_gradients_with_ops` and `grad_loss` are ignored when eager
execution is enabled.
@end_compatibility
"""
grads_and_vars = self.compute_gradients(
loss, var_list=var_list, gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(grads_and_vars, global_step=global_step,
name=name)
  def compute_gradients(self, loss, var_list=None,
                        gate_gradients=GATE_OP,
                        aggregation_method=None,
                        colocate_gradients_with_ops=False,
                        grad_loss=None):
    """Compute gradients of `loss` for the variables in `var_list`.

    This is the first part of `minimize()`. It returns a list
    of (gradient, variable) pairs where "gradient" is the gradient
    for "variable". Note that "gradient" can be a `Tensor`, an
    `IndexedSlices`, or `None` if there is no gradient for the
    given variable.

    Args:
      loss: A Tensor containing the value to minimize or a callable taking
        no arguments which returns the value to minimize. When eager execution
        is enabled it must be a callable.
      var_list: Optional list or tuple of `tf.Variable` to update to minimize
        `loss`. Defaults to the list of variables collected in the graph
        under the key `GraphKeys.TRAINABLE_VARIABLES`.
      gate_gradients: How to gate the computation of gradients. Can be
        `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
      aggregation_method: Specifies the method used to combine gradient terms.
        Valid values are defined in the class `AggregationMethod`.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.

    Returns:
      A list of (gradient, variable) pairs. Variable is always present, but
      gradient can be `None`.

    Raises:
      TypeError: If `var_list` contains anything else than `Variable` objects.
      ValueError: If some arguments are invalid.
      RuntimeError: If called with eager execution enabled and `loss` is
        not callable.

    @compatibility(eager)
    When eager execution is enabled, `gate_gradients`, `aggregation_method`,
    and `colocate_gradients_with_ops` are ignored.
    @end_compatibility
    """
    # Callable loss: record the computation on a GradientTape and
    # differentiate through it.
    if callable(loss):
      with backprop.GradientTape() as tape:
        if var_list is not None:
          tape.watch(var_list)
        loss_value = loss()

        # Scale loss if using a "mean" loss reduction and multiple replicas.
        # Have to be careful to call distribute_lib.get_loss_reduction()
        # *after* loss() is evaluated, so we know what loss reduction it uses.
        # TODO(josh11b): Test that we handle weight decay in a reasonable way.
        loss_value = self._scale_loss(loss_value)
      # When no explicit var_list was given, differentiate with respect to
      # everything the tape saw being used.
      if var_list is None:
        var_list = tape.watched_variables()
      # TODO(jhseu): Figure out why GradientTape's gradients don't require loss
      # to be executed.
      with ops.control_dependencies([loss_value]):
        grads = tape.gradient(loss_value, var_list, grad_loss)
      return list(zip(grads, var_list))

    # Non-callable/Tensor loss case
    if context.executing_eagerly():
      raise RuntimeError(
          "`loss` passed to Optimizer.compute_gradients should "
          "be a function when eager execution is enabled.")

    # Scale loss if using a "mean" loss reduction and multiple replicas.
    loss = self._scale_loss(loss)

    if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP,
                              Optimizer.GATE_GRAPH]:
      raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
                       "Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
                       gate_gradients)
    self._assert_valid_dtypes([loss])
    if grad_loss is not None:
      self._assert_valid_dtypes([grad_loss])
    # Default to all trainable variables when the caller gave no var_list.
    if var_list is None:
      var_list = (
          variables.trainable_variables() +
          ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
    else:
      var_list = nest.flatten(var_list)
    # pylint: disable=protected-access
    var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
    # pylint: enable=protected-access
    # Each variable kind gets a processor that knows how to build its
    # differentiation target and update op.
    processors = [_get_processor(v) for v in var_list]
    if not var_list:
      raise ValueError("No variables to optimize.")
    var_refs = [p.target() for p in processors]
    grads = gradients.gradients(
        loss, var_refs, grad_ys=grad_loss,
        gate_gradients=(gate_gradients == Optimizer.GATE_OP),
        aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    if gate_gradients == Optimizer.GATE_GRAPH:
      grads = control_flow_ops.tuple(grads)
    grads_and_vars = list(zip(grads, var_list))
    # Resource-typed variables are excluded from the dtype validity check.
    self._assert_valid_dtypes(
        [v for g, v in grads_and_vars
         if g is not None and v.dtype != dtypes.resource])
    return grads_and_vars
@staticmethod
def _scale_loss(loss_value):
ops.get_default_graph()._is_loss_scaled_by_optimizer = False # pylint: disable=protected-access
if distribute_lib.get_loss_reduction() == ds_reduce_util.ReduceOp.MEAN:
num_replicas = distribute_ctx.get_strategy().num_replicas_in_sync
if num_replicas > 1:
loss_value *= (1. / num_replicas)
ops.get_default_graph()._is_loss_scaled_by_optimizer = True # pylint: disable=protected-access
return loss_value
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients to variables.

    This is the second part of `minimize()`. It returns an `Operation` that
    applies gradients.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        `compute_gradients()`.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation. Default to the
        name passed to the `Optimizer` constructor.

    Returns:
      An `Operation` that applies the specified gradients. If `global_step`
      was not None, that operation also increments `global_step`.

    Raises:
      TypeError: If `grads_and_vars` is malformed.
      ValueError: If none of the variables have gradients.
      RuntimeError: If you should use `_distributed_apply()` instead.
    """
    # This is a default implementation of apply_gradients() that can be shared
    # by most optimizers. It relies on the subclass implementing the following
    # methods: _create_slots(), _prepare(), _apply_dense(), and _apply_sparse().

    # TODO(isaprykin): Get rid of `has_strategy()` check by
    # always calling _distributed_apply(), using the default distribution
    # as needed.
    if distribute_ctx.has_strategy():
      # Handle DistributionStrategy case.
      if distribute_ctx.in_cross_replica_context():
        raise RuntimeError("Use `_distributed_apply()` instead of "
                           "`apply_gradients()` in a cross-replica context.")

      # Drop (None, variable) pairs so all replicas see the same variables
      # in the same order before merging.
      grads_and_vars = get_filtered_grad_fn(lambda: grads_and_vars)()
      return distribute_ctx.get_replica_context().merge_call(
          self._distributed_apply, args=(grads_and_vars, global_step, name))

    # No DistributionStrategy case.
    grads_and_vars = tuple(grads_and_vars)  # Make sure repeat iteration works.
    if not grads_and_vars:
      raise ValueError("No variables provided.")
    # Validate/convert each gradient and pair it with the processor that
    # knows how to apply it to its variable.
    converted_grads_and_vars = []
    for g, v in grads_and_vars:
      if g is not None:
        try:
          # Convert the grad to Tensor or IndexedSlices if necessary.
          g = ops.convert_to_tensor_or_indexed_slices(g)
        except TypeError:
          raise TypeError(
              "Gradient must be convertible to a Tensor"
              " or IndexedSlices, or None: %s" % g)
        if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
          raise TypeError(
              "Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
      p = _get_processor(v)
      converted_grads_and_vars.append((g, v, p))

    converted_grads_and_vars = tuple(converted_grads_and_vars)
    var_list = [v for g, v, _ in converted_grads_and_vars if g is not None]
    if not var_list:
      raise ValueError("No gradients provided for any variable: %s." %
                       ([str(v) for _, v, _ in converted_grads_and_vars],))
    # Create any slot variables the subclass needs before building updates.
    with ops.init_scope():
      self._create_slots(var_list)
    update_ops = []
    with ops.name_scope(name, self._name, skip_on_eager=False) as name:
      self._prepare()
      for grad, var, processor in converted_grads_and_vars:
        if grad is None:
          continue
        # We colocate all ops created in _apply_dense or _apply_sparse
        # on the same device as the variable.
        # TODO(apassos): figure out how to get the variable name here.
        if (context.executing_eagerly() or
            resource_variable_ops.is_resource_variable(var)
            and not var._in_graph_mode):  # pylint: disable=protected-access
          scope_name = ""
        else:
          scope_name = var.op.name
        with ops.name_scope(
            "update_" + scope_name,
            skip_on_eager=False), ops.colocate_with(var):
          update_ops.append(processor.update_op(self, grad))
      if global_step is None:
        apply_updates = self._finish(update_ops, name)
      else:
        # Increment global_step only after all updates have run.
        with ops.control_dependencies([self._finish(update_ops, "update")]):
          with ops.colocate_with(global_step):
            if isinstance(
                global_step, resource_variable_ops.BaseResourceVariable):
              # TODO(apassos): the implicit read in assign_add is slow; consider
              # making it less so.
              apply_updates = resource_variable_ops.assign_add_variable_op(
                  global_step.handle,
                  ops.convert_to_tensor(1, dtype=global_step.dtype),
                  name=name)
            else:
              apply_updates = state_ops.assign_add(global_step, 1, name=name)

      if not context.executing_eagerly():
        if isinstance(apply_updates, ops.Tensor):
          apply_updates = apply_updates.op
        # Graph mode: record the op in the TRAIN_OP collection (deduplicated).
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        if apply_updates not in train_op:
          train_op.append(apply_updates)
      return apply_updates
  def _distributed_apply(self,
                         distribution,
                         grads_and_vars,
                         global_step=None,
                         name=None):
    """A version of `apply_gradients` for cross-replica context.

    This is a version of `apply_gradients()` for when you are using a
    `DistributionStrategy` and are in a cross-replica context. If in a
    replica context, use `apply_gradients()` as normal.

    Args:
      distribution: A `DistributionStrategy` object.
      grads_and_vars: List of (gradient, variable) pairs as returned by
        `compute_gradients()`, and then aggregated across replicas.
      global_step: Optional (mirrored) `Variable` to increment by one
        after the variables have been updated.
      name: Optional name for the returned operation. Default to the
        name passed to the `Optimizer` constructor.

    Returns:
      An `Operation` that applies the specified gradients across all
      replicas. If `global_step` was not None, that operation also
      increments `global_step`
    """
    # Sum the gradients across replicas before applying them.
    reduced_grads = distribution.extended.batch_reduce_to(
        ds_reduce_util.ReduceOp.SUM, grads_and_vars)
    var_list = [v for _, v in grads_and_vars]
    grads_and_vars = zip(reduced_grads, var_list)
    # Note that this is called in a cross-replica context.
    with ops.init_scope():
      self._create_slots(var_list)
    def update(v, g):
      """Apply gradients to a replica variable."""
      assert v is not None
      try:
        # Convert the grad to Tensor or IndexedSlices if necessary.
        g = ops.convert_to_tensor_or_indexed_slices(g)
      except TypeError:
        raise TypeError("Gradient must be convertible to a Tensor"
                        " or IndexedSlices, or None: %s" % g)
      if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
        raise TypeError(
            "Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
      p = _get_processor(v)
      if context.executing_eagerly() or (
          resource_variable_ops.is_resource_variable(v) and
          not v._in_graph_mode):  # pylint: disable=protected-access
        scope_name = v.name.split(":")[0]
      else:
        scope_name = v.op.name
      # device_policy is set because non-mirrored tensors will be read in
      # `update_op`. `_resource_apply_dense`, `lr_t`, `beta1_t` and `beta2_t`
      # is an example.
      with ops.name_scope("update_" + scope_name):
        return p.update_op(self, g)
    with ops.name_scope(name, self._name) as name:
      self._prepare()
      # One update op per (grad, var) pair, expanded per replica by the
      # distribution strategy (group=False keeps them as a flat list).
      update_ops = [
          op
          for grad, var in grads_and_vars
          for op in distribution.extended.update(
              var, update, args=(grad,), group=False)
      ]
      # NOTE: `self` is passed explicitly through `args` below, so this inner
      # function's `self` parameter deliberately shadows the method's.
      def finish(self, update_ops):
        return self._finish(update_ops, "update")
      non_slot_devices = distribution.extended.non_slot_devices(var_list)
      finish_updates = distribution.extended.update_non_slot(
          non_slot_devices, finish, args=(self, update_ops), group=False)
      if global_step is None:
        apply_updates = distribution.group(finish_updates, name=name)
      else:
        with ops.control_dependencies(finish_updates):
          apply_updates = distribution.extended.update(
              global_step, state_ops.assign_add, args=(1,),
              kwargs={"name": name})
      if not context.executing_eagerly():
        if isinstance(apply_updates, ops.Tensor):
          apply_updates = apply_updates.op
        # Record the op in the TRAIN_OP collection (at most once) so graph
        # consumers can find it.
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        if apply_updates not in train_op:
          train_op.append(apply_updates)
      return apply_updates
  def get_slot(self, var, name):
    """Return a slot named `name` created for `var` by the Optimizer.

    Some `Optimizer` subclasses use additional variables.  For example
    `Momentum` and `Adagrad` use variables to accumulate updates.  This method
    gives access to these `Variable` objects if for some reason you need them.

    Use `get_slot_names()` to get the list of slot names created by the
    `Optimizer`.

    Args:
      var: A variable passed to `minimize()` or `apply_gradients()`.
      name: A string.

    Returns:
      The `Variable` for the slot if it was created, `None` otherwise.
    """
    # pylint: disable=protected-access
    named_slots = self._slots.get(name, None)
    if not named_slots:
      return None
    if hasattr(var, "_distributed_container"):
      # NOTE: If this isn't patched, then there is no `handle` in
      # `_resource_apply_dense`.
      # `var` is distributed/mirrored: slots are keyed by its container
      # rather than by the per-replica component itself.
      distributed_container = var._distributed_container()
      assert distributed_container is not None
      if ops.executing_eagerly_outside_functions():
        key = distributed_container._unique_id
      else:
        # Graph mode: (graph, shared_name) uniquely identifies the container.
        key = (distributed_container.graph, distributed_container._shared_name)
      # pylint: enable=protected-access
      mirrored_slot = named_slots.get(key, None)
      if mirrored_slot is None: return None
      return mirrored_slot._get_on_device_or_primary()  # pylint: disable=protected-access
    return named_slots.get(_var_key(var), None)
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
return sorted(self._slots.keys())
def variables(self):
"""A list of variables which encode the current state of `Optimizer`.
Includes slot variables and additional global variables created by the
optimizer in the current default graph.
Returns:
A list of variables.
"""
current_graph = ops.get_default_graph()
def _from_current_graph(variable):
if variable._in_graph_mode: # pylint: disable=protected-access
return variable.op.graph is current_graph
else:
# No variable.op in eager mode. We don't expect lots of eager graphs,
# but behavior should be consistent with graph mode.
return variable._graph_key == current_graph._graph_key # pylint: disable=protected-access
optimizer_variables = [v for v in self._non_slot_variables()
if _from_current_graph(v)]
for _, variable_dict in self._slots.items():
for _, slot_for_variable in variable_dict.items():
if _from_current_graph(slot_for_variable):
optimizer_variables.append(slot_for_variable)
# Sort variables by name so that the return is deterministic.
return sorted(optimizer_variables, key=lambda v: v.name)
  def _create_non_slot_variable(self, initial_value, name, colocate_with):
    """Add an extra variable, not associated with a slot.

    Args:
      initial_value: Initial value of the new variable.
      name: Name for the variable; also part of its cache key.
      colocate_with: Variable whose device (and, in graph mode, graph) the
        new variable should share.

    Returns:
      The cached or newly created non-slot `Variable`.
    """
    # Recommendation: Use OptimizerV2 if your optimizer uses non-slot variables.
    eager = context.executing_eagerly()
    # Cache key is (name, graph); in eager mode there is no graph, so all
    # eager callers share one entry per name.
    graph = None if eager else colocate_with.graph
    key = (name, graph)
    v = self._non_slot_dict.get(key, None)
    if v is None:
      self._maybe_initialize_trackable()
      distribution_strategy = distribute_ctx.get_strategy()
      with distribution_strategy.extended.colocate_vars_with(colocate_with):
        if eager:
          # Eager mode has no restore ops, so a checkpointed value (if any)
          # is applied by using it directly as the initial value.
          restored_initial_value = self._preload_simple_restoration(
              name=name)
          if restored_initial_value is not None:
            initial_value = restored_initial_value
        v = variable_scope.variable(
            initial_value, name=name, trainable=False,
            use_resource=resource_variable_ops.is_resource_variable(
                colocate_with))
      # Restore this variable by name if necessary, but don't add a
      # Trackable dependency. Optimizers return the current graph's
      # non-slot variables from _checkpoint_dependencies explicitly rather
      # than unconditionally adding dependencies (since there may be multiple
      # non-slot variables with the same name in different graphs, trying to
      # save all of them would result in errors).
      self._handle_deferred_dependencies(name=name, trackable=v)
      self._non_slot_dict[key] = v
    return v
  @property
  def _checkpoint_dependencies(self):
    """From Trackable. Gather graph-specific non-slot variables to save."""
    current_graph_non_slot_variables = []
    current_graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    # Sort by name (item[0][0]) only: the second key element is a graph,
    # and graphs are not orderable.
    for (name, _), variable_object in sorted(self._non_slot_dict.items(),
                                             # Avoid comparing graphs
                                             key=lambda item: item[0][0]):
      # Only report non-slot variables that belong to the current graph.
      if variable_object._graph_key == current_graph_key:  # pylint: disable=protected-access
        current_graph_non_slot_variables.append(
            trackable.TrackableReference(
                name=name, ref=variable_object))
    return (super(Optimizer, self)._checkpoint_dependencies
            + current_graph_non_slot_variables)
def _lookup_dependency(self, name):
"""From Trackable. Find a non-slot variable in the current graph."""
unconditional = super(Optimizer, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
graph = None if context.executing_eagerly() else ops.get_default_graph()
return self._get_non_slot_variable(name, graph=graph)
def _get_non_slot_variable(self, name, graph=None):
non_slot = self._non_slot_dict.get((name, graph), None)
if hasattr(non_slot, "_distributed_container"):
# This is a mirrored non-slot. In order to enable code like `_finish`
# to assign to a non-slot, return the current context replica.
return non_slot.get()
else:
return non_slot
def _non_slot_variables(self):
"""Additional variables created by the `Optimizer`.
Returns:
A list or tuple of variables.
"""
return self._non_slot_dict.values()
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError(
"Invalid type %r for %s, expected: %s." % (
dtype, t.name, [v for v in valid_dtypes]))
# --------------
# Methods to be implemented by subclasses if they want to use the
# inherited implementation of apply_gradients() or compute_gradients().
# --------------
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
def _create_slots(self, var_list):
"""Create all slots needed by the variables.
Args:
var_list: A list of `Variable` objects.
"""
# No slots needed by default
pass
def _prepare(self):
"""Create all needed tensors before applying gradients.
This is called with the name_scope using the "name" that
users have chosen for the application of gradients.
"""
pass
def _apply_dense(self, grad, var):
"""Add ops to apply dense gradients to `var`.
Args:
grad: A `Tensor`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _resource_apply_dense(self, grad, handle):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices may be repeated.
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices)
def _resource_apply_sparse(self, grad, handle, indices):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices are unique.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _apply_sparse_duplicate_indices(self, grad, var):
"""Add ops to apply sparse gradients to `var`, with repeated sparse indices.
Optimizers which override this method must deal with IndexedSlices objects
such as the following:
IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])
The correct interpretation is:
IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])
Many optimizers deal incorrectly with repeated indices when updating based
on sparse gradients (e.g. summing squares rather than squaring the sum, or
applying momentum terms multiple times). Adding first is always the correct
behavior, so this is enforced here by reconstructing the IndexedSlices to
have only unique indices, then calling _apply_sparse.
Optimizers which deal correctly with repeated indices may instead override
this method to avoid the overhead of summing indices.
Args:
grad: `IndexedSlices`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
summed_values, unique_indices = _deduplicate_indexed_slices(
values=grad.values, indices=grad.indices)
gradient_no_duplicate_indices = ops.IndexedSlices(
indices=unique_indices,
values=summed_values,
dense_shape=grad.dense_shape)
return self._apply_sparse(gradient_no_duplicate_indices, var)
def _apply_sparse(self, grad, var):
"""Add ops to apply sparse gradients to `var`.
The IndexedSlices object passed to `grad` in this function is by default
pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
indices (see its docstring for details). Optimizers which can tolerate or
have correct special cases for duplicate sparse indices may override
`_apply_sparse_duplicate_indices` instead of this function, avoiding that
overhead.
Args:
grad: `IndexedSlices`, with no repeated indices.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _finish(self, update_ops, name_scope):
"""Do what is needed to finish the update.
This is called with the `name_scope` using the "name" that
users have chosen for the application of gradients.
Args:
update_ops: List of `Operation` objects to update variables. This list
contains the values returned by the `_apply_dense()` and
`_apply_sparse()` calls.
name_scope: String. Name to use for the returned operation.
Returns:
The operation to apply updates.
"""
return control_flow_ops.group(*update_ops, name=name_scope)
# --------------
# Utility methods for subclasses.
# --------------
def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots
def _get_or_make_slot(self, var, val, slot_name, op_name):
"""Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot(var, val, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, op_name):
"""Find or create a slot for a variable, using an Initializer.
Args:
var: A `Variable` object.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot_with_initializer(
var, initializer, shape, dtype, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _zeros_slot(self, var, slot_name, op_name):
"""Find or create a slot initialized with 0.0.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(var, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
# --------------
# For implementing the Trackable interface.
# --------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
  def _create_or_restore_slot_variable(
      self, slot_variable_position, slot_name, variable):
    """Restore a slot variable's value, possibly creating it.

    Called when a variable which has an associated slot variable is created or
    restored. When executing eagerly, we create the slot variable with a
    restoring initializer.

    No new variables are created when graph building. Instead,
    _restore_slot_variable catches these after normal creation and adds restore
    ops to the graph. This method is nonetheless important when graph building
    for the case when a slot variable has already been created but `variable`
    has just been added to a dependency graph (causing us to realize that the
    slot variable needs to be restored).

    Args:
      slot_variable_position: A `trackable._CheckpointPosition` object
        indicating the slot variable `Trackable` object to be restored.
      slot_name: The name of this `Optimizer`'s slot to restore into.
      variable: The variable object this slot is being created for.
    """
    named_slots = self._slot_dict(slot_name)
    variable_key = _var_key(variable)
    slot_variable = named_slots.get(variable_key, None)
    # Eager-only creation path: the slot does not exist yet and the
    # checkpoint value can be used directly as its initializer.
    if (slot_variable is None and context.executing_eagerly() and
        slot_variable_position.is_simple_variable()
        # Defer slot variable creation if there is an active variable creator
        # scope. Generally we'd like to eagerly create/restore slot variables
        # when possible, but this may mean that scopes intended to catch
        # `variable` also catch its eagerly created slot variable
        # unintentionally (specifically make_template would add a dependency on
        # a slot variable if not for this case). Deferring is mostly harmless
        # (aside from double initialization), and makes variable creator scopes
        # behave the same way they do when graph building.
        and not ops.get_default_graph()._variable_creator_stack):  # pylint: disable=protected-access
      initializer = trackable.CheckpointInitialValueCallable(
          checkpoint_position=slot_variable_position)
      # CheckpointInitialValueCallable will ignore the shape and dtype
      # parameters but they must be passed.
      slot_variable = self._get_or_make_slot_with_initializer(
          var=variable,
          initializer=initializer,
          shape=variable.shape,
          dtype=variable.dtype,
          slot_name=slot_name,
          op_name=self._name)
    # Slot variables are not owned by any one object (because we don't want to
    # save the slot variable if the optimizer is saved without the non-slot
    # variable, or if the non-slot variable is saved without the optimizer;
    # it's a dependency hypergraph with edges of the form (optimizer, non-slot
    # variable, variable)). So we don't _track_ slot variables anywhere, and
    # instead special-case this dependency and otherwise pretend it's a normal
    # graph.
    if slot_variable is not None:
      # If we've either made this slot variable, or if we've pulled out an
      # existing slot variable, we should restore it.
      slot_variable_position.restore(slot_variable)
    else:
      # We didn't make the slot variable. Defer restoring until it gets created
      # normally. We keep a list rather than the one with the highest restore
      # UID in case slot variables have their own dependencies, in which case
      # those could differ between restores.
      self._deferred_slot_restorations.setdefault(
          slot_name, {}).setdefault(variable_key, []).append(
              slot_variable_position)
def _call_if_callable(self, param):
"""Call the function if param is callable."""
return param() if callable(param) else param
|
chuard/FDTD | refs/heads/master | src/__main__.py | 1 | #!/uigelh_dua2/chuard/anaconda3/bin/python
import sys
import configparser
from system import System
import matplotlib.pyplot as plt
import numpy as np
import cProfile as profile
def main(arg):
    """Run the FDTD simulation described by the INI file at path `arg`
    and plot the final Ez field."""
    # read in ini file
    cfg = configparser.ConfigParser()
    cfg.read(arg)
    # Build the simulation from the config and run all time steps.
    s = System(cfg)
    s.take_steps()
#    profile.runctx('s.take_steps()', globals(), locals())  # uncomment to profile stepping
    #print last frame: symmetric color scale centered on zero at the peak |Ez|
    max_val = np.amax(np.absolute(s.m_ez))
    print(max_val)
    print(s.m_ez[0,0])
    # NOTE(review): `ax` is unused; kept for parity with the original code.
    fig = plt.figure()
    ax = fig.add_subplot(111,aspect='equal')
    plt.pcolor(s.m_ez,vmin=-max_val,vmax=max_val,cmap='RdBu_r')
    plt.show()
# Script entry point: expects the path to an INI config file as argv[1].
if __name__ == '__main__':
    main(sys.argv[1])
|
dcos/shakedown | refs/heads/master | tests/acceptance/test_dcos_package_cli.py | 1 | from shakedown import *
def test_install_package_cli():
    """Install dcos-enterprise-cli and verify it transitions to installed."""
    # Precondition: the package must not already be present.
    assert not package_installed('dcos-enterprise-cli')
    install_package_and_wait('dcos-enterprise-cli')
    assert package_installed('dcos-enterprise-cli')
def test_uninstall_package_cli():
    """Uninstall dcos-enterprise-cli and verify it is gone.

    NOTE(review): depends on test_install_package_cli having run first —
    the precondition assumes the package is installed.
    """
    assert package_installed('dcos-enterprise-cli')
    uninstall_package_and_wait('dcos-enterprise-cli')
    assert not package_installed('dcos-enterprise-cli')
|
aozima/RealBoard4088 | refs/heads/master | software/rtthread_examples/examples/5_logtrace/rtconfig.py | 38 | import os
# toolchains options
# Target architecture/CPU for this RT-Thread example (LPC40xx, Cortex-M4).
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
# get setting from environment.
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
# NOTE(review): if CROSS_TOOL is none of gcc/keil/iar, PLATFORM (and EXEC_PATH)
# stay undefined and the elif chain below raises NameError — confirm intended.
if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = r'C:/Program Files/CodeSourcery/arm-none-eabi/bin'
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = r'E:/Keil'
elif CROSS_TOOL == 'iar':
    # NOTE(review): the iar branch sets IAR_PATH but not EXEC_PATH; EXEC_PATH
    # is derived from IAR_PATH later in the iar section below.
    PLATFORM = 'iar'
    IAR_PATH = r'C:/Program Files/IAR Systems/Embedded Workbench 6.0'
# An explicit RTT_EXEC_PATH environment variable overrides the defaults above.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#
# Build type: 'debug' (no optimization, debug info) or anything else for -O2.
BUILD = 'debug'
if PLATFORM == 'gcc':
    # toolchains
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'
    # Cortex-M4 with software floating point; per-section placement enables
    # --gc-sections dead code removal at link time.
    DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=softfp -ffunction-sections -fdata-sections'
    CFLAGS = DEVICE
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
    LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-lpc40x.map,-cref,-u,Reset_Handler -T lpc40xx_rom.ld'
    CPATH = ''
    LPATH = ''
    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'
    # After linking: produce a raw binary image and print section sizes.
    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
    # toolchains (Keil MDK / ARM Compiler)
    CC = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'
    DEVICE = ' --device DARMSTM'
    CFLAGS = DEVICE + ' --apcs=interwork'
    AFLAGS = DEVICE
    LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-lpc40xx.map --scatter lpc40xx_rom.sct'
    CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
    LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
    EXEC_PATH += '/arm/bin40/'
    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'
    # After linking: extract a raw binary and show image layout.
    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
    # toolchains (IAR Embedded Workbench)
    CC = 'iccarm'
    AS = 'iasmarm'
    AR = 'iarchive'
    LINK = 'ilinkarm'
    TARGET_EXT = 'out'
    # NOTE(review): these defines mention STM32 peripherals although the
    # project targets LPC40xx — confirm they are intentional.
    DEVICE = ' -D USE_STDPERIPH_DRIVER' + ' -D STM32F10X_HD'
    CFLAGS = DEVICE
    CFLAGS += ' --diag_suppress Pa050'
    CFLAGS += ' --no_cse'
    CFLAGS += ' --no_unroll'
    CFLAGS += ' --no_inline'
    CFLAGS += ' --no_code_motion'
    CFLAGS += ' --no_tbaa'
    CFLAGS += ' --no_clustering'
    CFLAGS += ' --no_scheduling'
    CFLAGS += ' --debug'
    CFLAGS += ' --endian=little'
    CFLAGS += ' --cpu=Cortex-M4'
    CFLAGS += ' -e'
    CFLAGS += ' --fpu=None'
    CFLAGS += ' --dlib_config "' + IAR_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
    CFLAGS += ' -Ol'
    CFLAGS += ' --use_c++_inline'
    AFLAGS = ''
    AFLAGS += ' -s+'
    AFLAGS += ' -w+'
    AFLAGS += ' -r'
    AFLAGS += ' --cpu Cortex-M4'
    AFLAGS += ' --fpu None'
    LFLAGS = ' --config lpc40xx_flash.icf'
    LFLAGS += ' --redirect _Printf=_PrintfTiny'
    LFLAGS += ' --redirect _Scanf=_ScanfSmall'
    LFLAGS += ' --entry __iar_program_start'
    # Derive the toolchain binary directory from the IAR installation root.
    EXEC_PATH = IAR_PATH + '/arm/bin/'
    POST_ACTION = ''
|
SPxiaomin/NodeJs_Practice | refs/heads/master | Express4_3/nodejs-demo/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/inferno.py | 52 | # -*- coding: utf-8 -*-
"""
pygments.lexers.inferno
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Inferno os and all the related stuff.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number
__all__ = ['LimboLexer']
class LimboLexer(RegexLexer):
    """
    Lexer for `Limbo programming language <http://www.vitanuova.com/inferno/limbo.html>`_

    TODO:
        - maybe implement better var declaration highlighting
        - some simple syntax error highlighting

    .. versionadded:: 2.0
    """
    name = 'Limbo'
    aliases = ['limbo']
    filenames = ['*.b']
    mimetypes = ['text/limbo']

    tokens = {
        'whitespace': [
            (r'^(\s*)([a-zA-Z_]\w*:(\s*)\n)',
             bygroups(Text, Name.Label)),
            (r'\n', Text),
            (r'\s+', Text),
            (r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\', String),  # stray backslash
        ],
        'statements': [
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float),
            (r'16r[0-9a-fA-F]+', Number.Hex),
            (r'8r[0-7]+', Number.Oct),
            (r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer),
            (r'[()\[\],.]', Punctuation),
            (r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator),
            # BUG FIX: the alternation previously lacked '|' at the implicit
            # string-literal joins ('exit' 'for' -> 'exitfor', 'or' 'pick'
            # -> 'orpick'), so the keywords exit/for/or/pick never matched.
            (r'(alt|break|case|continue|cyclic|do|else|exit|'
             r'for|hd|if|implement|import|include|len|load|or|'
             r'pick|return|spawn|tagof|tl|to|while)\b', Keyword),
            (r'(byte|int|big|real|string|array|chan|list|adt'
             r'|fn|ref|of|module|self|type)\b', Keyword.Type),
            (r'(con|iota|nil)\b', Keyword.Constant),
            # Raw string so the \w escape is explicit (avoids a Python
            # invalid-escape deprecation warning); pattern is unchanged.
            (r'[a-zA-Z_]\w*', Name),
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'root': [
            include('whitespace'),
            ('', Text, 'statement'),
        ],
    }

    def analyse_text(text):
        # Any limbo module implements something
        if re.search(r'^implement \w+;', text, re.MULTILINE):
            return 0.7
# TODO:
# - Make lexers for:
# - asm sources
# - man pages
# - mkfiles
# - module definitions
# - namespace definitions
# - shell scripts
# - maybe keyfiles and fonts
# they all seem to be quite similar to their equivalents
# from unix world, so there should not be a lot of problems
|
ptdtan/Ragout | refs/heads/master | ragout/synteny_backend/hal.py | 1 | #(c) 2013-2014 by Authors
#This file is a part of Ragout program.
#Released under the BSD license (see LICENSE file)
"""
This module works with MAF input and converts it into synteny blocks
"""
import os
import logging
import shutil
import multiprocessing
import subprocess
from .synteny_backend import SyntenyBackend, BackendException
import ragout.maf2synteny.maf2synteny as m2s
from ragout.shared import config
from ragout.shared import utils
logger = logging.getLogger()

# Working-directory and tool names used by the HAL backend.
HAL_WORKDIR = "hal-workdir"      # subdirectory of the output dir for all HAL artifacts
HAL2MAF = "hal2mafMP.py"         # HAL -> MAF exporter (multi-process)
HAL2FASTA = "hal2fasta"          # extracts a genome's FASTA from a HAL archive
HAL_STATS = "halStats"           # prints per-genome statistics for a HAL archive
TARGET_FASTA = "target.fasta"    # extracted target genome file name
ANCESTOR_FASTA = "ancestor.fasta"  # extracted ancestral genome file name
class HalBackend(SyntenyBackend):
    """Synteny backend that derives blocks from a HAL multiple alignment.

    Converts the HAL archive to MAF with the HAL toolkit, extracts the
    target (and optionally ancestral) genome FASTA, and runs maf2synteny
    to produce per-block-size coordinate files.
    """

    def __init__(self):
        SyntenyBackend.__init__(self)

    def infer_block_scale(self, recipe):
        """Classify the target genome as "small" or "large" using halStats.

        Args:
            recipe: Recipe dict; must contain "hal" (path) and "target".

        Returns:
            "small" or "large", compared against the configured threshold.

        Raises:
            BackendException: If the HAL file is missing or unspecified.
        """
        hal = recipe.get("hal")
        if not hal or not os.path.exists(hal):
            raise BackendException("Could not open HAL file "
                                   "or it is not specified")

        stats = subprocess.check_output([HAL_STATS, hal])
        size = 0
        # NOTE(review): on Python 3 check_output returns bytes, so this
        # str comparison assumes Python 2 (or decoded output) — confirm.
        for line in stats.splitlines():
            tokens = line.split(",")
            if tokens[0] == recipe["target"]:
                size = int(tokens[2])

        if size < config.vals["big_genome_threshold"]:
            return "small"
        else:
            return "large"

    def run_backend(self, recipe, output_dir, overwrite, ancestral = False):
        """Produce synteny block coordinate files for every block size.

        Args:
            recipe: Recipe dict with "hal", "target", "genomes" and
                optionally "ancestor" entries.
            output_dir: Directory where the HAL working directory is placed.
            overwrite: If True, discard any previous working directory.
            ancestral: If True, also extract the ancestral genome FASTA.

        Returns:
            Dict mapping block size -> absolute path of blocks_coords.txt.

        Raises:
            BackendException: On missing inputs, incompatible previous
                results, or maf2synteny failure.
        """
        workdir = os.path.join(output_dir, HAL_WORKDIR)
        if overwrite and os.path.isdir(workdir):
            shutil.rmtree(workdir)

        if "hal" not in recipe or not os.path.exists(recipe["hal"]):
            raise BackendException("Could not open HAL file "
                                   "or it is not specified")

        files = {}
        #using existing results
        if os.path.isdir(workdir):
            logger.warning("Using synteny blocks from previous run")
            logger.warning("Use --overwrite to force alignment")

            all_good = True
            for block_size in self.blocks:
                block_dir = os.path.join(workdir, str(block_size))
                coords_file = os.path.join(block_dir, "blocks_coords.txt")
                if not os.path.isfile(coords_file):
                    all_good = False
                    break
                files[block_size] = os.path.abspath(coords_file)

            target_fasta = os.path.join(workdir, TARGET_FASTA)
            ancestor_fasta = os.path.join(workdir, ANCESTOR_FASTA)
            if not os.path.isfile(target_fasta):
                all_good = False
            else:
                self.target_fasta = target_fasta
            # NOTE(review): this branch keys on "ancestor" in recipe while
            # the fresh-run branch keys on the `ancestral` flag — confirm
            # the two conditions are meant to be equivalent.
            if "ancestor" in recipe and not os.path.isfile(ancestor_fasta):
                all_good = False
            else:
                self.ancestor_fasta = ancestor_fasta

            if not all_good:
                raise BackendException("Exitsing results are incompatible "
                                       "with current run")
        else:
            os.mkdir(workdir)

            logger.info("Extracting target FASTA from HAL")
            target_fasta = os.path.join(workdir, TARGET_FASTA)
            cmdline = [HAL2FASTA, recipe["hal"], recipe["target"],
                       "--inMemory"]
            subprocess.check_call(cmdline, stdout=open(target_fasta, "w"))

            # BUG FIX: ancestor_fasta was previously assigned only inside the
            # `if ancestral:` branch but read unconditionally afterwards,
            # raising NameError whenever ancestral=False.
            ancestor_fasta = None
            if ancestral:
                logger.info("Extracting ancestor FASTA from HAL")
                ancestor_fasta = os.path.join(workdir, ANCESTOR_FASTA)
                cmdline = [HAL2FASTA, recipe["hal"], recipe["ancestor"],
                           "--inMemory"]
                subprocess.check_call(cmdline, stdout=open(ancestor_fasta, "w"))

            self.target_fasta = target_fasta
            # None when no ancestor was extracted.
            self.ancestor_fasta = ancestor_fasta

            logger.info("Converting HAL to MAF")
            out_maf = os.path.join(workdir, "alignment.maf")
            ref_genome = recipe["target"]   #Tricky notation, huh?
            export_genomes = ",".join(recipe["genomes"])
            cmdline = [HAL2MAF, recipe["hal"], out_maf,
                       "--numProc", str(self.threads), "--refGenome",
                       ref_genome, "--targetGenomes", export_genomes,
                       "--inMemory"]
            #removed --noAncestor parameter
            logger.debug(" ".join(cmdline))
            subprocess.check_call(cmdline, stdout=open(os.devnull, "w"))

            logger.info("Extracting synteny blocks from MAF")
            if not m2s.make_synteny(out_maf, workdir, self.blocks):
                raise BackendException("Something went wrong with maf2synteny")

            for block_size in self.blocks:
                block_dir = os.path.join(workdir, str(block_size))
                coords_file = os.path.join(block_dir, "blocks_coords.txt")
                files[block_size] = os.path.abspath(coords_file)
                if not os.path.exists(coords_file):
                    raise BackendException("Something bad happened!")

        return files
# Register the HAL backend only when all required HAL command-line tools
# (hal2maf, hal2fasta, halStats) are available on PATH; otherwise the
# backend is silently unavailable.
if utils.which(HAL2MAF) and utils.which(HAL2FASTA) and utils.which(HAL_STATS):
    SyntenyBackend.register_backend("hal", HalBackend())
|
vprime/puuuu | refs/heads/master | env/lib/python2.7/site-packages/Crypto/SelfTest/Cipher/test_CAST.py | 119 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/CAST.py: Self-test for the CAST-128 (CAST5) cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.CAST"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (plaintext, ciphertext, key, description) tuples.
# Plaintext, ciphertext and key are hex-encoded strings.
test_data = [
    # Test vectors from RFC 2144, B.1
    ('0123456789abcdef', '238b4fe5847e44b2',
        '0123456712345678234567893456789a',
        '128-bit key'),
    ('0123456789abcdef', 'eb6a711a2c02271b',
        '01234567123456782345',
        '80-bit key'),
    ('0123456789abcdef', '7ac816d16e9b302e',
        '0123456712',
        '40-bit key'),
]
def get_tests(config={}):
    """Return the CAST-128 self-tests as a list of unittest test cases.

    `config` is accepted for API compatibility with the other self-test
    modules in this package and is not used here.
    """
    from Crypto.Cipher import CAST
    from common import make_block_tests
    return make_block_tests(CAST, "CAST", test_data)
# Allow running this module directly: wrap the tests in a suite so that
# `python test_CAST.py` executes them under unittest.
if __name__ == '__main__':
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
bachtk/linux | refs/heads/master | tools/perf/util/setup.py | 766 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' build output into the directories chosen by the
    # perf Makefile (build_lib/build_tmp are module-level values read from
    # the PYTHON_EXTBUILD_* environment variables below).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the same out-of-tree directory the extension was built
    # into (see build_ext above).
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags are inherited from the kernel build via CFLAGS.
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
# Out-of-tree build/temp directories and prebuilt static libraries are
# passed in by the perf Makefile through the environment.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPI')
# Source list comes from util/python-ext-sources, skipping blanks and
# '#' comment lines.  NOTE: file() is the Python 2 builtin open(); this
# script deliberately targets python2 (see the shebang line).
ext_sources = [f.strip() for f in file('util/python-ext-sources')
		if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
		  extra_objects = [libtraceevent, libapikfs],
                 )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
piyush82/icclab-rcb-web | refs/heads/master | virtualenv/lib/python2.7/site-packages/pip/vendor/html5lib/sanitizer.py | 805 | from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
class HTMLSanitizerMixin(object):
    """ sanitization of XHTML+MathML+SVG and of inline style attributes."""
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
                           'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
                           'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
                           'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
                           'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
                           'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
                           'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
                           'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
                           'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
                           'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
                           'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
                           'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
                           'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
                       'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
                       'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
                       'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
                       'munderover', 'none']
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
                    'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
                    'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
                    'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
                    'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
                    'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
                             'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
                             'background', 'balance', 'bgcolor', 'bgproperties', 'border',
                             'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
                             'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
                             'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
                             'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
                             'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
                             'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
                             'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
                             'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
                             'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
                             'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
                             'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
                             'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
                             'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
                             'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
                             'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
                             'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
                             'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
                             'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
                             'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
                             'width', 'wrap', 'xml:lang']
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
                         'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
                         'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
                         'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
                         'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
                         'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
                         'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
                         'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
                         'xlink:type', 'xmlns', 'xmlns:xlink']
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
                      'arabic-form', 'ascent', 'attributeName', 'attributeType',
                      'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
                      'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
                      'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
                      'fill-opacity', 'fill-rule', 'font-family', 'font-size',
                      'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
                      'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
                      'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
                      'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
                      'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
                      'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
                      'opacity', 'orient', 'origin', 'overline-position',
                      'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
                      'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
                      'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
                      'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
                      'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
                      'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
                      'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
                      'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
                      'transform', 'type', 'u1', 'u2', 'underline-position',
                      'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
                      'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
                      'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
                      'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
                      'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
                      'y1', 'y2', 'zoomAndPan']
    # Attributes whose values are URIs and must be scheme-checked.
    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
                       'xlink:href', 'xml:base']
    # SVG attributes that may contain url(...) references, which are stripped.
    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
                               'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
                               'mask', 'stroke']
    # Elements on which xlink:href must be local (fragment-only).
    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
                            'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
                            'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
                            'set', 'use']
    acceptable_css_properties = ['azimuth', 'background-color',
                                 'border-bottom-color', 'border-collapse', 'border-color',
                                 'border-left-color', 'border-right-color', 'border-top-color', 'clear',
                                 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
                                 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
                                 'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
                                 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
                                 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
                                 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
                                 'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
                                 'white-space', 'width']
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
                               'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
                               'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
                               'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
                               'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
                               'transparent', 'underline', 'white', 'yellow']
    acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
                                 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
                                 'stroke-opacity']
    acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
                            'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
                            'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
                            'ssh', 'sftp', 'rtsp', 'afs']
    # subclasses may define their own versions of these constants
    allowed_elements = acceptable_elements + mathml_elements + svg_elements
    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
    allowed_css_properties = acceptable_css_properties
    allowed_css_keywords = acceptable_css_keywords
    allowed_svg_properties = acceptable_svg_properties
    allowed_protocols = acceptable_protocols
    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
    # attributes are parsed, and a restricted set, # specified by
    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
    # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
    # in ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script> do_nasty_stuff()&lt;/script>
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Return a sanitized version of *token*, or None to drop it.

        Start/end/empty tags are routed to allowed_token() or
        disallowed_token() depending on the element whitelist; comment
        tokens are dropped (None); all other token types pass through
        unchanged.
        """
        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in list(tokenTypes.keys()):
            token_type = tokenTypes[token_type]
        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            if token["name"] in self.allowed_elements:
                return self.allowed_token(token, token_type)
            else:
                return self.disallowed_token(token, token_type)
        elif token_type == tokenTypes["Comment"]:
            pass
        else:
            return token
    def allowed_token(self, token, token_type):
        """Filter the attributes of a whitelisted tag.

        Drops attributes not in allowed_attributes, deletes URI-valued
        attributes whose scheme is not in allowed_protocols, blanks
        url(...) references in SVG attributes, deletes non-local
        xlink:href values on elements restricted to local references,
        and sanitizes inline CSS in the style attribute.
        """
        if "data" in token:
            # token["data"] is iterated in reverse so that, on duplicate
            # attribute names, the first occurrence wins in the dict.
            attrs = dict([(name, val) for name, val in
                          token["data"][::-1]
                          if name in self.allowed_attributes])
            for attr in self.attr_val_is_uri:
                if attr not in attrs:
                    continue
                # Strip control/whitespace characters that browsers ignore
                # before comparing the URI scheme against the whitelist.
                val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                       unescape(attrs[attr])).lower()
                # remove replacement characters from unescaped characters
                val_unescaped = val_unescaped.replace("\ufffd", "")
                if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
                    (val_unescaped.split(':')[0] not in
                     self.allowed_protocols)):
                    del attrs[attr]
            for attr in self.svg_attr_val_allows_ref:
                if attr in attrs:
                    attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                         ' ',
                                         unescape(attrs[attr]))
            if (token["name"] in self.svg_allow_local_href and
                'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                    attrs['xlink:href'])):
                del attrs['xlink:href']
            if 'style' in attrs:
                attrs['style'] = self.sanitize_css(attrs['style'])
            token["data"] = [[name, val] for name, val in list(attrs.items())]
        return token
    def disallowed_token(self, token, token_type):
        """Convert a non-whitelisted tag into an escaped Characters token.

        The tag is re-serialized into its literal source text so it is
        rendered as text rather than interpreted as markup.
        """
        if token_type == tokenTypes["EndTag"]:
            token["data"] = "</%s>" % token["name"]
        elif token["data"]:
            attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
            token["data"] = "<%s%s>" % (token["name"], attrs)
        else:
            token["data"] = "<%s>" % token["name"]
        if token.get("selfClosing"):
            token["data"] = token["data"][:-1] + "/>"
        if token["type"] in list(tokenTypes.keys()):
            token["type"] = "Characters"
        else:
            token["type"] = tokenTypes["Characters"]
        del token["name"]
        return token
    def sanitize_css(self, style):
        """Return *style* keeping only whitelisted CSS properties/keywords.

        Any input that fails the conservative syntax "gauntlet" below is
        rejected wholesale (returns '').
        """
        # disallow urls
        style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ''
        clean = []
        for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
            if not value:
                continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                                'padding']:
                # Shorthand properties: allow only if every keyword is a
                # whitelisted keyword, a color, or a simple dimension.
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                            not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that filters its token stream through HTMLSanitizerMixin."""
    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False, parser=None):
        # Change case matching defaults as we only output lowercase html anyway
        # This solution doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName, parser=parser)
    def __iter__(self):
        # Yield only tokens that survive sanitization (sanitize_token
        # returns None for dropped tokens, e.g. comments).
        for token in HTMLTokenizer.__iter__(self):
            token = self.sanitize_token(token)
            if token:
                yield token
|
yordan-desta/QgisIns | refs/heads/master | python/plugins/processing/algs/otb/__init__.py | 12133432 | |
rogerhu/django | refs/heads/master | tests/validators/models.py | 12133432 | |
Deepakkothandan/ansible | refs/heads/devel | lib/ansible/modules/cloud/lxc/__init__.py | 12133432 | |
0Chencc/CTFCrackTools | refs/heads/master | Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py | 346 | from io import BytesIO
class CallbackFileWrapper(object):
    """
    File-object proxy that tees everything read from the wrapped ``fp``
    into an internal buffer and, once the underlying stream is exhausted,
    invokes ``callback`` exactly once with the complete buffered contents.

    Unknown attribute access is forwarded to the wrapped file object, so
    this wrapper can stand in for it transparently.  Members use a
    double-underscore (name-mangled) prefix so they cannot accidentally
    shadow attributes of the wrapped object.
    """

    def __init__(self, fp, callback):
        self.__buf = BytesIO()
        self.__fp = fp
        self.__callback = callback

    def __getattr__(self, name):
        # Garbage collection can run while self.__fp is not (yet) set.
        # Looking it up through __getattribute__ with the mangled name
        # raises AttributeError when it is genuinely missing, which stops
        # getattr from recursing into this method forever.
        #
        # https://docs.python.org/2/reference/expressions.html#atom-identifiers
        fp = self.__getattribute__('_CallbackFileWrapper__fp')
        return getattr(fp, name)

    def __is_fp_closed(self):
        # httplib-style responses expose a raw socket file as ``fp`` which
        # becomes None at EOF; plain file objects expose ``closed``.
        # Probe ``fp`` first, exactly as the original logic did.
        try:
            return self.__fp.fp is None
        except AttributeError:
            pass
        try:
            return self.__fp.closed
        except AttributeError:
            pass
        # Neither attribute exists: report "open"; we simply won't cache.
        # TODO: Add some logging here...
        return False

    def _close(self):
        callback = self.__callback
        if callback:
            callback(self.__buf.getvalue())
        # Drop the callback reference unconditionally.  Keeping it alive
        # can create a reference cycle through an object with a __del__
        # method, which is known to deadlock the CPython interpreter
        # during collection; clearing it lets the GC proceed normally.
        self.__callback = None

    def read(self, amt=None):
        chunk = self.__fp.read(amt)
        self.__buf.write(chunk)
        if self.__is_fp_closed():
            self._close()
        return chunk

    def _safe_read(self, amt):
        chunk = self.__fp._safe_read(amt)
        if amt == 2 and chunk == b'\r\n':
            # httplib calls _safe_read(2) to discard the CRLF that
            # terminates a chunk; that framing is not part of the response
            # body, so it must not be buffered.
            return chunk
        self.__buf.write(chunk)
        if self.__is_fp_closed():
            self._close()
        return chunk
|
carlrobert/tools | refs/heads/master | js2prop.py | 2 | # -*- coding: utf-8 -*-
# js2prop.py: App Inventor 2 (AI2) translation helper
# Split _messages.js into a preamble, a properties file (usable with po2prop) and a postamble
#
# The properties are represented as assignments in the .js file:
# Blockly.Msg.LANG_LISTS_LOOKUP_IN_PAIRS_INPUT = "parvis uppslagning nyckel %1 par %2 hittadesInte %3"
#
# In the .properties file this corresponds to
# Blockly.Msg.LANG_LISTS_LOOKUP_IN_PAIRS_INPUT = parvis uppslagning nyckel %1 par %2 hittadesInte %3
#
# See https://github.com/mit-cml/appinventor-sources/blob/master/appinventor/blocklyeditor/src/msg/sv/_messages.js
# Thanks to https://jis.qyv.name/ for helping out
import re
from sys import argv
def quote_leading_space(s):
    """Return *s* with a leading space protected by a backslash.

    Java .properties values lose leading whitespace unless it is escaped,
    so a value starting with a space must be written as "\\ value".
    Strings that do not start with a space (including the empty string)
    are returned unchanged.
    """
    # startswith handles the empty string, so no explicit length check
    # is needed (the original tested len(s) == 0 by hand).
    return '\\' + s if s.startswith(' ') else s
def main():
    """Split an AI2 _messages.js file into three output files.

    Reads the filename given on the command line and writes:
      1-pre-<name>               -- everything before the first assignment
      2-<name>.properties        -- the Blockly.Msg assignments, one
                                    ``key = value`` line each (po2prop form)
      3-post-<name>              -- everything after the last assignment
    """
    script, filename = argv
    out_names = ['1-pre-' + filename, '2-' + filename + '.properties',
                 '3-post-' + filename]
    PREAMBLE, PROPERTIES, POSTAMBLE = 0, 1, 2
    # Matches e.g.: Blockly.Msg.LANG_FOO = "translated text"
    assignment = re.compile(r'(Blockly[^\s=]+)\s=\s\"(.*)"$', re.IGNORECASE)
    stage = PREAMBLE
    # All three output files carry text copied from the UTF-8 input, so
    # every one is opened with an explicit encoding.  (The original opened
    # the pre/post files with the platform default encoding, which raises
    # UnicodeEncodeError for non-ASCII translations e.g. on Windows.)
    # Mode 'w' already truncates, so the explicit truncate() calls are gone.
    target = open(out_names[stage], 'w', encoding='utf-8')
    try:
        with open(filename, encoding='utf-8') as inf:
            for line in inf:
                if stage == PREAMBLE:
                    m = assignment.search(line)
                    if m:
                        # First assignment seen: switch to the .properties file.
                        target.close()
                        stage = PROPERTIES
                        target = open(out_names[stage], 'w', encoding='utf-8')
                    else:
                        target.write(line)
                if stage == PROPERTIES:
                    m = assignment.search(line)
                    if m:
                        target.write(''.join([m.group(1), ' = ',
                                              quote_leading_space(m.group(2)),
                                              '\n']))
                    else:
                        # First non-assignment line: switch to the postamble.
                        target.close()
                        stage = POSTAMBLE
                        target = open(out_names[stage], 'w', encoding='utf-8')
                if stage == POSTAMBLE:
                    target.write(line)
    finally:
        # The original never closed the final output file.
        target.close()
# Script entry point: argv parsing and all I/O happen inside main().
if __name__ == "__main__":
    main()
|
DarioGT/OMS-PluginXML | refs/heads/master | org.modelsphere.sms/lib/jython-2.2.1/Lib/test/test_sax.py | 1 | # -*- coding: iso-8859-1 -*-
# regression test for SAX 2.0
# $Id: test_sax.py,v 1.13 2004/03/20 07:46:04 fdrake Exp $
from xml.sax import handler, make_parser, ContentHandler, \
SAXException, SAXReaderNotAvailable, SAXParseException
# Bail out of the whole module early when no SAX driver is available on
# this platform (the importing test runner treats ImportError as "skip").
try:
    make_parser()
except SAXReaderNotAvailable:
    # don't try to test this module if we cannot create a parser
    raise ImportError("no XML parsers available")
from xml.sax.saxutils import XMLGenerator, escape, unescape, quoteattr, \
XMLFilterBase, Location
from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl
from cStringIO import StringIO
from test.test_support import verbose, TestFailed, findfile
# ===== Utilities
tests = 0      # number of checks executed so far (updated by confirm())
failures = []  # names of the checks that failed
def confirm(outcome, name):
    """Record the result of one named check and report it on stdout."""
    global tests
    tests = tests + 1
    if outcome:
        if verbose:
            print "Passed", name
    else:
        print "Failed", name
        failures.append(name)
def test_make_parser2():
    """Regression check: make_parser() must succeed repeatedly in a row."""
    try:
        # Creating parsers several times in a row should succeed.
        # Testing this because there have been failures of this kind
        # before.
        from xml.sax import make_parser
        p = make_parser()
        from xml.sax import make_parser
        p = make_parser()
        from xml.sax import make_parser
        p = make_parser()
        from xml.sax import make_parser
        p = make_parser()
        from xml.sax import make_parser
        p = make_parser()
        from xml.sax import make_parser
        p = make_parser()
    except:
        return 0
    else:
        return p
# ===========================================================================
#
# saxutils tests
#
# ===========================================================================
# ===== escape
def test_escape_basic():
    # A bare ampersand must be escaped to the "&amp;" entity.  (The
    # expected string had lost its entity encoding, making the test
    # impossible to pass.)
    return escape("Donald Duck & Co") == "Donald Duck &amp; Co"
def test_escape_all():
    # '<', '&' and '>' are all replaced by their predefined XML entities.
    # (The expected string had lost its entity encoding.)
    return escape("<Donald Duck & Co>") == "&lt;Donald Duck &amp; Co&gt;"
def test_escape_extra():
    # A caller-supplied entity map is applied on top of the built-in
    # replacements.  (The "&aring;" entity had been collapsed to a raw
    # character in both the map and the expected string.)
    return escape("Hei på deg", {"å": "&aring;"}) == "Hei p&aring; deg"
# ===== unescape
def test_unescape_basic():
    # "&amp;" must unescape back to a bare ampersand.  (The input string
    # had lost its entity encoding.)
    return unescape("Donald Duck &amp; Co") == "Donald Duck & Co"
def test_unescape_all():
    # All three predefined entities must be unescaped.  (The input string
    # had lost its entity encoding.)
    return unescape("&lt;Donald Duck &amp; Co&gt;") == "<Donald Duck & Co>"
def test_unescape_extra():
    # Caller-supplied entities are unescaped as well.  (The "&aring;"
    # entity had been collapsed to a raw character in both arguments.)
    return unescape("Hei p&aring; deg", {"&aring;": "å"}) == "Hei på deg"
def test_unescape_amp_extra():
    # "&amp;" must be unescaped last so it cannot create new entity
    # matches: "&amp;foo;" becomes "&foo;", never "splat".  (The input
    # had lost its "&amp;" encoding, collapsing input and output.)
    return unescape("&amp;foo;", {"&foo;": "splat"}) == "&foo;"
# ===== quoteattr
def test_quoteattr_basic():
    # quoteattr() escapes '&' like escape() and wraps the value in double
    # quotes.  (The expected string had lost the "&amp;" entity.)
    return quoteattr("Donald Duck & Co") == '"Donald Duck &amp; Co"'
def test_single_quoteattr():
    # A value containing only double quotes is wrapped in single quotes,
    # with no entity escaping needed.
    expected = '\'Includes "double" quotes\''
    return quoteattr('Includes "double" quotes') == expected
def test_double_quoteattr():
    # A value containing only single quotes is wrapped in double quotes,
    # with no entity escaping needed.
    expected = "\"Includes 'single' quotes\""
    return quoteattr("Includes 'single' quotes") == expected
def test_single_double_quoteattr():
    # With both quote styles present the value is double-quoted and the
    # embedded double quotes become "&quot;".  (The expected string had
    # lost that entity encoding.)
    return (quoteattr("Includes 'single' and \"double\" quotes")
            == "\"Includes 'single' and &quot;double&quot; quotes\"")
# ===== make_parser
def test_make_parser():
    """make_parser() must fall back to expatreader for unknown driver names."""
    try:
        # Creating a parser should succeed - it should fall back
        # to the expatreader
        p = make_parser(['xml.parsers.no_such_parser'])
    except:
        return 0
    else:
        return p
# ===== XMLGenerator
# XML declaration that XMLGenerator emits for its default iso-8859-1
# encoding; every generator test's expected output starts with this.
start = '<?xml version="1.0" encoding="iso-8859-1"?>\n'
def test_xmlgen_basic():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc></doc>"
def test_xmlgen_content():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("huhei")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc>huhei</doc>"
def test_xmlgen_escaped_content():
    # NOTE(review): relies on the Python 2 unicode() builtin, and the
    # expected literal below contains a raw non-Latin-1 character even
    # though the file declares iso-8859-1 -- it looks corrupted; verify
    # against the original Jython source before trusting this comparison.
    result = StringIO()
    gen = XMLGenerator(result)
    gen.startDocument()
    gen.startElement("doc", {})
    gen.characters(unicode("\xa0\\u3042", "unicode-escape"))
    gen.endElement("doc")
    gen.endDocument()
    return result.getvalue() == start + "<doc>\xa0あ</doc>"
def test_xmlgen_escaped_attr():
    # NOTE(review): relies on the Python 2 unicode() builtin; the raw
    # non-Latin-1 character in the expected literal looks corrupted (the
    # file declares iso-8859-1) -- verify against the original source.
    result = StringIO()
    gen = XMLGenerator(result)
    gen.startDocument()
    gen.startElement("doc", {"x": unicode("\\u3042", "unicode-escape")})
    gen.endElement("doc")
    gen.endDocument()
    return result.getvalue() == start + '<doc x="あ"></doc>'
def test_xmlgen_pi():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.processingInstruction("test", "data")
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<?test data?><doc></doc>"
def test_xmlgen_content_escape():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("<huhei&")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc><huhei&</doc>"
def test_xmlgen_attr_escape():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {"a": '"'})
gen.startElement("e", {"a": "'"})
gen.endElement("e")
gen.startElement("e", {"a": "'\""})
gen.endElement("e")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start \
+ "<doc a='\"'><e a=\"'\"></e><e a=\"'"\"></e></doc>"
def test_xmlgen_attr_escape_manydouble():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {"a": '"\'"'})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc a='\"'\"'></doc>"
def test_xmlgen_attr_escape_manysingle():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {"a": "'\"'"})
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + '<doc a="\'"\'"></doc>'
def test_xmlgen_ignorable():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.ignorableWhitespace(" ")
gen.endElement("doc")
gen.endDocument()
return result.getvalue() == start + "<doc> </doc>"
# Namespace URI shared by the namespace-aware generator/parser tests below.
ns_uri = "http://www.python.org/xml-ns/saxtest/"
def test_xmlgen_ns():
result = StringIO()
gen = XMLGenerator(result)
gen.startDocument()
gen.startPrefixMapping("ns1", ns_uri)
gen.startElementNS((ns_uri, "doc"), "ns1:doc", {})
# add an unqualified name
gen.startElementNS((None, "udoc"), None, {})
gen.endElementNS((None, "udoc"), None)
gen.endElementNS((ns_uri, "doc"), "ns1:doc")
gen.endPrefixMapping("ns1")
gen.endDocument()
return result.getvalue() == start + \
('<ns1:doc xmlns:ns1="%s"><udoc></udoc></ns1:doc>' %
ns_uri)
# ===== XMLFilterBase
def test_filter_basic():
result = StringIO()
gen = XMLGenerator(result)
filter = XMLFilterBase()
filter.setContentHandler(gen)
filter.startDocument()
filter.startElement("doc", {})
filter.characters("content")
filter.ignorableWhitespace(" ")
filter.endElement("doc")
filter.endDocument()
return result.getvalue() == start + "<doc>content </doc>"
# ===========================================================================
#
# expatreader tests
#
# ===========================================================================
# ===== XMLReader support
def test_expat_file():
    """Parse test.xml from an open file object and compare canonical output."""
    parser = make_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    parser.parse(open(findfile("test.xml")))
    return result.getvalue() == xml_test_out
# ===== DTDHandler support
class TestDTDHandler:
    """DTD handler that records notation and unparsed-entity declarations."""
    def __init__(self):
        self._notations = []
        self._entities = []
    def notationDecl(self, name, publicId, systemId):
        self._notations.append((name, publicId, systemId))
    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._entities.append((name, publicId, systemId, ndata))
def test_expat_dtdhandler():
    """The expat driver must report NOTATION and NDATA declarations."""
    parser = make_parser()
    handler = TestDTDHandler()
    parser.setDTDHandler(handler)
    parser.parse(StringIO('''<!DOCTYPE doc [
  <!ENTITY img SYSTEM "expat.gif" NDATA GIF>
  <!NOTATION GIF PUBLIC "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN">
]>
<doc></doc>'''))
    # endswith() is used because the driver may absolutize the system id.
    if len(handler._entities) != 1 or len(handler._entities[0]) != 4:
        return 0
    name, pubId, sysId, ndata = handler._entities[0]
    if name != 'img' or not pubId is None or not sysId.endswith('expat.gif') or ndata != 'GIF':
        return 0
    return handler._notations == [("GIF", "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN", None)]
# ===== EntityResolver support
class TestEntityResolver:
    """EntityResolver that answers every request with a fixed <entity/> body."""
    def resolveEntity(self, publicId, systemId):
        inpsrc = InputSource()
        inpsrc.setByteStream(StringIO("<entity/>"))
        return inpsrc
def test_expat_entityresolver():
    """External entity references must be routed through the EntityResolver."""
    parser = make_parser()
    parser.setEntityResolver(TestEntityResolver())
    result = StringIO()
    parser.setContentHandler(XMLGenerator(result))
    parser.parse(StringIO('''<!DOCTYPE doc [
  <!ENTITY test SYSTEM "whatever">
]>
<doc>&test;</doc>'''))
    return result.getvalue() == start + "<doc><entity></entity></doc>"
# ===== Attributes support
class AttrGatherer(ContentHandler):
    """ContentHandler that captures the attributes of the last element seen."""
    def startElement(self, name, attrs):
        self._attrs = attrs
    def startElementNS(self, name, qname, attrs):
        self._attrs = attrs
def test_expat_attrs_empty():
    """An attribute-less element must yield an empty Attributes object."""
    # verify_empty_attrs() is defined later in this file.
    parser = make_parser()
    gather = AttrGatherer()
    parser.setContentHandler(gather)
    parser.parse(StringIO("<doc/>"))
    return verify_empty_attrs(gather._attrs)
def test_expat_attrs_wattr():
    """A single attribute must be exposed through the Attributes interface."""
    # verify_attrs_wattr() is defined later in this file.
    parser = make_parser()
    gather = AttrGatherer()
    parser.setContentHandler(gather)
    parser.parse(StringIO("<doc attr='val'/>"))
    return verify_attrs_wattr(gather._attrs)
def test_expat_nsattrs_empty():
    """With namespaces on, an attribute-less element yields empty NS attrs."""
    # verify_empty_nsattrs() is defined later in this file.
    parser = make_parser()
    parser.setFeature(handler.feature_namespaces, 1)
    gather = AttrGatherer()
    parser.setContentHandler(gather)
    parser.parse(StringIO("<doc/>"))
    return verify_empty_nsattrs(gather._attrs)
def test_expat_nsattrs_wattr():
    """Namespaced attributes must be keyed by (uri, localname) tuples."""
    parser = make_parser()
    parser.setFeature(handler.feature_namespaces, 1)
    gather = AttrGatherer()
    parser.setContentHandler(gather)
    parser.parse(StringIO("<doc xmlns:ns='%s' ns:attr='val'/>" % ns_uri))
    attrs = gather._attrs
    # Exercise the whole AttributesNS interface on a one-attribute element.
    return attrs.getLength() == 1 and \
           attrs.getNames() == [(ns_uri, "attr")] and \
           attrs.getQNames() == ["ns:attr"] and \
           len(attrs) == 1 and \
           attrs.has_key((ns_uri, "attr")) and \
           attrs.keys() == [(ns_uri, "attr")] and \
           attrs.get((ns_uri, "attr")) == "val" and \
           attrs.get((ns_uri, "attr"), 25) == "val" and \
           attrs.items() == [((ns_uri, "attr"), "val")] and \
           attrs.values() == ["val"] and \
           attrs.getValue((ns_uri, "attr")) == "val" and \
           attrs[(ns_uri, "attr")] == "val"
# ===== InputSource support
# Canonical expected output for the test.xml fixture.  NOTE(review): the
# file handle opened here is never closed (tolerable in a short-lived
# test script, and `with` is unavailable on the Jython 2.2 target).
xml_test_out = open(findfile("test.xml.out")).read()
def test_expat_inpsource_filename():
    """parse() must accept a plain filename as its input source."""
    parser = make_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    parser.parse(findfile("test.xml"))
    return result.getvalue() == xml_test_out
def test_expat_inpsource_sysid():
    """parse() must accept an InputSource carrying only a system id."""
    parser = make_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    parser.parse(InputSource(findfile("test.xml")))
    return result.getvalue() == xml_test_out
def test_expat_inpsource_stream():
    """parse() must accept an InputSource carrying an open byte stream."""
    parser = make_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    inpsrc = InputSource()
    inpsrc.setByteStream(open(findfile("test.xml")))
    parser.parse(inpsrc)
    return result.getvalue() == xml_test_out
# ===== Locator support
class LocatorTest(XMLGenerator):
    """XMLGenerator that snapshots the document Location it is given.

    Location is a Jython saxutils extension wrapping a SAX Locator so its
    values survive after parsing finishes.
    """
    def __init__(self, out=None, encoding="iso-8859-1"):
        XMLGenerator.__init__(self, out, encoding)
        self.location = None
    def setDocumentLocator(self, locator):
        XMLGenerator.setDocumentLocator(self, locator)
        self.location = Location(self._locator)
def test_expat_locator_noinfo():
    """Parsing from a bare stream leaves system/public ids unset."""
    result = StringIO()
    xmlgen = LocatorTest(result)
    parser = make_parser()
    parser.setContentHandler(xmlgen)
    parser.parse(StringIO("<doc></doc>"))
    return xmlgen.location.getSystemId() is None and \
           xmlgen.location.getPublicId() is None and \
           xmlgen.location.getLineNumber() == 1
def test_expat_locator_withinfo():
    """Parsing a real file: locator reports its system id, no public id."""
    result = StringIO()
    xmlgen = LocatorTest(result)
    parser = make_parser()
    parser.setContentHandler(xmlgen)
    testfile = findfile("test.xml")
    parser.parse(testfile)
    # Jython reports the system id as a URL with forward slashes, while
    # findfile may hand back a Windows path with backslashes — normalize
    # before comparing.
    import os
    if os.name == 'java':
        testfile = testfile.replace('\\', '/')
    loc = xmlgen.location
    return (loc.getSystemId().endswith(testfile)
            and loc.getPublicId() is None)
# ===========================================================================
#
# error reporting
#
# ===========================================================================
def test_expat_incomplete():
    """An unterminated document must raise SAXParseException."""
    parser = make_parser()
    parser.setContentHandler(ContentHandler())  # discard all events
    try:
        parser.parse(StringIO("<foo>"))
    except SAXParseException:
        return 1  # ok, error found
    return 0
def test_sax_location_str():
    """Location.__str__ must tolerate None line/column numbers.

    Exercises every combination of a real integer and None for the line
    and column reported by the locator; any of them blowing up in
    __str__() fails the test by raising.
    """
    for lineno, colno in ((1, 1), (None, 1), (1, None), (None, None)):
        str(Location(DummyLocator(lineno, colno)))
    return 1
def test_sax_parse_exception_str():
    """SAXParseException.__str__ must tolerate None line/column numbers.

    Same combinations as test_sax_location_str, but routed through the
    exception's string conversion instead of Location's.
    """
    for lineno, colno in ((1, 1), (None, 1), (1, None), (None, None)):
        str(SAXParseException("message", None,
                              DummyLocator(lineno, colno)))
    return 1
class DummyLocator:
    """Minimal Locator stand-in: fixed ids, caller-chosen line/column."""
    def __init__(self, lineno, colno):
        self._line = lineno
        self._col = colno
    def getPublicId(self):
        return "pubid"
    def getSystemId(self):
        return "sysid"
    def getLineNumber(self):
        return self._line
    def getColumnNumber(self):
        return self._col
# ===========================================================================
#
# xmlreader tests
#
# ===========================================================================
# ===== AttributesImpl
def verify_empty_attrs(attrs):
    """Return true iff *attrs* behaves like an empty Attributes object:
    every lookup of the absent "attr" raises KeyError and every
    collection-style accessor reports emptiness."""
    def keyerror_flag(func, arg):
        # 1 if func(arg) raises KeyError, else 0 (matches the original
        # integer flags).
        try:
            func(arg)
        except KeyError:
            return 1
        return 0

    gvk = keyerror_flag(attrs.getValue, "attr")
    gvqk = keyerror_flag(attrs.getValueByQName, "attr")
    gnqk = keyerror_flag(attrs.getNameByQName, "attr")
    gqnk = keyerror_flag(attrs.getQNameByName, "attr")
    gik = keyerror_flag(attrs.__getitem__, "attr")
    # Note: the "attrs" key below is preserved from the original — a miss
    # simply returns the default, which is all the check needs.
    return (attrs.getLength() == 0
            and attrs.getNames() == []
            and attrs.getQNames() == []
            and len(attrs) == 0
            and not attrs.has_key("attr")
            and attrs.keys() == []
            and attrs.get("attrs") is None
            and attrs.get("attrs", 25) == 25
            and attrs.items() == []
            and attrs.values() == []
            and gvk and gvqk and gnqk and gik and gqnk)
def verify_attrs_wattr(attrs):
    """Return true iff *attrs* holds exactly one attribute attr="val"
    and every accessor agrees on it."""
    return (attrs.getLength() == 1
            and attrs.getNames() == ["attr"]
            and attrs.getQNames() == ["attr"]
            and len(attrs) == 1
            and attrs.has_key("attr")
            and attrs.keys() == ["attr"]
            and attrs.get("attr") == "val"
            and attrs.get("attr", 25) == "val"
            and attrs.items() == [("attr", "val")]
            and attrs.values() == ["val"]
            and attrs.getValue("attr") == "val"
            and attrs.getValueByQName("attr") == "val"
            and attrs.getNameByQName("attr") == "attr"
            and attrs["attr"] == "val"
            and attrs.getQNameByName("attr") == "attr")
def test_attrs_empty():
    """An AttributesImpl built from {} must pass all empty-attrs checks."""
    empty = AttributesImpl({})
    return verify_empty_attrs(empty)
def test_attrs_wattr():
    """An AttributesImpl with one attr="val" must satisfy every accessor."""
    single = AttributesImpl({"attr": "val"})
    return verify_attrs_wattr(single)
# ===== AttributesImpl
def verify_empty_nsattrs(attrs):
    """Namespace flavour of verify_empty_attrs: every lookup of the absent
    (ns_uri, "attr") attribute must raise KeyError, and every
    collection-style accessor must report emptiness."""
    def keyerror_flag(func, arg):
        # 1 if func(arg) raises KeyError, else 0 (matches the original
        # integer flags).
        try:
            func(arg)
        except KeyError:
            return 1
        return 0

    name = (ns_uri, "attr")
    gvk = keyerror_flag(attrs.getValue, name)
    gvqk = keyerror_flag(attrs.getValueByQName, "ns:attr")
    gnqk = keyerror_flag(attrs.getNameByQName, "ns:attr")
    gqnk = keyerror_flag(attrs.getQNameByName, name)
    gik = keyerror_flag(attrs.__getitem__, name)
    return (attrs.getLength() == 0
            and attrs.getNames() == []
            and attrs.getQNames() == []
            and len(attrs) == 0
            and not attrs.has_key(name)
            and attrs.keys() == []
            and attrs.get(name) is None
            and attrs.get(name, 25) == 25
            and attrs.items() == []
            and attrs.values() == []
            and gvk and gvqk and gnqk and gik and gqnk)
def test_nsattrs_empty():
    """An empty AttributesNSImpl must pass all empty-nsattrs checks."""
    empty = AttributesNSImpl({}, {})
    return verify_empty_nsattrs(empty)
def test_nsattrs_wattr():
    """An AttributesNSImpl holding one namespaced attribute must agree on
    it through every accessor, by (uri, localname) and by qname alike."""
    name = (ns_uri, "attr")
    attrs = AttributesNSImpl({name: "val"}, {name: "ns:attr"})
    return (attrs.getLength() == 1
            and attrs.getNames() == [name]
            and attrs.getQNames() == ["ns:attr"]
            and len(attrs) == 1
            and attrs.has_key(name)
            and attrs.keys() == [name]
            and attrs.get(name) == "val"
            and attrs.get(name, 25) == "val"
            and attrs.items() == [(name, "val")]
            and attrs.values() == ["val"]
            and attrs.getValue(name) == "val"
            and attrs.getValueByQName("ns:attr") == "val"
            and attrs.getNameByQName("ns:attr") == name
            and attrs[name] == "val"
            and attrs.getQNameByName(name) == "ns:attr")
# ===== Main program
def make_test_output():
    """Regenerate test.xml.out, the canonical XMLGenerator output.

    Parses test.xml through XMLGenerator and writes the result to
    test.xml.out, which the test_expat_inpsource_* tests compare against.

    Fix: the output file is now closed in a ``finally`` block, so the
    handle is not leaked if the write fails.
    """
    parser = make_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    parser.parse(findfile("test.xml"))
    outf = open(findfile("test.xml.out"), "w")
    try:
        outf.write(result.getvalue())
    finally:
        outf.close()
import sys
# Crimson (the XML parser bundled with Java 1.4) cannot handle the expat
# tests, so detect that platform once up front.
java_14 = sys.platform.startswith("java1.4")
del sys
# Collect every module-level name.  On Python 2, locals().items() returns a
# list, so it can be sorted in place to run the tests in a stable order.
items = locals().items()
items.sort()
for (name, value) in items:
    if name.startswith('test_expat') and java_14:
        #skip expat tests on java14 since the crimson parser is so crappy
        continue
    if name[:5] == "test_":
        # confirm() presumably records failures in `failures` and bumps
        # `tests` — both defined earlier in this file; verify there.
        confirm(value(), name)
if verbose:
    print "%d tests, %d failures" % (tests, len(failures))
if failures:
    raise TestFailed("%d of %d tests failed: %s"
                     % (len(failures), tests, ", ".join(failures)))
|
FND/tiddlyspace | refs/heads/master | test/test_put_hash.py | 1 | """
Test so-called "friendly" uris: links to tiddlers
in the current space from the root.
"""
from fixtures import make_test_env
from wsgi_intercept import httplib2_intercept
import wsgi_intercept
import httplib2
import simplejson
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.bag import Bag
def setup_module(module):
    """Build the test environment and wire an in-process HTTP intercept."""
    # make_test_env presumably injects app_fn (and store) into this
    # module's namespace — confirm against fixtures.make_test_env.
    make_test_env(module)
    # we have to have a function that returns the callable,
    # Selector just _is_ the callable
    httplib2_intercept.install()
    wsgi_intercept.add_wsgi_intercept('0.0.0.0', 8080, app_fn)
    module.http = httplib2.Http()
def teardown_module(module):
    """Leave the scratch directory entered during setup.

    Assumes make_test_env chdir'd into a test directory — confirm
    against fixtures.make_test_env.
    """
    import os
    os.chdir('..')
def test_hash():
    """PUTting new content must change a tiddler's server-kept _hash."""
    # Create a bag and a tiddler with known text directly in the store.
    bag = Bag('one')
    store.put(bag)
    tiddler = Tiddler('hi', 'one')
    tiddler.text = 'fancy'
    store.put(tiddler)
    # The store adds a _hash field on write — presumably computed by a
    # tiddlyspace plugin; confirm where it is set.
    tiddler = store.get(tiddler)
    assert '_hash' in tiddler.fields
    # The hash must round-trip through the JSON representation over HTTP.
    response, content = http.request('http://0.0.0.0:8080/bags/one/tiddlers/hi.json',
            method='GET')
    assert response['status'] == '200'
    info = simplejson.loads(content)
    assert info['fields']['_hash'] == tiddler.fields['_hash']
    # Rewrite the tiddler via PUT, still carrying the now-stale _hash.
    info['text'] = 'not fancy'
    body = simplejson.dumps(info)
    response, content = http.request('http://0.0.0.0:8080/bags/one/tiddlers/hi',
            headers={'Content-type': 'application/json'},
            body=body,
            method='PUT')
    assert response['status'] == '204'
    # Stored text is updated and the server recomputed the hash, so the
    # stale value sent in the PUT body was ignored/overwritten.
    tiddler = Tiddler('hi', 'one')
    tiddler = store.get(tiddler)
    assert tiddler.text == info['text']
    assert tiddler.fields['_hash'] != info['fields']['_hash']
|
ychfan/tensorflow | refs/heads/master | tensorflow/tools/docs/generate.py | 29 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import generate_lib
if __name__ == '__main__':
  # Build the documentation generator and its command-line interface.
  doc_generator = generate_lib.DocGenerator()
  doc_generator.add_output_dir_argument()
  doc_generator.add_src_dir_argument()
  # This doc generator works on the TensorFlow codebase. Since this script lives
  # at tensorflow/tools/docs, and all code is defined somewhere inside
  # tensorflow/, we can compute the base directory (two levels up), which is
  # valid unless we're trying to apply this to a different code base, or are
  # moving the script around.
  script_dir = os.path.dirname(tf_inspect.getfile(tf_inspect.currentframe()))
  default_base_dir = os.path.join(script_dir, '..', '..')
  doc_generator.add_base_dir_argument(default_base_dir)
  flags = doc_generator.parse_known_args()
  # Suppress documentation of some symbols that users should never use.
  del tf.layers.Layer.inbound_nodes
  del tf.layers.Layer.outbound_nodes
  # tf_debug is not imported with tf, it's a separate module altogether
  doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])
  # build() presumably returns a process exit code (0 on success) —
  # propagate it to the shell.
  sys.exit(doc_generator.build(flags))
|
truemped/dopplr | refs/heads/master | vows/solr/grouping_vows.py | 1 | # vim: set fileencoding=utf-8 :
#
# Copyright (c) 2013 Daniel Truemper <truemped at googlemail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from pyvows import Vows, expect
from dopplr.solr.query import ResultGrouping
@Vows.batch
class WhenGroupingResults(Vows.Context):
    """Vows for ResultGrouping: the Solr result-grouping query params."""
    class WithNoParametersExceptTheField(Vows.Context):
        def topic(self):
            # Only the mandatory grouping field is supplied.
            field = "my_field"
            return ResultGrouping(field).get_params()
        def mustIncludeGroupingParameter(self, topic):
            # Grouping must be switched on explicitly via group=true.
            expect(topic).to_include(('group', 'true'))
        def mustIncludeTheCorrectField(self, topic):
            expect(topic).to_include(('group.field', 'my_field'))
    class WithManyAdditionalParameters(WithNoParametersExceptTheField):
        # Subclassing the previous context re-runs its vows against this
        # richer topic as well (pyvows inheritance behavior).
        def topic(self):
            field = "my_field"
            return ResultGrouping(field, limit=10, offset=20,
                                  query="field:value").get_params()
        def mustIncludeTheCorrectParameters(self, topic):
            # Numeric options are rendered as strings; exactly 5 params:
            # group, group.field, group.limit, group.offset, group.query.
            expect(topic).to_include(('group.limit', '10'))
            expect(topic).to_include(('group.offset', '20'))
            expect(topic).to_include(('group.query', 'field:value'))
            expect(topic).to_length(5)
|
akhmadMizkat/odoo | refs/heads/master | addons/google_account/controllers/__init__.py | 7372 | import main
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.