code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from mock import patch
from nose.tools import assert_equals
from prompter import prompt
@patch('prompter.get_input', return_value='Dave')
def test_prompt_returns_input(mock_raw_input):
    """Whatever the user types should come straight back from prompt()."""
    assert_equals(prompt('What is your name?'), 'Dave')
@patch('prompter.get_input', return_value='')
def test_prompt_returns_default_for_blank_input(mock_raw_input):
    """An empty response should fall back to the supplied default."""
    assert_equals(prompt('What is your name?', default='Dave'), 'Dave')
@patch('prompter.get_input', return_value=' Dave ')
def test_prompt_strips_whitespace(mock_raw_input):
    """Surrounding whitespace is trimmed from the reply by default."""
    assert_equals(prompt('What is your name?'), 'Dave')
@patch('prompter.get_input', return_value=' Dave ')
def test_prompt_does_not_strip_whitespace(mock_raw_input):
    """With strip=False the raw input is returned verbatim."""
    assert_equals(prompt('What is your name?', strip=False), ' Dave ')
@patch('prompter.get_input', return_value='   ')
def test_prompt_returns_default_with_only_whitespace_input(mock_raw_input):
    """A whitespace-only reply counts as blank and yields the default."""
    assert_equals(prompt('What is your name?', default='Dave'), 'Dave')
| tylerdave/prompter | tests/test_prompt.py | Python | mit | 1,159 |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import unittest
import mock
from rally.cmd import envutils
from tests.functional import utils
class DeploymentTestCase(unittest.TestCase):
    """Functional (CLI-level) tests for the `rally deployment` commands."""
    def setUp(self):
        super(DeploymentTestCase, self).setUp()
        # Fresh rally CLI wrapper per test; calls return command output.
        self.rally = utils.Rally()
    def test_create_fromenv_list_show(self):
        """A deployment created from env vars appears in list/show."""
        with mock.patch.dict("os.environ", utils.TEST_ENV):
            self.rally("deployment create --name t_create_env --fromenv")
        self.assertIn("t_create_env", self.rally("deployment list"))
        self.assertIn(utils.TEST_ENV["OS_AUTH_URL"],
                      self.rally("deployment show"))
    def test_create_fromfile(self):
        """A dumped deployment config can seed a new deployment."""
        with mock.patch.dict("os.environ", utils.TEST_ENV):
            self.rally("deployment create --name t_create_env --fromenv")
        with open("/tmp/.tmp.deployment", "w") as f:
            f.write(self.rally("deployment config"))
        self.rally("deployment create --name t_create_file "
                   "--filename /tmp/.tmp.deployment")
        self.assertIn("t_create_file", self.rally("deployment list"))
    def test_config(self):
        """`deployment config` reflects the env vars it was created from."""
        with mock.patch.dict("os.environ", utils.TEST_ENV):
            self.rally("deployment create --name t_create_env --fromenv")
        config = json.loads(self.rally("deployment config"))
        self.assertEqual(utils.TEST_ENV["OS_USERNAME"],
                         config["admin"]["username"])
        self.assertEqual(utils.TEST_ENV["OS_PASSWORD"],
                         config["admin"]["password"])
        self.assertEqual(utils.TEST_ENV["OS_TENANT_NAME"],
                         config["admin"]["tenant_name"])
        self.assertEqual(utils.TEST_ENV["OS_AUTH_URL"],
                         config["auth_url"])
    def test_destroy(self):
        """A destroyed deployment disappears from `deployment list`."""
        with mock.patch.dict("os.environ", utils.TEST_ENV):
            self.rally("deployment create --name t_create_env --fromenv")
        self.assertIn("t_create_env", self.rally("deployment list"))
        self.rally("deployment destroy")
        self.assertNotIn("t_create_env", self.rally("deployment list"))
    def test_check_success(self):
        """`deployment check` succeeds against the default deployment."""
        self.assertTrue(self.rally("deployment check"))
    def test_check_fail(self):
        """`deployment check` fails for a deployment built from fake env."""
        with mock.patch.dict("os.environ", utils.TEST_ENV):
            self.rally("deployment create --name t_create_env --fromenv")
        # NOTE(review): the parens below make a plain string, not a tuple.
        self.assertRaises(utils.RallyCmdError, self.rally,
                          ("deployment check"))
    def test_recreate(self):
        """`deployment recreate` keeps the deployment listed."""
        with mock.patch.dict("os.environ", utils.TEST_ENV):
            self.rally("deployment create --name t_create_env --fromenv")
        self.rally("deployment recreate --deployment t_create_env")
        self.assertIn("t_create_env", self.rally("deployment list"))
    def test_use(self):
        """`deployment use` switches the globally tracked deployment."""
        with mock.patch.dict("os.environ", utils.TEST_ENV):
            output = self.rally(
                "deployment create --name t_create_env1 --fromenv")
            # The create output announces the new deployment's UUID.
            uuid = re.search(r"Using deployment: (?P<uuid>[0-9a-f\-]{36})",
                             output).group("uuid")
            self.rally("deployment create --name t_create_env2 --fromenv")
            self.rally("deployment use --deployment %s" % uuid)
            current_deployment = envutils.get_global("RALLY_DEPLOYMENT")
            self.assertEqual(uuid, current_deployment)
# Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Ctypes classes for record data types in lf.win.ole.ps.dtypes"""
# local imports
from lf.win.ole.ps.dtypes import (
PropertySetStreamHeader, PropertySetHeader,
PropertyIdentifierAndOffset, DictionaryEntryHeader, ArrayHeader,
ArrayDimension,
TypedPropertyValueHeader, TypedPropertyValue_VT_I2,
TypedPropertyValue_VT_R4, TypedPropertyValue_VT_R8,
TypedPropertyValue_VT_CY, TypedPropertyValue_VT_DATE,
TypedPropertyValue_VT_ERROR, TypedPropertyValue_VT_UI2,
TypedPropertyValue_VT_DECIMAL, TypedPropertyValue_VT_I1,
TypedPropertyValue_VT_UI1, TypedPropertyValue_VT_UI4,
TypedPropertyValue_VT_I8, TypedPropertyValue_VT_UI8,
TypedPropertyValue_VT_I4, TypedPropertyValue_VT_FILETIME,
TypedPropertyValue_VT_CLSID
)
__docformat__ = "restructuredtext en"
# Public API: lowercase aliases for the raw ctypes structures of each
# record class defined in lf.win.ole.ps.dtypes.
__all__ = [
    "property_set_stream_header", "property_set_header",
    "property_identifier_and_offset", "dictionary_entry_header",
    "array_header", "array_dimension",
    "typed_property_value_header", "typed_property_value_vt_i2",
    "typed_property_value_vt_r4", "typed_property_value_vt_r8",
    "typed_property_value_vt_cy", "typed_property_value_vt_date",
    "typed_property_value_vt_error", "typed_property_value_vt_ui2",
    "typed_property_value_vt_decimal", "typed_property_value_vt_i1",
    "typed_property_value_vt_ui1", "typed_property_value_vt_ui4",
    "typed_property_value_vt_i8", "typed_property_value_vt_ui8",
    "typed_property_value_vt_i4", "typed_property_value_vt_filetime",
    "typed_property_value_vt_clsid"
]
# Each alias exposes the underlying ctypes structure (`._ctype_`) of the
# corresponding record class, for callers that need ctypes-level access.
property_set_stream_header = PropertySetStreamHeader._ctype_
property_set_header = PropertySetHeader._ctype_
property_identifier_and_offset = PropertyIdentifierAndOffset._ctype_
dictionary_entry_header = DictionaryEntryHeader._ctype_
array_header = ArrayHeader._ctype_
array_dimension = ArrayDimension._ctype_
typed_property_value_header = TypedPropertyValueHeader._ctype_
typed_property_value_vt_i2 = TypedPropertyValue_VT_I2._ctype_
typed_property_value_vt_r4 = TypedPropertyValue_VT_R4._ctype_
typed_property_value_vt_r8 = TypedPropertyValue_VT_R8._ctype_
typed_property_value_vt_cy = TypedPropertyValue_VT_CY._ctype_
typed_property_value_vt_date = TypedPropertyValue_VT_DATE._ctype_
typed_property_value_vt_error = TypedPropertyValue_VT_ERROR._ctype_
typed_property_value_vt_ui2 = TypedPropertyValue_VT_UI2._ctype_
typed_property_value_vt_decimal = TypedPropertyValue_VT_DECIMAL._ctype_
typed_property_value_vt_i1 = TypedPropertyValue_VT_I1._ctype_
typed_property_value_vt_ui1 = TypedPropertyValue_VT_UI1._ctype_
typed_property_value_vt_ui4 = TypedPropertyValue_VT_UI4._ctype_
typed_property_value_vt_i8 = TypedPropertyValue_VT_I8._ctype_
typed_property_value_vt_ui8 = TypedPropertyValue_VT_UI8._ctype_
typed_property_value_vt_i4 = TypedPropertyValue_VT_I4._ctype_
typed_property_value_vt_filetime = TypedPropertyValue_VT_FILETIME._ctype_
typed_property_value_vt_clsid = TypedPropertyValue_VT_CLSID._ctype_
| 306777HC/libforensics | code/lf/win/ole/ps/ctypes.py | Python | lgpl-3.0 | 3,684 |
#!/usr/bin/python
#
# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_functionapp_facts
version_added: "2.4"
short_description: Get Azure Function App facts
description:
- Get facts for one Azure Function App or all Function Apps within a resource group
options:
name:
description:
- Only show results for a specific Function App
resource_group:
description:
- Limit results to a resource group. Required when filtering by name
aliases:
- resource_group_name
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Thomas Stringer (@trstringer)"
'''
EXAMPLES = '''
- name: Get facts for one Function App
azure_rm_functionapp_facts:
resource_group: myResourceGroup
name: myfunctionapp
- name: Get facts for all Function Apps in a resource group
azure_rm_functionapp_facts:
resource_group: myResourceGroup
- name: Get facts for all Function Apps by tags
azure_rm_functionapp_facts:
tags:
- testing
'''
RETURN = '''
azure_functionapps:
description: List of Azure Function Apps dicts
returned: always
type: list
example:
id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp
name: myfunctionapp
kind: functionapp
location: East US
type: Microsoft.Web/sites
state: Running
host_names:
- myfunctionapp.azurewebsites.net
repository_site_name: myfunctionapp
usage_state: Normal
enabled: true
enabled_host_names:
- myfunctionapp.azurewebsites.net
- myfunctionapp.scm.azurewebsites.net
availability_state: Normal
host_name_ssl_states:
- name: myfunctionapp.azurewebsites.net
ssl_state: Disabled
host_type: Standard
- name: myfunctionapp.scm.azurewebsites.net
ssl_state: Disabled
host_type: Repository
server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan
reserved: false
last_modified_time_utc: 2017-08-22T18:54:01.190Z
scm_site_also_stopped: false
client_affinity_enabled: true
client_cert_enabled: false
host_names_disabled: false
outbound_ip_addresses: ............
container_size: 1536
daily_memory_time_quota: 0
resource_group: myResourceGroup
default_host_name: myfunctionapp.azurewebsites.net
'''
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
class AzureRMFunctionAppFacts(AzureRMModuleBase):
    """Gather facts for Azure Function Apps.

    The scope is chosen from the supplied arguments:
      * name (+ resource_group): a single Function App,
      * resource_group only:     every app in that group,
      * neither:                 every app in the subscription.
    Results may additionally be filtered by tags.
    """

    def __init__(self):
        # Argument schema; mirrors the DOCUMENTATION block above.
        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str', aliases=['resource_group_name']),
            tags=dict(type='list'),
        )
        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_functionapps=[])
        )
        self.name = None
        self.resource_group = None
        self.tags = None
        super(AzureRMFunctionAppFacts, self).__init__(
            self.module_arg_spec,
            supports_tags=False,
            facts_module=True
        )

    def exec_module(self, **kwargs):
        """Entry point: validate arguments and collect the requested facts."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")
        if self.name:
            self.results['ansible_facts']['azure_functionapps'] = self.get_functionapp()
        elif self.resource_group:
            self.results['ansible_facts']['azure_functionapps'] = self.list_resource_group()
        else:
            self.results['ansible_facts']['azure_functionapps'] = self.list_all()
        return self.results

    def get_functionapp(self):
        """Return facts for the single app `self.name`, or [] if absent.

        A missing app (CloudError) is not a failure for a facts module.
        """
        self.log('Get properties for Function App {0}'.format(self.name))
        function_app = None
        try:
            function_app = self.web_client.web_apps.get(
                self.resource_group,
                self.name
            )
        except CloudError:
            pass
        if function_app and self.has_tags(function_app.tags, self.tags):
            return [function_app.as_dict()]
        # BUG FIX: previously returned [[]] (a list containing an empty
        # list) when the app was missing or its tags didn't match; an
        # empty fact list is the correct "no results" value.
        return []

    def list_resource_group(self):
        """Return facts for all Function Apps in `self.resource_group`."""
        self.log('List items')
        try:
            response = self.web_client.web_apps.list_by_resource_group(self.resource_group)
        except Exception as exc:
            self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))
        return self._filtered_facts(response)

    def list_all(self):
        """Return facts for every Function App in the subscription."""
        self.log('List all items')
        try:
            # BUG FIX: this used to call list_by_resource_group(None)
            # even though no resource group was supplied; list()
            # enumerates apps across the whole subscription.
            response = self.web_client.web_apps.list()
        except Exception as exc:
            self.fail("Error listing all items - {0}".format(str(exc)))
        return self._filtered_facts(response)

    def _filtered_facts(self, response):
        """Serialize the apps in `response` that match the requested tags."""
        return [item.as_dict() for item in response
                if self.has_tags(item.tags, self.tags)]
def main():
    """Module entry point.

    NOTE(review): instantiation alone appears to drive execution (the
    AzureRMModuleBase constructor presumably parses args and calls
    exec_module) -- confirm against azure_rm_common.
    """
    AzureRMFunctionAppFacts()
if __name__ == '__main__':
    main()
| tersmitten/ansible | lib/ansible/modules/cloud/azure/azure_rm_functionapp_facts.py | Python | gpl-3.0 | 6,027 |
import argparse
import subprocess as sub
import sys
from termcolor import colored
def createarguments():
    """Build the command-line parser for this script.

    Returns:
        argparse.ArgumentParser: parser understanding the -n/--new flag
        (store_true, defaults to False).
    """
    parser = argparse.ArgumentParser()
    # Fixed typos in the user-visible help text ("Downoad", "verion").
    parser.add_argument('-n', '--new', help="Download a new version of Pharo", required=False, action="store_true")
    return parser
def move_to_old():
"""
Move current pharo to "backup directory"
"""
print colored("Mooving Pharo to Pharo-old...", 'blue')
sub.call("rm -rf ~/Pharo-old", shell=True)
sub.call("mv -f ~/Pharo/ ~/Pharo-old", shell=True)
sub.call("rm -rf ~/Pharo", shell=True)
def download_new():
    """
    Download a new version of Pharo into ~/Pharo.
    """
    # Fetch the Pharo 3.0 image plus the latest VM via the zeroconf
    # script from get.pharo.org.
    print colored("Downloading Pharo zeroconf script", 'blue')
    sub.call("mkdir ~/Pharo", shell=True)
    sub.call("cd ~/Pharo && wget -O- get.pharo.org/30+vmLatest | bash", shell=True)
def set_memory():
    """
    Increase the VM's maximum memory limit.
    """
    # Bump the max heap from 512 MB (536870912) to ~900 MB by editing the
    # app bundle's Info.plist in place.
    # NOTE(review): `sed -i ''` is the BSD/macOS form; GNU sed would treat
    # '' as the script argument -- confirm this script targets OS X only.
    sub.call("sed -i '' 's/536870912/900000000/g' ~/Pharo/pharo-vm/Pharo.app/Contents/Info.plist", shell=True)
def start_pharo():
    """
    Launch the Pharo UI in the background with the downloaded image.
    """
    print colored("Starting pharo...", 'blue')
    sub.call("~/Pharo/pharo-ui ~/Pharo/Pharo.image &", shell=True)
def main():
    """Entry point: refresh the Pharo install when -n/--new is given.

    Without --new the script does nothing (matching prior behaviour).
    """
    # Removed dead `global quietmode` declaration: `quietmode` is never
    # defined or read anywhere in this script.
    parser = createarguments()
    args = vars(parser.parse_args())
    if args['new']:
        move_to_old()
        download_new()
        set_memory()
        start_pharo()
if __name__ == "__main__":
    main()
| LorenzoBaracchi/pharo-startup | PharoStartupAndConfs/startPharo.py | Python | gpl-3.0 | 1,395 |
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Define relation between records and buckets."""
from __future__ import absolute_import
from invenio_db import db
from invenio_pidstore.models import RecordIdentifier
from invenio_records.models import RecordMetadataBase
class TemplateIdentifier(RecordIdentifier):
    """Sequence generator for template record identifiers."""
    __tablename__ = 'template_id'
    # concrete=True: use this class's own table instead of sharing the
    # parent RecordIdentifier mapping.
    __mapper_args__ = {'concrete': True}
    # Auto-incrementing id; BigInteger everywhere, plain Integer on
    # SQLite (which only autoincrements INTEGER primary keys).
    recid = db.Column(
        db.BigInteger().with_variant(db.Integer, 'sqlite'),
        primary_key=True, autoincrement=True,
    )
class TemplateMetadata(db.Model, RecordMetadataBase):
    """Template record metadata (columns come from RecordMetadataBase)."""
    __tablename__ = 'template_metadata'
| rero/reroils-app | rero_ils/modules/templates/models.py | Python | gpl-2.0 | 1,343 |
"""Reload file from disk automatically."""
from __future__ import annotations
from porcupine import get_tab_manager, tabs
# TODO: should cursor and scrolling stuff be a part of reload() or change_batch()?
def reload_if_necessary(tab: tabs.FileTab) -> None:
    """Reload *tab* from disk if another program modified the file.

    The insert cursor and vertical scroll position are restored after a
    successful reload so the user doesn't lose their place.
    """
    if not tab.other_program_changed_file():
        return
    widget = tab.textwidget
    saved_cursor = widget.index("insert")
    saved_scroll = widget.yview()[0]
    if tab.reload():
        widget.mark_set("insert", saved_cursor)
        widget.yview_moveto(saved_scroll)
def on_new_filetab(tab: tabs.FileTab) -> None:
    """Watch a newly created filetab for on-disk changes."""
    def check_for_reload(event: object) -> None:
        reload_if_necessary(tab)

    tab.bind("<<FileSystemChanged>>", check_for_reload, add=True)
def setup() -> None:
    """Plugin entry point: register the callback for every new filetab."""
    get_tab_manager().add_filetab_callback(on_new_filetab)
| Akuli/porcupine | porcupine/plugins/reload.py | Python | mit | 762 |
from elasticsearch_dsl import Boolean, Document, Integer, Keyword, Text, GeoPoint, Float, Nested
from elasticsearch_dsl.connections import connections
# Register the default Elasticsearch connection used implicitly by
# Document operations; assumes a local node on the default port.
connections.create_connection(hosts=['localhost'])
class Listing(Document):
    """Elasticsearch mapping for a scraped Airbnb listing.

    Base class containing the fields common to all listing documents.
    Fields declared as ``Text(fields={'keyword': Keyword()})`` keep a
    ``.keyword`` sub-field for exact-match filtering and aggregations
    alongside full-text search.
    """
    access = Text()
    additional_house_rules = Text()
    allows_events = Boolean()
    amenities = Keyword(multi=True)
    amenity_ids = Keyword(multi=True)
    avg_rating = Float()
    bathrooms = Float()
    bedrooms = Integer()
    beds = Integer()
    business_travel_ready = Boolean()
    city = Text(fields={'keyword': Keyword()}, required=True)
    country = Text(fields={'keyword': Keyword()}, required=True)
    # Lat/lon pair, indexed for geo queries.
    coordinates = GeoPoint()
    description = Text()
    host_id = Integer(fields={'keyword': Keyword()})
    house_rules = Text()
    interaction = Text()
    is_hotel = Boolean()
    monthly_price_factor = Float()
    name = Text(fields={'keyword': Keyword()}, required=True)
    neighborhood_overview = Text()
    person_capacity = Integer()
    photo_count = Integer()
    photos = Keyword(multi=True)
    place_id = Text(fields={'keyword': Keyword()})
    price_rate = Float()
    price_rate_type = Text(fields={'keyword': Keyword()}, required=True)
    province = Text(fields={'keyword': Keyword()})
    # Per-category review scores.
    rating_accuracy = Float()
    rating_checkin = Float()
    rating_cleanliness = Float()
    rating_communication = Float()
    rating_location = Float()
    rating_value = Float()
    review_count = Integer()
    # NOTE(review): untyped Nested() -- inner review schema is defined by
    # whatever the scraper stores; confirm against the indexing code.
    reviews = Nested()
    room_and_property_type = Text(fields={'keyword': Keyword()}, required=True)
    room_type = Text(fields={'keyword': Keyword()}, required=True)
    room_type_category = Text(fields={'keyword': Keyword()}, required=True)
    satisfaction_guest = Float()
    star_rating = Float()
    state = Text(fields={'keyword': Keyword()}, required=True)
    transit = Text()
    url = Text(fields={'keyword': Keyword()}, required=True)
    weekly_price_factor = Float()
| bashedev/airbnb_scraper | deepbnb/model.py | Python | gpl-3.0 | 2,005 |
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``heatmapgl.hoverlabel.font`` property.

    NOTE(review): this module follows the shape of plotly.py's generated
    validator files; manual edits here are likely to be overwritten by
    the code generator.
    """
    def __init__(
        self, plotly_name="font", parent_name="heatmapgl.hoverlabel", **kwargs
    ):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for `family`.
            size
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for `size`.
""",
            ),
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/heatmapgl/hoverlabel/_font.py | Python | mit | 1,875 |
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
from distutils.core import setup, Extension
module1 = Extension(
'dummy',
sources=['dummy.cpp', 'SerialUtils.cc', 'StringUtils.cc'],
extra_compile_args=["-O3"]
)
setup(
name='Dummy',
version='1.0',
description='This is a dummy package',
ext_modules=[module1]
)
| elzaggo/pydoop | test/timings/setup.py | Python | apache-2.0 | 900 |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
r"""Determine information about text files.
This module efficiently determines the encoding of text files (see
_classify_encoding for details), accurately identifies binary files, and
provides detailed meta information of text files.
>>> import textinfo
>>> path = __file__
>>> if path.endswith(".pyc"): path = path[:-1]
>>> ti = textinfo.textinfo_from_path(path)
>>> ti.__class__
<class 'textinfo.TextInfo'>
>>> ti.encoding
'utf-8'
>>> ti.file_type_name
'regular file'
>>> ti.is_text
True
>>> ti.lang
'Python'
>>> ti.langinfo
<Python LangInfo>
...plus a number of other useful information gleaned from the file. To see
a list of all useful attributes see
>> list(ti.as_dict().keys())
['encoding', 'file_type', ...]
Note: This module requires at least Python 2.5 to use
`codecs.lookup(<encname>).name`.
"""
# Short description used for the command-line --help output.
_cmdln_doc = """Determine information about text files.
"""
# TODO:
# - [high prio] prefs integration
# - aggegrate "is there an explicit encoding decl in this file" from XML, HTML,
# lang-specific, emacs and vi vars decls (as discussed with Shane)
# - fix ti with unicode paths Windows (check on Linux too)
# - '-L|--dereference' option a la `file` and `ls`
# - See: http://webblaze.cs.berkeley.edu/2009/content-sniffing/
# - Shift-JIS encoding is not detected for
# http://public.activestate.com/pub/apc/perl-current/lib/Pod/Simple/t/corpus/s2763_sjis.txt
# [Jan wrote]
# > While the document isn't identified by filename extension as POD,
# > it does contain POD and a corresponding =encoding directive.
# Could potentially have a content heuristic check for POD.
#
# ----------------
# Current Komodo (4.2) Encoding Determination Notes (used for reference,
# but not absolutely followed):
#
# Working through koDocumentBase._detectEncoding:
# encoding_name = pref:encodingDefault (on first start is set
# to encoding from locale.getdefaultlocale() typically,
# fallback to iso8859-1; default locale typically ends up being:
# Windows: cp1252
# Mac OS X: mac-roman
# (modern) Linux: UTF-8)
# encoding = the python name for this
# tryencoding = pref:encoding (no default, explicitly set
# encoding) -- i.e. if there are doc prefs for this
# path, then give this encoding a try. If not given,
# then utf-8 for XML/XSLT/VisualBasic and
# pref:encodingDefault for others (though this is
# all prefable via the 'languages' pref struct).
# tryxmldecl
# trymeta (HTML meta)
# trymodeline
# autodetect (whether to try at all)
#
# if autodetect or tryencoding:
# koUnicodeEncoding.autoDetectEncoding()
# else:
# if encoding.startswith('utf'): # note this is pref:encodingDefault
# check bom
# presume encoding is right (give up if conversion fails)
# else:
# presume encoding is right (given up if fails)
#
# Working through koUnicodeEncoding.autoDetectEncoding:
# if tryxmldecl: ...
# if tryhtmlmeta: ...
# if trymodeline: ...
# use bom: ...
# ----------------
# Module version, exposed both as a comparable tuple and a dotted string.
__version_info__ = (0, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
import os
from os.path import join, dirname, abspath, basename, exists
import sys
import re
from pprint import pprint
import traceback
import warnings
import logging
import optparse
import codecs
import locale
import langinfo
#---- exceptions and warnings
class TextInfoError(Exception):
    """Base class for errors raised by this module."""
    pass
class TextInfoConfigError(TextInfoError):
    """Raised for invalid or inconsistent textinfo configuration."""
    pass
class ChardetImportWarning(ImportWarning):
    """Warned (once) when the optional `chardet` package can't be imported."""
    pass
warnings.simplefilter("once", ChardetImportWarning)
#---- globals
log = logging.getLogger("textinfo")
# For debugging:
DEBUG_CHARDET_INFO = False  # gather chardet info
def _to_str(s):
s = '%s' % s
if s.startswith("b'") and s.endswith("'") or s.startswith('b"') and s.endswith('"'):
return s[2:-1]
return s
#---- module API
def textinfo_from_filename(path):
    """Determine text info for the given path **using the filename only**.

    No attempt is made to stat or read the file.
    """
    return TextInfo.init_from_filename(path)
def textinfo_from_path(path, encoding=None, follow_symlinks=False,
                       quick_determine_lang=False):
    """Determine text info for the given path.

    See `TextInfo.init_from_path` for the meaning of the keyword args.
    This raises EnvironmentError if the path does not exist or could
    not be read.
    """
    return TextInfo.init_from_path(path, encoding=encoding,
                                   follow_symlinks=follow_symlinks,
                                   quick_determine_lang=quick_determine_lang)
#---- main TextInfo class
class TextInfo(object):
    """Bundle of file/text metadata for a single path.

    Instances are created via the `init_from_*` classmethods; which of
    the attributes below get populated depends on how far classification
    proceeded (see `init_from_path`).
    """
    path = None
    file_type_name = None      # e.g. "regular file", "directory", ...
    file_type = None           # stat.S_IFMT(os.stat(path).st_mode)
    file_mode = None           # stat.S_IMODE(os.stat(path).st_mode)
    is_text = None             # True/False, or None while undetermined
    encoding = None
    has_bom = None   # whether the text has a BOM (Byte Order Marker)
    encoding_bozo = False
    encoding_bozo_reasons = None
    lang = None                # e.g. "Python", "Perl", ...
    langinfo = None            # langinfo.LangInfo instance or None
    # Enable chardet-based heuristic guessing of encoding as a last
    # resort for file types known to not be binary.
    CHARDET_ENABLED = True
    CHARDET_THRESHHOLD = 0.9  # >=90% confidence to avoid false positives.
@classmethod
def init_from_filename(cls, path, lidb=None):
"""Create an instance using only the filename to initialize."""
if lidb is None:
lidb = langinfo.get_default_database()
self = cls()
self.path = path
self._classify_from_filename(lidb)
return self
    @classmethod
    def init_from_path(cls, path, encoding=None, lidb=None,
                       follow_symlinks=False,
                       quick_determine_lang=False,
                       env=None):
        """Create an instance using the filename and stat/read info
        from the given path to initialize.

        Classification proceeds in stages (stat, filename, magic/content,
        encoding, full content) and may return early for non-regular
        files, binary files, or once the language is known.

        @param encoding {str} optional suggested encoding, forwarded to
            `_classify_encoding`.
        @param lidb {langinfo database} defaults to the shared database.
        @param follow_symlinks {boolean} can be set to True to have
            the textinfo returned for a symlink be for linked-to file. By
            default the textinfo is for the symlink itself.
        @param quick_determine_lang {boolean} can be set to True to have
            processing stop as soon as the language has been determined.
            Note that this means some fields will not be populated.
        @param env {runtime environment} A "runtime environment" class
            whose behaviour is used to influence processing. Currently
            it is just used to provide a hook for lang determination
            by filename (for Komodo).
        """
        if lidb is None:
            lidb = langinfo.get_default_database()
        self = cls()
        self.path = path
        self._accessor = PathAccessor(path, follow_symlinks=follow_symlinks)
        try:
            # TODO: pref: Is a preference specified for this path?
            self._classify_from_stat(lidb)
            if self.file_type_name != "regular file":
                # Don't continue if not a regular file.
                return self
            # TODO: add 'pref:treat_as_text' a la TextMate (or
            # perhaps that is handled in _classify_from_filename())
            self._classify_from_filename(lidb, env)
            if self.is_text is False:
                return self
            if self.lang and quick_determine_lang:
                return self
            if not self.lang:
                self._classify_from_magic(lidb)
                if self.is_text is False:
                    return self
                if self.lang and quick_determine_lang:
                    return self
            self._classify_encoding(lidb, suggested_encoding=encoding)
            # Successfully decoding implies the file is text.
            if self.is_text is None and self.encoding:
                self.is_text = True
            if self.is_text is False:
                return self
            self.text = self._accessor.text
            if self.text: # No `self.text' with current UTF-32 hack.
                self._classify_from_content(lidb)
            return self
        finally:
            # Free the memory used by the accessor.
            del self._accessor
def __repr__(self):
if self.path:
return "<TextInfo %r>" % self.path
else:
return "<TextInfo %r>"\
% _one_line_summary_from_text(self.content, 30)
def as_dict(self):
return dict((k, v) for k, v in list(self.__dict__.items())
if not k.startswith('_'))
def as_summary(self):
"""One-liner string summary of text info."""
d = self.as_dict()
info = []
if self.file_type_name and self.file_type_name != "regular file":
info.append(self.file_type_name)
else:
info.append(self.lang or "???")
if not self.is_text:
info.append("binary")
elif self.encoding:
enc = self.encoding
if self.has_bom:
enc += " (bom)"
info.append(enc)
if DEBUG_CHARDET_INFO and hasattr(self, "chardet_info") \
and self.chardet_info["encoding"]:
info.append("chardet:%s/%.1f%%"
% (self.chardet_info["encoding"],
self.chardet_info["confidence"] * 100.0))
return "%s: %s" % (self.path, ', '.join(info))
def _classify_from_content(self, lidb):
# TODO: Plan:
# - eol_* attrs (test cases for this!)
head = self.text[:self._accessor.HEAD_SIZE]
tail = self.text[-self._accessor.TAIL_SIZE:]
# If lang is unknown, attempt to guess from XML prolog or
# shebang now that we've successfully decoded the buffer.
if self.langinfo is None:
(self.has_xml_prolog, xml_version,
xml_encoding) = self._get_xml_prolog_info_s(head)
if self.has_xml_prolog:
self.xml_version = xml_version
self.xml_encoding = xml_encoding
self.langinfo = lidb.langinfo_from_lang("XML")
self.lang = self.langinfo.name
elif self.text.startswith("#!"):
li = lidb.langinfo_from_magic(self.text, shebang_only=True)
if li:
self.langinfo = li
self.lang = li.name
# Extract Emacs local vars and Vi(m) modeline info and, if the
# lang is still unknown, attempt to use them to determine it.
self.emacs_vars = self._get_emacs_head_vars_s(head)
self.emacs_vars.update(self._get_emacs_tail_vars_s(tail))
self.vi_vars = self._get_vi_vars_s(head)
if not self.vi_vars:
self.vi_vars = self._get_vi_vars_s(tail)
if self.langinfo is None and "mode" in self.emacs_vars:
li = lidb.langinfo_from_emacs_mode(self.emacs_vars["mode"])
if li:
self.langinfo = li
self.lang = li.name
if self.langinfo is None and "filetype" in self.vi_vars \
or "ft" in self.vi_vars:
vi_filetype = self.vi_vars.get(
"filetype") or self.vi_vars.get("ft")
li = lidb.langinfo_from_vi_filetype(vi_filetype)
if li:
self.langinfo = li
self.lang = li.name
if self.langinfo is not None:
if self.langinfo.conforms_to("XML"):
if not hasattr(self, "has_xml_prolog"):
(self.has_xml_prolog, self.xml_version,
self.xml_encoding) = self._get_xml_prolog_info_s(head)
(self.has_doctype_decl, self.doctype_decl,
self.doctype_name, self.doctype_public_id,
self.doctype_system_id) = self._get_doctype_decl_info_s(head)
# If this is just plain XML, we try to use the doctype
# decl to choose a more specific XML lang.
if self.lang == "XML" and self.has_doctype_decl:
li = lidb.langinfo_from_doctype(
public_id=self.doctype_public_id,
system_id=self.doctype_system_id)
if li and li.name != "XML":
self.langinfo = li
self.lang = li.name
elif self.langinfo.conforms_to("HTML"):
(self.has_doctype_decl, self.doctype_decl,
self.doctype_name, self.doctype_public_id,
self.doctype_system_id) = self._get_doctype_decl_info_s(head)
# Allow promotion to XHTML (or other HTML flavours) based
# on doctype.
if self.lang == "HTML" and self.has_doctype_decl:
li = lidb.langinfo_from_doctype(
public_id=self.doctype_public_id,
system_id=self.doctype_system_id)
if li and li.name != "HTML":
self.langinfo = li
self.lang = li.name
# Look for XML prolog and promote HTML -> XHTML if it
# exists. Note that this wins over a plain HTML doctype.
(self.has_xml_prolog, xml_version,
xml_encoding) = self._get_xml_prolog_info_s(head)
if self.has_xml_prolog:
self.xml_version = xml_version
self.xml_encoding = xml_encoding
if self.lang == "HTML":
li = lidb.langinfo_from_lang("XHTML")
self.langinfo = li
self.lang = li.name
# Attempt to specialize the lang.
if self.langinfo is not None:
li = lidb.specialized_langinfo_from_content(
self.langinfo, self.text)
if li:
self.langinfo = li
self.lang = li.name
def _classify_from_magic(self, lidb):
"""Attempt to classify from the file's magic number/shebang
line, doctype, etc.
Note that this is done before determining the encoding, so we are
working with the *bytes*, not chars.
"""
self.has_bom, bom, bom_encoding = self._get_bom_info()
if self.has_bom:
# If this file has a BOM then, unless something funny is
# happening, this will be a text file encoded with
# `bom_encoding`. We leave that to `_classify_encoding()`.
return
# Without a BOM we assume this is an 8-bit encoding, for the
# purposes of looking at, e.g. a shebang line.
#
# UTF-16 and UTF-32 without a BOM is rare; we won't pick up on,
# e.g. Python encoded as UCS-2 or UCS-4 here (but
# `_classify_encoding()` should catch most of those cases).
head_bytes = self._accessor.head_bytes
li = lidb.langinfo_from_magic(head_bytes)
if li:
log.debug("lang from magic: %s", li.name)
self.langinfo = li
self.lang = li.name
self.is_text = li.is_text
return
(has_doctype_decl, doctype_decl, doctype_name, doctype_public_id,
doctype_system_id) = self._get_doctype_decl_info_b(head_bytes)
if has_doctype_decl:
li = lidb.langinfo_from_doctype(public_id=doctype_public_id,
system_id=doctype_system_id)
if li:
log.debug("lang from doctype: %s", li.name)
self.langinfo = li
self.lang = li.name
self.is_text = li.is_text
return
def _classify_encoding(self, lidb, suggested_encoding=None):
"""To classify from the content we need to separate text from
binary, and figure out the encoding. This is an imperfect task.
The algorithm here is to go through the following heroics to attempt
to determine an encoding that works to decode the content. If all
such attempts fail, we presume it is binary.
1. Use the BOM, if it has one.
2. Try the given suggested encoding (if any).
3. Check for EBCDIC encoding.
4. Lang-specific (if we know the lang already):
* if this is Python, look for coding: decl and try that
* if this is Perl, look for use encoding decl and try that
* ...
5. XML: According to the XML spec the rule is the XML prolog
specifies the encoding, or it is UTF-8.
6. HTML: Attempt to use Content-Type meta tag. Try the given
charset, if any.
7. Emacs-style "coding" local var.
8. Vi[m]-style "fileencoding" local var.
9. Heuristic checks for UTF-16 without BOM.
10. Give UTF-8 a try, it is a pretty common fallback.
We must do this before a possible 8-bit
`locale.getpreferredencoding()` because any UTF-8 encoded
document will decode with an 8-bit encoding (i.e. will decode,
just with bogus characters).
11. Lang-specific fallback. E.g., UTF-8 for XML, ascii for Python.
12. chardet (http://chardet.feedparser.org/), if CHARDET_ENABLED == True
13. locale.getpreferredencoding()
14. iso8859-1 (in case `locale.getpreferredencoding()` is UTF-8
we must have an 8-bit encoding attempt).
TODO: Is there a worry for a lot of false-positives for
binary files.
Notes:
- A la Universal Feed Parser, if some
supposed-to-be-authoritative encoding indicator is wrong (e.g.
the BOM, the Python 'coding:' decl for Python),
`self.encoding_bozo` is set True and a reason is appended to
the `self.encoding_bozo_reasons` list.
"""
# 1. Try the BOM.
if self.has_bom is not False: # Was set in `_classify_from_magic()`.
self.has_bom, bom, bom_encoding = self._get_bom_info()
if self.has_bom:
self._accessor.strip_bom(bom)
# Python doesn't currently include a UTF-32 codec. For now
# we'll *presume* that a UTF-32 BOM is correct. The
# limitation is that `self.text' will NOT get set
# because we cannot decode it.
if bom_encoding in ("utf-32-le", "utf-32-be") \
or self._accessor.decode(bom_encoding):
log.debug("encoding: encoding from BOM: %r", bom_encoding)
self.encoding = bom_encoding
return
else:
log.debug("encoding: BOM encoding (%r) was *wrong*",
bom_encoding)
self._encoding_bozo(
"BOM encoding (%s) could not decode %s"
% (bom_encoding, self._accessor))
head_bytes = self._accessor.head_bytes
if DEBUG_CHARDET_INFO:
sys.path.insert(0, os.path.expanduser(
"~/tm/check/contrib/chardet"))
import chardet
del sys.path[0]
self.chardet_info = chardet.detect(head_bytes)
# 2. Try the suggested encoding.
if suggested_encoding is not None:
norm_suggested_encoding = _norm_encoding(suggested_encoding)
if self._accessor.decode(suggested_encoding):
self.encoding = norm_suggested_encoding
return
else:
log.debug("encoding: suggested %r encoding didn't work for %s",
suggested_encoding, self._accessor)
# 3. Check for EBCDIC.
# TODO: Not sure this should be included, chardet may be better
# at this given different kinds of EBCDIC.
EBCDIC_MAGIC = '\x4c\x6f\xa7\x94'
if self._accessor.head_4_bytes == EBCDIC_MAGIC:
# This is EBCDIC, but I don't know if there are multiple kinds
# of EBCDIC. Python has a 'ebcdic-cp-us' codec. We'll use
# that for now.
norm_ebcdic_encoding = _norm_encoding("ebcdic-cp-us")
if self._accessor.decode(norm_ebcdic_encoding):
log.debug("EBCDIC encoding: %r", norm_ebcdic_encoding)
self.encoding = norm_ebcdic_encoding
return
else:
log.debug("EBCDIC encoding didn't work for %s",
self._accessor)
# 4. Lang-specific (if we know the lang already).
if self.langinfo and self.langinfo.conformant_attr("encoding_decl_pattern"):
m = self.langinfo.conformant_attr("encoding_decl_pattern") \
.search(head_bytes)
if m:
lang_encoding = m.group("encoding").decode('ascii')
norm_lang_encoding = _norm_encoding(lang_encoding)
if self._accessor.decode(norm_lang_encoding):
log.debug("encoding: encoding from lang-spec: %r",
norm_lang_encoding)
self.encoding = norm_lang_encoding
return
else:
log.debug("encoding: lang-spec encoding (%r) was *wrong*",
lang_encoding)
self._encoding_bozo(
"lang-spec encoding (%s) could not decode %s"
% (lang_encoding, self._accessor))
# 5. XML prolog
if self.langinfo and self.langinfo.conforms_to("XML"):
has_xml_prolog, xml_version, xml_encoding \
= self._get_xml_prolog_info_b(head_bytes)
if xml_encoding is not None:
norm_xml_encoding = _norm_encoding(xml_encoding)
if self._accessor.decode(norm_xml_encoding):
log.debug("encoding: encoding from XML prolog: %r",
norm_xml_encoding)
self.encoding = norm_xml_encoding
return
else:
log.debug("encoding: XML prolog encoding (%r) was *wrong*",
norm_xml_encoding)
self._encoding_bozo(
"XML prolog encoding (%s) could not decode %s"
% (norm_xml_encoding, self._accessor))
# 6. HTML: Attempt to use Content-Type meta tag.
if self.langinfo and self.langinfo.conforms_to("HTML"):
has_http_content_type_info, http_content_type, http_encoding \
= self._get_http_content_type_info_b(head_bytes)
if has_http_content_type_info and http_encoding:
norm_http_encoding = _norm_encoding(http_encoding)
if self._accessor.decode(norm_http_encoding):
log.debug("encoding: encoding from HTTP content-type: %r",
norm_http_encoding)
self.encoding = norm_http_encoding
return
else:
log.debug(
"encoding: HTTP content-type encoding (%r) was *wrong*",
norm_http_encoding)
self._encoding_bozo(
"HTML content-type encoding (%s) could not decode %s"
% (norm_http_encoding, self._accessor))
# 7. Emacs-style local vars.
emacs_head_vars = self._get_emacs_head_vars_b(head_bytes)
emacs_encoding = emacs_head_vars.get(b"coding")
if not emacs_encoding:
tail_bytes = self._accessor.tail_bytes
emacs_tail_vars = self._get_emacs_tail_vars_b(tail_bytes)
emacs_encoding = emacs_tail_vars.get(b"coding")
if emacs_encoding:
norm_emacs_encoding = _norm_encoding(emacs_encoding)
if self._accessor.decode(norm_emacs_encoding):
log.debug("encoding: encoding from Emacs coding var: %r",
norm_emacs_encoding)
self.encoding = norm_emacs_encoding
return
else:
log.debug("encoding: Emacs coding var (%r) was *wrong*",
norm_emacs_encoding)
self._encoding_bozo(
"Emacs coding var (%s) could not decode %s"
% (norm_emacs_encoding, self._accessor))
# 8. Vi[m]-style local vars.
vi_vars = self._get_vi_vars_b(head_bytes)
vi_encoding = vi_vars.get(b"fileencoding") or vi_vars.get(b"fenc")
if not vi_encoding:
vi_vars = self._get_vi_vars_b(self._accessor.tail_bytes)
vi_encoding = vi_vars.get(b"fileencoding") or vi_vars.get(b"fenc")
if vi_encoding:
norm_vi_encoding = _norm_encoding(vi_encoding)
if self._accessor.decode(norm_vi_encoding):
log.debug("encoding: encoding from Vi[m] coding var: %r",
norm_vi_encoding)
self.encoding = norm_vi_encoding
return
else:
log.debug("encoding: Vi[m] coding var (%r) was *wrong*",
norm_vi_encoding)
self._encoding_bozo(
"Vi[m] coding var (%s) could not decode %s"
% (norm_vi_encoding, self._accessor))
# 9. Heuristic checks for UTF-16 without BOM.
utf16_encoding = None
head_odd_bytes = head_bytes[0::2]
head_even_bytes = head_bytes[1::2]
head_markers = [b'<?xml', b'#!']
for head_marker in head_markers:
length = len(head_marker)
if head_odd_bytes.startswith(head_marker) \
and head_even_bytes[0:length] == b'\x00' * length:
utf16_encoding = "utf-16-le"
break
elif head_even_bytes.startswith(head_marker) \
and head_odd_bytes[0:length] == b'\x00' * length:
utf16_encoding = "utf-16-be"
break
internal_markers = [b'coding']
for internal_marker in internal_markers:
length = len(internal_marker)
try:
idx = head_odd_bytes.index(internal_marker)
except ValueError:
pass
else:
if head_even_bytes[idx:idx+length] == b'\x00' * length:
utf16_encoding = "utf-16-le"
try:
idx = head_even_bytes.index(internal_marker)
except ValueError:
pass
else:
if head_odd_bytes[idx:idx+length] == b'\x00' * length:
utf16_encoding = "utf-16-be"
if utf16_encoding:
if self._accessor.decode(utf16_encoding):
log.debug("encoding: guessed encoding: %r", utf16_encoding)
self.encoding = utf16_encoding
return
# 10. Give UTF-8 a try.
norm_utf8_encoding = _norm_encoding("utf-8")
if self._accessor.decode(norm_utf8_encoding):
log.debug("UTF-8 encoding: %r", norm_utf8_encoding)
self.encoding = norm_utf8_encoding
return
# 11. Lang-specific fallback (e.g. XML -> utf-8, Python -> ascii, ...).
# Note: A potential problem here is that a fallback encoding here that
# is a pre-Unicode Single-Byte encoding (like iso8859-1) always "works"
# so the subsequent heuristics never get tried.
fallback_encoding = None
fallback_lang = None
if self.langinfo:
fallback_lang = self.langinfo.name
fallback_encoding = self.langinfo.conformant_attr(
"default_encoding")
if fallback_encoding:
if self._accessor.decode(fallback_encoding):
log.debug("encoding: fallback encoding for %s: %r",
fallback_lang, fallback_encoding)
self.encoding = fallback_encoding
return
else:
log.debug("encoding: %s fallback encoding (%r) was *wrong*",
fallback_lang, fallback_encoding)
self._encoding_bozo(
"%s fallback encoding (%s) could not decode %s"
% (fallback_lang, fallback_encoding, self._accessor))
# 12. chardet (http://chardet.feedparser.org/)
# Note: I'm leary of using this b/c (a) it's a sizeable perf
# hit and (b) false positives -- for example, the first 8kB of
# /usr/bin/php on Mac OS X 10.4.10 is ISO-8859-2 with 44%
# confidence. :)
# Solution: (a) Only allow for content we know is not binary
# (from langinfo association); and (b) can be disabled via
# CHARDET_ENABLED class attribute.
if self.CHARDET_ENABLED and self.langinfo and self.langinfo.is_text:
try:
import chardet
except ImportError:
warnings.warn("no chardet module to aid in guessing encoding",
ChardetImportWarning)
else:
chardet_info = chardet.detect(head_bytes)
if chardet_info["encoding"] \
and chardet_info["confidence"] > self.CHARDET_THRESHHOLD:
chardet_encoding = chardet_info["encoding"]
norm_chardet_encoding = _norm_encoding(chardet_encoding)
if self._accessor.decode(norm_chardet_encoding):
log.debug("chardet encoding: %r", chardet_encoding)
self.encoding = norm_chardet_encoding
return
# 13. locale.getpreferredencoding()
# Typical values for this:
# Windows: cp1252 (aka windows-1252)
# Mac OS X: mac-roman
# Linux: UTF-8 (modern Linux anyway)
# Solaris 8: 464 (aka ASCII)
locale_encoding = locale.getpreferredencoding()
if locale_encoding:
norm_locale_encoding = _norm_encoding(locale_encoding)
if self._accessor.decode(norm_locale_encoding):
log.debug("encoding: locale preferred encoding: %r",
locale_encoding)
self.encoding = norm_locale_encoding
return
# 14. iso8859-1
norm_fallback8bit_encoding = _norm_encoding("iso8859-1")
if self._accessor.decode(norm_fallback8bit_encoding):
log.debug(
"fallback 8-bit encoding: %r", norm_fallback8bit_encoding)
self.encoding = norm_fallback8bit_encoding
return
# We couldn't find an encoding that works. Give up and presume
# this is binary content.
self.is_text = False
def _encoding_bozo(self, reason):
self.encoding_bozo = True
if self.encoding_bozo_reasons is None:
self.encoding_bozo_reasons = []
self.encoding_bozo_reasons.append(reason)
# c.f. http://www.xml.com/axml/target.html#NT-prolog
_xml_prolog_pat_s = re.compile(
r'''<\?xml
( # strict ordering is reqd but we'll be liberal here
\s+version=['"](?P<ver>.*?)['"]
| \s+encoding=['"](?P<enc>.*?)['"]
)+
.*? # other possible junk
\s*\?>
''',
re.VERBOSE | re.DOTALL
)
_xml_prolog_pat_b = re.compile(
br'''<\?xml
( # strict ordering is reqd but we'll be liberal here
\s+version=['"](?P<ver>.*?)['"]
| \s+encoding=['"](?P<enc>.*?)['"]
)+
.*? # other possible junk
\s*\?>
''',
re.VERBOSE | re.DOTALL
)
def _get_xml_prolog_info(self, head_bytes,
_xml_prolog_pat,
_start,
):
"""Parse out info from the '<?xml version=...' prolog, if any.
Returns (<has-xml-prolog>, <xml-version>, <xml-encoding>). Examples:
(False, None, None)
(True, "1.0", None)
(True, "1.0", "UTF-16")
"""
# Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
# that should have been picked up by an earlier BOM check or via
# the subsequent heuristic check for UTF-16 without a BOM.
if not head_bytes.startswith(_start):
return (False, None, None)
# Try to extract more info from the prolog.
match = _xml_prolog_pat.match(head_bytes)
if not match:
if log.isEnabledFor(logging.DEBUG):
log.debug("`%s': could not match XML prolog: '%s'", self.path,
_one_line_summary_from_text(_to_str(head_bytes), 40))
return (False, None, None)
xml_version = match.group("ver")
xml_encoding = match.group("enc")
return (True, xml_version, xml_encoding)
def _get_xml_prolog_info_s(self, head_bytes):
return self._get_xml_prolog_info(head_bytes,
self._xml_prolog_pat_s,
b'<?xml',
)
def _get_xml_prolog_info_b(self, head_bytes):
return self._get_xml_prolog_info(head_bytes,
self._xml_prolog_pat_b,
'<?xml',
)
_html_meta_tag_pat_s = re.compile(r"""
(<meta
(?:\s+[\w-]+\s*=\s*(?:".*?"|'.*?'))+ # attributes
\s*/?>)
""",
re.IGNORECASE | re.VERBOSE
)
_html_meta_tag_pat_b = re.compile(br"""
(<meta
(?:\s+[\w-]+\s*=\s*(?:".*?"|'.*?'))+ # attributes
\s*/?>)
""",
re.IGNORECASE | re.VERBOSE
)
_html_attr_pat_s = re.compile(
# Currently requiring XML attrs (i.e. quoted value).
r'''(?:\s+([\w-]+)\s*=\s*(".*?"|'.*?'))'''
)
_html_attr_pat_b = re.compile(
# Currently requiring XML attrs (i.e. quoted value).
br'''(?:\s+([\w-]+)\s*=\s*(".*?"|'.*?'))'''
)
_http_content_type_splitter_s = re.compile(r";\s*")
_http_content_type_splitter_b = re.compile(br";\s*")
def _get_http_content_type_info(self, head_bytes,
_html_meta_tag_pat,
_html_attr_pat,
_http_content_type_splitter,
_http_equiv,
_content,
_content_type,
_charset,
_empty,
_str1,
_str2,
):
"""Returns info extracted from an HTML content-type meta tag if any.
Returns (<has-http-content-type-info>, <content-type>, <charset>).
For example:
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
yields:
(True, "text/html", "utf-8")
"""
# Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
# that should have been picked up by an earlier BOM check.
# Otherwise we rely on `chardet` to cover us.
# Parse out '<meta ...>' tags, then the attributes in them.
for meta_tag in _html_meta_tag_pat.findall(head_bytes):
meta = dict((k.lower(), v[1:-1])
for k, v in _html_attr_pat.findall(meta_tag))
if _http_equiv in meta \
and meta[_http_equiv].lower() == _content_type:
content = meta.get(_content, _empty)
break
else:
return (False, None, None)
# We found a http-equiv="Content-Type" tag, parse its content
# attribute value.
parts = [
p.strip() for p in _http_content_type_splitter.split(content)]
if not parts:
return (False, None, None)
content_type = parts[0] or None
for p in parts[1:]:
if p.lower().startswith(_charset):
charset = p[len(_charset):]
if charset and charset[0] in (_str1, _str2):
charset = charset[1:]
if charset and charset[-1] in (_str1, _str2):
charset = charset[:-1]
break
else:
charset = None
return (True, content_type, charset)
def _get_http_content_type_info_s(self, head_bytes):
return self._get_http_content_type_info(head_bytes,
self._html_meta_tag_pat_s,
self._html_attr_pat_s,
self._http_content_type_splitter_s,
"http-equiv",
"content",
"content-type",
"charset=",
"",
"'",
'"',
)
def _get_http_content_type_info_b(self, head_bytes):
return self._get_http_content_type_info(head_bytes,
self._html_meta_tag_pat_b,
self._html_attr_pat_b,
self._http_content_type_splitter_b,
b"http-equiv",
b"content",
b"content-type",
b"charset=",
b"",
b"'",
b'"',
)
# TODO: Note that this isn't going to catch the current HTML 5
# doctype: '<!DOCTYPE html>'
_doctype_decl_re_s = re.compile(r'''
<!DOCTYPE
\s+(?P<name>[a-zA-Z_:][\w:.-]*)
\s+(?:
SYSTEM\s+(["'])(?P<system_id_a>.*?)\2
|
PUBLIC
\s+(["'])(?P<public_id_b>.*?)\4
# HTML 3.2 and 2.0 doctypes don't include a system-id.
(?:\s+(["'])(?P<system_id_b>.*?)\6)?
)
(\s*\[.*?\])?
\s*>
''', re.IGNORECASE | re.DOTALL | re.UNICODE | re.VERBOSE)
_doctype_decl_re_b = re.compile(br'''
<!DOCTYPE
\s+(?P<name>[a-zA-Z_:][\w:.-]*)
\s+(?:
SYSTEM\s+(["'])(?P<system_id_a>.*?)\2
|
PUBLIC
\s+(["'])(?P<public_id_b>.*?)\4
# HTML 3.2 and 2.0 doctypes don't include a system-id.
(?:\s+(["'])(?P<system_id_b>.*?)\6)?
)
(\s*\[.*?\])?
\s*>
''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
def _get_doctype_decl_info(self, head,
_doctype_decl_re,
_doctype,
_spaces,
_space,
):
"""Parse out DOCTYPE info from the given XML or HTML content.
Returns a tuple of the form:
(<has-doctype-decl>, <doctype-decl>,
<name>, <public-id>, <system-id>)
The <public-id> is normalized as per this comment in the XML 1.0
spec:
Before a match is attempted, all strings of white space in the
public identifier must be normalized to single space
characters (#x20), and leading and trailing white space must
be removed.
Examples:
(False, None, None, None, None)
(True, '<!DOCTYPE greeting SYSTEM "hello.dtd">',
'greeting', None, 'hello.dtd'),
(True,
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">',
'html',
'-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd')
Here is the spec for DOCTYPE decls in XML:
http://www.xml.com/axml/target.html#NT-doctypedecl
We loosely follow this to allow for some decls in HTML that isn't
proper XML. As well, we are only parsing out decls that reference
an external ID, as opposed to those that define entities locally.
"""
if _doctype not in head: # quick out
return (False, None, None, None, None)
m = _doctype_decl_re.search(head)
if not m:
return (False, None, None, None, None)
d = m.groupdict()
name = d.get("name")
system_id = d.get("system_id_a") or d.get("system_id_b")
public_id = d.get("public_id_b")
if public_id:
public_id = re.sub(_spaces, _space, public_id.strip()) # normalize
return (True, m.group(0), name, public_id, system_id)
def _get_doctype_decl_info_s(self, head):
return self._get_doctype_decl_info(head,
self._doctype_decl_re_s,
"<!DOCTYPE",
"\s+",
' ',
)
def _get_doctype_decl_info_b(self, head):
return self._get_doctype_decl_info(head,
self._doctype_decl_re_b,
b"<!DOCTYPE",
b"\s+",
b' ',
)
_emacs_vars_head_pat_s = re.compile(r'-\*-\s*(.*?)\s*-\*-')
_emacs_vars_head_pat_b = re.compile(br'-\*-\s*(.*?)\s*-\*-')
_emacs_head_vars_cache_s = None
_emacs_head_vars_cache_b = None
def _get_emacs_head_vars(self, head_bytes,
_emacs_vars_head_pat,
_one_liner,
_new_line,
_colon,
_semi_colon,
_str1,
_str2,
_mode,
):
"""Return a dictionary of emacs-style local variables in the head.
"Head" emacs vars on the ones in the '-*- ... -*-' one-liner.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
# Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
# that should have been picked up by an earlier BOM check.
# Otherwise we rely on `chardet` to cover us.
# Search the head for a '-*-'-style one-liner of variables.
emacs_vars = {}
if _one_liner in head_bytes:
match = _emacs_vars_head_pat.search(head_bytes)
if match:
emacs_vars_str = match.group(1)
if _new_line in emacs_vars_str:
raise ValueError("local variables error: -*- not "
"terminated before end of line")
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(_semi_colon)
if s.strip()]
if len(emacs_var_strs) == 1 and _colon not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars[_mode] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(
_colon, 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
# Unquote values.
for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith(_str1) and val.endswith(_str1)
or val.startswith(_str2) and val.endswith(_str2)):
emacs_vars[var] = val[1:-1]
return emacs_vars
def _get_emacs_head_vars_s(self, head_bytes):
if self._emacs_head_vars_cache_s is None:
self._emacs_head_vars_cache_s = self._get_emacs_head_vars(head_bytes,
self._emacs_vars_head_pat_s,
'-*-',
'\n',
':',
';',
'"',
"'",
'mode',
)
return self._emacs_head_vars_cache_s
def _get_emacs_head_vars_b(self, head_bytes):
if self._emacs_head_vars_cache_b is None:
self._emacs_head_vars_cache_b = self._get_emacs_head_vars(head_bytes,
self._emacs_vars_head_pat_b,
b'-*-',
b'\n',
b':',
b';',
b'"',
b"'",
b'mode',
)
return self._emacs_head_vars_cache_b
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_vars_tail_pat_s = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
_emacs_vars_tail_pat_b = re.compile(br"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
_emacs_tail_vars_cache = None
def _get_emacs_tail_vars(self, tail_bytes,
_emacs_vars_tail_pat,
_local_variables,
_continued_for,
_colon,
_space,
_str1,
_str2,
):
r"""Return a dictionary of emacs-style local variables in the tail.
"Tail" emacs vars on the ones in the multi-line "Local
Variables:" block.
>>> TextInfo()._get_emacs_tail_vars('# Local Variables:\n# foo: bar\n# End:')
{'foo': 'bar'}
>>> TextInfo()._get_emacs_tail_vars('# Local Variables:\n# foo: bar\\\n# baz\n# End:')
{'foo': 'bar baz'}
>>> TextInfo()._get_emacs_tail_vars('# Local Variables:\n# quoted: "bar "\n# End:')
{'quoted': 'bar '}
Parsing is done according to this spec (and according to some
in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_chapter/emacs_33.html#SEC485
"""
# Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
# that should have been picked up by an earlier BOM check.
# Otherwise we rely on `chardet` to cover us.
if self._emacs_tail_vars_cache is not None:
return self._emacs_tail_vars_cache
emacs_vars = {}
if _local_variables not in tail_bytes:
self._emacs_tail_vars_cache = emacs_vars
return emacs_vars
match = _emacs_vars_tail_pat.search(tail_bytes)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
# print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix:
line = line[len(prefix):] # strip prefix
if suffix:
line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith(_continued_for):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += _space + line
else:
try:
variable, value = line.split(_colon, 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this
# block.
value = value.strip()
if value.endswith(_continued_for):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith(_str1) and val.endswith(_str1)
or val.startswith(_str2) and val.endswith(_str2)):
emacs_vars[var] = val[1:-1]
self._emacs_tail_vars_cache = emacs_vars
return emacs_vars
def _get_emacs_tail_vars_s(self, tail_bytes):
return self._get_emacs_tail_vars(tail_bytes,
self._emacs_vars_tail_pat_s,
"Local Variables",
'\\',
':',
' ',
'"',
"'",
)
def _get_emacs_tail_vars_b(self, tail_bytes):
return self._get_emacs_tail_vars(tail_bytes,
self._emacs_vars_tail_pat_b,
b"Local Variables",
b'\\',
b':',
b' ',
b'"',
b"'",
)
# Note: It might nice if parser also gave which of 'vi, vim, ex' and
# the range in the accessor.
_vi_vars_pats_and_splitters_s = [
(re.compile(r'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
re.compile(r'[ \t]+')),
(re.compile(r'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*(?P<rhs>.*?)$', re.M),
re.compile(r'[ \t:]+')),
(re.compile(r'^(vi|vim([<>=]?\d{3})?):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
re.compile(r'[ \t]+')),
]
_vi_vars_pats_and_splitters_b = [
(re.compile(br'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
re.compile(br'[ \t]+')),
(re.compile(br'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*(?P<rhs>.*?)$', re.M),
re.compile(br'[ \t:]+')),
(re.compile(br'^(vi|vim([<>=]?\d{3})?):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
re.compile(br'[ \t]+')),
]
_vi_vars_cache_b = None
_vi_vars_cache_s = None
def _get_vi_vars(self, bytes,
_vi_vars_pats_and_splitters,
_types,
_eq,
_colon,
_ecolon,
):
r"""Return a dict of Vi[m] modeline vars.
See ":help modeline" in Vim for a spec.
>>> TextInfo()._get_vi_vars("/* vim: set ai tw=75: */")
{'ai': None, 'tw': 75}
>>> TextInfo()._get_vi_vars("vim: set ai tw=75: bar")
{'ai': None, 'tw': 75}
>>> TextInfo()._get_vi_vars("vi: set foo:bar")
{'foo': None}
>>> TextInfo()._get_vi_vars(" vi: se foo:bar")
{'foo': None}
>>> TextInfo()._get_vi_vars(" ex: se foo:bar")
{'foo': None}
>>> TextInfo()._get_vi_vars(" vi:noai:sw=3 tw=75")
{'tw': 75, 'sw': 3, 'noai': None}
>>> TextInfo()._get_vi_vars(" vi:noai:sw=3 tw=75")
{'tw': 75, 'sw': 3, 'noai': None}
>>> TextInfo()._get_vi_vars("ex: se foo:bar")
{}
Some edge cases:
>>> TextInfo()._get_vi_vars(r"/* vi:set dir=c\:\tmp: */")
{'dir': 'c:\\tmp'}
"""
# Presume 8-bit encoding... yada yada.
vi_vars = {}
# TODO: Consider reducing support to just "vi:" for speed. This
# function takes way too much time.
if not any(t in bytes for t in _types):
return vi_vars
for pat, splitter in _vi_vars_pats_and_splitters:
match = pat.search(bytes)
if match:
for var_str in splitter.split(match.group("rhs")):
if _eq in var_str:
name, value = var_str.split(_eq, 1)
try:
vi_vars[name] = int(value)
except ValueError:
vi_vars[name] = value.replace(_ecolon, _colon)
else:
vi_vars[var_str] = None
break
return vi_vars
def _get_vi_vars_s(self, bytes):
if self._vi_vars_cache_s is None:
self._vi_vars_cache_s = self._get_vi_vars(bytes,
self._vi_vars_pats_and_splitters_s,
['vi:', 'ex:', 'vim:'],
'=',
':',
'\\:',
)
return self._vi_vars_cache_s
def _get_vi_vars_b(self, bytes):
    # bytes-flavoured wrapper around _get_vi_vars(); memoized per
    # instance, same as _get_vi_vars_s().
    if self._vi_vars_cache_b is None:
        self._vi_vars_cache_b = self._get_vi_vars(bytes,
            self._vi_vars_pats_and_splitters_b,
            [b'vi:', b'ex:', b'vim:'],
            b'=',
            b':',
            b'\\:',
        )
    return self._vi_vars_cache_b
def _get_bom_info(self):
r"""Returns (<has-bom>, <bom>, <bom-encoding>). Examples:
(True, '\xef\xbb\xbf', "utf-8")
(True, '\xff\xfe', "utf-16-le")
(False, None, None)
"""
boms_and_encodings = [ # in order from longest to shortest
(codecs.BOM_UTF32_LE, "utf-32-le"),
(codecs.BOM_UTF32_BE, "utf-32-be"),
(codecs.BOM_UTF8, "utf-8"),
(codecs.BOM_UTF16_LE, "utf-16-le"),
(codecs.BOM_UTF16_BE, "utf-16-be"),
]
head_4 = self._accessor.head_4_bytes
for bom, encoding in boms_and_encodings:
if head_4.startswith(bom):
return (True, bom, encoding)
break
else:
return (False, None, None)
def _classify_from_filename(self, lidb, env):
    """Classify from the path *filename* only.

    Sets `lang' and `langinfo', if can be determined.

    `lidb` must provide `langinfo_from_ext()` and
    `langinfo_from_filename()`; `env` is an optional environment that
    may override filename->language associations (or None).
    """
    filename = basename(self.path)

    # Give the environment first crack, so user/config associations
    # override the stock langinfo database.
    if env is not None:
        li = env.langinfo_from_filename(filename)
        if li:
            log.debug("lang from env: `%s' -> `%s'", filename, li.name)
            self.langinfo = li
            self.lang = li.name
            self.is_text = li.is_text
            return

    # ...from the ext
    # Scan each '.' left-to-right so compound extensions are tried
    # longest-first, e.g. "foo.django.html" tries ".django.html"
    # before ".html".
    idx = 0
    while True:
        idx = filename.find('.', idx)
        if idx == -1:
            break
        ext = filename[idx:]
        li = lidb.langinfo_from_ext(ext)
        if li:
            log.debug("lang from ext: `%s' -> `%s'", ext, li.name)
            self.langinfo = li
            self.lang = li.name
            self.is_text = li.is_text
            return
        idx += 1

    # ...from file basename (e.g. "Makefile")
    li = lidb.langinfo_from_filename(filename)
    if li:
        log.debug("lang from filename: `%s' -> `%s'", filename, li.name)
        self.langinfo = li
        self.lang = li.name
        self.is_text = li.is_text
        return
def _classify_from_stat(self, lidb):
"""Set some `file_*' attributes from stat mode."""
from stat import S_ISREG, S_ISDIR, S_ISLNK, S_ISFIFO, S_ISSOCK, \
S_ISBLK, S_ISCHR, S_IMODE, S_IFMT
stat = self._accessor.stat
st_mode = stat.st_mode
self.file_type = S_IFMT(st_mode)
self.file_mode = S_IMODE(st_mode)
self.file_stat = stat
if S_ISREG(st_mode):
self.file_type_name = "regular file"
elif S_ISDIR(st_mode):
self.file_type_name = "directory"
elif S_ISLNK(st_mode):
self.file_type_name = "symbolic link"
elif S_ISFIFO(st_mode):
self.file_type_name = "fifo"
elif S_ISSOCK(st_mode):
self.file_type_name = "socket"
elif S_ISBLK(st_mode):
self.file_type_name = "block special"
elif S_ISCHR(st_mode):
self.file_type_name = "character special"
def _norm_encoding(encoding):
"""Normalize the encoding name -- where "normalized" is what
Python's codec's module calls it.
Interesting link:
The IANA-registered set of character sets.
http://www.iana.org/assignments/character-sets
"""
try:
# This requires Python >=2.5.
return codecs.lookup(encoding).name
except LookupError:
return encoding
#---- accessor API
# The idea here is to abstract accessing the text file content being
# classified to allow, e.g. classifying content without a file, from
# a Komodo buffer, etc.

class Accessor(object):
    """Virtual base class defining Accessor API for accessing
    text content.
    """
    # API:
    #   prop head_bytes -> head 8k bytes
    #   prop head_4_bytes -> head 4 bytes (useful for BOM detection)
    #   prop tail_bytes -> tail 8k bytes
    #   def bytes_range(start, end) -> bytes in that range
    HEAD_SIZE = pow(2, 13)  # 8k
    TAIL_SIZE = pow(2, 13)  # 8k

    encoding = None
    text = None

    _unsuccessful_encodings = None

    def decode(self, encoding):
        """Try to decode the content with the given `encoding`.

        On success, sets `self.text` and `self.encoding` and returns
        True; otherwise returns False.

        Two optimizations:
        - Only the head chunk is decoded first, so an undecodable
          binary file is rejected without reading it all (see
          `TextInfo._classify_encoding()`).
        - Failed encodings are cached so a decode is never attempted
          twice.
        """
        if self._unsuccessful_encodings is None:
            self._unsuccessful_encodings = set()
        if encoding in self._unsuccessful_encodings:
            return False
        if encoding == self.encoding:
            return True

        if not self._head_decodes(encoding):
            self._unsuccessful_encodings.add(encoding)
            return False
        try:
            decoded = self.bytes.decode(encoding, 'strict')
        except UnicodeError:
            self._unsuccessful_encodings.add(encoding)
            return False
        self.text = decoded
        self.encoding = encoding
        return True

    def _head_decodes(self, encoding):
        # Quick sniff: can the head chunk be decoded?  A UnicodeError in
        # the last few bytes is forgiven, because a multi-byte sequence
        # may have been cut off at the head boundary (max 6 bytes per
        # char in UTF-8); a genuine mismatch is caught by the full
        # decode in decode().
        try:
            self.head_bytes.decode(encoding, 'strict')
        except LookupError:
            log.debug("encoding lookup error: %r", encoding)
            return False
        except UnicodeError as ex:
            return ex.start >= self.HEAD_SIZE - 5
        return True
class PathAccessor(Accessor):
    """Accessor API for a local file path.

    Content is read lazily in up to three stages -- head (8k), tail
    (8k), then everything -- tracked by the READ_* state machine, so
    classification that only needs the head never reads the whole file.
    """
    (READ_NONE,   # _file==None, file not opened yet
     READ_HEAD,   # _bytes==<head bytes>
     READ_TAIL,   # _bytes==<head>, _bytes_tail==<tail>
     READ_ALL) = list(range(4))  # _bytes==<all>, _bytes_tail==None, _file closed
    _read_state = READ_NONE  # one of the READ_* states
    _file = None
    _bytes = None
    _bytes_tail = None

    def __init__(self, path, follow_symlinks=False):
        self.path = path
        self.follow_symlinks = follow_symlinks

    def __str__(self):
        return "path `%s'" % self.path

    _stat_cache = None

    @property
    def stat(self):
        # Cached stat of the path; lstat() unless following symlinks.
        if self._stat_cache is None:
            if self.follow_symlinks:
                self._stat_cache = os.stat(self.path)
            else:
                self._stat_cache = os.lstat(self.path)
        return self._stat_cache

    @property
    def size(self):
        return self.stat.st_size

    def __del__(self):
        self.close()

    def close(self):
        if self._file and not self._file.closed:
            self._file.close()

    def _read(self, state):
        """Read up to at least `state`, reusing whatever has already
        been read.  Once everything is in memory the file is closed.
        """
        # TODO: If `follow_symlinks` is False and this is a symlink we
        #      must use os.readlink() here.
        # It is the job of the caller to only call _read() if necessary.
        assert self._read_state < state
        try:
            if self._read_state == self.READ_NONE:
                assert self._file is None and self._bytes is None
                self._file = open(self.path, 'rb')
                if state == self.READ_HEAD:
                    self._bytes = self._file.read(self.HEAD_SIZE)
                    # A short file is fully consumed by the head read.
                    self._read_state = (self.size <= self.HEAD_SIZE
                                        and self.READ_ALL or self.READ_HEAD)
                elif state == self.READ_TAIL:
                    if self.size <= self.HEAD_SIZE + self.TAIL_SIZE:
                        self._bytes = self._file.read()
                        self._read_state = self.READ_ALL
                    else:
                        self._bytes = self._file.read(self.HEAD_SIZE)
                        self._file.seek(
                            -self.TAIL_SIZE, 2)  # 2 == relative to end
                        self._bytes_tail = self._file.read(self.TAIL_SIZE)
                        self._read_state = self.READ_TAIL
                elif state == self.READ_ALL:
                    self._bytes = self._file.read()
                    self._read_state = self.READ_ALL
            elif self._read_state == self.READ_HEAD:
                if state == self.READ_TAIL:
                    if self.size <= self.HEAD_SIZE + self.TAIL_SIZE:
                        self._bytes += self._file.read()
                        self._read_state = self.READ_ALL
                    else:
                        self._file.seek(
                            -self.TAIL_SIZE, 2)  # 2 == relative to end
                        self._bytes_tail = self._file.read(self.TAIL_SIZE)
                        self._read_state = self.READ_TAIL
                elif state == self.READ_ALL:
                    self._bytes += self._file.read()
                    self._read_state = self.READ_ALL
            elif self._read_state == self.READ_TAIL:
                assert state == self.READ_ALL
                # Fill in the middle chunk between head and tail.
                self._file.seek(self.HEAD_SIZE, 0)  # 0 == relative to start
                remaining_size = self.size - self.HEAD_SIZE - self.TAIL_SIZE
                # Fixed: the message previously reported `self.size`,
                # not the remaining count it claimed to report.
                assert remaining_size > 0, \
                    "negative remaining bytes to read from '%s': %d" \
                    % (self.path, remaining_size)
                self._bytes += self._file.read(remaining_size)
                self._bytes += self._bytes_tail
                self._bytes_tail = None
                self._read_state = self.READ_ALL

            if self._read_state == self.READ_ALL:
                self.close()
        except Exception as ex:
            # `log.warning` (log.warn is a deprecated alias).
            log.warning("Could not read file: %r due to: %r", self.path, ex)
            raise

    def strip_bom(self, bom):
        """This should be called by the user of this class to strip a
        detected BOM from the bytes for subsequent decoding and
        analysis.
        """
        assert self._bytes[:len(bom)] == bom
        self._bytes = self._bytes[len(bom):]

    @property
    def head_bytes(self):
        """The first 8k raw bytes of the document."""
        if self._read_state < self.READ_HEAD:
            self._read(self.READ_HEAD)
        return self._bytes[:self.HEAD_SIZE]

    @property
    def head_4_bytes(self):
        if self._read_state < self.READ_HEAD:
            self._read(self.READ_HEAD)
        return self._bytes[:4]

    @property
    def tail_bytes(self):
        if self._read_state < self.READ_TAIL:
            self._read(self.READ_TAIL)
        if self._read_state == self.READ_ALL:
            return self._bytes[-self.TAIL_SIZE:]
        else:
            return self._bytes_tail

    def bytes_range(self, start, end):
        if self._read_state < self.READ_ALL:
            self._read(self.READ_ALL)
        return self._bytes[start:end]

    @property
    def bytes(self):
        if self._read_state < self.READ_ALL:
            self._read(self.READ_ALL)
        return self._bytes
#---- internal support stuff
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: text_escape (0.2)
def _escaped_text_from_text(text, escapes="eol"):
r"""Return escaped version of text.
"escapes" is either a mapping of chars in the source text to
replacement text for each such char or one of a set of
strings identifying a particular escape style:
eol
replace EOL chars with '\r' and '\n', maintain the actual
EOLs though too
whitespace
replace EOL chars as above, tabs with '\t' and spaces
with periods ('.')
eol-one-line
replace EOL chars with '\r' and '\n'
whitespace-one-line
replace EOL chars as above, tabs with '\t' and spaces
with periods ('.')
"""
# TODO:
# - Add 'c-string' style.
# - Add _escaped_html_from_text() with a similar call sig.
import re
if isinstance(escapes, str):
if escapes == "eol":
escapes = {'\r\n': "\\r\\n\r\n", '\n': "\\n\n", '\r': "\\r\r"}
elif escapes == "whitespace":
escapes = {'\r\n': "\\r\\n\r\n", '\n': "\\n\n", '\r': "\\r\r",
'\t': "\\t", ' ': "."}
elif escapes == "eol-one-line":
escapes = {'\n': "\\n", '\r': "\\r"}
elif escapes == "whitespace-one-line":
escapes = {'\n': "\\n", '\r': "\\r", '\t': "\\t", ' ': '.'}
else:
raise ValueError("unknown text escape style: %r" % escapes)
# Sort longer replacements first to allow, e.g. '\r\n' to beat '\r' and
# '\n'.
escapes_keys = list(escapes.keys())
try:
escapes_keys.sort(key=lambda a: len(a), reverse=True)
except TypeError:
# Python 2.3 support: sort() takes no keyword arguments
escapes_keys.sort(lambda a, b: cmp(len(a), len(b)))
escapes_keys.reverse()
def repl(match):
val = escapes[match.group(0)]
return val
escaped = re.sub("(%s)" % '|'.join([re.escape(k) for k in escapes_keys]),
repl,
text)
return escaped
def _one_line_summary_from_text(text, length=78,
                                escapes={'\n': "\\n", '\r': "\\r", '\t': "\\t"}):
    r"""Summarize the given text with one line of the given length.

        "text" is the text to summarize
        "length" (default 78) is the max length for the summary
        "escapes" is a mapping of chars in the source text to
            replacement text for each such char.  By default '\r', '\n'
            and '\t' are escaped with their '\'-escaped repr.

    Note: the shared mutable default for `escapes` is safe because it
    is only ever read, never mutated.
    """
    truncate = len(text) > length
    head = text[:length - 3] if truncate else text
    escaped = _escaped_text_from_text(head, escapes)
    # Escaping can lengthen the head again, so clip once more.
    return (escaped[:length - 3] + "...") if truncate else escaped
# Recipe: paths_from_path_patterns (0.5)
def _should_include_path(path, includes, excludes):
"""Return True iff the given path should be included."""
from os.path import basename
from fnmatch import fnmatch
base = basename(path)
if includes:
for include in includes:
if fnmatch(base, include):
try:
log.debug("include `%s' (matches `%s')", path, include)
except (NameError, AttributeError):
pass
break
else:
try:
log.debug("exclude `%s' (matches no includes)", path)
except (NameError, AttributeError):
pass
return False
for exclude in excludes:
if fnmatch(base, exclude):
try:
log.debug("exclude `%s' (matches `%s')", path, exclude)
except (NameError, AttributeError):
pass
return False
return True
def _walk(top, topdown=True, onerror=None, follow_symlinks=False):
"""A version of `os.walk()` with a couple differences regarding symlinks.
1. follow_symlinks=False (the default): A symlink to a dir is
returned as a *non*-dir. In `os.walk()`, a symlink to a dir is
returned in the *dirs* list, but it is not recursed into.
2. follow_symlinks=True: A symlink to a dir is returned in the
*dirs* list (as with `os.walk()`) but it *is conditionally*
recursed into (unlike `os.walk()`).
A symlinked dir is only recursed into if it is to a deeper dir
within the same tree. This is my understanding of how `find -L
DIR` works.
TODO: put as a separate recipe
"""
from os.path import join, isdir, islink, abspath
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
names = os.listdir(top)
except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
if follow_symlinks:
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
else:
for name in names:
path = join(top, name)
if islink(path):
nondirs.append(name)
elif isdir(path):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if follow_symlinks and islink(path):
# Only walk this path if it links deeper in the same tree.
top_abs = abspath(top)
link_abs = abspath(join(top, os.readlink(path)))
if not link_abs.startswith(top_abs + os.sep):
continue
for x in _walk(path, topdown, onerror, follow_symlinks=follow_symlinks):
yield x
if not topdown:
yield top, dirs, nondirs
# Sentinel distinguishing "on_error not given" from an explicit
# `on_error=None` (which means "do nothing on error").
_NOT_SPECIFIED = ("NOT", "SPECIFIED")

def _paths_from_path_patterns(path_patterns, files=True, dirs="never",
                              recursive=True, includes=[], excludes=[],
                              skip_dupe_dirs=False,
                              follow_symlinks=False,
                              on_error=_NOT_SPECIFIED):
    """_paths_from_path_patterns([<path-patterns>, ...]) -> file paths

    Generate a list of paths (files and/or dirs) represented by the given path
    patterns.

        "path_patterns" is a list of paths optionally using the '*', '?' and
            '[seq]' glob patterns.
        "files" is boolean (default True) indicating if file paths
            should be yielded
        "dirs" is string indicating under what conditions dirs are
            yielded.  It must be one of:
              never             (default) never yield dirs
              always            yield all dirs matching given patterns
              if-not-recursive  only yield dirs for invocations when
                                recursive=False
            See use cases below for more details.
        "recursive" is boolean (default True) indicating if paths should
            be recursively yielded under given dirs.
        "includes" is a list of file patterns to include in recursive
            searches.
        "excludes" is a list of file and dir patterns to exclude.
            (Note: This is slightly different than GNU grep's --exclude
            option which only excludes *files*.  I.e. you cannot exclude
            a ".svn" dir.)
        "skip_dupe_dirs" can be set True to watch for and skip
            descending into a dir that has already been yielded. Note
            that this currently does not dereference symlinks.
        "follow_symlinks" is a boolean indicating whether to follow
            symlinks (default False).  To guard against infinite loops
            with circular dir symlinks, only dir symlinks to *deeper*
            are followed.
        "on_error" is an error callback called when a given path pattern
            matches nothing:
                on_error(PATH_PATTERN)
            If not specified, the default is look for a "log" global and
            call:
                log.error("`%s': No such file or directory")
            Specify None to do nothing.

    Typically this is useful for a command-line tool that takes a list
    of paths as arguments.  (For Unix-heads: the shell on Windows does
    NOT expand glob chars, that is left to the app.)

    Use case #1: like `grep -r`
      {files=True, dirs='never', recursive=(if '-r' in opts)}
        script FILE     # yield FILE, else call on_error(FILE)
        script DIR      # yield nothing
        script PATH*    # yield all files matching PATH*; if none,
                        # call on_error(PATH*) callback
        script -r DIR   # yield files (not dirs) recursively under DIR
        script -r PATH* # yield files matching PATH* and files recursively
                        # under dirs matching PATH*; if none, call
                        # on_error(PATH*) callback

    Use case #2: like `file -r` (if it had a recursive option)
      {files=True, dirs='if-not-recursive', recursive=(if '-r' in opts)}
        script FILE     # yield FILE, else call on_error(FILE)
        script DIR      # yield DIR, else call on_error(DIR)
        script PATH*    # yield all files and dirs matching PATH*; if none,
                        # call on_error(PATH*) callback
        script -r DIR   # yield files (not dirs) recursively under DIR
        script -r PATH* # yield files matching PATH* and files recursively
                        # under dirs matching PATH*; if none, call
                        # on_error(PATH*) callback

    Use case #3: kind of like `find .`
      {files=True, dirs='always', recursive=(if '-r' in opts)}
        script FILE     # yield FILE, else call on_error(FILE)
        script DIR      # yield DIR, else call on_error(DIR)
        script PATH*    # yield all files and dirs matching PATH*; if none,
                        # call on_error(PATH*) callback
        script -r DIR   # yield files and dirs recursively under DIR
                        # (including DIR)
        script -r PATH* # yield files and dirs matching PATH* and recursively
                        # under dirs; if none, call on_error(PATH*)
                        # callback

    TODO: perf improvements (profile, stat just once)
    """
    from os.path import basename, exists, isdir, join, normpath, abspath, \
        lexists, islink, realpath
    from glob import glob

    assert not isinstance(path_patterns, str), \
        "'path_patterns' must be a sequence, not a string: %r" % path_patterns
    GLOB_CHARS = '*?['

    if skip_dupe_dirs:
        searched_dirs = set()

    for path_pattern in path_patterns:
        # Determine the set of paths matching this path_pattern.
        for glob_char in GLOB_CHARS:
            if glob_char in path_pattern:
                paths = glob(path_pattern)
                break
        else:
            # No glob chars: the pattern is a literal path; lexists()
            # (vs exists()) so broken symlinks still count when not
            # following links.
            if follow_symlinks:
                paths = exists(path_pattern) and [path_pattern] or []
            else:
                paths = lexists(path_pattern) and [path_pattern] or []
        if not paths:
            if on_error is None:
                pass
            elif on_error is _NOT_SPECIFIED:
                # Default error handling: best-effort log.error().
                try:
                    log.error("`%s': No such file or directory", path_pattern)
                except (NameError, AttributeError):
                    pass
            else:
                on_error(path_pattern)

        for path in paths:
            if (follow_symlinks or not islink(path)) and isdir(path):
                if skip_dupe_dirs:
                    canon_path = normpath(abspath(path))
                    if follow_symlinks:
                        canon_path = realpath(canon_path)
                    if canon_path in searched_dirs:
                        continue
                    else:
                        searched_dirs.add(canon_path)

                # 'includes' SHOULD affect whether a dir is yielded.
                if (dirs == "always"
                    or (dirs == "if-not-recursive" and not recursive)
                   ) and _should_include_path(path, includes, excludes):
                    yield path

                # However, if recursive, 'includes' should NOT affect
                # whether a dir is recursed into. Otherwise you could
                # not:
                #   script -r --include="*.py" DIR
                if recursive and _should_include_path(path, [], excludes):
                    for dirpath, dirnames, filenames in _walk(path,
                            follow_symlinks=follow_symlinks):
                        dir_indeces_to_remove = []
                        for i, dirname in enumerate(dirnames):
                            d = join(dirpath, dirname)
                            if skip_dupe_dirs:
                                canon_d = normpath(abspath(d))
                                if follow_symlinks:
                                    canon_d = realpath(canon_d)
                                if canon_d in searched_dirs:
                                    dir_indeces_to_remove.append(i)
                                    continue
                                else:
                                    searched_dirs.add(canon_d)
                            if dirs == "always" \
                               and _should_include_path(d, includes, excludes):
                                yield d
                            if not _should_include_path(d, [], excludes):
                                dir_indeces_to_remove.append(i)
                        # Prune excluded/dupe dirs in-place (reversed so
                        # earlier indices stay valid) to stop the walk
                        # from descending into them.
                        for i in reversed(dir_indeces_to_remove):
                            del dirnames[i]
                        if files:
                            for filename in sorted(filenames):
                                f = join(dirpath, filename)
                                if _should_include_path(f, includes, excludes):
                                    yield f

            elif files and _should_include_path(path, includes, excludes):
                yield path
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
# Recipe: pretty_logging (0.1) in C:\trentm\tm\recipes\cookbook
class _PerLevelFormatter(logging.Formatter):
"""Allow multiple format string -- depending on the log level.
A "fmtFromLevel" optional arg is added to the constructor. It can be
a dictionary mapping a log record level to a format string. The
usual "fmt" argument acts as the default.
"""
def __init__(self, fmt=None, datefmt=None, fmtFromLevel=None):
logging.Formatter.__init__(self, fmt, datefmt)
if fmtFromLevel is None:
self.fmtFromLevel = {}
else:
self.fmtFromLevel = fmtFromLevel
def format(self, record):
record.lowerlevelname = record.levelname.lower()
if record.levelno in self.fmtFromLevel:
# XXX This is a non-threadsafe HACK. Really the base Formatter
# class should provide a hook accessor for the _fmt
# attribute. *Could* add a lock guard here (overkill?).
_saved_fmt = self._fmt
self._fmt = self.fmtFromLevel[record.levelno]
try:
return logging.Formatter.format(self, record)
finally:
self._fmt = _saved_fmt
else:
return logging.Formatter.format(self, record)
def _setup_logging(stream=None):
    """Configure root-logger output with a prettier default format:

        do: level: ...

    i.e. nicer spacing, lower-cased level, and no " level:" prefix for
    INFO-level records.
    """
    default_fmt = "%(name)s: %(levelname)s: %(message)s"
    info_fmt = "%(name)s: %(message)s"
    formatter = _PerLevelFormatter(fmt=default_fmt,
                                   fmtFromLevel={logging.INFO: info_fmt})
    handler = logging.StreamHandler(stream)
    handler.setFormatter(formatter)
    logging.root.addHandler(handler)
    log.setLevel(logging.INFO)
#---- mainline

def main(argv):
    """Command-line entry point: parse options, classify each given
    path with textinfo_from_path() and print the result.
    """
    usage = "usage: %prog PATHS..."
    version = "%prog "+__version__
    parser = optparse.OptionParser(usage=usage,
        version=version, description=_cmdln_doc,
        formatter=_NoReflowFormatter())
    parser.add_option("-v", "--verbose", dest="log_level",
                      action="store_const", const=logging.DEBUG,
                      help="more verbose output")
    parser.add_option("-q", "--quiet", dest="log_level",
                      action="store_const", const=logging.WARNING,
                      help="quieter output")
    parser.add_option("-r", "--recursive", action="store_true",
                      help="recursively descend into given paths")
    parser.add_option("-L", "--dereference", dest="follow_symlinks",
                      action="store_true",
                      help="follow symlinks, i.e. show info about linked-to "
                           "files and descend into linked dirs when recursive")
    parser.add_option("-Q", "--quick-determine-lang", action="store_true",
                      help="Skip some processing to attempt to determine "
                           "language. Things like specialization, emacs/vi "
                           "local vars, full decoding, are skipped.")
    parser.add_option("--encoding", help="suggested encoding for input files")
    parser.add_option("-f", "--format",
                      help="format of output: summary (default), dict")
    parser.add_option("-x", "--exclude", dest="excludes", action="append",
                      metavar="PATTERN",
                      help="path pattern to exclude for recursive search (by default SCC "
                           "control dirs are skipped)")
    parser.set_defaults(log_level=logging.INFO, encoding=None, recursive=False,
                        follow_symlinks=False, format="summary",
                        excludes=[".svn", "CVS", ".hg", ".git", ".bzr"],
                        quick_determine_lang=False)
    opts, args = parser.parse_args()
    log.setLevel(opts.log_level)
    if opts.log_level > logging.INFO:
        # In quiet mode, suppress the "chardet not installed" warning.
        warnings.simplefilter("ignore", ChardetImportWarning)

    # Paths come from argv, or -- when stdin is piped -- one per line
    # from stdin; with a tty and no args, just show the help.
    if args:
        path_patterns = args
    elif sys.stdin.isatty():
        parser.print_help()
        return 0
    else:
        def args_from_stdin():
            for line in sys.stdin:
                yield line.rstrip("\r\n")
        path_patterns = args_from_stdin()

    for path in _paths_from_path_patterns(
                    path_patterns, excludes=opts.excludes,
                    recursive=opts.recursive,
                    dirs="if-not-recursive",
                    follow_symlinks=opts.follow_symlinks):
        try:
            ti = textinfo_from_path(path, encoding=opts.encoding,
                    follow_symlinks=opts.follow_symlinks,
                    quick_determine_lang=opts.quick_determine_lang)
        except OSError as ex:
            # Keep going on unreadable paths; just report them.
            log.error("%s: %s", path, ex)
            continue
        if opts.format == "summary":
            print(ti.as_summary())
        elif opts.format == "dict":
            d = ti.as_dict()
            # The full decoded text would swamp the output.
            if "text" in d:
                del d["text"]
            pprint(d)
        else:
            raise TextInfoError("unknown output format: %r" % opts.format)
if __name__ == "__main__":
    _setup_logging()
    try:
        if "--self-test" in sys.argv:
            import doctest
            retval = doctest.testmod()[0]
        else:
            retval = main(sys.argv)
    except SystemExit:
        pass
    except KeyboardInterrupt:
        sys.exit(1)
    except:
        # Top-level boundary: log the error (with a traceback only in
        # debug mode) and exit non-zero.
        exc_info = sys.exc_info()
        if log.isEnabledFor(logging.DEBUG):
            import traceback
            print()
            traceback.print_exception(*exc_info)
        else:
            if hasattr(exc_info[0], "__name__"):
                # log.error("%s: %s", exc_info[0].__name__, exc_info[1])
                log.error(exc_info[1])
            else:  # string exception
                log.error(exc_info[0])
        sys.exit(1)
    else:
        sys.exit(retval)
| kpkhxlgy0/SublimeText3 | Packages/SublimeCodeIntel/libs/textinfo.py | Python | mit | 88,803 |
from jsmin import jsmin
xpath = """\
function xpath(root, xpath) {
if(root == null)
root = document;
var nt = XPathResult.ANY_UNORDERED_NODE_TYPE;
return document.evaluate(xpath, root, null, nt, null).singleNodeValue;
}
"""
in_flight = jsmin("""
function isHidden(el) {if(el === null) return true; return el.offsetParent === null;}
return {
jquery: jQuery.active,
prototype: (typeof Ajax === "undefined") ? 0 : Ajax.activeRequestCount,
miq: window.miqAjaxTimers,
spinner: (!isHidden(document.getElementById("spinner_div")))
&& isHidden(document.getElementById("lightbox_div")),
document: document.readyState,
autofocus: (typeof checkMiqQE === "undefined") ? 0 : checkMiqQE('autofocus'),
debounce: (typeof checkMiqQE === "undefined") ? 0 : checkMiqQE('debounce'),
miqQE: (typeof checkAllMiqQE === "undefined") ? 0 : checkAllMiqQE()
};
""")
update_retirement_date_function_script = """\
function updateDate(newValue) {
if(typeof $j == "undefined") {
var jq = $;
} else {
var jq = $j;
}
jq("#miq_date_1")[0].value = newValue;
miqSparkleOn();
jq.ajax({
type: 'POST',
url: '/vm_infra/retire_date_changed?miq_date_1='+newValue
}).done(
function(data){
eval(data);
}
)
}
"""
# Expects: arguments[0] = element, arguments[1] = value to set
set_angularjs_value_script = """\
(function(elem, value){
var angular_elem = angular.element(elem);
var $parse = angular_elem.injector().get('$parse');
var getter = $parse(elem.getAttribute('ng-model'));
var setter = getter.assign;
angular_elem.scope().$apply(function($scope) { setter($scope, value); });
}(arguments[0], arguments[1]));
"""
# The functions below do various JS magic to speed up the tree traversings to a maximum possible
# level.
# This function retrieves the root of the tree. Can wait for the tree to get initialized
_tree_get_root = """\
function get_root(loc) {
var start_time = new Date();
var root = null;
while(root === null && ((new Date()) - start_time) < 10000)
{
try {
root = $(loc).dynatree("getRoot");
} catch(err) {
// Nothing ...
}
}
return root;
}
"""
# This function is used to DRY the decision on which text to match
_get_level_name = xpath + """\
function get_level_name(level, by_id) {
if(by_id){
return level.li.getAttribute("id");
} else {
var e = xpath(level.li, "./span/a");
if(e === null)
return null;
else
return e.textContent;
}
}
"""
# needs xpath to work, provided by dependencies of the other functions
_expandable = """\
function expandable(el) {
return xpath(el.li, "./span/span[contains(@class, 'dynatree-expander')]") !== null;
}
"""
# This function reads whole tree. If it faces an ajax load, it returns false.
# If it does not return false, the result is complete.
read_tree = jsmin(_tree_get_root + _get_level_name + _expandable + """\
function read_tree(root, read_id, _root_tree) {
if(read_id === undefined)
read_id = false;
if(_root_tree === undefined)
_root_tree = true;
if(_root_tree) {
root = get_root(root);
if(root === null)
return null;
if(expandable(root) && (!root.bExpanded)) {
root.expand();
if(root.childList === null && root.data.isLazy){
return false;
}
}
var result = new Array();
var need_wait = false;
var children = (root.childList === null) ? [] : root.childList;
for(var i = 0; i < children.length; i++) {
var child = children[i];
var sub = read_tree(child, read_id, false);
if(sub === false)
need_wait = true;
else
result.push(sub);
}
if(need_wait)
return false;
else if(children.length == 0)
return null;
else
return result;
} else {
if(expandable(root) && (!root.bExpanded)) {
root.expand();
if(root.childList === null && root.data.isLazy){
return false;
}
}
var name = get_level_name(root, read_id);
var result = new Array();
var need_wait = false;
var children = (root.childList === null) ? [] : root.childList;
for(var i = 0; i < children.length; i++) {
var child = children[i];
var sub = read_tree(child, read_id, false);
if(sub === false)
need_wait = true;
else
result.push(sub);
}
if(need_wait)
return false;
else if(children.length == 0)
return name;
else
return [name, result]
}
}
""")
# This function searches for specified node by path. If it faces an ajax load, it returns false.
# If it does not return false, the result is complete.
find_leaf = jsmin(_tree_get_root + _get_level_name + _expandable + """\
function find_leaf(root, path, by_id) {
if(path.length == 0)
return null;
if(by_id === undefined)
by_id = false;
var item = get_root(root);
if(typeof item.childList === "undefined")
throw "CANNOT FIND TREE /" + root + "/";
var i; // The start of matching for path. Important because in one case, we already matched 1st
var lname = get_level_name(item, by_id);
if(item.childList.length == 1 && lname === null) {
item = item.childList[0];
i = 1;
if(get_level_name(item, by_id) != path[0])
throw "TREEITEM /" + path[0] + "/ NOT FOUND IN THE TREE";
} else if(lname === null) {
i = 0;
} else {
if(lname != path[0])
throw "TREEITEM /" + path[0] + "/ NOT FOUND IN THE TREE";
item = item.childList[0];
i = 1;
}
for(; i < path.length; i++) {
var last = (i + 1) == path.length;
var step = path[i];
var found = false;
if(expandable(item) && (!item.bExpanded)) {
item.expand();
if(item.childList === null)
return false; //We need to do wait_for_ajax and then repeat.
}
for(var j = 0; j < (item.childList || []).length; j++) {
var nextitem = item.childList[j];
var nextitem_name = get_level_name(nextitem, by_id);
if(nextitem_name == step) {
found = true;
item = nextitem;
break;
}
}
if(!found)
throw "TREEITEM /" + step + "/ NOT FOUND IN THE TREE";
}
return xpath(item.li, "./span/a");
}
""")
# TODO: Get the url: directly from the attribute in the page?
| nachandr/cfme_tests | cfme/js.py | Python | gpl-2.0 | 6,846 |
import rcpy
from rcpy._encoder import *
class Encoder:
    """Object wrapper around one quadrature-encoder channel of the
    rcpy._encoder module-level get()/set() functions."""

    def __init__(self, channel, count=None):
        """Bind to encoder `channel`; optionally initialize its count."""
        self.channel = channel
        if count is not None:
            self.set(count)

    def get(self):
        """Return the current count of this encoder channel."""
        return get(self.channel)

    def set(self, count):
        """Set the count of this encoder channel."""
        set(self.channel, count)

    def reset(self):
        """Zero the count of this encoder channel."""
        self.set(0)
# define encoders (one per hardware channel)
encoder1 = Encoder(1)
encoder2 = Encoder(2)
encoder3 = Encoder(3)
encoder4 = Encoder(4)
| mcdeoliveira/rcpy | rcpy/encoder.py | Python | mit | 469 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 12:56:06 2017
@author: sridhar
"""
from swan.widgets.mypgwidget import PyQtWidget3d
from numpy import count_nonzero, argmax, amax, zeros, trim_zeros, any as np_any
from sklearn.decomposition import PCA
from itertools import chain
class PgWidgetPCA(PyQtWidget3d):
def __init__(self, parent=None):
    """Set up the 3-D PCA view: toolbar layer radio buttons, axes,
    bookkeeping lists for scatter items and click handling."""
    PyQtWidget3d.__init__(self, parent=parent)

    layers = ["units", "sessions"]
    self.toolbar.setup_radio_buttons(layers)
    self.toolbar.doLayer.connect(self.trigger_refresh)
    self.toolbar.collapsible_widget.set_content_layout(self.toolbar.grid_layout)
    self.toolbar.main_grid_layout.setContentsMargins(0, 0, 0, 0)

    self.setup_axes(gl_options='opaque')

    self.positions = []    # scatter-plot items for individual points
    self.means = []        # scatter-plot items for per-unit means
    self.fill_alpha = 0.9  # alpha used for point colours

    self.pg_canvas.set_clickable(True)
    self.max_distance = 0

    self.wave_length = 0  # Initial dummy value, later updated from data
def add_scatter_plot(self, plot_item=None, gl_options='opaque'):
    # Delegate to the base-class helper that owns the GL items.
    self.add_scatter_plot_item(plot_item=plot_item, gl_options=gl_options)
def clear_plot(self):
    # Reset the canvas and drop our references to the old scatter items.
    self.reset_plot()
    self.positions = []
    self.means = []
def connect_means(self):
self.pg_canvas.set_means(self.means)
# for plot in self.pg_canvas.means:
# plot.sig_clicked.connect(self.get_item)
def do_plot(self, vum, data):
self.save_camera_position()
self.clear_plot()
if self.toolbar.activate_button.current_state:
layers = self.toolbar.get_checked_layers()
max_distance = 0
self.wave_length = data.get_wave_length()
active = vum.get_active().tolist()
if np_any(active) and layers:
for n, num in enumerate(active):
active[n] = trim_zeros(num, 'b')
if not active[n]:
active[n] = [0]
dom = argmax([count_nonzero(nu) for nu in active])
dom_session = []
for unit_index in range(len(active[dom])):
if active[dom][unit_index]:
runit = vum.get_realunit(dom, unit_index, data)
dom_session.append(data.get_data("all", runit))
m_dom_session, lv_dom_session = self.merge_session(dom_session)
pca = PCA(n_components=3)
dom_pca = pca.fit_transform(m_dom_session)
dom_ch_pca = self.split_waves(dom_pca, lv_dom_session, 'all')
for layer in layers:
if layer == "units":
for session_index in range(len(active)):
if session_index != dom:
session = []
for unit_index in range(len(active[session_index])):
if active[session_index][unit_index]:
runit = vum.get_realunit(session_index, unit_index, data)
session.append(data.get_data("all", runit))
merged_session, len_vec = self.merge_session(session)
try:
pca_session = self.split_waves(pca.transform(merged_session), len_vec, 'all')
max_distance = self.return_max(pca_session)
if max_distance > self.max_distance:
self.max_distance = max_distance
c = 0
for unit_index in range(len(active[session_index])):
if active[session_index][unit_index]:
col = vum.get_colour(unit_index)
col = tuple(val / 255. for val in col) + (self.fill_alpha,)
self.positions.append(
self.create_scatter_plot_item(pos=pca_session[c], size=1, color=col,
unit_id=unit_index, session=session_index,
px_mode=True))
self.means.append(
self.create_scatter_plot_item(pos=pca_session[c].mean(axis=0), size=15,
color=col, unit_id=unit_index,
session=session_index, px_mode=True,
clickable=True))
c += 1
del session
del merged_session
del pca_session
except ValueError:
pass
elif session_index == dom:
try:
max_distance = self.return_max(dom_ch_pca)
if max_distance > self.max_distance:
self.max_distance = max_distance
c = 0
for unit_index in range(len(active[dom])):
if active[dom][unit_index]:
col = vum.get_colour(unit_index)
col = tuple(val / 255. for val in col) + (self.fill_alpha,)
self.positions.append(
self.create_scatter_plot_item(pos=dom_ch_pca[c], size=1, color=col,
unit_id=unit_index, session=session_index,
px_mode=True))
self.means.append(
self.create_scatter_plot_item(pos=dom_ch_pca[c].mean(axis=0), size=15,
color=col, unit_id=unit_index,
session=session_index, px_mode=True,
clickable=True))
c += 1
except ValueError:
pass
del dom
del dom_session
del dom_ch_pca
del dom_pca
if len(self.positions) == len(self.means):
for item in self.positions:
self.add_scatter_plot(item, gl_options='translucent')
for mean in self.means:
self.add_scatter_plot(mean, gl_options='opaque')
self.connect_means()
else:
print("Something is wrong!")
print("Length of positions list: {}".format(len(self.positions)))
print("Length of means list: {}".format(len(self.means)))
if self.camera_position is not None:
self.restore_camera_position()
def merge_session(self, session):
total_length = 0
length_vector = [0]
for unit in session:
total_length += len(unit)
length_vector.append(total_length)
waves = zeros((total_length, self.wave_length))
for u, unit in enumerate(session):
for wf, wave in enumerate(unit):
waves[wf + length_vector[u]] = wave
return waves, length_vector
def split_waves(self, waves, length_vector, components):
session = []
if components == 'all':
for n in range(len(length_vector) - 1):
session.append(waves[length_vector[n]:length_vector[n + 1]])
else:
for n in range(len(length_vector) - 1):
session.append(waves[length_vector[n]:length_vector[n + 1], :components])
return session
def return_max(self, nested_list):
return amax(list(chain.from_iterable(nested_list)))
| INM-6/swan | swan/views/pca_3d_view.py | Python | bsd-3-clause | 8,724 |
# -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2011, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Generates and writes an APNX page mapping file.
'''
import re
import struct
from calibre.ebooks.mobi.reader.mobi6 import MobiReader
from calibre.ebooks.pdb.header import PdbHeaderReader
from calibre.ebooks.mobi.reader.headers import MetadataHeader
from calibre.utils.logging import default_log
from calibre import prints, fsync
from calibre.constants import DEBUG
class APNXBuilder(object):
    '''
    Create an APNX file using a pseudo page mapping.
    '''
    def write_apnx(self, mobi_file_path, apnx_path, method=None, page_count=0):
        '''
        If you want a fixed number of pages (such as from a custom column) then
        pass in a value to page_count, otherwise a count will be estimated
        using either the fast or accurate algorithm.

        :param mobi_file_path: path to the source MOBI/KF8 book.
        :param apnx_path: destination path for the generated APNX file.
        :param method: 'accurate', 'pagebreak', or None; ignored when
            page_count is given.
        :param page_count: exact number of pages to emit (0 = estimate).
        :raises Exception: if the file is not a MOBI or no mapping results.
        '''
        import uuid
        apnx_meta = {'guid': str(uuid.uuid4()).replace('-', '')[:8], 'asin':
                     '', 'cdetype': 'EBOK', 'format': 'MOBI_7', 'acr': ''}
        # NOTE(review): lopen appears to be calibre's path-aware open()
        # builtin (not imported in this module) — confirm.
        with lopen(mobi_file_path, 'rb') as mf:
            ident = PdbHeaderReader(mf).identity()
            if ident != 'BOOKMOBI':
                # Check that this is really a MOBI file.
                raise Exception(_('Not a valid MOBI file. Reports identity of %s') % ident)
            apnx_meta['acr'] = str(PdbHeaderReader(mf).name())
        # We'll need the PDB name, the MOBI version, and some metadata to make FW 3.4 happy with KF8 files...
        with lopen(mobi_file_path, 'rb') as mf:
            mh = MetadataHeader(mf, default_log)
            if mh.mobi_version == 8:
                apnx_meta['format'] = 'MOBI_8'
            else:
                apnx_meta['format'] = 'MOBI_7'
            if mh.exth is None or not mh.exth.cdetype:
                apnx_meta['cdetype'] = 'EBOK'
            else:
                apnx_meta['cdetype'] = str(mh.exth.cdetype)
            if mh.exth is None or not mh.exth.uuid:
                apnx_meta['asin'] = ''
            else:
                apnx_meta['asin'] = str(mh.exth.uuid)
        # Get the pages depending on the chosen parser
        pages = []
        if page_count:
            pages = self.get_pages_exact(mobi_file_path, page_count)
        else:
            try:
                if method == 'accurate':
                    pages = self.get_pages_accurate(mobi_file_path)
                elif method == 'pagebreak':
                    pages = self.get_pages_pagebreak_tag(mobi_file_path)
                    if not pages:
                        pages = self.get_pages_accurate(mobi_file_path)
                else:
                    raise Exception('%r is not a valid apnx generation method' % method)
            except:
                # Fall back to the fast parser if we can't
                # use the accurate one. Typically this is
                # due to the file having DRM.
                pages = self.get_pages_fast(mobi_file_path)
        if not pages:
            pages = self.get_pages_fast(mobi_file_path)
        if not pages:
            raise Exception(_('Could not generate page mapping.'))
        # Generate the APNX file from the page mapping.
        apnx = self.generate_apnx(pages, apnx_meta)
        # Write the APNX.
        with lopen(apnx_path, 'wb') as apnxf:
            apnxf.write(apnx)
            fsync(apnxf)
    def generate_apnx(self, pages, apnx_meta):
        '''
        Serialize the page offsets and metadata into the binary APNX
        format: a JSON content header, a JSON page-map header, then one
        big-endian uint32 per page offset.

        NOTE(review): this is Python-2 era code — ``apnx`` starts as a str
        and is concatenated with struct.pack output, which only works when
        str is bytes (Python 2).
        '''
        apnx = ''
        if DEBUG:
            prints('APNX META: guid:', apnx_meta['guid'])
            prints('APNX META: ASIN:', apnx_meta['asin'])
            prints('APNX META: CDE:', apnx_meta['cdetype'])
            prints('APNX META: format:', apnx_meta['format'])
            prints('APNX META: Name:', apnx_meta['acr'])
        # Updated header if we have a KF8 file...
        if apnx_meta['format'] == 'MOBI_8':
            content_header = '{"contentGuid":"%(guid)s","asin":"%(asin)s","cdeType":"%(cdetype)s","format":"%(format)s","fileRevisionId":"1","acr":"%(acr)s"}' % apnx_meta  # noqa
        else:
            # My 5.1.x Touch & 3.4 K3 seem to handle the 'extended' header fine for
            # legacy mobi files, too. But, since they still handle this one too, let's
            # try not to break old devices, and keep using the simple header ;).
            content_header = '{"contentGuid":"%(guid)s","asin":"%(asin)s","cdeType":"%(cdetype)s","fileRevisionId":"1"}' % apnx_meta
        page_header = '{"asin":"%(asin)s","pageMap":"(1,a,1)"}' % apnx_meta
        if DEBUG:
            prints('APNX Content Header:', content_header)
        apnx += struct.pack('>I', 65537)
        apnx += struct.pack('>I', 12 + len(content_header))
        apnx += struct.pack('>I', len(content_header))
        apnx += content_header
        apnx += struct.pack('>H', 1)
        apnx += struct.pack('>H', len(page_header))
        apnx += struct.pack('>H', len(pages))
        apnx += struct.pack('>H', 32)
        apnx += page_header
        # Write page values to APNX.
        for page in pages:
            apnx += struct.pack('>I', page)
        return apnx
    def get_pages_exact(self, mobi_file_path, page_count):
        '''
        Given a specified page count (such as from a custom column),
        create our array of pages for the apnx file by dividing by
        the content size of the book.
        '''
        pages = []
        count = 0
        with lopen(mobi_file_path, 'rb') as mf:
            phead = PdbHeaderReader(mf)
            r0 = phead.section_data(0)
            # Uncompressed text length lives at bytes 4-8 of record 0.
            text_length = struct.unpack('>I', r0[4:8])[0]
        chars_per_page = int(text_length / page_count)
        while count < text_length:
            pages.append(count)
            count += chars_per_page
        if len(pages) > page_count:
            # Rounding created extra page entries
            pages = pages[:page_count]
        return pages
    def get_pages_fast(self, mobi_file_path):
        '''
        2300 characters of uncompressed text per page. This is
        not meant to map 1 to 1 to a print book but to be a
        close enough measure.

        A test book was chosen and the characters were counted
        on one page. This number was rounded to 2240 then 60
        characters of markup were added to the total giving
        2300.

        Uncompressed text length is used because it's easily
        accessible in MOBI files (part of the header). Also,
        It's faster to work off of the length then to
        decompress and parse the actual text.
        '''
        text_length = 0
        pages = []
        count = 0
        with lopen(mobi_file_path, 'rb') as mf:
            phead = PdbHeaderReader(mf)
            r0 = phead.section_data(0)
            text_length = struct.unpack('>I', r0[4:8])[0]
        while count < text_length:
            pages.append(count)
            count += 2300
        return pages
    def get_pages_accurate(self, mobi_file_path):
        '''
        A more accurate but much more resource intensive and slower
        method to calculate the page length.

        Parses the uncompressed text. In an average paper back book
        There are 32 lines per page and a maximum of 70 characters
        per line.

        Each paragraph starts a new line and every 70 characters
        (minus markup) in a paragraph starts a new line. The
        position after every 32 lines will be marked as a new
        page.

        This can be made more accurate by accounting for
        <div class="mbp_pagebreak" /> as a new page marker.
        And <br> elements as an empty line.
        '''
        pages = []
        # Get the MOBI html.
        mr = MobiReader(mobi_file_path, default_log)
        if mr.book_header.encryption_type != 0:
            # DRMed book
            return self.get_pages_fast(mobi_file_path)
        mr.extract_text()
        # States
        in_tag = False
        in_p = False
        check_p = False
        closing = False
        p_char_count = 0
        # Get positions of every line
        # A line is either a paragraph starting
        # or every 70 characters in a paragraph.
        lines = []
        pos = -1
        # We want this to be as fast as possible so we
        # are going to do one pass across the text. re
        # and string functions will parse the text each
        # time they are called.
        #
        # We can can use .lower() here because we are
        # not modifying the text. In this case the case
        # doesn't matter just the absolute character and
        # the position within the stream.
        for c in mr.mobi_html.lower():
            pos += 1
            # Check if we are starting or stopping a p tag.
            if check_p:
                if c == '/':
                    closing = True
                    continue
                elif c == 'p':
                    if closing:
                        in_p = False
                    else:
                        in_p = True
                        lines.append(pos - 2)
                    check_p = False
                    closing = False
                    continue
            if c == '<':
                in_tag = True
                check_p = True
                continue
            elif c == '>':
                in_tag = False
                check_p = False
                continue
            if in_p and not in_tag:
                p_char_count += 1
                if p_char_count == 70:
                    lines.append(pos)
                    p_char_count = 0
        # Every 32 lines is a new page (matches the stride below; the old
        # comment said 30, which disagreed with the code).
        for i in xrange(0, len(lines), 32):
            pages.append(lines[i])
        return pages
    def get_pages_pagebreak_tag(self, mobi_file_path):
        '''
        Determine pages based on the presence of
        <mbp:pagebreak>.
        '''
        pages = []
        # Get the MOBI html.
        mr = MobiReader(mobi_file_path, default_log)
        if mr.book_header.encryption_type != 0:
            # DRMed book
            return self.get_pages_fast(mobi_file_path)
        mr.extract_text()
        html = mr.mobi_html.lower()
        for m in re.finditer('<[^>]*pagebreak[^>]*>', html):
            pages.append(m.end())
        return pages
| jelly/calibre | src/calibre/devices/kindle/apnx.py | Python | gpl-3.0 | 10,297 |
#!/bin/python
import serial
import multiprocessing
from lockfile import LockFile
def main():
    """Query the relay board once over serial and print its response.

    A lock file serializes access to the shared serial device so
    concurrent invocations do not interleave their commands.
    """
    lock = LockFile("/tmp/relay")
    with lock:
        ser = serial.Serial('/dev/ttyUSB0', 19200, timeout=1)
        try:
            ser.flushInput()
            # 't' is the firmware command requesting a temperature reading.
            # b't' is identical to 't' on Python 2 and correct on Python 3,
            # where pyserial requires bytes.
            ser.write(b't')
            s = ser.readline()
        finally:
            # The original never closed the port; always release the handle.
            ser.close()
        # print() works as a statement-like call on Python 2 as well; the
        # original `print s` statement was Python-2-only syntax.
        print(s)


if __name__ == "__main__":
    main()
| MappaM/monitoring | scripts/gettemp2.py | Python | gpl-2.0 | 330 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
import functools
import logging
import os
import sys
import tempfile
import time
import traceback
import unittest
from builtins import range
import apache_beam as beam
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metricbase import MetricName
from apache_beam.runners.portability import fn_api_runner
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker import statesampler
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import userstate
from apache_beam.transforms import window
# Use the real sampling period only when the fast (compiled) state sampler
# is available; with the pure-Python sampler the period is set to 0 so
# time-based assertions below become no-ops.
if statesampler.FAST_SAMPLER:
  DEFAULT_SAMPLING_PERIOD_MS = statesampler.DEFAULT_SAMPLING_PERIOD_MS
else:
  DEFAULT_SAMPLING_PERIOD_MS = 0
class FnApiRunnerTest(unittest.TestCase):
  """End-to-end pipeline tests executed on the in-process FnApiRunner.

  Subclasses override create_pipeline() to run the same suite over other
  transports (e.g. gRPC).
  """
  def create_pipeline(self):
    """Return a pipeline on the local FnApiRunner without gRPC."""
    return beam.Pipeline(
        runner=fn_api_runner.FnApiRunner(use_grpc=False))
  def test_assert_that(self):
    # TODO: figure out a way for fn_api_runner to parse and raise the
    # underlying exception.
    with self.assertRaisesRegexp(Exception, 'Failed assert'):
      with self.create_pipeline() as p:
        assert_that(p | beam.Create(['a', 'b']), equal_to(['a']))
  def test_create(self):
    with self.create_pipeline() as p:
      assert_that(p | beam.Create(['a', 'b']), equal_to(['a', 'b']))
  def test_pardo(self):
    with self.create_pipeline() as p:
      res = (p
             | beam.Create(['a', 'bc'])
             | beam.Map(lambda e: e * 2)
             | beam.Map(lambda e: e + 'x'))
      assert_that(res, equal_to(['aax', 'bcbcx']))
  def test_pardo_metrics(self):
    # Two DoFns with distinct namespaces so the counter keys can be
    # asserted per-step below.
    class MyDoFn(beam.DoFn):
      def start_bundle(self):
        self.count = beam.metrics.Metrics.counter('ns1', 'elements')
      def process(self, element):
        self.count.inc(element)
        return [element]
    class MyOtherDoFn(beam.DoFn):
      def start_bundle(self):
        self.count = beam.metrics.Metrics.counter('ns2', 'elementsplusone')
      def process(self, element):
        self.count.inc(element + 1)
        return [element]
    with self.create_pipeline() as p:
      res = (p | beam.Create([1, 2, 3])
             | 'mydofn' >> beam.ParDo(MyDoFn())
             | 'myotherdofn' >> beam.ParDo(MyOtherDoFn()))
      p.run()
      if not MetricsEnvironment.METRICS_SUPPORTED:
        self.skipTest('Metrics are not supported.')
      counter_updates = [{'key': key, 'value': val}
                         for container in p.runner.metrics_containers()
                         for key, val in
                         container.get_updates().counters.items()]
      counter_values = [update['value'] for update in counter_updates]
      counter_keys = [update['key'] for update in counter_updates]
      assert_that(res, equal_to([1, 2, 3]))
      # 1+2+3 = 6 for the first counter; 2+3+4 = 9 for the second.
      self.assertEqual(counter_values, [6, 9])
      self.assertEqual(counter_keys, [
          MetricKey('mydofn',
                    MetricName('ns1', 'elements')),
          MetricKey('myotherdofn',
                    MetricName('ns2', 'elementsplusone'))])
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on on Python 3. '
                   'TODO: BEAM-5692')
  def test_pardo_side_outputs(self):
    def tee(elem, *tags):
      for tag in tags:
        if tag in elem:
          yield beam.pvalue.TaggedOutput(tag, elem)
    with self.create_pipeline() as p:
      xy = (p
            | 'Create' >> beam.Create(['x', 'y', 'xy'])
            | beam.FlatMap(tee, 'x', 'y').with_outputs())
      assert_that(xy.x, equal_to(['x', 'xy']), label='x')
      assert_that(xy.y, equal_to(['y', 'xy']), label='y')
  def test_pardo_side_and_main_outputs(self):
    def even_odd(elem):
      yield elem
      yield beam.pvalue.TaggedOutput('odd' if elem % 2 else 'even', elem)
    with self.create_pipeline() as p:
      ints = p | beam.Create([1, 2, 3])
      named = ints | 'named' >> beam.FlatMap(
          even_odd).with_outputs('even', 'odd', main='all')
      assert_that(named.all, equal_to([1, 2, 3]), label='named.all')
      assert_that(named.even, equal_to([2]), label='named.even')
      assert_that(named.odd, equal_to([1, 3]), label='named.odd')
      unnamed = ints | 'unnamed' >> beam.FlatMap(even_odd).with_outputs()
      unnamed[None] | beam.Map(id)  # pylint: disable=expression-not-assigned
      assert_that(unnamed[None], equal_to([1, 2, 3]), label='unnamed.all')
      assert_that(unnamed.even, equal_to([2]), label='unnamed.even')
      assert_that(unnamed.odd, equal_to([1, 3]), label='unnamed.odd')
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3.')
  def test_pardo_side_inputs(self):
    def cross_product(elem, sides):
      for side in sides:
        yield elem, side
    with self.create_pipeline() as p:
      main = p | 'main' >> beam.Create(['a', 'b', 'c'])
      side = p | 'side' >> beam.Create(['x', 'y'])
      assert_that(main | beam.FlatMap(cross_product, beam.pvalue.AsList(side)),
                  equal_to([('a', 'x'), ('b', 'x'), ('c', 'x'),
                            ('a', 'y'), ('b', 'y'), ('c', 'y')]))
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3.')
  def test_pardo_windowed_side_inputs(self):
    with self.create_pipeline() as p:
      # Now with some windowing.
      pcoll = p | beam.Create(list(range(10))) | beam.Map(
          lambda t: window.TimestampedValue(t, t))
      # Intentionally choosing non-aligned windows to highlight the transition.
      main = pcoll | 'WindowMain' >> beam.WindowInto(window.FixedWindows(5))
      side = pcoll | 'WindowSide' >> beam.WindowInto(window.FixedWindows(7))
      res = main | beam.Map(lambda x, s: (x, sorted(s)),
                            beam.pvalue.AsList(side))
      assert_that(
          res,
          equal_to([
              # The window [0, 5) maps to the window [0, 7).
              (0, list(range(7))),
              (1, list(range(7))),
              (2, list(range(7))),
              (3, list(range(7))),
              (4, list(range(7))),
              # The window [5, 10) maps to the window [7, 14).
              (5, list(range(7, 10))),
              (6, list(range(7, 10))),
              (7, list(range(7, 10))),
              (8, list(range(7, 10))),
              (9, list(range(7, 10)))]),
          label='windowed')
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3.')
  def test_flattened_side_input(self):
    with self.create_pipeline() as p:
      main = p | 'main' >> beam.Create([None])
      side1 = p | 'side1' >> beam.Create([('a', 1)])
      side2 = p | 'side2' >> beam.Create([('b', 2)])
      side = (side1, side2) | beam.Flatten()
      assert_that(
          main | beam.Map(lambda a, b: (a, b), beam.pvalue.AsDict(side)),
          equal_to([(None, {'a': 1, 'b': 2})]))
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3.')
  def test_gbk_side_input(self):
    with self.create_pipeline() as p:
      main = p | 'main' >> beam.Create([None])
      side = p | 'side' >> beam.Create([('a', 1)]) | beam.GroupByKey()
      assert_that(
          main | beam.Map(lambda a, b: (a, b), beam.pvalue.AsDict(side)),
          equal_to([(None, {'a': [1]})]))
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3.')
  def test_multimap_side_input(self):
    with self.create_pipeline() as p:
      main = p | 'main' >> beam.Create(['a', 'b'])
      side = (p | 'side' >> beam.Create([('a', 1), ('b', 2), ('a', 3)])
              # TODO(BEAM-4782): Obviate the need for this map.
              | beam.Map(lambda kv: (kv[0], kv[1])))
      assert_that(
          main | beam.Map(lambda k, d: (k, sorted(d[k])),
                          beam.pvalue.AsMultiMap(side)),
          equal_to([('a', [1, 3]), ('b', [2])]))
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3.')
  def test_pardo_unfusable_side_inputs(self):
    def cross_product(elem, sides):
      for side in sides:
        yield elem, side
    with self.create_pipeline() as p:
      pcoll = p | beam.Create(['a', 'b'])
      assert_that(
          pcoll | beam.FlatMap(cross_product, beam.pvalue.AsList(pcoll)),
          equal_to([('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]))
    with self.create_pipeline() as p:
      pcoll = p | beam.Create(['a', 'b'])
      derived = ((pcoll,) | beam.Flatten()
                 | beam.Map(lambda x: (x, x))
                 | beam.GroupByKey()
                 | 'Unkey' >> beam.Map(lambda kv: kv[0]))
      assert_that(
          pcoll | beam.FlatMap(cross_product, beam.pvalue.AsList(derived)),
          equal_to([('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]))
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on on Python 3. '
                   'TODO: BEAM-5692')
  def test_pardo_state_only(self):
    index_state_spec = userstate.CombiningValueStateSpec(
        'index', beam.coders.VarIntCoder(), sum)
    # TODO(ccy): State isn't detected with Map/FlatMap.
    class AddIndex(beam.DoFn):
      def process(self, kv, index=beam.DoFn.StateParam(index_state_spec)):
        k, v = kv
        index.add(1)
        yield k, v, index.read()
    inputs = [('A', 'a')] * 2 + [('B', 'b')] * 3
    expected = [('A', 'a', 1),
                ('A', 'a', 2),
                ('B', 'b', 1),
                ('B', 'b', 2),
                ('B', 'b', 3)]
    with self.create_pipeline() as p:
      assert_that(p | beam.Create(inputs) | beam.ParDo(AddIndex()),
                  equal_to(expected))
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on on Python 3. '
                   'TODO: BEAM-5692')
  def test_pardo_timers(self):
    timer_spec = userstate.TimerSpec('timer', userstate.TimeDomain.WATERMARK)
    class TimerDoFn(beam.DoFn):
      def process(self, element, timer=beam.DoFn.TimerParam(timer_spec)):
        unused_key, ts = element
        timer.set(ts)
        # The second set() overwrites the first, so each key fires once.
        timer.set(2 * ts)
      @userstate.on_timer(timer_spec)
      def process_timer(self):
        yield 'fired'
    with self.create_pipeline() as p:
      actual = (
          p
          | beam.Create([('k1', 10), ('k2', 100)])
          | beam.ParDo(TimerDoFn())
          | beam.Map(lambda x, ts=beam.DoFn.TimestampParam: (x, ts)))
      expected = [('fired', ts) for ts in (20, 200)]
      assert_that(actual, equal_to(expected))
  def test_group_by_key(self):
    with self.create_pipeline() as p:
      res = (p
             | beam.Create([('a', 1), ('a', 2), ('b', 3)])
             | beam.GroupByKey()
             | beam.Map(lambda k_vs: (k_vs[0], sorted(k_vs[1]))))
      assert_that(res, equal_to([('a', [1, 2]), ('b', [3])]))
  def test_flatten(self):
    with self.create_pipeline() as p:
      res = (p | 'a' >> beam.Create(['a']),
             p | 'bc' >> beam.Create(['b', 'c']),
             p | 'd' >> beam.Create(['d'])) | beam.Flatten()
      assert_that(res, equal_to(['a', 'b', 'c', 'd']))
  def test_combine_per_key(self):
    with self.create_pipeline() as p:
      res = (p
             | beam.Create([('a', 1), ('a', 2), ('b', 3)])
             | beam.CombinePerKey(beam.combiners.MeanCombineFn()))
      assert_that(res, equal_to([('a', 1.5), ('b', 3.0)]))
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on on Python 3. '
                   'TODO: BEAM-5692')
  def test_read(self):
    # Can't use NamedTemporaryFile as a context
    # due to https://bugs.python.org/issue14243
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
      temp_file.write(b'a\nb\nc')
      temp_file.close()
      with self.create_pipeline() as p:
        assert_that(p | beam.io.ReadFromText(temp_file.name),
                    equal_to(['a', 'b', 'c']))
    finally:
      os.unlink(temp_file.name)
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on on Python 3. '
                   'TODO: BEAM-5692')
  def test_windowing(self):
    with self.create_pipeline() as p:
      res = (p
             | beam.Create([1, 2, 100, 101, 102])
             | beam.Map(lambda t: window.TimestampedValue(('k', t), t))
             | beam.WindowInto(beam.transforms.window.Sessions(10))
             | beam.GroupByKey()
             | beam.Map(lambda k_vs1: (k_vs1[0], sorted(k_vs1[1]))))
      assert_that(res, equal_to([('k', [1, 2]), ('k', [100, 101, 102])]))
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3.')
  def test_large_elements(self):
    with self.create_pipeline() as p:
      # Elements sized past the data-plane flush threshold exercise the
      # chunked transfer path.
      big = (p
             | beam.Create(['a', 'a', 'b'])
             | beam.Map(lambda x: (x, x * data_plane._DEFAULT_FLUSH_THRESHOLD)))
      side_input_res = (
          big
          | beam.Map(lambda x, side: (x[0], side.count(x[0])),
                     beam.pvalue.AsList(big | beam.Map(lambda x: x[0]))))
      assert_that(side_input_res,
                  equal_to([('a', 2), ('a', 2), ('b', 1)]), label='side')
      gbk_res = (
          big
          | beam.GroupByKey()
          | beam.Map(lambda x: x[0]))
      assert_that(gbk_res, equal_to(['a', 'b']), label='gbk')
  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test is flaky on on Python 3. '
                   'TODO: BEAM-5692')
  def test_error_message_includes_stage(self):
    with self.assertRaises(BaseException) as e_cm:
      with self.create_pipeline() as p:
        def raise_error(x):
          raise RuntimeError('x')
        # pylint: disable=expression-not-assigned
        (p
         | beam.Create(['a', 'b'])
         | 'StageA' >> beam.Map(lambda x: x)
         | 'StageB' >> beam.Map(lambda x: x)
         | 'StageC' >> beam.Map(raise_error)
         | 'StageD' >> beam.Map(lambda x: x))
    message = e_cm.exception.args[0]
    self.assertIn('StageC', message)
    self.assertNotIn('StageB', message)
  def test_error_traceback_includes_user_code(self):
    def first(x):
      return second(x)
    def second(x):
      return third(x)
    def third(x):
      raise ValueError('x')
    try:
      with self.create_pipeline() as p:
        p | beam.Create([0]) | beam.Map(first)  # pylint: disable=expression-not-assigned
    except Exception:  # pylint: disable=broad-except
      message = traceback.format_exc()
    else:
      raise AssertionError('expected exception not raised')
    self.assertIn('first', message)
    self.assertIn('second', message)
    self.assertIn('third', message)
  def test_no_subtransform_composite(self):
    class First(beam.PTransform):
      def expand(self, pcolls):
        return pcolls[0]
    with self.create_pipeline() as p:
      pcoll_a = p | 'a' >> beam.Create(['a'])
      pcoll_b = p | 'b' >> beam.Create(['b'])
      assert_that((pcoll_a, pcoll_b) | First(), equal_to(['a']))
  def test_metrics(self):
    p = self.create_pipeline()
    if not isinstance(p.runner, fn_api_runner.FnApiRunner):
      # This test is inherited by others that may not support the same
      # internal way of accessing progress metrics.
      self.skipTest('Metrics not supported.')
    counter = beam.metrics.Metrics.counter('ns', 'counter')
    distribution = beam.metrics.Metrics.distribution('ns', 'distribution')
    gauge = beam.metrics.Metrics.gauge('ns', 'gauge')
    pcoll = p | beam.Create(['a', 'zzz'])
    # pylint: disable=expression-not-assigned
    pcoll | 'count1' >> beam.FlatMap(lambda x: counter.inc())
    pcoll | 'count2' >> beam.FlatMap(lambda x: counter.inc(len(x)))
    pcoll | 'dist' >> beam.FlatMap(lambda x: distribution.update(len(x)))
    pcoll | 'gauge' >> beam.FlatMap(lambda x: gauge.set(len(x)))
    res = p.run()
    res.wait_until_finish()
    c1, = res.metrics().query(beam.metrics.MetricsFilter().with_step('count1'))[
        'counters']
    self.assertEqual(c1.committed, 2)
    c2, = res.metrics().query(beam.metrics.MetricsFilter().with_step('count2'))[
        'counters']
    self.assertEqual(c2.committed, 4)
    dist, = res.metrics().query(beam.metrics.MetricsFilter().with_step('dist'))[
        'distributions']
    gaug, = res.metrics().query(
        beam.metrics.MetricsFilter().with_step('gauge'))['gauges']
    self.assertEqual(
        dist.committed.data, beam.metrics.cells.DistributionData(4, 2, 1, 3))
    self.assertEqual(dist.committed.mean, 2.0)
    self.assertEqual(gaug.committed.value, 3)
  def test_non_user_metrics(self):
    p = self.create_pipeline()
    if not isinstance(p.runner, fn_api_runner.FnApiRunner):
      # This test is inherited by others that may not support the same
      # internal way of accessing progress metrics.
      self.skipTest('Metrics not supported.')
    pcoll = p | beam.Create(['a', 'zzz'])
    # pylint: disable=expression-not-assigned
    pcoll | 'MyStep' >> beam.FlatMap(lambda x: None)
    res = p.run()
    res.wait_until_finish()
    result_metrics = res.monitoring_metrics()
    all_metrics_via_montoring_infos = result_metrics.query()
    def assert_counter_exists(metrics, namespace, name, step):
      found = 0
      metric_key = MetricKey(step, MetricName(namespace, name))
      for m in metrics['counters']:
        if m.key == metric_key:
          found = found + 1
      self.assertEqual(
          1, found, "Did not find exactly 1 metric for %s." % metric_key)
    urns = [
        monitoring_infos.ELEMENT_COUNT_URN,
        monitoring_infos.START_BUNDLE_MSECS_URN,
        monitoring_infos.PROCESS_BUNDLE_MSECS_URN,
        monitoring_infos.FINISH_BUNDLE_MSECS_URN,
        monitoring_infos.TOTAL_MSECS_URN,
    ]
    for urn in urns:
      split = urn.split(':')
      namespace = split[0]
      name = ':'.join(split[1:])
      assert_counter_exists(
          all_metrics_via_montoring_infos, namespace, name, step='Create/Read')
      assert_counter_exists(
          all_metrics_via_montoring_infos, namespace, name, step='MyStep')
  def test_progress_metrics(self):
    p = self.create_pipeline()
    if not isinstance(p.runner, fn_api_runner.FnApiRunner):
      # This test is inherited by others that may not support the same
      # internal way of accessing progress metrics.
      self.skipTest('Progress metrics not supported.')
    _ = (p
         | beam.Create([0, 0, 0, 5e-3 * DEFAULT_SAMPLING_PERIOD_MS])
         | beam.Map(time.sleep)
         | beam.Map(lambda x: ('key', x))
         | beam.GroupByKey()
         | 'm_out' >> beam.FlatMap(lambda x: [
             1, 2, 3, 4, 5,
             beam.pvalue.TaggedOutput('once', x),
             beam.pvalue.TaggedOutput('twice', x),
             beam.pvalue.TaggedOutput('twice', x)]))
    res = p.run()
    res.wait_until_finish()
    def has_mi_for_ptransform(monitoring_infos, ptransform):
      for mi in monitoring_infos:
        if ptransform in mi.labels['PTRANSFORM']:
          return True
      return False
    try:
      # TODO(ajamato): Delete this block after deleting the legacy metrics code.
      # Test the DEPRECATED legacy metrics
      pregbk_metrics, postgbk_metrics = list(
          res._metrics_by_stage.values())
      if 'Create/Read' not in pregbk_metrics.ptransforms:
        # The metrics above are actually unordered. Swap.
        pregbk_metrics, postgbk_metrics = postgbk_metrics, pregbk_metrics
      self.assertEqual(
          4,
          pregbk_metrics.ptransforms['Create/Read']
          .processed_elements.measured.output_element_counts['out'])
      self.assertEqual(
          4,
          pregbk_metrics.ptransforms['Map(sleep)']
          .processed_elements.measured.output_element_counts['None'])
      self.assertLessEqual(
          4e-3 * DEFAULT_SAMPLING_PERIOD_MS,
          pregbk_metrics.ptransforms['Map(sleep)']
          .processed_elements.measured.total_time_spent)
      self.assertEqual(
          1,
          postgbk_metrics.ptransforms['GroupByKey/Read']
          .processed_elements.measured.output_element_counts['None'])
      # The actual stage name ends up being something like 'm_out/lamdbda...'
      m_out, = [
          metrics for name, metrics in list(postgbk_metrics.ptransforms.items())
          if name.startswith('m_out')]
      self.assertEqual(
          5,
          m_out.processed_elements.measured.output_element_counts['None'])
      self.assertEqual(
          1,
          m_out.processed_elements.measured.output_element_counts['once'])
      self.assertEqual(
          2,
          m_out.processed_elements.measured.output_element_counts['twice'])
      # Test the new MonitoringInfo monitoring format.
      self.assertEqual(2, len(res._monitoring_infos_by_stage))
      pregbk_mis, postgbk_mis = list(res._monitoring_infos_by_stage.values())
      if not has_mi_for_ptransform(pregbk_mis, 'Create/Read'):
        # The monitoring infos above are actually unordered. Swap.
        pregbk_mis, postgbk_mis = postgbk_mis, pregbk_mis
      def assert_has_monitoring_info(
          monitoring_infos, urn, labels, value=None, ge_value=None):
        # TODO(ajamato): Consider adding a matcher framework
        found = 0
        for m in monitoring_infos:
          if m.labels == labels and m.urn == urn:
            if (ge_value is not None and
                m.metric.counter_data.int64_value >= ge_value):
              found = found + 1
            elif (value is not None and
                  m.metric.counter_data.int64_value == value):
              found = found + 1
        ge_value_str = {'ge_value' : ge_value} if ge_value else ''
        value_str = {'value' : value} if value else ''
        self.assertEqual(
            1, found, "Found (%s) Expected only 1 monitoring_info for %s." %
            (found, (urn, labels, value_str, ge_value_str),))
      # pregbk monitoring infos
      labels = {'PTRANSFORM' : 'Create/Read', 'TAG' : 'out'}
      assert_has_monitoring_info(
          pregbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=4)
      labels = {'PTRANSFORM' : 'Map(sleep)', 'TAG' : 'None'}
      assert_has_monitoring_info(
          pregbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=4)
      labels = {'PTRANSFORM' : 'Map(sleep)'}
      assert_has_monitoring_info(
          pregbk_mis, monitoring_infos.TOTAL_MSECS_URN,
          labels, ge_value=4 * DEFAULT_SAMPLING_PERIOD_MS)
      # postgbk monitoring infos
      labels = {'PTRANSFORM' : 'GroupByKey/Read', 'TAG' : 'None'}
      assert_has_monitoring_info(
          postgbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=1)
      labels = {'PTRANSFORM' : 'm_out', 'TAG' : 'None'}
      assert_has_monitoring_info(
          postgbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=5)
      labels = {'PTRANSFORM' : 'm_out', 'TAG' : 'once'}
      assert_has_monitoring_info(
          postgbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=1)
      labels = {'PTRANSFORM' : 'm_out', 'TAG' : 'twice'}
      assert_has_monitoring_info(
          postgbk_mis, monitoring_infos.ELEMENT_COUNT_URN, labels, value=2)
    except:
      print(res._monitoring_infos_by_stage)
      raise
class FnApiRunnerTestWithGrpc(FnApiRunnerTest):
  """Runs the FnApiRunner test suite over GRPC control/data channels."""

  def create_pipeline(self):
    grpc_runner = fn_api_runner.FnApiRunner(use_grpc=True)
    return beam.Pipeline(runner=grpc_runner)
class FnApiRunnerTestWithGrpcMultiThreaded(FnApiRunnerTest):
  """Runs the FnApiRunner test suite over GRPC with a two-thread SDK harness."""

  def create_pipeline(self):
    harness_factory = functools.partial(sdk_worker.SdkHarness, worker_count=2)
    multi_threaded_runner = fn_api_runner.FnApiRunner(
        use_grpc=True, sdk_harness_factory=harness_factory)
    return beam.Pipeline(runner=multi_threaded_runner)
class FnApiRunnerTestWithBundleRepeat(FnApiRunnerTest):
  """Runs the FnApiRunner test suite re-processing every bundle three times."""

  def create_pipeline(self):
    repeating_runner = fn_api_runner.FnApiRunner(
        use_grpc=False, bundle_repeat=3)
    return beam.Pipeline(runner=repeating_runner)
if __name__ == '__main__':
  # Show INFO-level logs while the suite runs.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| rangadi/incubator-beam | sdks/python/apache_beam/runners/portability/fn_api_runner_test.py | Python | apache-2.0 | 25,989 |
#!/usr/bin/python
# Builds a minimal example BCPKI certificate for "Foo Inc." with a single
# pay-to-contract payment key, prints it, and (further below) writes it to
# disk. Python 2 script.
import sys
sys.path.append('..')
from bcert_pb2 import *
import binascii
# fill out a minimal bitcoin cert
cert = BitcoinCert()
# first the data part (the part is later signed by the "higher level cert" or "the blockchain")
cert.data.version = '0.1'
cert.data.subjectname = 'Foo Inc.'
email = cert.data.contacts.add()
email.type = email.EMAIL
email.value = 'foo@fooinc.com'
url = cert.data.contacts.add()
url.type = url.URL
url.value = 'http://www.fooinc.com'
# one payment key using the P2CSINGLE (pay-to-contract, single key) scheme
paykey = cert.data.paymentkeys.add()
paykey.usage = paykey.PAYMENT
paykey.algorithm.type = paykey.algorithm.P2CSINGLE # is default anyway
paykey.algorithm.version = '0.1'
# NOTE(review): protobuf repeated-field append() returns None, so 'key' is
# always None here — only the append side effect matters.
key = paykey.value.append("02537af4f00677d5f13916618f865abcac79a51e485ad0da26aab2532f499af376".decode('hex'))
# this is standard in bitcoin ripemd(sha256())
from bitcoin import hash_160
# add signature to cert
#sig = cert.signatures.add()
#sig.algorithm.type = sig.algorithm.BCPKI
#sig.algorithm.version = "0.3"
#sig.value = "foo1" # for signatures of type BCPKI the alias IS the value,
# other types place the signature of BitcoinCertDataToHash(certData) here,
# for BCPKI this hash appears in the blockchain instead
# see how the cert looks
print cert
def CertToAscii(cert):
    """Serialize *cert* and return it base64-encoded, followed by '=' and a
    base64-encoded 24-bit CRC checksum (OpenPGP-armor style)."""
    ser = cert.SerializeToString()
    # keep only last 24 bit (should use CRC-24 like OpenPGP)
    # OpenPGP uses initializations for its crc-24, see http://tools.ietf.org/html/rfc2440
    crc = binascii.crc32(ser) & 0xffffff
    # reuse the serialization computed above; previously the cert was
    # serialized a second time and the first result was discarded
    asc = binascii.b2a_base64(ser)[:-1] # without trailing newline
    asc += '=' # checksum is seperated by =
    asc += binascii.b2a_base64(('%06x' % crc).decode('hex'))
    return asc
def CertToAsciiMsg(cert):
    """Wrap CertToAscii() output in BEGIN/END armor lines, wrapped to
    72 characters per line."""
    asc = CertToAscii(cert)
    res = '-----BEGIN BTCPKI CERTIFICATE-----\n'
    res += 'Version: '+cert.version+'\n\n'
    res += '\n'.join(asc[i:i+72] for i in xrange(0, len(asc), 72))
    # BUGFIX: terminate the last armor line; previously the END marker was
    # glued directly onto the final base64 line.
    res += '\n'
    res += '-----END BTCPKI CERTIFICATE-----\n'
    return res
# TODO: AsciiToCert
from e import derivepubkey
#print "deriving filename from: "+normalized
#fname = id+'.bcrt'
# Write the binary (serialized protobuf) certificate to disk.
fname = 'foo1_p2csingle.bcrt'
f=open(fname,'wb')
f.write(cert.SerializeToString())
f.close()
print "binary cert written to: "+fname
#fname = id+'.acrt'
#f=open(fname,'wb')
#f.write(CertToAscii(cert))
#f.close()
#print "ascii cert written to: "+fname
#fname = 'my.data'
#f=open(fname,'wb')
#f.write(cert.data.SerializeToString())
#f.close()
#print "binary data part written to: "+fname
# see the hash
print "hash of data part is: "+hash_160(cert.data.SerializeToString()).encode('hex')
print "hex binary cert: "+cert.SerializeToString().encode('hex')
#print CertToAscii(cert)
#print CertToAsciiMsg(cert)
# OLD
#from subprocess import Popen,PIPE,check_call,call
#p = Popen(['./bitcoind','-testnet','registeralias','foo3','0.5',hash],stdout=PIPE)
#result = p.stdout.read()
#print result
| bcpki/bitcoin | src/bcert/examples/mk_foo1_p2csingle.py | Python | mit | 2,900 |
#!/usr/bin/env python3
from classes.myqueue import MyQueue
import time # time.sleep(0.02)
import random # random.randint(1, 100)
import socket # socket.gethostname()
import sys
import argparse
# Database connection settings consumed by MyQueue.
# NOTE(review): credentials are hard-coded for this local test script;
# move to environment/config before using anywhere else.
CONF_DB = {
    'server': 'localhost',
    'user': 'root',
    'pass': 'x',
    'db': 'myqueue'
}
def worker_create(q, amount):
    """Insert *amount* freshly named objects into the queue (state 'new')."""
    host = socket.gethostname()
    for _ in range(amount):
        # unique-ish name: host, current epoch second, random suffix
        name = "{}_{}_{}".format(host, int(time.time()), random.randint(1, 10000000))
        q.object_add(name)
def worker_update(q, amount):
    """Move up to *amount* queue objects from state 'new' to 'running'."""
    for _ in range(amount):
        try:
            candidate = q.object_get_object_bystate('new')[0]['object']
            q.object_update_status(name=candidate, status='running')
        except IndexError:
            # no 'new' objects available right now; keep looping
            pass
def worker_finish(q, amount):
    """Move up to *amount* queue objects from state 'running' to 'done'."""
    for _ in range(amount):
        try:
            candidate = q.object_get_object_bystate('running')[0]['object']
            q.object_update_status(name=candidate, status='done')
        except IndexError:
            # no 'running' objects available right now; keep looping
            pass
def main(args):
    """Dispatch the requested worker type against a fresh MyQueue."""
    workers = {
        'create': worker_create,
        'update': worker_update,
        'finish': worker_finish,
    }
    queue = MyQueue(CONF_DB)
    with queue:
        # using "with" ensures db exit, not worked on my testing with the db library
        # see __enter__ & __exit__ in MyQueue Class
        worker = workers.get(args.type)
        if worker is None:
            print('shit happens')
            sys.exit(1)
        worker(queue, args.amount)
# mysql> select status, count(object) as count from queue group by status order by count DESC
# set global general_log = 'ON';
if __name__ == '__main__':
    # CLI entry point: choose a worker type and how many objects it
    # should create/update/finish, then run it once.
    parser = argparse.ArgumentParser(description='its me, the python queue...')
    parser.add_argument('type',
                        default='create',
                        help='for type: choose between create, update and finish',
                        choices=['create', 'update', 'finish'],
                        type=str)
    parser.add_argument('--amount',
                        type=int,
                        default=1000,
                        help='amount to create/modify/finish')
    args = parser.parse_args()
    main(args)
| garmann/playground | python/python_mysql_queue/app.py | Python | mit | 2,098 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer serialization utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.platform import test
class LayerSerializationTest(test.TestCase):
  """Checks that a layer survives a serialize/deserialize round trip."""

  def test_serialize_deserialize(self):
    original = keras.layers.Dense(
        3, activation='relu', kernel_initializer='ones', bias_regularizer='l2')
    round_tripped = keras.layers.deserialize(keras.layers.serialize(original))
    self.assertEqual(round_tripped.activation, keras.activations.relu)
    self.assertEqual(round_tripped.bias_regularizer.__class__,
                     keras.regularizers.L1L2)
    self.assertEqual(round_tripped.kernel_initializer.__class__,
                     keras.initializers.Ones)
    self.assertEqual(round_tripped.units, 3)
if __name__ == '__main__':
  # Run under the TensorFlow test runner.
  test.main()
| nburn42/tensorflow | tensorflow/python/keras/layers/serialization_test.py | Python | apache-2.0 | 1,578 |
##########################################################################
#
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import time
import IECore
class LRUCacheTest( unittest.TestCase ) :
	"""Tests for IECore.LRUCache : cost limiting, clearing, thread safety,
	removal callbacks and explicit insertion via set().

	The failUnless/failIf assertion aliases (deprecated since Python 2.7,
	removed in 3.12) have been replaced with assertTrue/assertFalse; the
	test logic is otherwise unchanged."""

	def test( self ) :

		self.numGetterCalls = 0
		def getter( key ) :
			self.numGetterCalls += 1
			return (
				# value
				{
					"same" : key,
					"times2" : key * 2,
					"times4" : key * 4,
				},
				# cost
				1
			)

		c = IECore.LRUCache( getter, 10 )
		self.assertEqual( c.getMaxCost(), 10 )
		c.setMaxCost( 20 )
		self.assertEqual( c.getMaxCost(), 20 )
		c.setMaxCost( 10 )
		self.assertEqual( c.getMaxCost(), 10 )
		v = c.get( 10 )
		self.assertEqual( v,
			{
				"same" : 10,
				"times2" : 20,
				"times4" : 40,
			}
		)
		self.assertEqual( c.currentCost(), 1 )
		self.assertEqual( self.numGetterCalls, 1 )
		# a repeated get() must return the cached object without calling
		# the getter again
		v2 = c.get( 10 )
		self.assertTrue( v2 is v )
		self.assertEqual( c.currentCost(), 1 )
		self.assertEqual( self.numGetterCalls, 1 )
		for k in range( 11, 10000 ) :
			v = c.get( k )
			self.assertEqual( v,
				{
					"same" : k,
					"times2" : k * 2,
					"times4" : k * 4,
				}
			)
			# eviction must keep the cost within the configured maximum
			self.assertFalse( c.currentCost() > 10 )

	def testClearCausesReloads( self ) :

		self.numGetterCalls = 0
		self.multiplier = 2
		def getter( key ) :
			self.numGetterCalls += 1
			return ( key * self.multiplier, 1 )

		c = IECore.LRUCache( getter, 10 )
		v = c.get( 10 )
		self.assertEqual( v, 20 )
		self.assertEqual( self.numGetterCalls, 1 )
		v = c.get( 10 )
		self.assertEqual( v, 20 )
		self.assertEqual( self.numGetterCalls, 1 )
		# after clear() the getter must run again, observing the new multiplier
		c.clear()
		self.multiplier = 4
		v = c.get( 10 )
		self.assertEqual( v, 40 )
		self.assertEqual( self.numGetterCalls, 2 )

	def testThreadingAndLimitCost( self ) :

		def getter( key ) :
			return ( key * 2, 1 )

		# deliberately tiny cache so concurrent lookups constantly evict
		c = IECore.LRUCache( getter, 10 )

		def thrash() :
			for i in range( 0, 10000 ) :
				v = c.get( i )
				self.assertEqual( v, i * 2 )

		threads = []
		for i in range( 0, 10 ) :
			thread = threading.Thread( target=thrash )
			threads.append( thread )
			thread.start()

		for thread in threads :
			thread.join()

	def testThreadingAndClear( self ) :

		def getter( key ) :
			return ( key * 2, 1 )

		c = IECore.LRUCache( getter, 100000 )

		# two threads performing lookups while a third clears continuously
		def f1() :
			for i in range( 0, 10000 ) :
				v = c.get( i )
				self.assertEqual( v, i * 2 )

		def f2() :
			for i in range( 0, 10000 ) :
				c.clear()

		t1 = threading.Thread( target=f1 )
		t2 = threading.Thread( target=f1 )
		t3 = threading.Thread( target=f2 )
		t1.start()
		t2.start()
		t3.start()
		t1.join()
		t2.join()
		t3.join()

		c.clear()
		self.assertEqual( c.currentCost(), 0 )

	def testYieldGILInGetter( self ) :

		def getter( key ) :
			# this call simulates the gil getting
			# yielded for some reason - in the real world
			# perhaps an Op call or just the python interpreter
			# deciding to switch threads.
			time.sleep( 0.1 )
			return ( key, 1 )

		c = IECore.LRUCache( getter, 100000 )

		# two threads requesting the same key while the getter sleeps
		def f() :
			c.get( 0 )

		t1 = threading.Thread( target=f )
		t2 = threading.Thread( target=f )
		t1.start()
		t2.start()
		t1.join()
		t2.join()

	def testRemovalCallback( self ) :

		def getter( key ) :
			return ( key * 2, 1 )

		removed = []
		def removalCallback( key, value ) :
			removed.append( ( key, value ) )

		c = IECore.LRUCache( getter, removalCallback, 5 )

		self.assertEqual( c.get( 1 ), 2 )
		self.assertEqual( removed, [] )
		self.assertEqual( c.get( 2 ), 4 )
		self.assertEqual( removed, [] )
		self.assertEqual( c.get( 3 ), 6 )
		self.assertEqual( removed, [] )
		self.assertEqual( c.get( 4 ), 8 )
		self.assertEqual( removed, [] )
		self.assertEqual( c.get( 5 ), 10 )
		self.assertEqual( removed, [] )
		# exceeding the cost limit of 5 evicts the least recently used items,
		# triggering the callback for each
		self.assertEqual( c.get( 6 ), 12 )
		self.assertEqual( removed, [ ( 1, 2 ) ] )
		self.assertEqual( c.get( 7 ), 14 )
		self.assertEqual( removed, [ ( 1, 2 ), ( 2, 4 ) ] )

		c.clear()
		self.assertEqual( len( removed ), 7 )
		keys = [ x[0] for x in removed ]
		for i in range( 1, 8 ) :
			self.assertTrue( i in keys )

	def testSet( self ) :

		def getter( key ) :
			return ( None, 1 )

		c = IECore.LRUCache( getter, 1000 )

		c.set( 5, 10, 1 )
		self.assertEqual( c.currentCost(), 1 )
		self.assertEqual( c.get( 5 ), 10 )
		self.assertEqual( c.currentCost(), 1 )

		# setting a value whose cost exceeds the limit evicts it immediately,
		# so the next get() falls through to the getter
		c.set( 5, 20, 100000 )
		self.assertEqual( c.currentCost(), 0 )
		self.assertEqual( c.get( 5 ), None )
		self.assertEqual( c.currentCost(), 1 )

	def testCPPThreading( self ) :

		# arguments are :
		# iterations, number of unique values, maximum cost, clear frequency
		# cache exactly the right size
		IECore.testLRUCacheThreading( 100000, 100, 100 )
		# cache not quite big enough
		IECore.testLRUCacheThreading( 100000, 100, 90 )
		# cache thrashing like crazy
		IECore.testLRUCacheThreading( 100000, 1000, 2 )
		# clearing all the time while doing concurrent lookups
		IECore.testLRUCacheThreading( 100000, 1000, 90, 20 )
if __name__ == "__main__":
	# Run the LRUCache tests directly.
	unittest.main()
| lento/cortex | test/IECore/LRUCacheTest.py | Python | bsd-3-clause | 6,773 |
# Copyright 2013 Simonas Kazlauskas
# 2015-2020 Nick Boultbee
# 2019 Joschua Gandert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import glob
import os.path
import re
import sre_constants
from typing import Text
from senf import fsn2text
from quodlibet import _
from quodlibet.plugins.cover import CoverSourcePlugin
from quodlibet.util.dprint import print_w
from quodlibet import config
def get_ext(s):
    """Return the file extension of *s*, without its leading dot."""
    _, extension = os.path.splitext(s)
    return extension.lstrip('.')
def prefer_embedded():
    """Whether the user configured embedded covers to win over external files."""
    setting = config.getboolean("albumart", "prefer_embedded", False)
    return setting
def word_regex(s: Text) -> re.Pattern:
    """Compile a pattern matching *s* delimited by word boundaries or underscores."""
    delimiter = r'(\b|_)'
    return re.compile(delimiter + s + delimiter)
class EmbeddedCover(CoverSourcePlugin):
    """Cover source backed by artwork embedded in the audio file itself."""

    PLUGIN_ID = "embed-cover"
    PLUGIN_NAME = _("Embedded album covers")
    PLUGIN_DESC = _("Uses covers embedded into audio files.")

    embedded = True

    @classmethod
    def group_by(cls, song):
        # every song carries its own artwork, so group per song
        return song.key

    @staticmethod
    def priority():
        if prefer_embedded():
            return 0.85
        return 0.7

    @property
    def cover(self):
        if not self.song.has_images:
            return None
        image = self.song.get_primary_image()
        if image is None:
            return None
        return image.file
class FilesystemCover(CoverSourcePlugin):
    """Finds cover images on disk next to the song, scoring candidate
    filenames by keyword heuristics and returning the best match."""
    PLUGIN_ID = "filesystem-cover"
    PLUGIN_NAME = _("Filesystem cover")
    PLUGIN_DESC = _("Uses commonly named images found in common directories " +
                    "alongside the song.")
    # when True, each candidate's score is printed during the search
    DEBUG = False
    # sub-directories of the song's directory that are also searched
    cover_subdirs = {"scan", "scans", "images", "covers", "artwork"}
    # image extensions considered at all
    cover_exts = {"jpg", "jpeg", "png", "gif"}
    # exact well-known names (e.g. folder.jpg) — strongest keyword signal
    cover_name_regexes = {word_regex(s)
                          for s in ("^folder$", "^cover$", "^front$")}
    # generic positive / negative keywords used in scoring below
    cover_positive_regexes = {word_regex(s)
                              for s in
                              [".+front", "frontcover", "jacket", "albumart",
                               "edited", ".+cover"]}
    cover_negative_regexes = {word_regex(s)
                              for s in ["back", "inlay", "inset", "inside"]}

    @classmethod
    def group_by(cls, song):
        # in the common case this means we only search once per album
        return song('~dirname'), song.album_key

    @property
    def name(self):
        return "Filesystem"

    def __str__(self):
        return "Filesystem in %s" % (self.group_by(self.song)[0])

    @staticmethod
    def priority():
        return 0.80

    @property
    def cover(self):
        """Return an open binary file object for the best cover candidate,
        or None if nothing scores well enough."""
        # TODO: Deserves some refactoring
        if not self.song.is_file:
            return None

        base = self.song('~dirname')
        images = []

        # Step 1: user-forced filenames/globs from config take precedence.
        if config.getboolean("albumart", "force_filename"):
            score = 100
            for filename in config.get("albumart", "filename").split(","):
                # Remove white space to avoid confusion (e.g. "name, name2")
                filename = filename.strip()

                escaped_path = os.path.join(glob.escape(base), filename)
                try:
                    for path in glob.glob(escaped_path):
                        images.append((score, path))
                except sre_constants.error:
                    # Use literal filename if globbing causes errors
                    path = os.path.join(base, filename)

                    # We check this here, so we can search for alternative
                    # files in case no preferred file was found.
                    if os.path.isfile(path):
                        images.append((score, path))

                # So names and patterns at the start are preferred
                score -= 1

        # Step 2: heuristic scan of the directory (and known sub-dirs).
        if not images:
            entries = []
            try:
                entries = os.listdir(base)
            except EnvironmentError:
                print_w("Can't list album art directory %s" % base)

            fns = []
            for entry in entries:
                lentry = entry.lower()
                if get_ext(lentry) in self.cover_exts:
                    fns.append((None, entry))
                if lentry in self.cover_subdirs:
                    subdir = os.path.join(base, entry)
                    sub_entries = []
                    try:
                        sub_entries = os.listdir(subdir)
                    except EnvironmentError:
                        pass
                    for sub_entry in sub_entries:
                        lsub_entry = sub_entry.lower()
                        if get_ext(lsub_entry) in self.cover_exts:
                            fns.append((entry, sub_entry))

            # Score each candidate filename by keyword/tag matches.
            for sub, fn in fns:
                dec_lfn = os.path.splitext(fsn2text(fn))[0].lower()

                score = 0
                # check for the album label number
                labelid = self.song.get("labelid", "").lower()
                if labelid and labelid in dec_lfn:
                    score += 20

                # Track-related keywords
                values = set(self.song.list("~people")) | {self.song("album")}
                lowers = [value.lower().strip() for value in values
                          if len(value) > 1]
                total_terms = sum(len(s.split()) for s in lowers)
                total_words = len([word for word in dec_lfn.split()
                                   if len(word) > 1])
                # Penalise for many extra words in filename (wrong file?)
                length_penalty = (- int((total_words - 1) / total_terms)
                                  if total_terms else 0)

                # Matching tag values are very good
                score += 3 * sum([value in dec_lfn for value in lowers])
                # Well known names matching exactly (folder.jpg)
                score += 4 * sum(r.search(dec_lfn) is not None
                                 for r in self.cover_name_regexes)
                # Generic keywords
                score += 2 * sum(r.search(dec_lfn) is not None
                                 for r in self.cover_positive_regexes)
                score -= 3 * sum(r.search(dec_lfn) is not None
                                 for r in self.cover_negative_regexes)

                sub_text = f" (in {sub!r})" if sub else ""
                if self.DEBUG:
                    print(f"[{self.song('~~people~title')}]: "
                          f"Album art {fn!r}{sub_text} "
                          f"scores {score} ({length_penalty})")

                score += length_penalty
                # Let's only match if we're quite sure.
                # This allows other sources to kick in
                if score > 2:
                    if sub is not None:
                        fn = os.path.join(sub, fn)
                    images.append((score, os.path.join(base, fn)))
            images.sort(reverse=True)

        # Step 3: return the first candidate that is a readable file.
        for score, path in images:
            # could be a directory
            if not os.path.isfile(path):
                continue
            try:
                return open(path, "rb")
            except IOError:
                print_w("Failed reading album art \"%s\"" % path)

        return None
| Mellthas/quodlibet | quodlibet/util/cover/built_in.py | Python | gpl-2.0 | 7,271 |
from .plots import Plot,PlotError
from .. import context
from .. import items
from .. import maps
from .. import waypoints
from .. import monsters
from .. import dialogue
from .. import services
from .. import teams
from .. import characters
import random
from .. import randmaps
from .. import stats
from .. import spells
from .. import aibrain
class EarthbindTester( monsters.base.Monster ):
    """Debug monster whose only technique is EARTHBIND — used to exercise
    the spell and the technical combat AI."""
    name = "Earthbind Tester"
    # NOTE(review): INT/PIETY of 80 are far above the other stats —
    # presumably so the tester casts its technique reliably; confirm.
    statline = { stats.STRENGTH: 10, stats.TOUGHNESS: 12, stats.REFLEXES: 17, \
        stats.INTELLIGENCE: 80, stats.PIETY: 80, stats.CHARISMA: 4,
        stats.PHYSICAL_ATTACK: 5, stats.NATURAL_DEFENSE: 5 }
    SPRITENAME = "monster_animals.png"
    FRAME = 9
    TEMPLATES = ()
    MOVE_POINTS = 12
    VOICE = None
    HABITAT = ( context.HAB_BUILDING, context.HAB_TUNNELS,
     context.SET_EVERY,
     context.DES_EARTH, context.DES_CIVILIZED,
     context.MTY_BEAST, context.MTY_CREATURE, context.GEN_NATURE )
    ENC_LEVEL = 1
    TECHNIQUES = ( spells.earthspells.EARTHBIND, )
    COMBAT_AI = aibrain.BasicTechnicalAI()
    ATTACK = items.Attack( (1,4,0), element = stats.RESIST_PIERCING )
    def init_monster( self ):
        # Single Beast level, matching ENC_LEVEL 1.
        self.levels.append( monsters.base.Beast( 1, self ) )
class TestEncounter( Plot ):
    """Debug encounter: a hostile Azer, a healing fountain and a pre-stocked
    medium chest placed in a fuzzy room."""
    LABEL = "zTEST_FEATURE"
    def custom_init( self, nart ):
        locale = self.elements.get("LOCALE")
        myroom = randmaps.rooms.FuzzyRoom()
        enemy_team = teams.Team(default_reaction=-999, rank=self.rank,
            strength=0, habitat=None )
        myroom.contents.append( enemy_team )
        myroom.contents.append( monsters.ignan.Azer( enemy_team ) )
        myroom.contents.append( waypoints.HealingFountain() )
        chest = waypoints.MediumChest()
        chest.stock(20)
        myroom.contents.append( chest )
        self.register_element( "_ROOM", myroom, dident="LOCALE" )
        return True
class SmallTreasureEncounter( Plot ):
    """A standard-strength guarded encounter with a small treasure chest."""
    LABEL = "ENCOUNTER"
    @classmethod
    def matches( self, pstate ):
        """Requires the SCENE to exist."""
        locale = pstate.elements.get("LOCALE")
        return ( locale
            and context.MAP_DUNGEON in locale.desctags )
    def custom_init( self, nart ):
        locale = self.elements.get("LOCALE")
        mapgen = nart.get_map_generator( locale )
        myroom = mapgen.DEFAULT_ROOM()
        guard_team = teams.Team(default_reaction=-999, rank=self.rank,
            strength=100, habitat=locale.get_encounter_request(), fac=locale.fac )
        myroom.contents.append( guard_team )
        chest = waypoints.SmallChest()
        chest.stock(self.rank)
        myroom.contents.append( chest )
        self.register_element( "_ROOM", myroom, dident="LOCALE" )
        return True
class MediumTreasureEncounter( Plot ):
    """A stronger guarded encounter (strength 125) with a medium chest."""
    LABEL = "ENCOUNTER"
    @classmethod
    def matches( self, pstate ):
        """Requires the SCENE to exist."""
        locale = pstate.elements.get("LOCALE")
        return ( locale
            and context.MAP_DUNGEON in locale.desctags )
    def custom_init( self, nart ):
        locale = self.elements.get("LOCALE")
        mapgen = nart.get_map_generator( locale )
        myroom = mapgen.DEFAULT_ROOM()
        guard_team = teams.Team(default_reaction=-999, rank=self.rank,
            strength=125, habitat=locale.get_encounter_request(), fac=locale.fac )
        myroom.contents.append( guard_team )
        chest = waypoints.MediumChest()
        chest.stock(self.rank)
        myroom.contents.append( chest )
        self.register_element( "_ROOM", myroom, dident="LOCALE" )
        return True
class LargeTreasureEncounter( Plot ):
    """The strongest guarded treasure encounter (strength 160, large chest);
    only offered above rank 1."""
    LABEL = "ENCOUNTER"
    @classmethod
    def matches( self, pstate ):
        """Requires the SCENE to exist."""
        locale = pstate.elements.get("LOCALE")
        return ( locale and pstate.rank > 1
            and context.MAP_DUNGEON in locale.desctags )
    def custom_init( self, nart ):
        locale = self.elements.get("LOCALE")
        mapgen = nart.get_map_generator( locale )
        myroom = mapgen.DEFAULT_ROOM()
        guard_team = teams.Team(default_reaction=-999, rank=self.rank,
            strength=160, habitat=locale.get_encounter_request(), fac=locale.fac )
        myroom.contents.append( guard_team )
        chest = waypoints.LargeChest()
        chest.stock(self.rank)
        myroom.contents.append( chest )
        self.register_element( "_ROOM", myroom, dident="LOCALE" )
        return True
class WildAntagonists( Plot ):
    """A wilderness encounter whose habitat request also allows humanoids."""
    LABEL = "ENCOUNTER"
    @classmethod
    def matches( self, pstate ):
        """Requires the SCENE to exist."""
        return ( pstate.elements.get("LOCALE")
            and context.MAP_WILDERNESS in pstate.elements["LOCALE"].desctags )
    def custom_init( self, nart ):
        scene = self.elements.get("LOCALE")
        mygen = nart.get_map_generator( scene )
        room = mygen.DEFAULT_ROOM()
        myhabitat=scene.get_encounter_request()
        myhabitat[ context.MTY_HUMANOID ] = context.MAYBE
        # BUGFIX: pass the customized habitat to the team. Previously
        # habitat=scene.get_encounter_request() was passed again, so the
        # MTY_HUMANOID tweak in myhabitat was silently ignored (compare
        # WildEncounter, which does use its customized habitat).
        room.contents.append( teams.Team(default_reaction=-999, rank=self.rank,
          strength=100, habitat=myhabitat, fac=scene.fac ) )
        self.register_element( "_ROOM", room, dident="LOCALE" )
        return True
class WildEncounter( Plot ):
    """A wilderness beast encounter of randomized strength, favoring
    nature-themed monsters."""
    LABEL = "ENCOUNTER"
    active = True
    @classmethod
    def matches( self, pstate ):
        """Requires the SCENE to exist and be wilderness."""
        locale = pstate.elements.get("LOCALE")
        return ( locale
            and context.MAP_WILDERNESS in locale.desctags )
    def custom_init( self, nart ):
        # Add an encounter, monsters must be MTY_BEAST, favoring GEN_NATURE.
        locale = self.elements.get("LOCALE")
        mapgen = nart.get_map_generator( locale )
        myroom = mapgen.DEFAULT_ROOM()
        wanted = locale.get_encounter_request()
        wanted[ context.MTY_BEAST ] = context.PRESENT
        wanted[ context.GEN_NATURE ] = context.MAYBE
        beast_team = teams.Team(default_reaction=-999, rank=self.rank,
            strength=random.randint(90,120), habitat=wanted )
        myroom.contents.append( beast_team )
        self.register_element( "_ROOM", myroom, dident="LOCALE" )
        return True
| jwvhewitt/dmeternal | old_game/narrator/encounters.py | Python | gpl-2.0 | 6,062 |
from radical.entk import Task
import threading
import Queue
import sys
import time
from pympler import asizeof
from datetime import datetime
from multiprocessing import Process
# Signals all workers to stop looping.
# NOTE(review): this is a threading.Event but the workers below are started
# as multiprocessing.Process — each child gets its own copy, so set() in the
# parent does not stop the children; confirm intended.
kill = threading.Event()
# Payload pushed through the queue; empty string keeps messages tiny.
DATA = ''
def push_function(q, name):
try:
start_time = time.time()
tasks_pushed = 0
f = open('thread_%s.txt'%name,'w')
#header = 'timestamp\n'
#f.write(header)
while not kill.is_set():
#t = Task()
t = DATA
q.put(t)
#tasks_pushed +=1
cur_time = time.time()
#if tasks_pushed%10000 == 0:
#print '%s: Push average throughput: %s tasks/sec'%(name, float(tasks_pushed/(cur_time - start_time)))
#print '%s: Size of queue: %s, %s B'%(name, q.qsize(), asizeof.asizeof(q))
line = '%s\n' %cur_time
f.write(line)
#f.close()
except KeyboardInterrupt:
print 'Push thread killed'
#f.close()
except Exception,ex:
print 'Unexpected error: %s'%ex
#f.close()
def pop_function(q, name):
    """Pop from *q* until the global kill event is set, writing a timestamp
    per iteration to thread_<name>.txt. (Python 2 benchmark scaffolding —
    the actual q.get() is currently toggled off below.)"""
    try:
        start_time = time.time()
        tasks_popped = 0
        f = open('thread_%s.txt'%name,'w')
        #header = 'timestamp\n'
        #f.write(header)
        while not kill.is_set():
            try:
                #t = q.get(timeout=2)
                #tasks_popped +=1
                #cur_time = time.time()
                cur_time = time.time()
                #if tasks_popped%10000 == 0:
                # print '%s: Pop average throughput: %s tasks/sec'%(name, float(tasks_popped/(cur_time - start_time)))
                # print '%s: Size of queue: %s, %s B'%(name, q.qsize(), asizeof.asizeof(q))
                line = '%s\n' %cur_time
                f.write(line)
            # NOTE(review): with q.get() commented out nothing in this try
            # block can raise Queue.Empty — this handler is currently dead.
            except Queue.Empty:
                pass
        f.close()
    except KeyboardInterrupt:
        # NOTE(review): message says 'Push' — copy-paste from push_function.
        print 'Push thread killed'
        f.close()
    except Exception,ex:
        print 'Unexpected error: %s'%ex
        f.close()
if __name__ == '__main__':
    # Throughput experiment: spin up N pusher and N popper processes over
    # M shared queues, run until interrupted. (Python 2)
    num_push_threads = 1
    num_pop_threads = 1
    num_queues = 1
    print 'Size of DATA: %s'%(asizeof.asizeof(DATA))
    print 'Size of DATA: %s'%(sys.getsizeof(DATA))
    print 'Size of Task: %s'%(asizeof.asizeof(Task()))
    if (num_queues>num_pop_threads)or(num_queues>num_push_threads):
        print 'Check number of queues, threads'
        sys.exit(1)
    # Create the queues first
    q_list = []
    pop_threads = []
    push_threads = []
    try:
        for n in range(num_queues):
            q_list.append(Queue.Queue())
        print 'Queues created'
        # Start popping threads and assign queues
        # NOTE(review): despite the names, these are multiprocessing
        # Processes, not threads — Queue.Queue and the kill Event are not
        # shared across processes; confirm intended.
        for t in range(num_pop_threads):
            cur_q = t%len(q_list) # index of queue to be used
            name = 'pop_%s_queue_%s'%(t,cur_q)
            #t1 = threading.Thread(target=pop_function, args=(q_list[cur_q],name), name=name)
            t1 = Process(target=pop_function, args=(q_list[cur_q],name), name=name)
            t1.start()
            pop_threads.append(t1)
        print 'Pop threads created'
        print 'start time: ', time.time()
        # Start pushing threads and assign queues
        for t in range(num_push_threads):
            cur_q = t%len(q_list) # index of queue to be used
            name = 'push_%s_queue_%s'%(t,cur_q)
            #t1 = threading.Thread(target=push_function, args=(q_list[cur_q],name), name=name)
            t1 = Process(target=push_function, args=(q_list[cur_q],name), name=name)
            t1.start()
            push_threads.append(t1)
        print 'Push threads created '
        # Busy-wait until Ctrl-C.
        while True:
            #time.sleep(1)
            pass
    except KeyboardInterrupt:
        print 'Main process killed'
        kill.set()
        for t in pop_threads:
            t.join()
        for t in push_threads:
            t.join()
    except Exception, ex:
        print 'Unknown error: %s' %ex
        kill.set()
        for t in pop_threads:
            t.join()
        for t in push_threads:
            t.join()
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements DataFlow Job functionality."""
from . import _job
class DataflowJob(_job.Job):
  """Represents a DataFlow Job.
  """

  def __init__(self, runner_results):
    """Initializes an instance of a DataFlow Job.

    Args:
      runner_results: a DataflowPipelineResult returned from Pipeline.run().
    """
    super(DataflowJob, self).__init__(runner_results._job.name)
    self._runner_results = runner_results

  def _refresh_state(self):
    """ Refresh the job info. """
    # DataFlow's DataflowPipelineResult does not refresh state, so we have to do it ourselves
    # as a workaround.
    self._runner_results._job = (
        self._runner_results._runner.dataflow_client.get_job(self._runner_results.job_id()))
    self._is_complete = self._runner_results.state in ['STOPPED', 'DONE', 'FAILED', 'CANCELLED']
    # BUGFIX: was "self._fator_error" — an apparent typo that created a new
    # attribute instead of updating the fatal-error field.
    # NOTE(review): confirm against _job.Job that "_fatal_error" is the
    # attribute the base class reads.
    self._fatal_error = getattr(self._runner_results._runner, 'last_error_msg', None)
| jdanbrown/pydatalab | datalab/utils/_dataflow_job.py | Python | apache-2.0 | 1,506 |
import os
from django.test.utils import override_settings
from django.test import TestCase
from casexml.apps.case.tests.util import delete_all_cases
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
@override_settings(CASEXML_FORCE_DOMAIN_CHECK=False)
class OutOfOrderCaseTest(TestCase):
    """Verifies that case forms submitted out of chronological order
    (the update form before the create form) still rebuild a consistent case."""
    def setUp(self):
        super(OutOfOrderCaseTest, self).setUp()
        # start from a clean slate so the fixture forms are the only input
        delete_all_cases()
    def testOutOfOrderSubmissions(self):
        dir = os.path.join(os.path.dirname(__file__), "data", "ordering")
        # deliberately submit the update form first, then the create form
        for fname in ('update_oo.xml', 'create_oo.xml'):
            with open(os.path.join(dir, fname), "rb") as f:
                xml_data = f.read()
            submit_form_locally(xml_data, 'test-domain')
        # case id hard-coded in the fixture XML files
        case = CaseAccessors().get_case('30bc51f6-3247-4966-b4ae-994f572e85fe')
        self.assertEqual('from the update form', case.pupdate)
        self.assertEqual('from the create form', case.pcreate)
        # for a property set by both forms, the (later) update form wins
        self.assertEqual('overridden by the update form', case.pboth)
| qedsoftware/commcare-hq | corehq/ex-submodules/casexml/apps/case/tests/test_out_of_order_processing.py | Python | bsd-3-clause | 1,076 |
import natmsgclib
import natmsgactions
# This is a test of manually recovering raw shards from a download
# temp directory that is created by natmscc_0_1.py
natmsgclib.nm_start()
# maximum debug output while recovering
natmsgclib.VERBOSITY = 9
#natmsgactions.unpack_metadata_files(inbound_save_dir='~/nm_mail/Identity1/incoming/20150225_175352.20',
# private_box_id='PRV004001010113CC95900BF7D64498E8A23D6D00E3862CF3B29E2B597DB492BC65CCADF11AF529AF8914B7B2B4290E6F86D54DC1E6C4CB0AA08EFE1658465836B5808C2276CC',
# fetch_id='20150225_140127.56', max_shard_count=3,
# delete_shard_for_testing=False, delete_temp_files=False)
# NOTE(review): identity paths, box id and fetch id are hard-coded for one
# specific recovery run; edit them before reusing this script.
rc = natmsgactions.unpack_metadata_files(inbound_save_dir='/home/H1Ent/nm_mail/Identity1/incoming/20150301_000122.39/0001/shardtmp-z_q9o1g5',
    private_box_id='PRV004001010113CC95900BF7D64498E8A23D6D00E3862CF3B29E2B597DB492BC65CCADF11AF529AF8914B7B2B4290E6F86D54DC1E6C4CB0AA08EFE1658465836B5808C2276CC',
    fetch_id='20150225_140127.56', max_shard_count=3,
    delete_shard_for_testing=False, delete_temp_files=False)
print('rc was ' + str(rc))
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings
class Migration(SchemaMigration):
    """South schema migration: add the ``approved`` boolean field
    (db column ``labellise``) to TouristicContent and TouristicEvent.
    """

    def forwards(self, orm):
        # Adding field 'TouristicContent.approved'
        db.add_column('t_t_contenu_touristique', 'labellise',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # Adding field 'TouristicEvent.approved'
        db.add_column('t_t_evenement_touristique', 'labellise',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'TouristicContent.approved'
        db.delete_column('t_t_contenu_touristique', 'labellise')

        # Deleting field 'TouristicEvent.approved'
        db.delete_column('t_t_evenement_touristique', 'labellise')

    # South's frozen ORM snapshot (auto-generated): used to build the fake
    # `orm` passed to forwards()/backwards(). Do not edit by hand.
    # NOTE(review): the new 'approved' entries carry no db_column although the
    # actual column is 'labellise' — harmless for a frozen snapshot, but worth
    # confirming against the model definition.
    models = {
        u'authent.structure': {
            'Meta': {'ordering': "['name']", 'object_name': 'Structure'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        u'cirkwi.cirkwitag': {
            'Meta': {'ordering': "['name']", 'object_name': 'CirkwiTag', 'db_table': "'o_b_cirkwi_tag'"},
            'eid': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"})
        },
        u'common.recordsource': {
            'Meta': {'ordering': "['name']", 'object_name': 'RecordSource', 'db_table': "'o_b_source_fiche'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'", 'blank': 'True'}),
            'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
        },
        u'common.theme': {
            'Meta': {'ordering': "['label']", 'object_name': 'Theme', 'db_table': "'o_b_theme'"},
            'cirkwi': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cirkwi.CirkwiTag']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'theme'"}),
            'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
        },
        u'tourism.datasource': {
            'Meta': {'ordering': "['title', 'url']", 'object_name': 'DataSource', 'db_table': "'t_t_source_donnees'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"}),
            'targets': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'titre'"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_column': "'type'"}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '400', 'db_column': "'url'"})
        },
        u'tourism.informationdesk': {
            'Meta': {'ordering': "['name']", 'object_name': 'InformationDesk', 'db_table': "'t_b_renseignement'"},
            'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
            'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': str(settings.SRID), 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom'", 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'municipality': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'commune'", 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'nom'"}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'telephone'", 'blank': 'True'}),
            'photo': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'photo'", 'blank': 'True'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'db_column': "'code'", 'blank': 'True'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'rue'", 'blank': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'desks'", 'db_column': "'type'", 'to': u"orm['tourism.InformationDeskType']"}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
        },
        u'tourism.informationdesktype': {
            'Meta': {'ordering': "['label']", 'object_name': 'InformationDeskType', 'db_table': "'t_b_type_renseignement'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'label'"}),
            'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
        },
        u'tourism.touristiccontent': {
            'Meta': {'object_name': 'TouristicContent', 'db_table': "'t_t_contenu_touristique'"},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contents'", 'db_column': "'categorie'", 'to': u"orm['tourism.TouristicContentCategory']"}),
            'contact': ('django.db.models.fields.TextField', [], {'db_column': "'contact'", 'blank': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
            'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
            'description_teaser': ('django.db.models.fields.TextField', [], {'db_column': "'chapeau'", 'blank': 'True'}),
            'eid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'id_externe'", 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
            'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'srid': str(settings.SRID)}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
            'practical_info': ('django.db.models.fields.TextField', [], {'db_column': "'infos_pratiques'", 'blank': 'True'}),
            'publication_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_publication'", 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'public'"}),
            'review': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'relecture'"}),
            'source': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'touristiccontents'", 'to': u"orm['common.RecordSource']", 'db_table': "'t_r_contenu_touristique_source'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
            'themes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'touristiccontents'", 'to': u"orm['common.Theme']", 'db_table': "'t_r_contenu_touristique_theme'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'type1': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'contents1'", 'blank': 'True', 'db_table': "'t_r_contenu_touristique_type1'", 'to': u"orm['tourism.TouristicContentType']"}),
            'type2': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'contents2'", 'blank': 'True', 'db_table': "'t_r_contenu_touristique_type2'", 'to': u"orm['tourism.TouristicContentType']"}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
        },
        u'tourism.touristiccontentcategory': {
            'Meta': {'ordering': "['order', 'label']", 'object_name': 'TouristicContentCategory', 'db_table': "'t_b_contenu_touristique_categorie'"},
            'geometry_type': ('django.db.models.fields.CharField', [], {'default': "'point'", 'max_length': '16', 'db_column': "'type_geometrie'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'tri'", 'blank': 'True'}),
            'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"}),
            'type1_label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'label_type1'", 'blank': 'True'}),
            'type2_label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'label_type2'", 'blank': 'True'})
        },
        u'tourism.touristiccontenttype': {
            'Meta': {'ordering': "['label']", 'object_name': 'TouristicContentType', 'db_table': "'t_b_contenu_touristique_type'"},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'types'", 'db_column': "'categorie'", 'to': u"orm['tourism.TouristicContentCategory']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_list': ('django.db.models.fields.IntegerField', [], {'db_column': "'liste_choix'"}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
            'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'", 'blank': 'True'})
        },
        u'tourism.touristicevent': {
            'Meta': {'ordering': "['-begin_date']", 'object_name': 'TouristicEvent', 'db_table': "'t_t_evenement_touristique'"},
            'accessibility': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'accessibilite'", 'blank': 'True'}),
            'begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_debut'", 'blank': 'True'}),
            'booking': ('django.db.models.fields.TextField', [], {'db_column': "'reservation'", 'blank': 'True'}),
            'contact': ('django.db.models.fields.TextField', [], {'db_column': "'contact'", 'blank': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
            'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
            'description_teaser': ('django.db.models.fields.TextField', [], {'db_column': "'chapeau'", 'blank': 'True'}),
            'duration': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'duree'", 'blank': 'True'}),
            'eid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'id_externe'", 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_fin'", 'blank': 'True'}),
            'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': str(settings.SRID)}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'meeting_point': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'point_rdv'", 'blank': 'True'}),
            'meeting_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'db_column': "'heure_rdv'", 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
            'organizer': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'organisateur'", 'blank': 'True'}),
            'participant_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'nb_places'", 'blank': 'True'}),
            'practical_info': ('django.db.models.fields.TextField', [], {'db_column': "'infos_pratiques'", 'blank': 'True'}),
            'publication_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_publication'", 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'public'"}),
            'review': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'relecture'"}),
            'source': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'touristicevents'", 'to': u"orm['common.RecordSource']", 'db_table': "'t_r_evenement_touristique_source'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'speaker': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'intervenant'", 'blank': 'True'}),
            'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
            'target_audience': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_column': "'public_vise'", 'blank': 'True'}),
            'themes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'touristic_events'", 'to': u"orm['common.Theme']", 'db_table': "'t_r_evenement_touristique_theme'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tourism.TouristicEventType']", 'null': 'True', 'db_column': "'type'", 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
        },
        u'tourism.touristiceventtype': {
            'Meta': {'ordering': "['type']", 'object_name': 'TouristicEventType', 'db_table': "'t_b_evenement_touristique_type'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'", 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'type'"})
        }
    }

    complete_apps = ['tourism']
| mabhub/Geotrek | geotrek/tourism/migrations/0027_auto__add_field_touristiccontent_labelled__add_field_touristicevent_la.py | Python | bsd-2-clause | 16,901 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import mass_mailing
import mail_mail
import mail_thread
import wizard
import controllers
| trabacus-softapps/openerp-8.0-cc | openerp/addons/mass_mailing/__init__.py | Python | agpl-3.0 | 1,074 |
"""The tests for marytts tts platforms."""
| fbradyirl/home-assistant | tests/components/marytts/__init__.py | Python | apache-2.0 | 43 |
#!/usr/bin/env python3
# coding: utf-8
# Pythonista appex script to copy and unpack a repo zipfile from GitHub
import appex, os, zipfile # noqa
if not appex.is_running_extension():
    # Launched directly, not from the share sheet: show usage instructions.
    print(
        """=====
* In Safari browser, navigate to a GitHub repo of interest.
* Tap the green 'Clone or download' button.
* Tap 'Download ZIP'. (Big repos may take seveal seconds to download).
* Tap 'Open in...'.
* Tap 'Run Pythonista Script'.
* Pick this script and tap the run button.
* When you return to Pythonista the files should be in '~/from GitHub/'."""
    )
else:
    # Unpack the shared zip file into the 'from GitHub' directory.
    zip_path = appex.get_file_path()
    dest_root = os.path.abspath(os.path.expanduser("from GitHub"))
    os.makedirs(dest_root, exist_ok=True)
    with zipfile.ZipFile(zip_path) as archive:
        archive.extractall(dest_root)
    repo_dir = os.path.splitext(os.path.split(zip_path)[-1])[0]
    target = os.path.relpath(os.path.join(dest_root, repo_dir))
    print("Files were unzipped into ~/" + target)
| cclauss/Ten-lines-or-less | read_zipfile_from_github.py | Python | apache-2.0 | 988 |
from socket import *
JSESSIONID = 0

# Raw HTTP request templates for the campus portal. Kept verbatim: the
# server is sensitive to the exact header layout.
# NOTE(review): a blank separator line between headers and body is assumed
# for the POST request (required by HTTP) -- confirm against the original.
data = '''GET /ntss/ HTTP/1.1
Host: 192.168.176.246:8080
Connection: keep-alive
'''
data2 = '''POST /ntss/login!welcome.action HTTP/1.1
Host: 192.168.176.246:8080
Connection: keep-alive
Content-Length: 87
Cache-Control: max-age=0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
Origin: http://192.168.176.246:8080
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36
Content-Type: application/x-www-form-urlencoded
Referer: http://192.168.176.246:8080/ntss/
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.8
Cookie: JSESSIONID=CDCA1AA3A4DA5C371E07B2C336016690

account=3138010031&pwd=3138010031&verifycode=1865&mmtip=&ok=%E7%A1%AE%E3%80%80%E8%AE%A4
'''
data3 = '''GET /ntss/verifycode.servlet HTTP/1.1
Host: 192.168.176.246:8080
Connection: keep-alive
Cookie: JSESSIONID=60441D87D39982121B6382E4F17AF164
'''

print(data)

# Send the initial GET and read the response; close the socket even on
# failure (it was previously leaked). sendall replaces send, which may
# transmit only part of the request.
client = socket(AF_INET, SOCK_STREAM)
try:
    client.connect(('192.168.176.246', 8080))
    client.sendall(data.encode('utf8'))
    data1 = client.recv(1024).decode('utf8', 'replace')
finally:
    client.close()

# Extract the session cookie; guard against a response without one
# (the unguarded split previously raised IndexError).
parts = data1.split('JSESSIONID=')
if len(parts) > 1:
    JSESSIONID = parts[1][0:32]
    print(JSESSIONID)
else:
    print('no JSESSIONID cookie in response')

# Save the raw response for inspection; `with` guarantees the file is closed.
with open("test.txt", 'w') as fp:
    fp.write(data1)
| relzhong/SMUClass | test.py | Python | gpl-2.0 | 1,375 |
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.identity import base
from tempest import test
class RolesNegativeTestJSON(base.BaseIdentityV2AdminTest):
    """Negative tests for the Identity v2 admin roles API.

    Each test verifies that an invalid operation -- missing/invalid token,
    non-admin caller, nonexistent resource, duplicate creation -- fails with
    the expected exception from tempest_lib.
    """

    def _get_role_params(self):
        """Create a test user, tenant and role and return them as a tuple."""
        self.data.setup_test_user()
        self.data.setup_test_role()
        user = self.get_user_by_name(self.data.test_user)
        tenant = self.get_tenant_by_name(self.data.test_tenant)
        role = self.get_role_by_name(self.data.test_role)
        return (user, tenant, role)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('d5d5f1df-f8ca-4de0-b2ef-259c1cc67025')
    def test_list_roles_by_unauthorized_user(self):
        # Non-administrator user should not be able to list roles
        self.assertRaises(lib_exc.Forbidden,
                          self.non_admin_client.list_roles)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('11a3c7da-df6c-40c2-abc2-badd682edf9f')
    def test_list_roles_request_without_token(self):
        # Request to list roles without a valid token should fail.
        # Pattern used throughout: delete the current token, run the call,
        # then clear cached auth so later tests re-authenticate cleanly.
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        self.assertRaises(lib_exc.Unauthorized, self.client.list_roles)
        self.client.auth_provider.clear_auth()

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('c0b89e56-accc-4c73-85f8-9c0f866104c1')
    def test_role_create_blank_name(self):
        # Should not be able to create a role with a blank name
        self.assertRaises(lib_exc.BadRequest, self.client.create_role, '')

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('585c8998-a8a4-4641-a5dd-abef7a8ced00')
    def test_create_role_by_unauthorized_user(self):
        # Non-administrator user should not be able to create role
        role_name = data_utils.rand_name(name='role-')
        self.assertRaises(lib_exc.Forbidden,
                          self.non_admin_client.create_role, role_name)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('a7edd17a-e34a-4aab-8bb7-fa6f498645b8')
    def test_create_role_request_without_token(self):
        # Request to create role without a valid token should fail
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        role_name = data_utils.rand_name(name='role-')
        self.assertRaises(lib_exc.Unauthorized,
                          self.client.create_role, role_name)
        self.client.auth_provider.clear_auth()

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('c0cde2c8-81c1-4bb0-8fe2-cf615a3547a8')
    def test_role_create_duplicate(self):
        # Role names should be unique
        role_name = data_utils.rand_name(name='role-dup-')
        body = self.client.create_role(role_name)
        role1_id = body.get('id')
        self.addCleanup(self.client.delete_role, role1_id)
        self.assertRaises(lib_exc.Conflict, self.client.create_role,
                          role_name)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('15347635-b5b1-4a87-a280-deb2bd6d865e')
    def test_delete_role_by_unauthorized_user(self):
        # Non-administrator user should not be able to delete role
        role_name = data_utils.rand_name(name='role-')
        body = self.client.create_role(role_name)
        self.data.roles.append(body)
        role_id = body.get('id')
        self.assertRaises(lib_exc.Forbidden,
                          self.non_admin_client.delete_role, role_id)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('44b60b20-70de-4dac-beaf-a3fc2650a16b')
    def test_delete_role_request_without_token(self):
        # Request to delete role without a valid token should fail
        role_name = data_utils.rand_name(name='role-')
        body = self.client.create_role(role_name)
        self.data.roles.append(body)
        role_id = body.get('id')
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        self.assertRaises(lib_exc.Unauthorized,
                          self.client.delete_role,
                          role_id)
        self.client.auth_provider.clear_auth()

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('38373691-8551-453a-b074-4260ad8298ef')
    def test_delete_role_non_existent(self):
        # Attempt to delete a non existent role should fail
        non_existent_role = str(uuid.uuid4().hex)
        self.assertRaises(lib_exc.NotFound, self.client.delete_role,
                          non_existent_role)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('391df5cf-3ec3-46c9-bbe5-5cb58dd4dc41')
    def test_assign_user_role_by_unauthorized_user(self):
        # Non-administrator user should not be authorized to
        # assign a role to user
        (user, tenant, role) = self._get_role_params()
        self.assertRaises(lib_exc.Forbidden,
                          self.non_admin_client.assign_user_role,
                          tenant['id'], user['id'], role['id'])

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('f0d2683c-5603-4aee-95d7-21420e87cfd8')
    def test_assign_user_role_request_without_token(self):
        # Request to assign a role to a user without a valid token
        (user, tenant, role) = self._get_role_params()
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        self.assertRaises(lib_exc.Unauthorized,
                          self.client.assign_user_role, tenant['id'],
                          user['id'], role['id'])
        self.client.auth_provider.clear_auth()

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('99b297f6-2b5d-47c7-97a9-8b6bb4f91042')
    def test_assign_user_role_for_non_existent_role(self):
        # Attempt to assign a non existent role to user should fail
        (user, tenant, role) = self._get_role_params()
        non_existent_role = str(uuid.uuid4().hex)
        self.assertRaises(lib_exc.NotFound, self.client.assign_user_role,
                          tenant['id'], user['id'], non_existent_role)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('b2285aaa-9e76-4704-93a9-7a8acd0a6c8f')
    def test_assign_user_role_for_non_existent_tenant(self):
        # Attempt to assign a role on a non existent tenant should fail
        (user, tenant, role) = self._get_role_params()
        non_existent_tenant = str(uuid.uuid4().hex)
        self.assertRaises(lib_exc.NotFound, self.client.assign_user_role,
                          non_existent_tenant, user['id'], role['id'])

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('5c3132cd-c4c8-4402-b5ea-71eb44e97793')
    def test_assign_duplicate_user_role(self):
        # Duplicate user role should not get assigned
        (user, tenant, role) = self._get_role_params()
        self.client.assign_user_role(tenant['id'], user['id'], role['id'])
        self.assertRaises(lib_exc.Conflict, self.client.assign_user_role,
                          tenant['id'], user['id'], role['id'])

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('d0537987-0977-448f-a435-904c15de7298')
    def test_remove_user_role_by_unauthorized_user(self):
        # Non-administrator user should not be authorized to
        # remove a user's role
        (user, tenant, role) = self._get_role_params()
        self.client.assign_user_role(tenant['id'],
                                     user['id'],
                                     role['id'])
        self.assertRaises(lib_exc.Forbidden,
                          self.non_admin_client.remove_user_role,
                          tenant['id'], user['id'], role['id'])

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('cac81cf4-c1d2-47dc-90d3-f2b7eb572286')
    def test_remove_user_role_request_without_token(self):
        # Request to remove a user's role without a valid token
        (user, tenant, role) = self._get_role_params()
        self.client.assign_user_role(tenant['id'],
                                     user['id'],
                                     role['id'])
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        self.assertRaises(lib_exc.Unauthorized,
                          self.client.remove_user_role, tenant['id'],
                          user['id'], role['id'])
        self.client.auth_provider.clear_auth()

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('ab32d759-cd16-41f1-a86e-44405fa9f6d2')
    def test_remove_user_role_non_existent_role(self):
        # Attempt to delete a non existent role from a user should fail
        (user, tenant, role) = self._get_role_params()
        self.client.assign_user_role(tenant['id'],
                                     user['id'],
                                     role['id'])
        non_existent_role = str(uuid.uuid4().hex)
        self.assertRaises(lib_exc.NotFound, self.client.remove_user_role,
                          tenant['id'], user['id'], non_existent_role)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('67a679ec-03dd-4551-bbfc-d1c93284f023')
    def test_remove_user_role_non_existent_tenant(self):
        # Attempt to remove a role from a non existent tenant should fail
        (user, tenant, role) = self._get_role_params()
        self.client.assign_user_role(tenant['id'],
                                     user['id'],
                                     role['id'])
        non_existent_tenant = str(uuid.uuid4().hex)
        self.assertRaises(lib_exc.NotFound, self.client.remove_user_role,
                          non_existent_tenant, user['id'], role['id'])

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('7391ab4c-06f3-477a-a64a-c8e55ce89837')
    def test_list_user_roles_by_unauthorized_user(self):
        # Non-administrator user should not be authorized to list
        # a user's roles
        (user, tenant, role) = self._get_role_params()
        self.client.assign_user_role(tenant['id'], user['id'], role['id'])
        self.assertRaises(lib_exc.Forbidden,
                          self.non_admin_client.list_user_roles, tenant['id'],
                          user['id'])

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('682adfb2-fd5f-4b0a-a9ca-322e9bebb907')
    def test_list_user_roles_request_without_token(self):
        # Request to list user's roles without a valid token should fail
        (user, tenant, role) = self._get_role_params()
        token = self.client.auth_provider.get_token()
        self.client.delete_token(token)
        try:
            self.assertRaises(lib_exc.Unauthorized,
                              self.client.list_user_roles, tenant['id'],
                              user['id'])
        finally:
            self.client.auth_provider.clear_auth()
| rzarzynski/tempest | tempest/api/identity/admin/v2/test_roles_negative.py | Python | apache-2.0 | 11,624 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import logging.handlers
import os
import pprint
import release
import sys
import threading
import psycopg2
import openerp
import sql_db
import tools
_logger = logging.getLogger(__name__)
def log(logger, level, prefix, msg, depth=None):
    """Pretty-print ``msg`` through ``logger`` at ``level``.

    The first emitted line carries ``prefix``; every following line of the
    pretty-printed value is padded with spaces of the same width so the
    output stays visually aligned. ``depth`` is forwarded to
    ``pprint.pformat``.
    """
    rendered = prefix + pprint.pformat(msg, depth=depth)
    continuation = ' ' * len(prefix)
    for index, line in enumerate(rendered.split('\n')):
        pad = '' if index == 0 else continuation
        logger.log(level, pad + line)
def LocalService(name):
    """
    Deprecated service locator kept for backwards compatibility.

    Only two service families are still resolved here:

    * ``'workflow'`` -- returns :mod:`openerp.workflow` (new code should use
      it directly, or better, the methods on ``openerp.osv.orm.Model``);
    * ``'report.*'`` -- returns the matching report, looking first in the
      in-memory registry of ``report_int`` instances and then falling back
      to a database lookup on ``ir.actions.report.xml``.

    Any other name yields ``None``.
    """
    assert openerp.conf.deprecation.allow_local_service
    _logger.warning("LocalService() is deprecated since march 2013 (it was called with '%s')." % name)

    if name == 'workflow':
        return openerp.workflow

    if name.startswith('report.'):
        cached = openerp.report.interface.report_int._reports.get(name)
        if cached:
            return cached
        dbname = getattr(threading.currentThread(), 'dbname', None)
        if not dbname:
            return None
        registry = openerp.modules.registry.RegistryManager.get(dbname)
        with registry.cursor() as cr:
            return registry['ir.actions.report.xml']._lookup_report(cr, name[len('report.'):])
class PostgreSQLHandler(logging.Handler):
    """PostgreSQL Logging Handler: stores log records in the ``ir_logging``
    table of the current thread's database by default; the target database
    can be forced with ``--log-db=DBNAME``.
    """
    def emit(self, record):
        # Prefer the explicitly configured log database, falling back to
        # the database the current thread is dispatching for.
        ct = threading.current_thread()
        ct_db = getattr(ct, 'dbname', None)
        dbname = tools.config['log_db'] or ct_db
        if not dbname:
            return
        # Best effort: swallow any DB/connection error (and mute sql_db's
        # own logging) so that logging can never crash or recurse.
        with tools.ignore(Exception), tools.mute_logger('openerp.sql_db'), sql_db.db_connect(dbname).cursor() as cr:
            msg = tools.ustr(record.msg)
            if record.args:
                msg = msg % record.args
            traceback = getattr(record, 'exc_text', '')
            if traceback:
                msg = "%s\n%s" % (msg, traceback)
            # we do not use record.levelname because it may have been changed by ColoredFormatter.
            levelname = logging.getLevelName(record.levelno)
            val = ('server', ct_db, record.name, levelname, msg, record.pathname, record.lineno, record.funcName)
            cr.execute("""
                INSERT INTO ir_logging(create_date, type, dbname, name, level, message, path, line, func)
                VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s)
            """, val)
# ANSI color indices; DEFAULT (9) selects the terminal's default color.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
# The background is set with 40 plus the number of the color, and the foreground with 30.
# These are the escape sequences needed to get colored output.
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
# Template: foreground sequence + background sequence + text + reset.
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
# (foreground, background) pair used to colorize each log level's name.
LEVEL_COLOR_MAPPING = {
    logging.DEBUG: (BLUE, DEFAULT),
    logging.INFO: (GREEN, DEFAULT),
    logging.WARNING: (YELLOW, DEFAULT),
    logging.ERROR: (RED, DEFAULT),
    logging.CRITICAL: (WHITE, RED),
}
class DBFormatter(logging.Formatter):
    """Formatter that enriches each record with the worker process id and
    the database name attached to the current thread ('?' when none is set),
    so format strings may use ``%(pid)s`` and ``%(dbname)s``."""

    def format(self, record):
        current = threading.currentThread()
        record.pid = os.getpid()
        record.dbname = getattr(current, 'dbname', '?')
        return logging.Formatter.format(self, record)
class ColoredFormatter(DBFormatter):
    """DBFormatter variant that wraps the record's level name in the ANSI
    color codes configured in LEVEL_COLOR_MAPPING."""

    def format(self, record):
        foreground, background = LEVEL_COLOR_MAPPING[record.levelno]
        colored_level = COLOR_PATTERN % (30 + foreground, 40 + background, record.levelname)
        record.levelname = colored_level
        return DBFormatter.format(self, record)
# Guard flag so that init_logger() configures handlers only once per process.
_logger_init = False
def init_logger():
    """Configure the root logger according to the server configuration.

    Chooses one handler (syslog / NT event log / rotating or plain log file /
    stdout), attaches a (possibly colored) formatter, optionally adds the
    PostgreSQL handler, and applies per-logger levels from the config.
    Idempotent: subsequent calls are no-ops.
    """
    global _logger_init
    if _logger_init:
        return
    _logger_init = True
    from tools.translate import resetlocale
    resetlocale()
    # create a format for log messages and dates
    # NOTE: 'format' shadows the builtin, kept as-is for compatibility.
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'
    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        else:
            handler = logging.handlers.SysLogHandler()
        # Syslog provides its own timestamp, so use a shorter format.
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'
    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if tools.config['logrotate'] is not False:
                handler = logging.handlers.TimedRotatingFileHandler(filename=logf, when='D', interval=1, backupCount=30)
            elif os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.handlers.FileHandler(logf)
        except Exception:
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
            handler = logging.StreamHandler(sys.stdout)
    else:
        # Normal Handler on standard output
        handler = logging.StreamHandler(sys.stdout)
    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())
    if isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
    else:
        formatter = DBFormatter(format)
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    if tools.config['log_db']:
        # Database logging only records WARNING and above.
        postgresqlHandler = PostgreSQLHandler()
        postgresqlHandler.setLevel(logging.WARNING)
        logging.getLogger().addHandler(postgresqlHandler)
    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])
    logconfig = tools.config['log_handler']
    # Later entries win: defaults, then --log-level shortcut, then explicit
    # --log-handler entries.
    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)
    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
# Baseline "logger:LEVEL" pairs applied before any user configuration.
# The trailing ':INFO' entry sets the level of the root logger.
DEFAULT_LOG_CONFIGURATION = [
    'openerp.workflow.workitem:WARNING',
    'openerp.http.rpc.request:INFO',
    'openerp.http.rpc.response:INFO',
    'openerp.addons.web.http:INFO',
    'openerp.sql_db:INFO',
    ':INFO',
]
# Maps the --log-level shortcut values to concrete "logger:LEVEL" lists.
PSEUDOCONFIG_MAPPER = {
    'debug_rpc_answer': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG', 'openerp.http.rpc.response:DEBUG'],
    'debug_rpc': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG'],
    'debug': ['openerp:DEBUG'],
    'debug_sql': ['openerp.sql_db:DEBUG'],
    'info': [],
    'warn': ['openerp:WARNING', 'werkzeug:WARNING'],
    'error': ['openerp:ERROR', 'werkzeug:ERROR'],
    'critical': ['openerp:CRITICAL', 'werkzeug:CRITICAL'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| LeartS/odoo | openerp/netsvc.py | Python | agpl-3.0 | 8,801 |
"""
This script processes the output from the C preprocessor and extracts all
qstr. Each qstr is transformed into a qstr definition of the form 'Q(...)'.
This script works with Python 2.6, 2.7, 3.3 and 3.4.
"""
from __future__ import print_function
import re
import sys
import os
# Blacklist of qstrings that are specially handled in further
# processing and must therefore not be emitted as Q(...) definitions here.
QSTRING_BLACK_LIST = set(['NULL', 'number_of'])
def write_out(fname, output):
    """Write the collected Q(...) lines for source file *fname* into the
    output directory, flattening the path into a single file name.

    Does nothing when *output* is empty.
    """
    if not output:
        return
    # Encode path separators and parent references into flat-name-safe tokens.
    for old, new in (("/", "__"), ("\\", "__"), (":", "@"), ("..", "@@")):
        fname = fname.replace(old, new)
    with open(args.output_dir + "/" + fname + ".qstr", "w") as f:
        f.write("\n".join(output) + "\n")
def process_file(f):
    """Scan preprocessed C output *f* for MP_QSTR_* identifiers and emit one
    .qstr file per originating .c source file (via write_out)."""
    # Matches both gcc line markers (# n "file") and msvc ones (#line n "file").
    line_marker = re.compile(r"#[line]*\s\d+\s\"([^\"]+)\"")
    qstr_pattern = re.compile(r'MP_QSTR_[_a-zA-Z0-9]+')
    collected = []
    current_fname = None
    for line in f:
        if line.isspace():
            continue
        # match gcc-like output (# n "file") and msvc-like output (#line n "file")
        if line.startswith(('# ', '#line')):
            marker = line_marker.match(line)
            assert marker is not None
            fname = marker.group(1)
            if not fname.endswith(".c"):
                continue
            if fname != current_fname:
                # Source file changed: flush what was gathered for the previous one.
                write_out(current_fname, collected)
                collected = []
                current_fname = fname
            continue
        for match in qstr_pattern.findall(line):
            ident = match.replace('MP_QSTR_', '')
            if ident not in QSTRING_BLACK_LIST:
                collected.append('Q(' + ident + ')')
    write_out(current_fname, collected)
    return ""
def cat_together():
    """Concatenate all per-source .qstr files into args.output_file.

    The sorted, concatenated content is hashed (md5); the output file and its
    companion ``.hash`` file are only replaced when the hash changed, so that
    make-style dependency tracking does not see spurious updates.
    """
    import glob
    import hashlib
    hasher = hashlib.md5()
    all_lines = []
    for fname in glob.glob(args.output_dir + "/*.qstr"):
        with open(fname, "rb") as f:
            all_lines += f.readlines()
    all_lines.sort()
    data = b"".join(all_lines)
    with open(args.output_dir + "/out", "wb") as outf:
        outf.write(data)
    hasher.update(data)
    new_hash = hasher.hexdigest()
    old_hash = None
    try:
        with open(args.output_file + ".hash") as f:
            old_hash = f.read()
    except IOError:
        pass
    if old_hash != new_hash:
        try:
            # rename below might fail on some platforms if the file exists
            os.remove(args.output_file)
        except OSError:
            # was a bare 'except:'; only filesystem errors (missing file,
            # permissions) are expected and safe to ignore here
            pass
        if not os.path.isfile(args.output_file):
            print("QSTR updated")
            os.rename(args.output_dir + "/out", args.output_file)
            with open(args.output_file + ".hash", "w") as f:
                f.write(new_hash)
    else:
        print("QSTR not updated")
if __name__ == "__main__":
    # Expected CLI: makeqstrdefs.py command input_filename output_dir output_file
    if len(sys.argv) != 5:
        print('usage: %s command input_filename output_dir output_file' % sys.argv[0])
        sys.exit(2)
    # Minimal argparse-like namespace holding the CLI arguments.
    class Args:
        pass
    args = Args()
    args.command = sys.argv[1]
    args.input_filename = sys.argv[2]
    args.output_dir = sys.argv[3]
    args.output_file = sys.argv[4]
    try:
        os.makedirs(args.output_dir)
    except OSError:
        # Directory already exists.
        pass
    if args.command == "split":
        with open(args.input_filename) as infile:
            process_file(infile)
    if args.command == "cat":
        cat_together()
| swegener/micropython | py/makeqstrdefs.py | Python | mit | 3,315 |
# pylint: disable=missing-docstring
import logging
import requests
from games.models import Game, Genre
from games.util.steam import get_store_info, create_steam_installer
from platforms.models import Platform
from common.util import slugify
LOGGER = logging.getLogger(__name__)
def run():
    """Import Linux-compatible Steam games from the SteamDatabase/SteamLinux
    list into the local Game database, creating installers and genres.

    Skips games that are flagged as problematic, already imported, lack
    store info, are not of type "game", or whose slug already exists.
    """
    response = requests.get(
        "https://raw.githubusercontent.com/SteamDatabase/SteamLinux/master/GAMES.json"
    )
    linux_games = response.json()
    for game_id in linux_games:
        # Any value other than literal True marks a game with known issues.
        if linux_games[game_id] is not True:
            LOGGER.debug(
                "Game %s likely has problems, skipping. "
                "This game should be added manually if appropriate.",
                game_id
            )
            continue
        if Game.objects.filter(steamid=game_id).count():
            # LOGGER.debug("Game %s is already in Lutris", game_id)
            continue
        store_info = get_store_info(game_id)
        if not store_info:
            LOGGER.warning("No store info for game %s", game_id)
            continue
        if store_info["type"] != "game":
            # DLC, demos, videos etc. are not imported.
            LOGGER.warning("%s: %s is not a game (type: %s)",
                           game_id, store_info["name"], store_info["type"])
            continue
        slug = slugify(store_info["name"])
        if Game.objects.filter(slug=slug).count():
            LOGGER.warning("Game %s already in Lutris but does not have a Steam ID", game_id)
            continue
        game = Game.objects.create(
            name=store_info["name"],
            slug=slug,
            steamid=game_id,
            description=store_info["short_description"],
            website=store_info["website"] or "",
            is_public=True,
        )
        game.set_logo_from_steam()
        LOGGER.debug("%s created", game)
        # Native games get a Steam installer; others are tagged as Windows.
        if store_info["platforms"]["linux"]:
            platform = Platform.objects.get(slug='linux')
            LOGGER.info("Creating installer for %s", game)
            create_steam_installer(game)
        else:
            platform = Platform.objects.get(slug='windows')
        game.platforms.add(platform)
        # Mirror Steam genres into local Genre rows, creating them on demand.
        for steam_genre in store_info["genres"]:
            genre, created = Genre.objects.get_or_create(slug=slugify(steam_genre["description"]))
            if created:
                genre.name = steam_genre["description"]
                LOGGER.info("Created genre %s", genre.name)
                genre.save()
            game.genres.add(genre)
        game.save()
| lutris/website | scripts/import_steam_linux_games.py | Python | agpl-3.0 | 2,485 |
#/************************************************************************************************************************
# Copyright (c) 2016, Imagination Technologies Limited and/or its affiliated group companies.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#************************************************************************************************************************/
# Preliminary API to support testing
import ipc
import ipc_lwm2m_client as client
import ipc_lwm2m_server as server
class ServerAPI(object):
    """Thin wrapper over the LWM2M server's UDP IPC channel.

    Connects on construction and disconnects when garbage collected.
    """
    def __init__(self, ipcAddress, ipcPort):
        self._ipc = "udp://" + ipcAddress + ":" + str(ipcPort)
        # Connect
        request = server.ConnectRequest(session_id=None)
        response = ipc.send_request_and_receive_response(self._ipc, request.serialize())
        self._session_id = server.ConnectResponse(response).session_id
        print("Session ID %s" % (self._session_id,))
    def __del__(self):
        # Disconnect
        # NOTE(review): relying on __del__ for cleanup is fragile; an explicit
        # close() or context-manager protocol would be more deterministic.
        if self._session_id is not None:
            request = server.DisconnectRequest(session_id=self._session_id)
            response = ipc.send_request_and_receive_response(self._ipc, request.serialize())
    #print "data " + ipc.receive_datagram(self._ipc)
    #import pdb; pdb.set_trace()
    def GetClientList(self, clientID):
        # Returns the IDs of all clients registered with the server.
        # NOTE(review): the clientID parameter is unused — confirm whether it
        # was intended to filter the returned list.
        request = server.ListClientsRequest(session_id=self._session_id)
        response = ipc.send_request_and_receive_response(self._ipc, request.serialize())
        return server.ListClientsResponse(response).getClientIDs()
class ClientAPI(object):
    # Placeholder for the client-side IPC API, not implemented yet.
    # TODO
    pass
| DavidAntliff/AwaLWM2M | api/python/api.py | Python | bsd-3-clause | 3,068 |
"""
Django channels.
In our ws_connect function, we will simply echo back to the client
what their reply channel address is. The reply channel is the unique address
that gets assigned to every browser client that connects to our
websockets server. This value which can be retrieved from
message.reply_channel.name can be saved or passed on to a different
function such as a Celery task so that they can also send a message back.
"""
import json
import logging
from django.shortcuts import get_object_or_404
from .models import Job, Archive
from .tasks import execute_omex
from urllib.parse import parse_qs
from celery.result import AsyncResult
from django.http import HttpResponse
from channels import Channel, Group
from channels.handler import AsgiHandler
from channels.sessions import channel_session
from channels.auth import channel_session_user, channel_session_user_from_http
from channels.security.websockets import allowed_hosts_only
log = logging.getLogger(__name__)
@allowed_hosts_only
@channel_session_user_from_http
def ws_connect(message):
    """ Connection to websocket. """
    # NOTE(review): this definition is shadowed by the second ws_connect
    # defined further down in this module, which lacks the host/auth
    # decorators — confirm which one is intended to be active.
    # Accept connection and echo the client's unique reply channel address.
    message.reply_channel.send({
        "text": json.dumps({
            "accept": True,
            "action": "reply_channel",
            "reply_channel": message.reply_channel.name,
        })
    })
@channel_session
def ws_connect(message):
    # Accept the connection and tell the client its reply channel name so it
    # can be passed to other parties (e.g. Celery tasks) for direct replies.
    message.reply_channel.send({
        "text": json.dumps({
            "accept": True,
            "action": "reply_channel",
            "reply_channel": message.reply_channel.name,
        })
    })
@channel_session
def ws_receive(message):
    """Dispatch incoming websocket JSON messages to their action handler."""
    try:
        data = json.loads(message['text'])
    except ValueError:
        # Non-JSON payloads are logged and ignored.
        log.debug("ws message isn't json text=%s", message['text'])
        return
    if data:
        reply_channel = message.reply_channel.name
        if data['action'] == "run_archive":
            run_archive(data, reply_channel)
@channel_session_user
def ws_disconnect(message):
    # No per-connection cleanup required at the moment.
    pass
    # Group("chat-%s" % message.user.username[0]).discard(message.reply_channel)
def http_consumer(message):
    """ Example http consumer.
    :param message: ASGI message describing the HTTP request.
    :return: None; the response is sent over the reply channel.
    """
    # Make standard HTTP response - access ASGI path attribute directly
    response = HttpResponse("Hello world! You asked for %s" % message.content['path'])
    # Encode that response into message format (ASGI)
    for chunk in AsgiHandler.encode_response(response):
        message.reply_channel.send(chunk)
def run_archive(data, reply_channel):
    """Start (or restart) the Celery execution of a COMBINE archive and
    report the task id/status back to the websocket client.

    :param data: parsed websocket payload containing 'archive_id'
    :param reply_channel: channel name used to notify the client
    """
    log.debug("job Name=%s", data['archive_id'])
    create_task = False
    # get archive
    archive_id = data['archive_id']
    archive = get_object_or_404(Archive, pk=archive_id)
    if archive.task_id:
        result = AsyncResult(archive.task_id)
        # Create new task and run again.
        # Only re-run when the previous task has finished (either way);
        # a still-running task is left alone.
        if result.status in ["FAILURE", "SUCCESS"]:
            create_task = True
    else:
        # no execution yet
        create_task = True
    if create_task:
        # task will send message when finished
        result = execute_omex.delay(archive_id=archive_id, reply_channel=reply_channel)
        archive.task_id = result.task_id
        archive.save()
    # Tell client task has been started
    Channel(reply_channel).send({
        "text": json.dumps({
            "task_id": archive.task_id,
            "task_status": result.status,
            "archive_id": archive_id,
        })
    })
| matthiaskoenig/tellurium-web | teweb/combine/consumers.py | Python | lgpl-3.0 | 3,438 |
"""Regex validate PIN code.
https://www.codewars.com/kata/55f8a9c06c018a0d6e000132
ATM machines allow 4 or 6 digit PIN codes
and PIN codes cannot contain anything but exactly 4 digits or exactly 6 digits.
If the function is passed a valid PIN string, return true, else return false.
eg:
validate_pin("1234") == True
validate_pin("12345") == False
validate_pin("a234") == False
"""
def vp(pin):
    """Return True if *pin* consists of exactly 4 or 6 ASCII digits (0-9).

    Accepts anything with a string representation.  Signs, whitespace and
    any non-digit character make the PIN invalid.  The previous
    ``int()``-based check wrongly accepted values such as "-123" (sign
    counted as a digit position) or " 1234 " (int() strips whitespace).
    """
    pin = str(pin)
    return len(pin) in (4, 6) and all(c in "0123456789" for c in pin)
| benpetty/Code-Katas | katas/validate_pin/validate_pin.py | Python | mit | 604 |
# Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.protocol import NodeProcessed
from nbxmpp.protocol import JID
from nbxmpp.structs import BookmarkData, StanzaHandler
from nbxmpp.task import iq_request_task
from nbxmpp.errors import MalformedStanzaError
from nbxmpp.modules.base import BaseModule
from nbxmpp.modules.util import raise_if_error
from nbxmpp.modules.util import finalize
from nbxmpp.modules.bookmarks.util import parse_bookmark
from nbxmpp.modules.bookmarks.util import build_conference_node
# PubSub node configuration recommended for the XEP-0402 bookmarks node:
# persistent, whitelist-access, unbounded item count, change notifications.
BOOKMARK_OPTIONS = {
    'pubsub#notify_delete': 'true',
    'pubsub#notify_retract': 'true',
    'pubsub#persist_items': 'true',
    'pubsub#max_items': 'max',
    'pubsub#access_model': 'whitelist',
    'pubsub#send_last_published_item': 'never',
}
class NativeBookmarks(BaseModule):
    """XEP-0402 (PEP Native Bookmarks) client module.

    Parses incoming bookmark PubSub events and offers request/store/retract
    operations on the bookmarks node.
    """
    # Methods proxied from the PubSub module.
    _depends = {
        'retract': 'PubSub',
        'publish': 'PubSub',
        'request_items': 'PubSub',
    }
    def __init__(self, client):
        BaseModule.__init__(self, client)
        self._client = client
        self.handlers = [
            StanzaHandler(name='message',
                          callback=self._process_pubsub_bookmarks,
                          ns=Namespace.PUBSUB_EVENT,
                          priority=16),
        ]
    def _process_pubsub_bookmarks(self, _client, _stanza, properties):
        """Replace the pubsub_event payload with a parsed BookmarkData when
        the event belongs to the bookmarks node; drop malformed items."""
        if not properties.is_pubsub_event:
            return
        if properties.pubsub_event.node != Namespace.BOOKMARKS_1:
            return
        item = properties.pubsub_event.item
        if item is None:
            # Retract, Deleted or Purged
            return
        try:
            bookmark_item = parse_bookmark(item)
        except MalformedStanzaError as error:
            self._log.warning(error)
            self._log.warning(error.stanza)
            # Stop further processing of an unparseable bookmark event.
            raise NodeProcessed
        pubsub_event = properties.pubsub_event._replace(data=bookmark_item)
        self._log.info('Received bookmark item from: %s', properties.jid)
        self._log.info(bookmark_item)
        properties.pubsub_event = pubsub_event
    @iq_request_task
    def request_bookmarks(self):
        """Fetch all bookmark items; yields a list of BookmarkData,
        silently skipping malformed entries."""
        _task = yield
        items = yield self.request_items(Namespace.BOOKMARKS_1)
        raise_if_error(items)
        bookmarks = []
        for item in items:
            try:
                bookmark_item = parse_bookmark(item)
            except MalformedStanzaError as error:
                self._log.warning(error)
                self._log.warning(error.stanza)
                continue
            bookmarks.append(bookmark_item)
        for bookmark in bookmarks:
            self._log.info(bookmark)
        yield bookmarks
    @iq_request_task
    def retract_bookmark(self, bookmark_jid: JID):
        """Remove the bookmark item whose id is the MUC JID."""
        task = yield
        self._log.info('Retract Bookmark: %s', bookmark_jid)
        result = yield self.retract(Namespace.BOOKMARKS_1, str(bookmark_jid))
        yield finalize(task, result)
    @iq_request_task
    def store_bookmarks(self, bookmarks: list[BookmarkData]):
        """Publish each bookmark as its own item (id = MUC JID), forcing the
        recommended node options from BOOKMARK_OPTIONS."""
        _task = yield
        self._log.info('Store Bookmarks')
        for bookmark in bookmarks:
            self.publish(Namespace.BOOKMARKS_1,
                         build_conference_node(bookmark),
                         id_=str(bookmark.jid),
                         options=BOOKMARK_OPTIONS,
                         force_node_options=True)
        yield True
| gajim/python-nbxmpp | nbxmpp/modules/bookmarks/native_bookmarks.py | Python | gpl-3.0 | 4,130 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import pprint, tarfile
# File extension used for Check_MK package files.
pac_ext = ".mkp"
class PackageException(Exception):
    """Raised for all packaging errors; carries a human-readable reason."""
    def __init__(self, reason):
        self.reason = reason
    def __str__(self):
        return self.reason
# Package metadata lives below the OMD site when running inside OMD,
# otherwise below Check_MK's var directory.
if omd_root:
    pac_dir = omd_root + "/var/check_mk/packages/"
else:
    pac_dir = var_dir + "/packages/"
try:
    os.makedirs(pac_dir)
except OSError:
    # Best effort: the directory usually exists already.  (Was a bare
    # 'except:'; only filesystem errors are expected here.)
    pass
# in case of local directories (OMD) use those instead
package_parts = [ (part, title, ldir and ldir or dir) for part, title, dir, ldir in [
  ( "checks",        "Checks",                 checks_dir,          local_checks_dir ),
  ( "notifications", "Notification scripts",   notifications_dir,   local_notifications_dir ),
  ( "checkman",      "Checks' man pages",      check_manpages_dir,  local_check_manpages_dir ),
  ( "agents",        "Agents",                 agents_dir,          local_agents_dir ),
  ( "web",           "Multisite extensions",   web_dir,             local_web_dir ),
  ( "pnp-templates", "PNP4Nagios templates",   pnp_templates_dir,   local_pnp_templates_dir ),
  ( "doc",           "Documentation files",    doc_dir,             local_doc_dir ),
]]
def packaging_usage():
    """Print the help text for the -P/--package command group to stdout."""
    sys.stdout.write("""Usage: check_mk [-v] -P|--package COMMAND [ARGS]
Available commands are:
   create NAME      ...  Collect unpackaged files into new package NAME
   pack NAME        ...  Create package file from installed package
   release NAME     ...  Drop installed package NAME, release packaged files
   find             ...  Find and display unpackaged files
   list             ...  List all installed packages
   list NAME        ...  List files of installed package
   list PACK.mkp    ...  List files of uninstalled package file
   show NAME        ...  Show information about installed package
   show PACK.mkp    ...  Show information about uninstalled package file
   install PACK.mkp ...  Install or update package from file PACK.mkp
   remove NAME      ...  Uninstall package NAME
   -v  enables verbose output
Package files are located in %s.
""" % pac_dir)
def do_packaging(args):
    """Entry point for 'check_mk -P COMMAND [ARGS]'.

    Dispatches to the matching package_* function; exits with status 1 on
    unknown commands or packaging errors.
    """
    if len(args) == 0:
        packaging_usage()
        sys.exit(1)
    command = args[0]
    args = args[1:]
    # Command name -> handler function.
    commands = {
        "create" : package_create,
        "release" : package_release,
        "list" : package_list,
        "find" : package_find,
        "show" : package_info,
        "pack" : package_pack,
        "remove" : package_remove,
        "install" : package_install,
    }
    f = commands.get(command)
    if f:
        try:
            f(args)
        except PackageException, e:
            sys.stderr.write("%s\n" % e)
            sys.exit(1)
    else:
        allc = commands.keys()
        allc.sort()
        allc = [ tty_bold + c + tty_normal for c in allc ]
        sys.stderr.write("Invalid packaging command. Allowed are: %s and %s.\n" %
                (", ".join(allc[:-1]), allc[-1]))
        sys.exit(1)
def package_list(args):
    """List installed packages, or the contents of the named packages.

    With arguments: show each package's files.  Without: print package
    names (plus title and file count in verbose mode).
    """
    if len(args) > 0:
        for name in args:
            show_package_contents(name)
    else:
        if opt_verbose:
            table = []
            for pacname in all_packages():
                package = read_package(pacname)
                table.append((pacname, package["title"], package["num_files"]))
            print_table(["Name", "Title", "Files"], [ tty_bold, "", "" ], table)
        else:
            for pacname in all_packages():
                sys.stdout.write("%s\n" % pacname)
def package_info(args):
    """Show metadata for each named package (installed name or .mkp file)."""
    if len(args) == 0:
        raise PackageException("Usage: check_mk -P show NAME|PACKAGE.mkp")
    for name in args:
        show_package_info(name)
def show_package_contents(name):
    # Convenience wrapper: list only the files of a package.
    show_package(name, False)
def show_package_info(name):
    # Convenience wrapper: show the full metadata of a package.
    show_package(name, True)
def show_package(name, show_info = False):
    """Print a package's metadata (show_info=True) or its file list.

    *name* is either an installed package name or a path to a .mkp file,
    distinguished by the file extension.
    """
    try:
        if name.endswith(pac_ext):
            # Uninstalled package file: read the metadata from the tarball.
            tar = tarfile.open(name, "r:gz")
            info = tar.extractfile("info")
            package = eval(info.read())
        else:
            package = read_package(name)
            if not package:
                raise PackageException("No such package %s." % name)
            if show_info:
                sys.stdout.write("Package file:                  %s%s\n" % (pac_dir, name))
    except PackageException:
        raise
    except Exception, e:
        raise PackageException("Cannot open package %s: %s" % (name, e))
    if show_info:
        sys.stdout.write("Name:                          %s\n" % package["name"])
        sys.stdout.write("Version:                       %s\n" % package["version"])
        sys.stdout.write("Packaged on Check_MK Version:  %s\n" % package["version.packaged"])
        sys.stdout.write("Required Check_MK Version:     %s\n" % package["version.min_required"])
        sys.stdout.write("Title:                         %s\n" % package["title"])
        sys.stdout.write("Author:                        %s\n" % package["author"])
        sys.stdout.write("Download-URL:                  %s\n" % package["download_url"])
        sys.stdout.write("Files:                         %s\n" % \
                " ".join([ "%s(%d)" % (part, len(fs)) for part, fs in package["files"].items() ]))
        sys.stdout.write("Description:\n  %s\n" % package["description"])
    else:
        if opt_verbose:
            sys.stdout.write("Files in package %s:\n" % name)
            for part, title, dir in package_parts:
                files = package["files"].get(part, [])
                if len(files) > 0:
                    sys.stdout.write("  %s%s%s:\n" % (tty_bold, title, tty_normal))
                    for f in files:
                        sys.stdout.write("    %s\n" % f)
        else:
            # Terse mode: print full paths, one per line.
            for part, title, dir in package_parts:
                for fn in package["files"].get(part, []):
                    sys.stdout.write(dir + "/" + fn + "\n")
def package_create(args):
    """Create a new package that claims all currently unpackaged files.

    Writes a metadata file with placeholder title/description/author that
    the user is expected to edit afterwards.
    """
    if len(args) != 1:
        raise PackageException("Usage: check_mk -P create NAME")
    pacname = args[0]
    if read_package(pacname):
        raise PackageException("Package %s already existing." % pacname)
    verbose("Creating new package %s...\n" % pacname)
    filelists = {}
    # Skeleton metadata; 'files' maps each part to its file list.
    package = {
        "title"                : "Title of %s" % pacname,
        "name"                 : pacname,
        "description"          : "Please add a description here",
        "version"              : "1.0",
        "version.packaged"     : check_mk_version,
        "version.min_required" : check_mk_version,
        "author"               : "Add your name here",
        "download_url"         : "http://example.com/%s/" % pacname,
        "files"                : filelists
    }
    num_files = 0
    for part, title, dir in package_parts:
        # Every unpackaged file of every part becomes part of the new package.
        files = unpackaged_files_in_dir(part, dir)
        filelists[part] = files
        num_files += len(files)
        if len(files) > 0 :
            verbose("  %s%s%s:\n" % (tty_bold, title, tty_normal))
            for f in files:
                verbose("    %s\n" % f)
    write_package(pacname, package)
    verbose("New package %s created with %d files.\n" % (pacname, num_files))
    verbose("Please edit package details in %s%s%s\n" % (tty_bold, pac_dir + pacname, tty_normal))
def package_find(_no_args):
    """Print all files that are not owned by any installed package."""
    first = True
    for part, title, dir in package_parts:
        files = unpackaged_files_in_dir(part, dir)
        if len(files) > 0:
            if first:
                verbose("Unpackaged files:\n")
                first = False
            verbose("  %s%s%s:\n" % (tty_bold, title, tty_normal))
            for f in files:
                if opt_verbose:
                    sys.stdout.write("    %s\n" % f)
                else:
                    sys.stdout.write("%s/%s\n" % (dir, f))
    if first:
        verbose("No unpackaged files found.\n")
def package_release(args):
    """Delete a package's metadata while keeping its files on disk.

    The files thereby become "unpackaged" and can be claimed by a new
    package later.
    """
    if len(args) != 1:
        raise PackageException("Usage: check_mk -P release NAME")
    pacname = args[0]
    pacpath = pac_dir + pacname
    if not os.path.exists(pacpath):
        raise PackageException("No such package %s." % pacname)
    package = read_package(pacname)
    os.unlink(pacpath)
    verbose("Releasing files of package %s into freedom...\n" % pacname)
    if opt_verbose:
        for part, title, dir in package_parts:
            filenames = package["files"].get(part, [])
            if len(filenames) > 0:
                verbose("  %s%s%s:\n" % (tty_bold, title, tty_normal))
                for f in filenames:
                    verbose("    %s\n" % f)
def package_pack(args):
    """Create a NAME-VERSION.mkp tarball from an installed package.

    The tarball contains an 'info' metadata file plus one sub-tar per
    package part (checks.tar, web.tar, ...), each packed from that part's
    source directory.
    """
    if len(args) != 1:
        raise PackageException("Usage: check_mk -P pack NAME")
    # Make sure, user is not in data directories of Check_MK
    p = os.path.abspath(os.curdir)
    for dir in [var_dir] + [ dir for x,y,dir in package_parts ]:
        if p == dir or p.startswith(dir + "/"):
            raise PackageException("You are in %s!\n"
                               "Please leave the directories of Check_MK before creating\n"
                               "a packet file. Foreign files lying around here will mix up things." % p)
    pacname = args[0]
    package = read_package(pacname)
    if not package:
        raise PackageException("Package %s not existing or corrupt." % pacname)
    # Record the Check_MK version the package was built with.
    package["version.packaged"] = check_mk_version
    tarfilename = "%s-%s%s" % (pacname, package["version"], pac_ext)
    verbose("Packing %s into %s...\n" % (pacname, tarfilename))
    def create_info(filename, size):
        # Build a TarInfo header for an in-memory member of given size.
        info = tarfile.TarInfo("info")
        info.mtime = time.time()
        info.uid = 0
        info.gid = 0
        info.size = size
        info.mode = 0644
        info.type = tarfile.REGTYPE
        info.name = filename
        return info
    tar = tarfile.open(tarfilename, "w:gz")
    info_file = fake_file(pprint.pformat(package))
    info = create_info("info", info_file.size())
    tar.addfile(info, info_file)
    # Now pack the actual files into sub tars
    for part, title, dir in package_parts:
        filenames = package["files"].get(part, [])
        if len(filenames) > 0:
            verbose("  %s%s%s:\n" % (tty_bold, title, tty_normal))
            for f in filenames:
                verbose("    %s\n" % f)
            subtarname = part + ".tar"
            # Delegate the actual archiving to the external tar command.
            subdata = os.popen("tar cf - --dereference --force-local -C '%s' %s" % (dir, " ".join(filenames))).read()
            info = create_info(subtarname, len(subdata))
            tar.addfile(info, fake_file(subdata))
    tar.close()
    verbose("Successfully created %s\n" % tarfilename)
def package_remove(args):
    """Uninstall a package: delete all of its files and its metadata.

    Errors while deleting individual files are reported but do not abort
    the removal.
    """
    if len(args) != 1:
        raise PackageException("Usage: check_mk -P remove NAME")
    pacname = args[0]
    package = read_package(pacname)
    if not package:
        raise PackageException("No such package %s." % pacname)
    verbose("Removing package %s...\n" % pacname)
    for part, title, dir in package_parts:
        filenames = package["files"].get(part, [])
        if len(filenames) > 0:
            verbose("  %s%s%s\n" % (tty_bold, title, tty_normal))
            for fn in filenames:
                verbose("    %s" % fn)
                try:
                    path = dir + "/" + fn
                    os.remove(path)
                    verbose("\n")
                except Exception, e:
                    sys.stderr.write("cannot remove %s: %s\n" % (path, e))
    os.remove(pac_dir + pacname)
    verbose("Successfully removed package %s.\n" % pacname)
def package_install(args):
    """Install or update a package from a .mkp file.

    Performs a conflict check first (no file may belong to another package
    or already exist unpackaged), then unpacks the listed files, removes
    files that an updated package no longer contains, and finally writes
    the package metadata.
    """
    if len(args) != 1:
        raise PackageException("Usage: check_mk -P remove NAME")
    path = args[0]
    if not os.path.exists(path):
        raise PackageException("No such file %s." % path)
    tar = tarfile.open(path, "r:gz")
    package = eval(tar.extractfile("info").read())
    pacname = package["name"]
    old_package = read_package(pacname)
    if old_package:
        verbose("Updating %s from version %s to %s.\n" % (pacname, old_package["version"], package["version"]))
        update = True
    else:
        verbose("Installing %s version %s.\n" % (pacname, package["version"]))
        update = False
    # Before installing check for conflicts
    keep_files = {}
    for part, title, dir in package_parts:
        packaged = packaged_files_in_dir(part)
        keep = []
        keep_files[part] = keep
        if update:
            old_files = old_package["files"].get(part, [])
        for fn in package["files"].get(part, []):
            path = dir + "/" + fn
            # Files carried over from the old version are allowed to exist.
            if update and fn in old_files:
                keep.append(fn)
            elif fn in packaged:
                raise PackageException("File conflict: %s is part of another package." % path)
            elif os.path.exists(path):
                raise PackageException("File conflict: %s already existing." % path)
    # Now install files, but only unpack files explicitely listed
    for part, title, dir in package_parts:
        filenames = package["files"].get(part, [])
        if len(filenames) > 0:
            verbose("  %s%s%s:\n" % (tty_bold, title, tty_normal))
            for fn in filenames:
                verbose("    %s\n" % fn)
            # make sure target directory exists
            if not os.path.exists(dir):
                verbose("    Creating directory %s\n" % dir)
                os.makedirs(dir)
            # Stream the part's sub-tar through an external tar process.
            tarsource = tar.extractfile(part + ".tar")
            subtar = "tar xf - -C %s %s" % (dir, " ".join(filenames))
            tardest = os.popen(subtar, "w")
            while True:
                data = tarsource.read(4096)
                if not data:
                    break
                tardest.write(data)
    # In case of an update remove files from old_package not present in new one
    if update:
        for part, title, dir in package_parts:
            filenames = old_package["files"].get(part, [])
            keep = keep_files.get(part, [])
            for fn in filenames:
                if fn not in keep:
                    path = dir + "/" + fn
                    verbose("Removing outdated file %s.\n" % path)
                    try:
                        os.remove(path)
                    except Exception, e:
                        sys.stderr.write("Error removing %s: %s\n" % (path, e))
    # Last but not least install package file
    file(pac_dir + pacname, "w").write(pprint.pformat(package))
def files_in_dir(part, dir, prefix = ""):
    """Recursively collect the files below dir for the given package part.

    Returns a sorted list of paths relative to dir (prefix is the relative
    path accumulated during recursion). Hidden files, backup files (``~``)
    and directories belonging to *other* package parts are skipped.
    """
    if not os.path.exists(dir):
        return []
    # Handle case where one part-dir lies below another
    taboo_dirs = [d for p, t, d in package_parts if p != part]
    if dir in taboo_dirs:
        return []
    collected = []
    for entry in os.listdir(dir):
        # Skip dot entries, hidden files and editor backup files.
        if entry in ('.', '..') or entry.startswith('.') or entry.endswith('~'):
            continue
        full_path = dir + "/" + entry
        if os.path.isdir(full_path):
            collected += files_in_dir(part, full_path, prefix + entry + "/")
        else:
            collected.append(prefix + entry)
    collected.sort()
    return collected
def unpackaged_files_in_dir(part, dir):
    """Return the files below dir that are not owned by any installed package.

    Order follows files_in_dir() (sorted relative paths).
    """
    # Use a set for O(1) membership tests instead of scanning the packaged
    # list once per file (was O(n*m)).
    packed = set(packaged_files_in_dir(part))
    return [f for f in files_in_dir(part, dir) if f not in packed]
def packaged_files_in_dir(part):
    """List every file of the given part that belongs to an installed package."""
    owned = []
    for name in all_packages():
        pkg = read_package(name)
        # read_package() returns None for broken/missing package files.
        if pkg:
            owned.extend(pkg["files"].get(part, []))
    return owned
def read_package(pacname):
    """Load the stored package description for pacname.

    Adds a computed 'num_files' entry. Returns None when the package file is
    missing (IOError) or cannot be parsed (a warning is printed to stderr).
    """
    try:
        # Use a context manager so the handle is always closed (the previous
        # code leaked the file object); open() also replaces the Python-2-only
        # file() builtin.
        with open(pac_dir + pacname) as f:
            # NOTE(review): eval of a local, trusted file written by
            # write_package(); kept for backward compatibility.
            package = eval(f.read())
        num_files = sum(len(fl) for fl in package["files"].values())
        package["num_files"] = num_files
        return package
    except IOError:
        return None
    except Exception:
        sys.stderr.write("Ignoring invalid package file '%s%s'. Please remove it from %s!\n" % (pac_dir, pacname, pac_dir))
        return None
def write_package(pacname, package):
    """Persist the package description (pretty-printed) into the package dir."""
    # Context manager guarantees the handle is flushed and closed (the old
    # file(...).write(...) leaked it); open() replaces the py2-only file().
    with open(pac_dir + pacname, "w") as f:
        f.write(pprint.pformat(package) + "\n")
def all_packages():
    """Return the sorted names of all installed packages."""
    # Filter out dot entries defensively, as the original code did.
    return sorted(name for name in os.listdir(pac_dir) if name not in ('.', '..'))
| tomas-edwardsson/check_mk | modules/packaging.py | Python | gpl-2.0 | 17,534 |
from sqf.base_type import ParserType
class Comment(ParserType):
    """A comment token: block (``/* ... */``) or single-line (``// ...``)."""

    def __init__(self, string):
        super().__init__()
        # The raw text must begin with one of the two comment markers.
        assert string.startswith(('/*', '//'))
        self._string = string

    def __str__(self):
        return self._string

    def __repr__(self):
        # Escape newlines so the repr always stays on one line.
        return ('C(%s)' % self).replace('\r\n', r'\r\n').replace('\n', r'\n')
class Space(ParserType):
    """A single space character."""

    def __str__(self):
        return " "

    def __repr__(self):
        return "' '"
class Tab(ParserType):
    """A single tab character."""

    def __str__(self):
        return "\t"

    def __repr__(self):
        return r"\t"
class EndOfLine(ParserType):
    """A newline token; the stored value is Unix (\n) or Windows (\r\n) style."""

    def __init__(self, value):
        super().__init__()
        assert value in ('\n', '\r\n')
        self.value = value

    def __str__(self):
        return self.value

    def __repr__(self):
        return '<EOL>'
class BrokenEndOfLine(ParserType):
    """A line continuation: a backslash immediately followed by a newline."""

    def __str__(self):
        return '\\' + '\n'

    def __repr__(self):
        return r'<\EOL>'
class EndOfFile(ParserType):
    """The end-of-file marker; renders as the empty string."""

    def __str__(self):
        return ''

    def __repr__(self):
        return r'<\EOF>'
class ParserKeyword(ParserType):
    """A fixed token string kept verbatim by the parser."""

    def __init__(self, value):
        super().__init__()
        self.value = value

    def __str__(self):
        return self.value

    def __repr__(self):
        return self.value
| LordGolias/sqf | sqf/parser_types.py | Python | bsd-3-clause | 1,337 |
import weakref
import threading
from django.dispatch import saferef
from django.utils.six.moves import xrange
# Receiver wrapper types that hold the receiver weakly and must be
# dereferenced (called) before dispatch; see Signal._live_receivers.
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
class Signal(object):
    """
    Base class for all signals

    Internal attributes:

        receivers
            { receiverkey (id) : weakref(receiver) }
    """
    def __init__(self, providing_args=None):
        """
        Create a new signal.

        providing_args
            A list of the arguments this signal can pass along in a send() call.
        """
        self.receivers = []
        if providing_args is None:
            providing_args = []
        # set() also drops duplicate argument names.
        self.providing_args = set(providing_args)
        # threading.Lock() is a factory function returning a new primitive
        # lock object; it guards all mutations of self.receivers.
        self.lock = threading.Lock()

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak-referencable (more
                precisely saferef.safeRef() must be able to create a reference
                to the receiver).

                Receivers must be able to accept keyword arguments.

                If receivers have a dispatch_uid attribute, the receiver will
                not be added if another receiver already exists with that
                dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either be
                of type Signal, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references will
                be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance of
                a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from django.conf import settings

        # If DEBUG is on, check that we got a good receiver
        if settings.DEBUG:
            import inspect  # debug-only check: the receiver must be callable
            assert callable(receiver), "Signal receivers must be callable."

            # Check for **kwargs
            # Not all callables are inspectable with getargspec, so we'll
            # try a couple different ways but in the end fall back on assuming
            # it is -- we don't want to prevent registration of valid but weird
            # callables.
            try:
                argspec = inspect.getargspec(receiver)
            except TypeError:
                try:
                    argspec = inspect.getargspec(receiver.__call__)
                except (TypeError, AttributeError):
                    argspec = None
            if argspec:
                # The receiver's signature must include a **kwargs catch-all.
                assert argspec[2] is not None, \
                    "Signal receivers must accept keyword arguments (**kwargs)."

        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        if weak:
            receiver = saferef.safeRef(receiver, onDelete=self._remove_receiver)

        # If this key is already registered do nothing; otherwise append the
        # new (key, receiver) pair.
        with self.lock:
            for r_key, _ in self.receivers:
                if r_key == lookup_key:
                    break
            else:
                self.receivers.append((lookup_key, receiver))

    # connect() appends to self.receivers; disconnect() removes from it.
    def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be none if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect

            weak
                The weakref state to disconnect

            dispatch_uid
                the unique identifier of the receiver to disconnect
        """
        # The lookup key is recomputed exactly as connect() built it.
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        with self.lock:
            for index in xrange(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    del self.receivers[index]
                    break

    def has_listeners(self, sender=None):
        return bool(self._live_receivers(_make_id(sender)))

    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through
        send, terminating the dispatch loop, so it is quite possible to not
        have all receivers called if a receiver raises an error.

        Arguments:

            sender
                The sender of the signal. Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Returns a list of tuple pairs [(receiver, response), ... ].
        """
        responses = []
        if not self.receivers:
            return responses

        for receiver in self._live_receivers(_make_id(sender)):
            response = receiver(signal=self, sender=sender, **named)
            responses.append((receiver, response))
        return responses

    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers, catching errors
        (unlike send()).

        Arguments:

            sender
                The sender of the signal. Can be any python object (normally one
                registered with a connect if you actually want something to
                occur).

            named
                Named arguments which will be passed to receivers. These
                arguments must be a subset of the argument names defined in
                providing_args.

        Return a list of tuple pairs [(receiver, response), ... ]. May raise
        DispatcherKeyError.

        If any receiver raises an error (specifically any subclass of
        Exception), the error instance is returned as the result for that
        receiver.
        """
        responses = []
        if not self.receivers:
            return responses

        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        for receiver in self._live_receivers(_make_id(sender)):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception as err:
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses

    def _live_receivers(self, senderkey):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, then returning only
        live receivers.
        """
        # Weak references are re-resolved on every dispatch so that receivers
        # that have been garbage collected are silently dropped.
        none_senderkey = _make_id(None)
        receivers = []

        for (receiverkey, r_senderkey), receiver in self.receivers:
            if r_senderkey == none_senderkey or r_senderkey == senderkey:
                if isinstance(receiver, WEAKREF_TYPES):
                    # Dereference the weak reference.
                    receiver = receiver()
                    if receiver is not None:
                        receivers.append(receiver)
                else:
                    receivers.append(receiver)
        return receivers

    def _remove_receiver(self, receiver):
        """
        Remove dead receivers from connections.
        """
        with self.lock:
            to_remove = []
            for key, connected_receiver in self.receivers:
                if connected_receiver == receiver:
                    to_remove.append(key)
            for key in to_remove:
                last_idx = len(self.receivers) - 1
                # enumerate in reverse order so that indexes are valid even
                # after we delete some items
                for idx, (r_key, _) in enumerate(reversed(self.receivers)):
                    if r_key == key:
                        del self.receivers[last_idx-idx]
# Decorator for connecting a function to one or more signals.
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...

    """
    def _decorator(func):
        if isinstance(signal, (list, tuple)):  # several signals at once
            for s in signal:
                # kwargs are forwarded to connect(): sender, weak, dispatch_uid
                s.connect(func, **kwargs)
        else:
            signal.connect(func, **kwargs)
        return func
    return _decorator
| Anlim/decode-Django | Django-1.5.1/django/dispatch/dispatcher.py | Python | gpl-2.0 | 10,167 |
from base64 import b64encode
from fastapi import FastAPI, Security
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from fastapi.testclient import TestClient
from requests.auth import HTTPBasicAuth
# Test fixture: a FastAPI app with one endpoint protected by HTTP Basic auth.
app = FastAPI()
security = HTTPBasic(realm="simple", description="HTTPBasic scheme")
@app.get("/users/me")
def read_current_user(credentials: HTTPBasicCredentials = Security(security)):
    # Echo the parsed Basic credentials back so tests can inspect them.
    return {"username": credentials.username, "password": credentials.password}
client = TestClient(app)
# Expected OpenAPI document for the app above; compared verbatim in
# test_openapi_schema.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/users/me": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
                "summary": "Read Current User",
                "operationId": "read_current_user_users_me_get",
                "security": [{"HTTPBasic": []}],
            }
        }
    },
    "components": {
        "securitySchemes": {
            "HTTPBasic": {
                "type": "http",
                "scheme": "basic",
                "description": "HTTPBasic scheme",
            }
        }
    },
}
def test_openapi_schema():
    """The generated OpenAPI document matches the expected schema exactly."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
def test_security_http_basic():
    """Valid Basic credentials are parsed and echoed back by the endpoint."""
    resp = client.get(
        "/users/me", auth=HTTPBasicAuth(username="john", password="secret")
    )
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"username": "john", "password": "secret"}
def test_security_http_basic_no_credentials():
    """Requests without an Authorization header get 401 plus a challenge."""
    resp = client.get("/users/me")
    assert resp.status_code == 401, resp.text
    assert resp.headers["WWW-Authenticate"] == 'Basic realm="simple"'
    assert resp.json() == {"detail": "Not authenticated"}
def test_security_http_basic_invalid_credentials():
    """A malformed (non-base64) token is rejected with 401 and a challenge."""
    headers = {"Authorization": "Basic notabase64token"}
    resp = client.get("/users/me", headers=headers)
    assert resp.status_code == 401, resp.text
    assert resp.headers["WWW-Authenticate"] == 'Basic realm="simple"'
    assert resp.json() == {"detail": "Invalid authentication credentials"}
def test_security_http_basic_non_basic_credentials():
    """A base64 token lacking the 'user:pass' colon separator is rejected."""
    token = b64encode(b"johnsecret").decode("ascii")
    resp = client.get("/users/me", headers={"Authorization": "Basic " + token})
    assert resp.status_code == 401, resp.text
    assert resp.headers["WWW-Authenticate"] == 'Basic realm="simple"'
    assert resp.json() == {"detail": "Invalid authentication credentials"}
| tiangolo/fastapi | tests/test_security_http_basic_realm_description.py | Python | mit | 2,842 |
# -*- coding:utf-8 -*-
import scrapy
from report_crawler.spiders.__Global_function import get_localtime
from report_crawler.spiders.__Global_variable import now_time, end_time
class HNU001_Spider(scrapy.Spider):
    # Spider for report/talk notices of Hunan University's CS&EE college.
    name = 'HNU001'
    start_urls = ['http://csee.hnu.edu.cn/Front/TZXX_List?LMXX_BH=20130728174138ec48068e-48bf-49a6-ac51-27d04a9b1baa']
    domain = 'http://csee.hnu.edu.cn/'
    def parse(self, response):
        # One <li> per notice in the article list.
        messages = response.xpath("//ul[@class='article-list']/li")
        for i, message in enumerate(messages):
            report_name = message.xpath(".//a/text()").extract()[0]
            # The date text is wrapped in brackets; strip them before parsing.
            report_time = get_localtime(message.xpath("span/text()").extract()[0].strip().strip("[]"))
            # Skip items newer than the crawl window; stop at the first item
            # older than it (assumes the list is newest-first -- TODO confirm).
            if report_time > end_time:
                continue
            if report_time < now_time:
                return
            # Hrefs start with a "."; drop it before prepending the domain.
            report_url = self.domain + message.xpath(".//a/@href").extract()[0][1:]
            yield scrapy.Request(report_url, callback=self.parse_pages,
                                 meta={'link': report_url, 'number': i + 1, 'publication': report_time, 'title': report_name})
    def parse_pages(self, response):
        # Detail page: return the raw content node plus metadata for pipelines.
        messages = response.xpath("//div[@class='content-1']")
        return {'text': messages, 'number': response.meta['number'], 'organizer': u"湖南大学大学信息科学与工程学院",
                'faculty': self.name, 'link': response.meta['link'], 'publication': response.meta['publication'],
                'location': u"华中:湖南省-长沙市", 'title': response.meta['title']}
| AnselCmy/ARPS | report_crawler/report_crawler/spiders/spiders_001/_H/HNU001.py | Python | mit | 1,423 |
# source: http://docs.python.org/2.7/library/datetime.html#tzinfo-objects
from datetime import tzinfo, timedelta
ZERO = timedelta(0)        # zero offset (UTC / "no DST")
HOUR = timedelta(hours=1)  # one hour
# A UTC class.
class UTC(tzinfo):
    """Concrete tzinfo for UTC: zero offset and no daylight saving time."""

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        return ZERO

    def tzname(self, dt=None):
        return "UTC"
utc = UTC()  # module-level singleton UTC instance
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC; no daylight saving time."""

    def __init__(self, offset, name):
        # offset: minutes east of UTC; name: display name for tzname().
        self.__offset = timedelta(minutes=offset)
        self.__name = name

    def utcoffset(self, dt):
        return self.__offset

    def dst(self, dt):
        return ZERO

    def tzname(self, dt=None):
        return self.__name
# A class capturing the platform's idea of local time.
import time as _time
STDOFFSET = timedelta(seconds = -_time.timezone)  # local standard-time offset
if _time.daylight:
    DSTOFFSET = timedelta(seconds = -_time.altzone)  # local offset while DST is in effect
else:
    DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET  # extra offset contributed by DST alone
class LocalTimezone(tzinfo):
    """tzinfo derived from the platform's local-time settings (time module)."""
    def utcoffset(self, dt):
        # Full UTC offset, including the DST component when dt is inside DST.
        if self._isdst(dt):
            return DSTOFFSET
        else:
            return STDOFFSET
    def dst(self, dt):
        # Only the DST component of the offset.
        if self._isdst(dt):
            return DSTDIFF
        else:
            return ZERO
    def tzname(self, dt):
        # time.tzname is a (standard, DST) name pair; index by DST-ness.
        return _time.tzname[self._isdst(dt)]
    def _isdst(self, dt):
        # Round-trip dt through mktime/localtime so the C library decides
        # whether DST applies at that local time.
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, 0)
        stamp = _time.mktime(tt)
        tt = _time.localtime(stamp)
        return tt.tm_isdst > 0
Local = LocalTimezone()  # module-level singleton for the local time zone
| smhg/sublime-suncycle | timezone.py | Python | mit | 1,762 |
# This sample demonstrates a common training scenario.
# New models are being trained strarting from the production model (if it exists).
# This sample produces two runs:
# 1. The trainer will train the model from scratch and set as prod after testing it
# 2. Exact same configuration, but the pipeline will discover the existing prod model (published by the 1st run) and warm-start the training from it.
# GCS URI of a directory where the models and the model pointers should be be stored.
model_dir_uri='gs://<bucket>/<path>'
kfp_endpoint=None
import kfp
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
xgboost_train_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml')
xgboost_predict_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
drop_header_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml')
calculate_regression_metrics_from_csv_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml')
download_from_gcs_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/5c7593f18f347f1c03f5ae6778a1ff305abc315c/components/google-cloud/storage/download/component.yaml')
upload_to_gcs_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/google-cloud/storage/upload_to_explicit_uri/component.yaml')
upload_to_gcs_unique_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/google-cloud/storage/upload_to_unique_uri/component.yaml')
def continuous_training_pipeline(
    model_dir_uri,
    training_start_date: str = '2019-02-01',
    training_end_date: str = '2019-03-01',
    testing_start_date: str = '2019-01-01',
    testing_end_date: str = '2019-02-01',
):
    """KFP pipeline: train/evaluate an XGBoost tips model on Chicago Taxi data.

    If a prod-model pointer exists under model_dir_uri, training warm-starts
    from that model; otherwise the model is trained from scratch. Either way,
    the new model is uploaded and promoted to prod only when its test MSE is
    below 2.0.
    """
    # Preparing the training and testing data
    training_data = chicago_taxi_dataset_op(
        where='trip_start_timestamp >= "{}" AND trip_start_timestamp < "{}"'.format(str(training_start_date), str(training_end_date)),
        select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
        limit=10000,
    ).set_display_name('Training data').output
    testing_data = chicago_taxi_dataset_op(
        where='trip_start_timestamp >= "{}" AND trip_start_timestamp < "{}"'.format(str(testing_start_date), str(testing_end_date)),
        select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
        limit=10000,
    ).set_display_name('Testing data').output
    # Preparing the true values for the testing data
    true_values_table = pandas_transform_csv_op(
        table=testing_data,
        transform_code='''df = df[["tips"]]''',
    ).set_display_name('True values').output
    true_values = drop_header_op(true_values_table).output
    # Getting the active prod model
    prod_model_pointer_uri = str(model_dir_uri) + 'prod'
    get_prod_model_uri_task = download_from_gcs_op(
        gcs_path=prod_model_pointer_uri,
        default_data='',
    ).set_display_name('Get prod model')
    # Disabling cache reuse to always get new data
    get_prod_model_uri_task.execution_options.caching_strategy.max_cache_staleness = 'P0D'
    prod_model_uri = get_prod_model_uri_task.output
    # Training new model from scratch (no prod model pointer found)
    with kfp.dsl.Condition(prod_model_uri == ""):
        # Training
        model = xgboost_train_on_csv_op(
            training_data=training_data,
            label_column=0,
            objective='reg:squarederror',
            num_iterations=400,
        ).outputs['model']
        # Predicting
        predictions = xgboost_predict_on_csv_op(
            data=testing_data,
            model=model,
            label_column=0,
        ).output
        # Calculating the regression metrics
        metrics_task = calculate_regression_metrics_from_csv_op(
            true_values=true_values,
            predicted_values=predictions,
        )
        # Checking the metrics
        with kfp.dsl.Condition(metrics_task.outputs['mean_squared_error'] < 2.0):
            # Uploading the model
            model_uri = upload_to_gcs_unique_op(
                data=model,
                gcs_path_prefix=model_dir_uri,
            ).set_display_name('Upload model').output
            # Setting the model as prod
            upload_to_gcs_op(
                data=model_uri,
                gcs_path=prod_model_pointer_uri,
            ).set_display_name('Set prod model')
    # Training new model starting from the prod model (warm start)
    with kfp.dsl.Condition(prod_model_uri != ""):
        # Downloading the model
        prod_model = download_from_gcs_op(prod_model_uri).output
        # Training (fewer iterations, since we start from a trained model)
        model = xgboost_train_on_csv_op(
            training_data=training_data,
            starting_model=prod_model,
            label_column=0,
            objective='reg:squarederror',
            num_iterations=100,
        ).outputs['model']
        # Predicting
        predictions = xgboost_predict_on_csv_op(
            data=testing_data,
            model=model,
            label_column=0,
        ).output
        # Calculating the regression metrics
        metrics_task = calculate_regression_metrics_from_csv_op(
            true_values=true_values,
            predicted_values=predictions,
        )
        # Checking the metrics
        with kfp.dsl.Condition(metrics_task.outputs['mean_squared_error'] < 2.0):
            # Uploading the model
            model_uri = upload_to_gcs_unique_op(
                data=model,
                gcs_path_prefix=model_dir_uri,
            ).set_display_name('Upload model').output
            # Setting the model as prod
            upload_to_gcs_op(
                data=model_uri,
                gcs_path=prod_model_pointer_uri,
            ).set_display_name('Set prod model')
if __name__ == '__main__':
    # Running the first time. The trainer will train the model from scratch and set as prod after testing it
    pipelin_run = kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
        continuous_training_pipeline,
        arguments=dict(
            model_dir_uri=model_dir_uri,
            training_start_date='2019-02-01',
            training_end_date='2019-03-01',
        ),
    )
    # Block until completion so the second run can discover the prod model
    # pointer published by the first run.
    pipelin_run.wait_for_run_completion()
    # Running the second time. The trainer should warm-start the training from the prod model and set the new model as prod after testing it
    kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
        continuous_training_pipeline,
        arguments=dict(
            model_dir_uri=model_dir_uri,
            training_start_date='2019-02-01',
            training_end_date='2019-03-01',
        ),
    )
| GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/production_ml/labs/samples/core/continue_training_from_prod/continue_training_from_prod.py | Python | apache-2.0 | 7,776 |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test gettxoutproof and verifytxoutproof RPCs."""
from test_framework.messages import CMerkleBlock, FromHex, ToHex
from test_framework.test_framework import BitsendTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes
class MerkleBlockTest(BitsendTestFramework):
    """Functional test for the gettxoutproof / verifytxoutproof RPCs."""
    def set_test_params(self):
        self.num_nodes = 4
        self.setup_clean_chain = True
        # Nodes 0/1 are "wallet" nodes, Nodes 2/3 are used for testing
        self.extra_args = [[], [], [], ["-txindex"]]
    def setup_network(self):
        # Manual star topology around node 0 (overrides the default setup).
        self.setup_nodes()
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.sync_all()
    def run_test(self):
        self.log.info("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Create two transactions spending node 0's coinbase outputs.
        node0utxos = self.nodes[0].listunspent(1)
        tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
        txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransactionwithwallet(tx1)["hex"])
        tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
        txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransactionwithwallet(tx2)["hex"])
        # This will raise an exception because the transaction is not yet in a block
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])
        self.nodes[0].generate(1)
        blockhash = self.nodes[0].getblockhash(chain_height + 1)
        self.sync_all()
        # Collect the two non-coinbase txids of the new block, in block order.
        txlist = []
        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
        txlist.append(blocktxn[1])
        txlist.append(blocktxn[2])
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
        # Spend one of the two outputs so exactly one of txid1/txid2 is spent.
        txin_spent = self.nodes[1].listunspent(1).pop()
        tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98})
        txid3 = self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransactionwithwallet(tx3)["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        txid_spent = txin_spent["txid"]
        txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
        # We can't find the block from a fully-spent tx
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent])
        # We can get the proof if we specify the block
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
        # We can't get the proof if we specify a non-existent block
        assert_raises_rpc_error(-5, "Block not found", self.nodes[2].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
        # We can get the proof if the transaction is unspent
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
        # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
        assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist))
        assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1]))), sorted(txlist))
        # We can always get a proof if we have a -txindex
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
        # We can't get a proof if we specify transactions from different blocks
        assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3])
        # Now we'll try tweaking a proof.
        proof = self.nodes[3].gettxoutproof([txid1, txid2])
        assert txid1 in self.nodes[0].verifytxoutproof(proof)
        assert txid2 in self.nodes[1].verifytxoutproof(proof)
        tweaked_proof = FromHex(CMerkleBlock(), proof)
        # Make sure that our serialization/deserialization is working
        assert txid1 in self.nodes[2].verifytxoutproof(ToHex(tweaked_proof))
        # Check to see if we can go up the merkle tree and pass this off as a
        # single-transaction block
        tweaked_proof.txn.nTransactions = 1
        tweaked_proof.txn.vHash = [tweaked_proof.header.hashMerkleRoot]
        tweaked_proof.txn.vBits = [True] + [False]*7
        # No node must accept the forged single-transaction proof.
        for n in self.nodes:
            assert not n.verifytxoutproof(ToHex(tweaked_proof))
        # TODO: try more variants, eg transactions at different depths, and
        # verify that the proofs are invalid
if __name__ == '__main__':
MerkleBlockTest().main()
| LIMXTEC/BitSend | test/functional/rpc_txoutproof.py | Python | mit | 5,465 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, json
import time
from datetime import datetime
from base64 import b64encode
from pprint import pprint
import alfred
alfred.setDefaultEncodingUTF8()
__version__ = '1.4.0'
_base_host = 'http://www.kuaidi100.com/'  # kuaidi100 web endpoint used by all requests
_expire_day = 3600 * 24            # one day, in seconds
_expire_year = _expire_day * 365   # one year, in seconds
_post_cache_name = lambda c, p: '-'.join([c, p])  # cache key: "<company>-<postid>"
_fb_return = lambda ac='': alfred.Item(title='返回', valid=False, autocomplete=ac)  # "back" feedback item
def formatTimestamp(timestamp, format='%Y-%m-%d %H:%M:%S'):
return datetime.fromtimestamp(timestamp).strftime(format)
def fetchURL(url, **kwargs):
    """GET url via alfred.request and return the body, or None on any failure.

    A kuaidi100 referer header is added when the caller did not supply one.
    """
    try:
        # dict.setdefault replaces the deprecated py2-only has_key() check.
        kwargs.setdefault('referer', _base_host)
        res = alfred.request.get(url, **kwargs)
        return res.getContent()
    except Exception:
        # Deliberate best-effort: callers treat None as "no data".
        # (Was `except Exception, e: pass` with an unused binding.)
        return None
# Whether a cache record is outdated. Unlike alfred.cache's own expiry, the
# lifetime here depends on whether the last fetch succeeded.
def isCacheOutdate(cache, success_expire, fail_expire):
    """True when the record is older than its allowed lifetime (or from the future)."""
    elapsed = time.time() - cache.get('last_update', 0)
    allowed = success_expire if cache.get('last_success', False) else fail_expire
    return elapsed < 0 or elapsed > allowed
# Fetch the express-company list, or a single company's info/field.
def getCompany(code=None, key=None):
    cache = alfred.cache.get('company')
    if not cache or isCacheOutdate(cache, _expire_day*7, 3600):
        alfred.config.delete('company') # drop data saved in config by an older version of the workflow
        data = {}
        try:
            res = fetchURL(
                os.path.join(_base_host, 'js/share/company.js'),
                data = {
                    'version' : time.time()
                }
            )
            # The endpoint returns a JS assignment; strip it down to JSON.
            res = res.lstrip('var jsoncom=').rstrip(';').replace("'", '"')
            company = json.loads(res).get('company', [])
            if not company:
                raise Exception('something error.')
            data.update(
                last_success = True,
                company = company
            )
        except Exception, e:
            # On failure fall back to the cached list if present, otherwise
            # to the bundled company.json shipped with the workflow.
            if cache:
                company=cache.get('company')
            else:
                with open(os.path.abspath('./company.json'), 'r') as fp:
                    company=json.load(fp)
            data.update(last_success=False, company=company)
        data.update(last_update=time.time())
        alfred.cache.set('company', data, _expire_year)
    data = alfred.cache.get('company')
    # No code given: return the full company list.
    if not code:
        return data.get('company')
    for company in data.get('company'):
        if company['code'] == code:
            # With key: return just that field; otherwise the whole record.
            return company if key is None else company.get(key, '')
    return ''
def getComponyLogo(code):
    """Return the absolute path of the company's logo PNG, or None if absent."""
    logo_path = os.path.abspath('./logos/{}.png'.format(code))
    if os.path.exists(logo_path):
        return logo_path
    return None
def getStoredPost(com_code, post_id):
    """Return the stored waybill record matching company+number, else None."""
    stored = alfred.config.get('post', [])
    return next(
        (rec for rec in stored
         if rec.get('com_code') == com_code and rec.get('post_id') == post_id),
        None,
    )
# Whether the waybill has been saved by the user.
def isPostStored(com_code, post_id):
    return bool(getStoredPost(com_code, post_id))
# Save a waybill (company code + tracking number) into the workflow config.
def savePost(com_code, post_id):
    """Persist the waybill with an empty remark, unless already stored."""
    if isPostStored(com_code, post_id):
        return
    stored = alfred.config.get('post', [])
    stored.append({
        'com_code': com_code,
        'post_id': post_id,
        'remark': ''
    })
    alfred.config.set(post=stored)
# Delete a stored waybill.
def delPost(com_code, post_id):
    """Remove the waybill from config, drop its cache, purge expired caches."""
    remaining = [
        rec for rec in alfred.config.get('post', [])
        if rec.get('com_code') != com_code or rec.get('post_id') != post_id
    ]
    alfred.config.set(post=remaining)
    # Drop this waybill's cached query result.
    alfred.cache.delete(_post_cache_name(com_code, post_id))
    # Opportunistically clean up any expired cache entries.
    alfred.cache.cleanExpired()
# Remove every waybill that has already been signed for.
def clearCheckedPost():
    """Query each stored waybill and delete the delivered ones."""
    for rec in alfred.config.get('post', []):
        info = querySingle(rec['com_code'], rec['post_id'])
        if info.get('checked', False):
            delPost(rec['com_code'], rec['post_id'])
# Update the remark of a stored waybill.
def changePostRemark(com_code, post_id, remark):
    """Set the waybill's remark; returns False when the waybill is not stored."""
    if not isPostStored(com_code, post_id):
        return False
    stored = alfred.config.get('post', [])
    for rec in stored:
        if rec.get('com_code') == com_code and rec.get('post_id') == post_id:
            # Only keep non-empty string remarks; anything else clears it.
            rec['remark'] = remark if isinstance(remark, basestring) and remark else ''
            break
    alfred.config.set(post=stored)
    return True
# Guess the company code from user input.
# Match priority: exact code > short name > full name.
def surmiseCompanyCode(q):
    """Map user input to a company code; return the input unchanged if unknown."""
    companies = getCompany()
    for field in ('code', 'shortname', 'companyname'):
        for com in companies:
            if com[field].lower() == q.lower():
                return com['code']
    return q
# Guess the express company from the tracking number alone.
def queryCompanyCodeByPostID(post_id):
    try:
        res = fetchURL(
            os.path.join(_base_host, 'autonumber/auto'),
            data = {
                'num' : post_id
            }
        )
        coms = json.loads(res)
        # The endpoint returns candidate companies; keep only their codes.
        return [com['comCode'] for com in coms]
    except Exception, e:
        # Best effort: implicitly return None when the lookup fails.
        pass
# Query the tracking info of one specific waybill.
# 'success': data has ever been fetched successfully;
# 'last_success': the most recent fetch succeeded.
def querySingle(com_code, post_id):
    cache_name = _post_cache_name(com_code, post_id)
    cache = alfred.cache.get(cache_name)
    # Reuse the cache when present and either final (signed-for) or still fresh.
    if cache and ( cache.get('checked', False) or not isCacheOutdate(cache, 60*30, 60) ):
        return cache
    data = {}
    try:
        res = fetchURL(
            os.path.join(_base_host, 'query'),
            data = {
                'type' : com_code,
                'postid' : post_id
            }
        )
        res = json.loads(res)
        # Non-200 status means the query failed; raise to reach the fallback below.
        if res['status'] != '200':
            raise Exception(res['message'])
        com = getCompany(res['com'])
        data.update(
            success = True,
            last_success = True,
            last_message = '',
            post_id = res['nu'],
            checked = True if res['ischeck'] == '1' else False,
            com_code = res['com'],
            com_name = com.get('companyname', '未知'),
            com_shortname = com.get('shortname', '未知'),
            trace = [{'time':t['ftime'], 'content':t['context']} for t in res.get('data', [])]
        )
    except Exception, e:
        # On failure: keep cached data if any, otherwise mark as never
        # succeeded; always record the failure message.
        data.update(cache if cache else {'success':False})
        data.update(last_message=str(e), last_success=False)
    data.update(last_update=time.time())
    # Keep the cache long-term so an earlier successful result survives later
    # failed queries; freshness is decided via last_update. If there has never
    # been a successful query, shorten the lifetime so unwanted numbers
    # (half-typed or wrong ids) are not kept around.
    expire = _expire_day*7 if data.get('success', False) else 60
    alfred.cache.set(cache_name, data, expire)
    return data
# Render the list of supported courier companies.
def showCompanyList():
    """Feed back the companies, optionally filtered by the 2nd CLI argument."""
    key = alfred.argv(2)
    companies = getCompany()
    if key:
        key = key.lower()
        # substring match against the full name or the code
        companies = [c for c in companies
                     if key in c['companyname'].lower() or key in c['code']]
    if not companies:
        alfred.exitWithFeedback(title='没有找到相关的内容')
    feedback = alfred.Feedback()
    for company in companies:
        feedback.addItem(
            title=company['companyname'],
            subtitle='代码: {} 电话: {} 官方网站: {}'.format(company['code'], company['tel'], company['comurl']),
            icon=getComponyLogo(company['code']),
            arg='open-url {}'.format(b64encode(company['comurl']))
        )
    feedback.output()
# List the stored waybills (newest first).
def showSaved():
    """Feed back every saved waybill with its latest tracking status."""
    post = alfred.config.get('post')
    feedback = alfred.Feedback()
    has_checked = False
    if post:
        # newest entries were appended last, so walk the list in reverse
        for p in post[::-1]:
            q = querySingle(p['com_code'], p['post_id'])
            if q.get('checked', False):
                has_checked = True
            item = {}
            item.update(
                title = '{} {} {}'.format(getCompany(p['com_code'], 'companyname'), p['post_id'], p.get('remark', '')),
                icon = getComponyLogo(p['com_code']),
                valid = False,
                autocomplete = '{} {}'.format(p['com_code'], p['post_id'])
            )
            if q.get('success'):
                # latest trace entry comes first in the trace list
                item['subtitle'] = '{time} {content}'.format(**q['trace'][0])
            else:
                item['subtitle'] = '{} 暂时没有记录,运单号不存在、未记录或已经过期。'.format(formatTimestamp(q['last_update']))
            feedback.addItem(**item)
        # offer a bulk-clean action when at least one waybill is delivered
        if has_checked:
            feedback.addItem(
                title = '清除所有已签收的运单?',
                arg = 'clear-checked-post'
            )
    else:
        # nothing saved yet: show usage help
        feedback.addItem(
            title = '国内快递查询',
            subtitle = '直接使用运单号,如:904080779005,快递公司+运单号,如:顺风 904080779005',
            valid = False
        )
    feedback.output()
def showSingle(com_code, post_id):
    """Feed back the full tracking detail of one waybill.

    Shows an error item on failure; on success lists the trace entries
    (numbered, newest first) plus remark/save/delete actions.
    """
    data = querySingle(com_code, post_id)
    post_info = '{} {}'.format(getCompany(com_code, 'companyname'), post_id)
    feedback = alfred.Feedback()
    if not data.get('success'):
        feedback.addItem(
            title = '查询失败: {}'.format(post_info),
            subtitle = '{} {}'.format(formatTimestamp(data['last_update']), data.get('last_message', '')),
            icon = os.path.abspath('./icon-error.png'),
            valid = False
        )
    else:
        # Query succeeded: auto-save the waybill for future tracking.
        savePost(com_code, post_id)
        post = getStoredPost(com_code, post_id)
        remark = post.get('remark', '')
        # warn when showing stale data from a previous successful query
        last_update_info = '' if data.get('last_success') else '本次查询失败,下面显示的是先前的查询结果。'
        update_info = '已签收 ' if data['checked'] else last_update_info
        feedback.addItem(
            title = '{} {}'.format(post_info, remark),
            subtitle = '最后查询: {} {}'.format(
                formatTimestamp(data['last_update']),
                update_info
            ),
            icon = getComponyLogo(com_code),
            valid = False
        )
        # number the trace entries counting down (newest entry first)
        count = len(data['trace'])
        for t in data['trace']:
            feedback.addItem(
                title = '{:02d}. {}'.format(count, t['content']),
                subtitle = t['time'],
                valid = False
            )
            count = count - 1
    # whether the waybill is stored
    stored = isPostStored(com_code, post_id)
    # stored waybills can get a remark added or changed
    if stored:
        post = getStoredPost(com_code, post_id)
        remark = post.get('remark')
        remark = '当前备注: {}'.format(remark) if remark else '运单当前尚无备注信息。'
        feedback.addItem(
            title = '设置或修改备注',
            subtitle = remark,
            icon = os.path.abspath('./icon-info.png'),
            valid = False,
            autocomplete = 'remark-{}-{} '.format(com_code, post_id)
        )
    feedback.addItem(
        title = '该运单已被保存,删除运单记录?' if stored else '依然保存运单记录?',
        subtitle = '保存的运单可方便后续跟踪,查询成功的运单将被自动保存。',
        icon = os.path.abspath('./icon-del.png' if stored else './icon-save.png'),
        arg = '{} {} {}'.format('del-post' if stored else 'save-post', com_code, post_id)
    )
    feedback.addItem(item=_fb_return())
    feedback.output()
def showRecommendCompany(recommend_com_codes, post_id):
    """Feed back the probable couriers for post_id, then all other couriers."""
    feedback = alfred.Feedback()
    recommended = []
    for code in recommend_com_codes:
        company = getCompany(code)
        if company:
            recommended.append(company)
    # list the matched couriers first, if any were recognised
    if recommended:
        feedback.addItem(
            title = '根据运单号找到下列可能的快递公司,请选择:',
            icon = os.path.abspath('./icon-info.png'),
            valid = False
        )
        for company in recommended:
            feedback.addItem(
                title = company['companyname'],
                subtitle = company['freginfo'],
                valid = False,
                autocomplete = '{} {}'.format(company['code'], post_id)
            )
    # then every remaining courier as a fallback choice
    feedback.addItem(
        title = '{},请在下列中选择:'.format('如果不存在于上述快递公司之中' if recommended else '根据运单号没有找到符合的快递公司'),
        icon = os.path.abspath('./icon-info.png'),
        valid = False
    )
    for company in getCompany():
        if company in recommended:
            continue
        feedback.addItem(
            title = company['companyname'],
            subtitle = company['freginfo'],
            valid = False,
            autocomplete = '{} {}'.format(company['code'], post_id)
        )
    feedback.output()
# Automatic query based on the CLI arguments:
# 1. a single argument is treated as the tracking number
# 2. with two arguments, the first is a courier (name, short name or code)
#    and the second is the tracking number
def query():
    """Resolve the arguments to candidate couriers and show the waybill."""
    first = alfred.argv(1)
    second = alfred.argv(2)
    post_id = second if second else first
    company_arg = first if second else None
    if company_arg is not None:
        # courier given explicitly: normalise it to a company code
        com_codes = [surmiseCompanyCode(company_arg)]
    else:
        # tracking number only: auto-detect the candidate couriers
        com_codes = queryCompanyCodeByPostID(post_id)
    if not com_codes:
        alfred.exitWithFeedback(title='没有找到相关的快递信息')
    if len(com_codes) == 1:
        # exactly one courier matches: show the tracking details directly
        showSingle(com_codes[0], post_id)
    else:
        # several candidates: let the user pick the courier first
        showRecommendCompany(com_codes, post_id)
# Edit the remark of a stored waybill.
def showRemarkSetting():
    """Feed back a confirmation item for setting/changing a waybill remark.

    The first argument is expected to look like 'remark-<com_code>-<post_id>';
    the remaining arguments form the new remark text.
    """
    try:
        _, com_code, post_id = alfred.argv(1).split('-')
    except Exception, e:
        # malformed argument -- exitWithFeedback presumably terminates here;
        # NOTE(review): confirm it does not fall through to the code below
        alfred.exitWithFeedback(title='参数格式错误。', subtitle='选择返回', valid=False, autocomplete='')
    post = getStoredPost(com_code, post_id)
    if not post:
        # only stored waybills can carry a remark
        alfred.exitWithFeedback(
            title = '该运单未被保存',
            subtitle = '仅可对已保存的运单设置备注,选择查询并尝试保存运单',
            icon = os.path.abspath('./icon-error.png'),
            valid = False,
            autocomplete = '{} {}'.format(com_code, post_id)
        )
    # everything after the 'remark-...' token becomes the new remark
    remark = ' '.join(sys.argv[2:])
    remark_stored = post.get('remark') if post.get('remark') else '-'
    feedback = alfred.Feedback()
    feedback.addItem(
        title = '为运单【{} {}】设置或修改备注'.format(getCompany(com_code, 'companyname'), post_id),
        subtitle = '旧备注: {} 新备注: {}'.format(remark_stored, remark),
        arg = 'remark-setting {} {} {}'.format(com_code, post_id, remark)
    )
    feedback.addItem(item=_fb_return('{} {}'.format(com_code, post_id)))
    feedback.output()
def main():
    """Dispatch the first CLI argument to the matching sub-command."""
    cmd = alfred.argv(1)
    # no argument: list the saved waybills
    if not cmd:
        return showSaved()
    cmd = cmd.strip().lower()
    # any prefix of 'company' lists the courier companies
    if 'company'.startswith(cmd):
        showCompanyList()
    # values starting with 'remark' edit a waybill remark
    elif cmd.startswith('remark'):
        showRemarkSetting()
    else:
        query()
if __name__ == '__main__':
main() | vaginessa/alfred-workflows | src/express/express.py | Python | mit | 16,040 |
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Package metadata and setuptools registration for azure-cli-network.
from codecs import open
from setuptools import setup
# The Azure wheel helper is optional; fall back to plain setuptools when
# it is not installed.
try:
    from azure_bdist_wheel import cmdclass
except ImportError:
    from distutils import log as logger
    logger.warn("Wheel is not available, disabling bdist_wheel hook")
    cmdclass = {}
# Package version; the '+dev' suffix marks an unreleased build.
VERSION = "2.0.14+dev"
# Trove classifiers describing supported interpreters and the license.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'License :: OSI Approved :: MIT License',
]
# Runtime dependencies (pinned Azure SDK packages plus the CLI core).
DEPENDENCIES = [
    'azure-mgmt-network==1.4.0',
    'azure-mgmt-trafficmanager==0.30.0',
    'azure-mgmt-dns==1.0.1',
    'azure-mgmt-resource==1.1.0',
    'azure-cli-core',
    'mock'
]
# The long description shown on PyPI is README + changelog.
with open('README.rst', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()
setup(
    name='azure-cli-network',
    version=VERSION,
    description='Microsoft Azure Command-Line Tools Network Command Module',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli',
    classifiers=CLASSIFIERS,
    packages=[
        'azure',
        'azure.cli',
        'azure.cli.command_modules',
        'azure.cli.command_modules.network',
        'azure.cli.command_modules.network.zone_file'
    ],
    install_requires=DEPENDENCIES,
    cmdclass=cmdclass
)
| QingChenmsft/azure-cli | src/command_modules/azure-cli-network/setup.py | Python | mit | 2,104 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from settings import LDAP_SERVER, URL_PREFIX
from superdesk.tests import TestCase
from superdesk import get_resource_service
from .commands import ImportUserProfileFromADCommand
class ImportUsersTestCase(TestCase):
    """Integration test for importing a user profile from Active Directory."""

    def test_create_user_command(self):
        """Importing the same AD user twice authenticates and keeps one id.

        Implicitly skipped when no LDAP server is configured.
        """
        if LDAP_SERVER:
            user = {'username': 'sduser1', 'password': 'Password.01', 'user_to_import': 'sduser1'}
            cmd = ImportUserProfileFromADCommand()
            with self.app.test_request_context(URL_PREFIX):
                cmd.run(user['username'], user['password'], user['user_to_import'])
                auth_user = get_resource_service('auth').authenticate(user)
                # assertEqual replaces the deprecated assertEquals alias
                self.assertEqual(auth_user['username'], user['username'])
                # running the import again must be idempotent: same record
                cmd.run(user['username'], user['password'], user['user_to_import'])
                auth_user2 = get_resource_service('auth').authenticate(user)
                self.assertEqual(auth_user2['username'], user['username'])
                self.assertEqual(auth_user2['_id'], auth_user['_id'])
| vied12/superdesk | server/apps/auth/ldap/tests.py | Python | agpl-3.0 | 1,346 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from unittest import TestCase, main
from pitchpx.mlbam_util import MlbamUtil, MlbAmHttpNotFound
__author__ = 'Shinichi Nakagawa'
class TestMlbamUtil(TestCase):
    """MLBAM util tests.

    NOTE: these are live-network integration tests against gd2.mlb.com;
    they require connectivity and the remote data to still exist.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_get_content_200(self):
        """Fetch an existing game.xml (status 200) with the default headers."""
        req = MlbamUtil._get_content(
            'http://gd2.mlb.com/components/game/mlb/year_2016/month_04/day_06/gid_2016_04_06_lanmlb_sdnmlb_1/game.xml'
        )
        self.assertEqual(req.status_code, 200)
        self.assertEqual(req.request.headers['Accept'],
                         'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
        self.assertEqual(req.request.headers['User-Agent'],
                         ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36'))

    def test_get_content_200_setting_header(self):
        """Fetch an existing game.xml (status 200) with custom headers."""
        req = MlbamUtil._get_content(
            'http://gd2.mlb.com/components/game/mlb/year_2016/month_04/day_06/gid_2016_04_06_lanmlb_sdnmlb_1/game.xml',
            headers={'Accept': 'text/html', 'User-Agent': 'Python-urllib/3.5'}
        )
        self.assertEqual(req.status_code, 200)
        self.assertEqual(req.request.headers['Accept'], 'text/html')
        self.assertEqual(req.request.headers['User-Agent'], 'Python-urllib/3.5')

    def test_get_content_404_setting_header(self):
        """Fetch a missing game.xml (status 404) with custom headers."""
        req = MlbamUtil._get_content(
            'http://gd2.mlb.com/components/game/mlb/year_2016/month_04/day_06/gid_2016_04_06_chnmlb_anamlb_1/game.xml',
            headers={'Accept': 'text/html', 'User-Agent': 'Python-urllib/3.5'}
        )
        self.assertEqual(req.status_code, 404)
        self.assertEqual(req.request.headers['Accept'], 'text/html')
        self.assertEqual(req.request.headers['User-Agent'], 'Python-urllib/3.5')

    def test_find_xml_200(self):
        """find_xml returns a parsed document for an existing game.xml."""
        req = MlbamUtil.find_xml(
            'http://gd2.mlb.com/components/game/mlb/year_2016/month_04/day_06/gid_2016_04_06_lanmlb_sdnmlb_1/game.xml',
            'lxml',
        )
        self.assertIsNotNone(req)

    def test_find_xml_404(self):
        """find_xml raises MlbAmHttpNotFound for a missing game.xml.

        Fixed: the original try/except silently PASSED when no exception
        was raised; assertRaises makes that case a test failure.
        """
        url = ('http://gd2.mlb.com/components/game/mlb/year_2016/month_04/day_06/'
               'gid_2016_04_06_chnmlb_anamlb_1/game.xml')
        with self.assertRaises(MlbAmHttpNotFound) as ctx:
            MlbamUtil.find_xml(url, 'lxml')
        self.assertEqual(
            ctx.exception.msg,
            'HTTP Error url: {url} status: 404'.format(url=url)
        )
if __name__ == '__main__':
main() | Shinichi-Nakagawa/pitchpx | tests/pitchpx/test_mlbam_util.py | Python | mit | 3,194 |
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from .utils import get_beers
from django.shortcuts import render
from .scripts.trainClassifier import LRBeerClassifier
import simplejson as json
# Create your views here.
def index(request):
    """Render the landing page with the full beer catalogue."""
    return render(request, 'kettle/index.html', {'beers': get_beers()})
def beer_list(request):
    """Return the beer catalogue as JSON."""
    return JsonResponse({"result": get_beers()})
def crunch(request):
    """Train a classifier on the user's liked/disliked beer ids and
    return all remaining beers ranked by predicted preference.

    Expects a JSON body with 'like_ids' and 'dislike_ids' lists;
    raises KeyError/JSONDecodeError on malformed input (unchanged).
    """
    # fixed: removed the leftover debug print(request.body)
    payload = json.loads(request.body)
    like_ids = payload['like_ids']
    dislike_ids = payload['dislike_ids']
    classifier = LRBeerClassifier()
    classifier.train(like_ids, dislike_ids)
    # score only the beers the user has not rated yet; a set makes the
    # membership test O(1) instead of O(n) per beer
    rated = set(like_ids + dislike_ids)
    other_beers = [b for b in get_beers() if b['id'] not in rated]
    results = [(beer, classifier.classify(beer['id'])) for beer in other_beers]
    # highest predicted score first
    return JsonResponse({"result": sorted(results, key=lambda x: x[1], reverse=True)})
| hacktobacillus/fermenter | kettle/views.py | Python | mit | 987 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Wil Clouser <clouserw@micropipes.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Intercepts emoji characters and converts them back to their alias. Useful if
# you're on a terminal or something that doesn't support emoji.
#
# History:
#
# 2019-07-06, Sébastien Helleu <flashcode@flashtux.org>:
# v0.2: Make script compatible with Python 3, fix PEP8 errors.
#
# 2016-03-15, Wil Clouser <clouserw@micropipes.com>:
# v0.1: Initial release, based on Mike Reinhardt's BSD
# licensed emoji_aliases.py
from __future__ import print_function
import re
import sys
SCRIPT_NAME = "emoji2alias"
SCRIPT_AUTHOR = "Wil Clouser <clouserw@micropipes.com>"
SCRIPT_VERSION = "0.2"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Replaces emoji characters with their aliases"
# Attempt the weechat import; when run outside of weechat the module is
# absent, so record the failure and warn instead of crashing at import time.
import_ok = True
try:
    import weechat as w
except ImportError:
    print("Script must be run under weechat: https://weechat.org")
    import_ok = False
EMOJI_ALIASES = {
u'\U0001f44d': u':+1:',
u'\U0001f44e': u':-1:',
u'\U0001f4af': u':100:',
u'\U0001f522': u':1234:',
u'\U0001f3b1': u':8ball:',
u'\U0001f170': u':a:',
u'\U0001f18e': u':ab:',
u'\U0001f524': u':abc:',
u'\U0001f521': u':abcd:',
u'\U0001f251': u':accept:',
u'\U0001f39f': u':admission_tickets:',
u'\U0001f6a1': u':aerial_tramway:',
u'\U00002708': u':airplane:',
u'\U0001f6ec': u':airplane_arriving:',
u'\U0001f6eb': u':airplane_departure:',
u'\U000023f0': u':alarm_clock:',
u'\U00002697': u':alembic:',
u'\U0001f47d': u':alien:',
u'\U0001f47e': u':alien_monster:',
u'\U0001f691': u':ambulance:',
u'\U0001f3c8': u':american_football:',
u'\U0001f3fa': u':amphora:',
u'\U00002693': u':anchor:',
u'\U0001f47c': u':angel:',
u'\U0001f4a2': u':anger:',
u'\U0001f620': u':angry:',
u'\U0001f627': u':anguished:',
u'\U0001f41c': u':ant:',
u'\U0001f4f6': u':antenna_with_bars:',
u'\U0001f34e': u':apple:',
u'\U00002652': u':aquarius:',
u'\U00002648': u':aries:',
u'\U000025c0': u':arrow_backward:',
u'\U000023ec': u':arrow_double_down:',
u'\U000023eb': u':arrow_double_up:',
u'\U00002b07': u':arrow_down:',
u'\U0001f53d': u':arrow_down_small:',
u'\U000025b6': u':arrow_forward:',
u'\U00002935': u':arrow_heading_down:',
u'\U00002934': u':arrow_heading_up:',
u'\U00002b05': u':arrow_left:',
u'\U00002199': u':arrow_lower_left:',
u'\U00002198': u':arrow_lower_right:',
u'\U000027a1': u':arrow_right:',
u'\U000021aa': u':arrow_right_hook:',
u'\U00002b06': u':arrow_up:',
u'\U00002195': u':arrow_up_down:',
u'\U0001f53c': u':arrow_up_small:',
u'\U00002196': u':arrow_upper_left:',
u'\U00002197': u':arrow_upper_right:',
u'\U0001f503': u':arrows_clockwise:',
u'\U0001f504': u':arrows_counterclockwise:',
u'\U0001f3a8': u':art:',
u'\U0001f69b': u':articulated_lorry:',
u'\U0001f632': u':astonished:',
u'\U0001f45f': u':athletic_shoe:',
u'\U0001f3e7': u':atm:',
u'\U0000269b': u':atom_symbol:',
u'\U0001f346': u':aubergine:',
u'\U0001f697': u':automobile:',
u'\U0001f171': u':b:',
u'\U0001f476': u':baby:',
u'\U0001f37c': u':baby_bottle:',
u'\U0001f424': u':baby_chick:',
u'\U0001f6bc': u':baby_symbol:',
u'\U0001f519': u':back:',
u'\U0001f42b': u':bactrian_camel:',
u'\U0001f3f8': u':badminton_racquet_and_shuttlecock:',
u'\U0001f6c4': u':baggage_claim:',
u'\U0001f388': u':balloon:',
u'\U0001f5f3': u':ballot_box_with_ballot:',
u'\U00002611': u':ballot_box_with_check:',
u'\U0001f38d': u':bamboo:',
u'\U0001f34c': u':banana:',
u'\U0000203c': u':bangbang:',
u'\U0001f3e6': u':bank:',
u'\U0001f4b5': u':banknote_with_dollar_sign:',
u'\U0001f4b6': u':banknote_with_euro_sign:',
u'\U0001f4b7': u':banknote_with_pound_sign:',
u'\U0001f4b4': u':banknote_with_yen_sign:',
u'\U0001f4ca': u':bar_chart:',
u'\U0001f488': u':barber:',
u'\U0001f325': u':barely_sunny:',
u'\U000026be': u':baseball:',
u'\U0001f3c0': u':basketball:',
u'\U0001f6c0': u':bath:',
u'\U0001f6c1': u':bathtub:',
u'\U0001f50b': u':battery:',
u'\U0001f3d6': u':beach_with_umbrella:',
u'\U0001f43b': u':bear:',
u'\U0001f493': u':beating_heart:',
u'\U0001f6cf': u':bed:',
u'\U0001f41d': u':bee:',
u'\U0001f37a': u':beer:',
u'\U0001f37b': u':beers:',
u'\U0001f41e': u':beetle:',
u'\U0001f530': u':beginner:',
u'\U0001f514': u':bell:',
u'\U0001f515': u':bell_with_cancellation_stroke:',
u'\U0001f6ce': u':bellhop_bell:',
u'\U0001f371': u':bento:',
u'\U0001f6b2': u':bicycle:',
u'\U0001f6b4': u':bicyclist:',
u'\U0001f459': u':bikini:',
u'\U00002623': u':biohazard_sign:',
u'\U0001f426': u':bird:',
u'\U0001f382': u':birthday:',
u'\U000026ab': u':black_circle:',
u'\U000023fa': u':black_circle_for_record:',
u'\U00002663': u':black_club_suit:',
u'\U00002666': u':black_diamond_suit:',
u'\U00002665': u':black_heart_suit:',
u'\U0001f0cf': u':black_joker:',
u'\U00002b1b': u':black_large_square:',
u'\U000023ea': u':black_left-pointing_double_triangle:',
u'\U000025fe': u':black_medium_small_square:',
u'\U000025fc': u':black_medium_square:',
u'\U00002712': u':black_nib:',
u'\U00002753': u':black_question_mark_ornament:',
u'\U000023e9': u':black_right-pointing_double_triangle:',
u'\U00002702': u':black_scissors:',
u'\U000025aa': u':black_small_square:',
u'\U00002660': u':black_spade_suit:',
u'\U0001f532': u':black_square_button:',
u'\U000023f9': u':black_square_for_stop:',
u'\U00002600': u':black_sun_with_rays:',
u'\U0000260e': u':black_telephone:',
u'\U0000267b': u':black_universal_recycling_symbol:',
u'\U0001f33c': u':blossom:',
u'\U0001f421': u':blowfish:',
u'\U0001f4d8': u':blue_book:',
u'\U0001f699': u':blue_car:',
u'\U0001f499': u':blue_heart:',
u'\U0001f60a': u':blush:',
u'\U0001f417': u':boar:',
u'\U000026f5': u':boat:',
u'\U0001f4a3': u':bomb:',
u'\U0001f4d6': u':book:',
u'\U0001f516': u':bookmark:',
u'\U0001f4d1': u':bookmark_tabs:',
u'\U0001f4da': u':books:',
u'\U0001f4a5': u':boom:',
u'\U0001f462': u':boot:',
u'\U0001f37e': u':bottle_with_popping_cork:',
u'\U0001f490': u':bouquet:',
u'\U0001f647': u':bow:',
u'\U0001f3f9': u':bow_and_arrow:',
u'\U0001f3b3': u':bowling:',
u'\U0001f466': u':boy:',
u'\U0001f35e': u':bread:',
u'\U0001f470': u':bride_with_veil:',
u'\U0001f309': u':bridge_at_night:',
u'\U0001f4bc': u':briefcase:',
u'\U0001f494': u':broken_heart:',
u'\U0001f41b': u':bug:',
u'\U0001f3d7': u':building_construction:',
u'\U0001f4a1': u':bulb:',
u'\U0001f685': u':bullettrain_front:',
u'\U0001f684': u':bullettrain_side:',
u'\U0001f32f': u':burrito:',
u'\U0001f68c': u':bus:',
u'\U0001f68f': u':bus_stop:',
u'\U0001f464': u':bust_in_silhouette:',
u'\U0001f465': u':busts_in_silhouette:',
u'\U0001f335': u':cactus:',
u'\U0001f370': u':cake:',
u'\U0001f4c5': u':calendar:',
u'\U0001f4c6': u':calendar:',
u'\U0001f4f2': u':calling:',
u'\U0001f4f7': u':camera:',
u'\U0001f4f8': u':camera_with_flash:',
u'\U0001f3d5': u':camping:',
u'\U0000264b': u':cancer:',
u'\U0001f56f': u':candle:',
u'\U0001f36c': u':candy:',
u'\U0001f520': u':capital_abcd:',
u'\U00002651': u':capricorn:',
u'\U0001f5c3': u':card_file_box:',
u'\U0001f4c7': u':card_index:',
u'\U0001f5c2': u':card_index_dividers:',
u'\U0001f3a0': u':carousel_horse:',
u'\U0001f38f': u':carp_streamer:',
u'\U0001f408': u':cat2:',
u'\U0001f431': u':cat:',
u'\U0001f639': u':cat_face_with_tears_of_joy:',
u'\U0001f63c': u':cat_face_with_wry_smile:',
u'\U0001f4bf': u':cd:',
u'\U000026d3': u':chains:',
u'\U0001f4b9': u':chart:',
u'\U0001f4c9': u':chart_with_downwards_trend:',
u'\U0001f4c8': u':chart_with_upwards_trend:',
u'\U0001f3c1': u':checkered_flag:',
u'\U0001f4e3': u':cheering_megaphone:',
u'\U0001f9c0': u':cheese_wedge:',
u'\U0001f352': u':cherries:',
u'\U0001f338': u':cherry_blossom:',
u'\U0001f330': u':chestnut:',
u'\U0001f414': u':chicken:',
u'\U0001f6b8': u':children_crossing:',
u'\U0001f43f': u':chipmunk:',
u'\U0001f36b': u':chocolate_bar:',
u'\U0001f384': u':christmas_tree:',
u'\U000026ea': u':church:',
u'\U0001f3a6': u':cinema:',
u'\U0001f250': u':circled_ideograph_advantage:',
u'\U00003297': u':circled_ideograph_congratulation:',
u'\U00003299': u':circled_ideograph_secret:',
u'\U000024c2': u':circled_latin_capital_letter_m:',
u'\U0001f3aa': u':circus_tent:',
u'\U0001f307': u':city_sunrise:',
u'\U0001f306': u':city_sunset:',
u'\U0001f3d9': u':cityscape:',
u'\U0001f191': u':cl:',
u'\U0001f44f': u':clap:',
u'\U0001f3ac': u':clapper:',
u'\U0001f3db': u':classical_building:',
u'\U0001f4cb': u':clipboard:',
u'\U0001f565': u':clock1030:',
u'\U0001f559': u':clock10:',
u'\U0001f566': u':clock1130:',
u'\U0001f55a': u':clock11:',
u'\U0001f567': u':clock1230:',
u'\U0001f55b': u':clock12:',
u'\U0001f55c': u':clock130:',
u'\U0001f550': u':clock1:',
u'\U0001f55d': u':clock230:',
u'\U0001f551': u':clock2:',
u'\U0001f55e': u':clock330:',
u'\U0001f552': u':clock3:',
u'\U0001f55f': u':clock430:',
u'\U0001f553': u':clock4:',
u'\U0001f560': u':clock530:',
u'\U0001f554': u':clock5:',
u'\U0001f561': u':clock630:',
u'\U0001f555': u':clock6:',
u'\U0001f562': u':clock730:',
u'\U0001f556': u':clock7:',
u'\U0001f563': u':clock830:',
u'\U0001f557': u':clock8:',
u'\U0001f564': u':clock930:',
u'\U0001f558': u':clock9:',
u'\U0001f4d5': u':closed_book:',
u'\U0001f510': u':closed_lock_with_key:',
u'\U0001f4ea': u':closed_mailbox_with_lowered_flag:',
u'\U0001f4eb': u':closed_mailbox_with_raised_flag:',
u'\U0001f302': u':closed_umbrella:',
u'\U00002601': u':cloud:',
u'\U0001f329': u':cloud_with_lightning:',
u'\U0001f327': u':cloud_with_rain:',
u'\U0001f328': u':cloud_with_snow:',
u'\U0001f32a': u':cloud_with_tornado:',
u'\U0001f378': u':cocktail:',
u'\U00002615': u':coffee:',
u'\U000026b0': u':coffin:',
u'\U0001f630': u':cold_sweat:',
u'\U00002604': u':comet:',
u'\U0001f5dc': u':compression:',
u'\U0001f4bb': u':computer:',
u'\U0001f38a': u':confetti_ball:',
u'\U0001f616': u':confounded:',
u'\U0001f615': u':confused:',
u'\U0001f6a7': u':construction:',
u'\U0001f477': u':construction_worker:',
u'\U0001f39b': u':control_knobs:',
u'\U0001f3ea': u':convenience_store:',
u'\U0001f35a': u':cooked_rice:',
u'\U0001f36a': u':cookie:',
u'\U0001f373': u':cooking:',
u'\U0001f192': u':cool:',
u'\U0001f46e': u':cop:',
u'\U000000a9': u':copyright:',
u'\U0001f33d': u':corn:',
u'\U0001f6cb': u':couch_and_lamp:',
u'\U0001f46b': u':couple:',
u'\U0001f491': u':couple_with_heart:',
u'\U0001f48f': u':couplekiss:',
u'\U0001f404': u':cow2:',
u'\U0001f42e': u':cow:',
u'\U0001f980': u':crab:',
u'\U0001f4b3': u':credit_card:',
u'\U0001f319': u':crescent_moon:',
u'\U0001f3cf': u':cricket_bat_and_ball:',
u'\U0001f40a': u':crocodile:',
u'\U0000274c': u':cross_mark:',
u'\U0001f38c': u':crossed_flags:',
u'\U00002694': u':crossed_swords:',
u'\U0001f451': u':crown:',
u'\U0001f622': u':cry:',
u'\U0001f63f': u':crying_cat_face:',
u'\U0001f52e': u':crystal_ball:',
u'\U0001f498': u':cupid:',
u'\U000027b0': u':curly_loop:',
u'\U0001f4b1': u':currency_exchange:',
u'\U0001f35b': u':curry:',
u'\U0001f36e': u':custard:',
u'\U0001f6c3': u':customs:',
u'\U0001f300': u':cyclone:',
u'\U0001f5e1': u':dagger_knife:',
u'\U0001f483': u':dancer:',
u'\U0001f46f': u':dancers:',
u'\U0001f361': u':dango:',
u'\U0001f576': u':dark_sunglasses:',
u'\U0001f3af': u':dart:',
u'\U0001f4a8': u':dash:',
u'\U0001f333': u':deciduous_tree:',
u'\U0001f69a': u':delivery_truck:',
u'\U0001f3ec': u':department_store:',
u'\U0001f3da': u':derelict_house_building:',
u'\U0001f3dc': u':desert:',
u'\U0001f3dd': u':desert_island:',
u'\U0001f5a5': u':desktop_computer:',
u'\U0001f4a0': u':diamond_shape_with_a_dot_inside:',
u'\U0001f61e': u':disappointed:',
u'\U0001f625': u':disappointed_but_relieved_face:',
u'\U0001f4ab': u':dizzy:',
u'\U0001f635': u':dizzy_face:',
u'\U0001f6af': u':do_not_litter:',
u'\U0001f415': u':dog2:',
u'\U0001f436': u':dog:',
u'\U0001f38e': u':dolls:',
u'\U0001f42c': u':dolphin:',
u'\U0001f6aa': u':door:',
u'\U000027bf': u':double_curly_loop:',
u'\U000023f8': u':double_vertical_bar:',
u'\U0001f369': u':doughnut:',
u'\U0001f54a': u':dove_of_peace:',
u'\U0001f53b': u':down-pointing_red_triangle:',
u'\U0001f409': u':dragon:',
u'\U0001f432': u':dragon_face:',
u'\U0001f457': u':dress:',
u'\U0001f42a': u':dromedary_camel:',
u'\U0001f4a7': u':droplet:',
u'\U0001f4c0': u':dvd:',
u'\U0001f4e7': u':e-mail:',
u'\U0001f442': u':ear:',
u'\U0001f33e': u':ear_of_rice:',
u'\U0001f30d': u':earth_africa:',
u'\U0001f30e': u':earth_americas:',
u'\U0001f30f': u':earth_asia:',
u'\U00002734': u':eight_pointed_black_star:',
u'\U00002733': u':eight_spoked_asterisk:',
u'\U000023cf': u':eject_symbol:',
u'\U0001f50c': u':electric_plug:',
u'\U0001f526': u':electric_torch:',
u'\U0001f418': u':elephant:',
u'\U00002709': u':email:',
u'\U0001f3fb': u':emoji_modifier_fitzpatrick_type-1-2:',
u'\U0001f3fc': u':emoji_modifier_fitzpatrick_type-3:',
u'\U0001f3fd': u':emoji_modifier_fitzpatrick_type-4:',
u'\U0001f3fe': u':emoji_modifier_fitzpatrick_type-5:',
u'\U0001f3ff': u':emoji_modifier_fitzpatrick_type-6:',
u'\U0001f51a': u':end:',
u'\U0001f4e9': u':envelope_with_arrow:',
u'\U0001f3f0': u':european_castle:',
u'\U0001f3e4': u':european_post_office:',
u'\U0001f332': u':evergreen_tree:',
u'\U00002757': u':exclamation:',
u'\U00002049': u':exclamation_question_mark:',
u'\U0001f611': u':expressionless:',
u'\U0001f441': u':eye:',
u'\U0001f453': u':eyeglasses:',
u'\U0001f440': u':eyes:',
u'\U0001f486': u':face_massage:',
u'\U0001f60b': u':face_savouring_delicious_food:',
u'\U0001f631': u':face_screaming_in_fear:',
u'\U0001f618': u':face_throwing_a_kiss:',
u'\U0001f613': u':face_with_cold_sweat:',
u'\U0001f915': u':face_with_head-bandage:',
u'\U0001f624': u':face_with_look_of_triumph:',
u'\U0001f637': u':face_with_medical_mask:',
u'\U0001f645': u':face_with_no_good_gesture:',
u'\U0001f646': u':face_with_ok_gesture:',
u'\U0001f62e': u':face_with_open_mouth:',
u'\U0001f644': u':face_with_rolling_eyes:',
u'\U0001f61b': u':face_with_stuck-out_tongue:',
u'\U0001f602': u':face_with_tears_of_joy:',
u'\U0001f912': u':face_with_thermometer:',
u'\U0001f636': u':face_without_mouth:',
u'\U0001f44a': u':facepunch:',
u'\U0001f3ed': u':factory:',
u'\U0001f342': u':fallen_leaf:',
u'\U0001f46a': u':family:',
u'\U0001f385': u':father_christmas:',
u'\U0001f4e0': u':fax:',
u'\U0001f628': u':fearful:',
u'\U0001f43e': u':feet:',
u'\U0001f3a1': u':ferris_wheel:',
u'\U000026f4': u':ferry:',
u'\U0001f3d1': u':field_hockey_stick_and_ball:',
u'\U0001f5c4': u':file_cabinet:',
u'\U0001f4c1': u':file_folder:',
u'\U0001f39e': u':film_frames:',
u'\U0001f4fd': u':film_projector:',
u'\U0001f525': u':fire:',
u'\U0001f692': u':fire_engine:',
u'\U0001f387': u':firework_sparkler:',
u'\U0001f386': u':fireworks:',
u'\U0001f313': u':first_quarter_moon:',
u'\U0001f31b': u':first_quarter_moon_with_face:',
u'\U0001f41f': u':fish:',
u'\U0001f365': u':fish_cake:',
u'\U0001f3a3': u':fishing_pole_and_fish:',
u'\U0000270a': u':fist:',
u'\U000026f3': u':flag_in_hole:',
u'\U0000269c': u':fleur-de-lis:',
u'\U0001f4aa': u':flexed_biceps:',
u'\U0001f4be': u':floppy_disk:',
u'\U0001f3b4': u':flower_playing_cards:',
u'\U0001f633': u':flushed:',
u'\U0001f32b': u':fog:',
u'\U0001f301': u':foggy:',
u'\U0001f463': u':footprints:',
u'\U0001f374': u':fork_and_knife:',
u'\U0001f37d': u':fork_and_knife_with_plate:',
u'\U000026f2': u':fountain:',
u'\U0001f340': u':four_leaf_clover:',
u'\U0001f5bc': u':frame_with_picture:',
u'\U0001f193': u':free:',
u'\U0001f35f': u':french_fries:',
u'\U0001f364': u':fried_shrimp:',
u'\U0001f438': u':frog:',
u'\U0001f425': u':front-facing_baby_chick:',
u'\U0001f626': u':frowning:',
u'\U000026fd': u':fuel_pump:',
u'\U0001f315': u':full_moon:',
u'\U0001f31d': u':full_moon_with_face:',
u'\U000026b1': u':funeral_urn:',
u'\U0001f3b2': u':game_die:',
u'\U00002699': u':gear:',
u'\U0001f48e': u':gem:',
u'\U0000264a': u':gemini:',
u'\U0001f47b': u':ghost:',
u'\U0001f381': u':gift:',
u'\U0001f49d': u':gift_heart:',
u'\U0001f467': u':girl:',
u'\U0001f310': u':globe_with_meridians:',
u'\U0001f31f': u':glowing_star:',
u'\U0001f410': u':goat:',
u'\U0001f3cc': u':golfer:',
u'\U0001f393': u':graduation_cap:',
u'\U0001f347': u':grapes:',
u'\U0001f34f': u':green_apple:',
u'\U0001f4d7': u':green_book:',
u'\U0001f49a': u':green_heart:',
u'\U00002755': u':grey_exclamation:',
u'\U00002754': u':grey_question:',
u'\U0001f62c': u':grimacing:',
u'\U0001f601': u':grin:',
u'\U0001f600': u':grinning:',
u'\U0001f638': u':grinning_cat_face_with_smiling_eyes:',
u'\U0001f497': u':growing_heart:',
u'\U0001f482': u':guardsman:',
u'\U0001f3b8': u':guitar:',
u'\U0001f52b': u':gun:',
u'\U0001f487': u':haircut:',
u'\U0001f354': u':hamburger:',
u'\U0001f528': u':hammer:',
u'\U00002692': u':hammer_and_pick:',
u'\U0001f6e0': u':hammer_and_wrench:',
u'\U0001f439': u':hamster:',
u'\U0000270b': u':hand:',
u'\U0001f45c': u':handbag:',
u'\U0001f4a9': u':hankey:',
u'\U0001f64b': u':happy_person_raising_one_hand:',
u'\U0001f423': u':hatching_chick:',
u'\U0001f3a7': u':headphone:',
u'\U0001f649': u':hear-no-evil_monkey:',
u'\U00002764': u':heart:',
u'\U0001f49f': u':heart_decoration:',
u'\U0001f60d': u':heart_eyes:',
u'\U0001f63b': u':heart_eyes_cat:',
u'\U00002714': u':heavy_check_mark:',
u'\U00002797': u':heavy_division_sign:',
u'\U0001f4b2': u':heavy_dollar_sign:',
u'\U00002763': u':heavy_heart_exclamation_mark_ornament:',
u'\U00002b55': u':heavy_large_circle:',
u'\U00002796': u':heavy_minus_sign:',
u'\U00002716': u':heavy_multiplication_x:',
u'\U00002795': u':heavy_plus_sign:',
u'\U0001f681': u':helicopter:',
u'\U00002388': u':helm_symbol:',
u'\U000026d1': u':helmet_with_white_cross:',
u'\U0001f33f': u':herb:',
u'\U0001f33a': u':hibiscus:',
u'\U0001f460': u':high-heeled_shoe:',
u'\U0001f506': u':high_brightness:',
u'\U000026a1': u':high_voltage_sign:',
u'\U0001f52a': u':hocho:',
u'\U0001f573': u':hole:',
u'\U0001f36f': u':honey_pot:',
u'\U0001f6a5': u':horizontal_traffic_light:',
u'\U0001f40e': u':horse:',
u'\U0001f434': u':horse:',
u'\U0001f3c7': u':horse_racing:',
u'\U0001f3e5': u':hospital:',
u'\U0001f32d': u':hot_dog:',
u'\U0001f336': u':hot_pepper:',
u'\U00002668': u':hot_springs:',
u'\U0001f3e8': u':hotel:',
u'\U0000231b': u':hourglass:',
u'\U000023f3': u':hourglass_flowing_sand:',
u'\U0001f3e0': u':house:',
u'\U0001f3d8': u':house_buildings:',
u'\U0001f3e1': u':house_with_garden:',
u'\U0001f917': u':hugging_face:',
u'\U0001f62f': u':hushed:',
u'\U0001f368': u':ice_cream:',
u'\U0001f3d2': u':ice_hockey_stick_and_puck:',
u'\U000026f8': u':ice_skate:',
u'\U0001f366': u':icecream:',
u'\U0001f194': u':id:',
u'\U0001f47f': u':imp:',
u'\U0001f4e5': u':inbox_tray:',
u'\U0001f4e8': u':incoming_envelope:',
u'\U0001f481': u':information_desk_person:',
u'\U00002139': u':information_source:',
u'\U0001f607': u':innocent:',
u'\U0001f523': u':input_symbol_for_symbols:',
u'\U0001f4f1': u':iphone:',
u'\U0001f3ee': u':izakaya_lantern:',
u'\U0001f383': u':jack-o-lantern:',
u'\U0001f5fe': u':japan:',
u'\U0001f3ef': u':japanese_castle:',
u'\U0001f47a': u':japanese_goblin:',
u'\U0001f479': u':japanese_ogre:',
u'\U0001f3e3': u':japanese_post_office:',
u'\U0001f456': u':jeans:',
u'\U0001f579': u':joystick:',
u'\U0001f54b': u':kaaba:',
u'\U0001f511': u':key:',
u'\U00002328': u':keyboard:',
u'\U0001f51f': u':keycap_ten:',
u'\U0001f458': u':kimono:',
u'\U0001f48b': u':kiss:',
u'\U0001f617': u':kissing:',
u'\U0001f63d': u':kissing_cat:',
u'\U0001f61a': u':kissing_closed_eyes:',
u'\U0001f619': u':kissing_face_with_smiling_eyes:',
u'\U0001f428': u':koala:',
u'\U0001f201': u':koko:',
u'\U0001f3f7': u':label:',
u'\U0001f535': u':large_blue_circle:',
u'\U0001f537': u':large_blue_diamond:',
u'\U0001f536': u':large_orange_diamond:',
u'\U0001f534': u':large_red_circle:',
u'\U0001f317': u':last_quarter_moon:',
u'\U0001f31c': u':last_quarter_moon_with_face:',
u'\U0000271d': u':latin_cross:',
u'\U0001f606': u':laughing:',
u'\U0001f343': u':leaf_fluttering_in_wind:',
u'\U0001f4d2': u':ledger:',
u'\U0001f50d': u':left-pointing_magnifying_glass:',
u'\U0001f6c5': u':left_luggage:',
u'\U00002194': u':left_right_arrow:',
u'\U0001f4ac': u':left_speech_bubble:',
u'\U000021a9': u':leftwards_arrow_with_hook:',
u'\U0001f34b': u':lemon:',
u'\U0000264c': u':leo:',
u'\U0001f406': u':leopard:',
u'\U0001f39a': u':level_slider:',
u'\U0000264e': u':libra:',
u'\U0001f688': u':light_rail:',
u'\U0001f517': u':link:',
u'\U0001f587': u':linked_paperclips:',
u'\U0001f981': u':lion_face:',
u'\U0001f444': u':lips:',
u'\U0001f484': u':lipstick:',
u'\U0001f512': u':lock:',
u'\U0001f50f': u':lock_with_ink_pen:',
u'\U0001f36d': u':lollipop:',
u'\U0001f50a': u':loud_sound:',
u'\U0001f62d': u':loudly_crying_face:',
u'\U0001f4e2': u':loudspeaker:',
u'\U0001f3e9': u':love_hotel:',
u'\U0001f48c': u':love_letter:',
u'\U0001f505': u':low_brightness:',
u'\U0001f58a': u':lower_left_ballpoint_pen:',
u'\U0001f58d': u':lower_left_crayon:',
u'\U0001f58b': u':lower_left_fountain_pen:',
u'\U0001f58c': u':lower_left_paintbrush:',
u'\U0001f50e': u':mag_right:',
u'\U0001f004': u':mahjong:',
u'\U0001f4ec': u':mailbox_with_mail:',
u'\U0001f4ed': u':mailbox_with_no_mail:',
u'\U0001f468': u':man:',
u'\U0001f574': u':man_in_business_suit_levitating:',
u'\U0001f472': u':man_with_gua_pi_mao:',
u'\U0001f473': u':man_with_turban:',
u'\U0001f45e': u':mans_shoe:',
u'\U0001f570': u':mantelpiece_clock:',
u'\U0001f341': u':maple_leaf:',
u'\U0001f356': u':meat_on_bone:',
u'\U0001f3c5': u':medal:',
u'\U000026aa': u':medium_white_circle:',
u'\U0001f348': u':melon:',
u'\U0001f4dd': u':memo:',
u'\U0001f54e': u':menorah_with_nine_branches:',
u'\U0001f6b9': u':mens:',
u'\U0001f687': u':metro:',
u'\U0001f3a4': u':microphone:',
u'\U0001f52c': u':microscope:',
u'\U0001f595': u':middle_finger:',
u'\U0001f396': u':military_medal:',
u'\U0001f30c': u':milky_way:',
u'\U0001f690': u':minibus:',
u'\U0001f4bd': u':minidisc:',
u'\U0001f4f4': u':mobile_phone_off:',
u'\U0001f911': u':money-mouth_face:',
u'\U0001f4b0': u':money_bag:',
u'\U0001f4b8': u':money_with_wings:',
u'\U0001f412': u':monkey:',
u'\U0001f435': u':monkey_face:',
u'\U0001f69d': u':monorail:',
u'\U0001f314': u':moon:',
u'\U0001f391': u':moon_viewing_ceremony:',
u'\U0001f54c': u':mosque:',
u'\U0001f324': u':mostly_sunny:',
u'\U0001f6e5': u':motor_boat:',
u'\U0001f6e3': u':motorway:',
u'\U0001f5fb': u':mount_fuji:',
u'\U000026f0': u':mountain:',
u'\U0001f6b5': u':mountain_bicyclist:',
u'\U0001f6a0': u':mountain_cableway:',
u'\U0001f69e': u':mountain_railway:',
u'\U0001f401': u':mouse2:',
u'\U0001f42d': u':mouse:',
u'\U0001f3a5': u':movie_camera:',
u'\U0001f5ff': u':moyai:',
u'\U0001f3b6': u':multiple_musical_notes:',
u'\U0001f344': u':mushroom:',
u'\U0001f3b9': u':musical_keyboard:',
u'\U0001f3b5': u':musical_note:',
u'\U0001f3bc': u':musical_score:',
u'\U0001f507': u':mute:',
u'\U0001f485': u':nail_care:',
u'\U0001f4db': u':name_badge:',
u'\U0001f3de': u':national_park:',
u'\U0001f454': u':necktie:',
u'\U0000274e': u':negative_squared_cross_mark:',
u'\U0001f913': u':nerd_face:',
u'\U0001f610': u':neutral_face:',
u'\U0001f195': u':new:',
u'\U0001f311': u':new_moon:',
u'\U0001f31a': u':new_moon_with_face:',
u'\U0001f4f0': u':newspaper:',
u'\U0001f196': u':ng:',
u'\U0001f303': u':night_with_stars:',
u'\U0001f6b3': u':no_bicycles:',
u'\U000026d4': u':no_entry:',
u'\U0001f6ab': u':no_entry_sign:',
u'\U0001f4f5': u':no_mobile_phones:',
u'\U0001f51e': u':no_one_under_eighteen_symbol:',
u'\U0001f6b7': u':no_pedestrians:',
u'\U0001f6ad': u':no_smoking:',
u'\U0001f6b1': u':non-potable_water:',
u'\U0001f443': u':nose:',
u'\U0001f4d3': u':notebook:',
u'\U0001f4d4': u':notebook_with_decorative_cover:',
u'\U0001f529': u':nut_and_bolt:',
u'\U0001f17e': u':o2:',
u'\U0001f30a': u':ocean:',
u'\U0001f419': u':octopus:',
u'\U0001f362': u':oden:',
u'\U0001f3e2': u':office:',
u'\U0001f6e2': u':oil_drum:',
u'\U0001f197': u':ok:',
u'\U0001f44c': u':ok_hand:',
u'\U0001f5dd': u':old_key:',
u'\U0001f474': u':older_man:',
u'\U0001f475': u':older_woman:',
u'\U0001f549': u':om_symbol:',
u'\U0001f51b': u':on:',
u'\U0001f698': u':oncoming_automobile:',
u'\U0001f68d': u':oncoming_bus:',
u'\U0001f694': u':oncoming_police_car:',
u'\U0001f696': u':oncoming_taxi:',
u'\U0001f4c2': u':open_file_folder:',
u'\U0001f450': u':open_hands:',
u'\U0001f513': u':open_lock:',
u'\U000026ce': u':ophiuchus:',
u'\U0001f4d9': u':orange_book:',
u'\U00002626': u':orthodox_cross:',
u'\U0001f4e4': u':outbox_tray:',
u'\U0001f402': u':ox:',
u'\U0001f4e6': u':package:',
u'\U0001f4c4': u':page_facing_up:',
u'\U0001f4c3': u':page_with_curl:',
u'\U0001f4df': u':pager:',
u'\U0001f334': u':palm_tree:',
u'\U0001f43c': u':panda_face:',
u'\U0001f4ce': u':paperclip:',
u'\U0001f17f': u':parking:',
u'\U0000303d': u':part_alternation_mark:',
u'\U000026c5': u':partly_sunny:',
u'\U0001f326': u':partly_sunny_rain:',
u'\U0001f389': u':party_popper:',
u'\U0001f6f3': u':passenger_ship:',
u'\U0001f6c2': u':passport_control:',
u'\U0000262e': u':peace_symbol:',
u'\U0001f351': u':peach:',
u'\U0001f350': u':pear:',
u'\U0001f6b6': u':pedestrian:',
u'\U0000270f': u':pencil2:',
u'\U0001f427': u':penguin:',
u'\U0001f614': u':pensive:',
u'\U0001f3ad': u':performing_arts:',
u'\U0001f623': u':persevere:',
u'\U0001f64d': u':person_frowning:',
u'\U000026f9': u':person_with_ball:',
u'\U0001f471': u':person_with_blond_hair:',
u'\U0001f64f': u':person_with_folded_hands:',
u'\U0001f64e': u':person_with_pouting_face:',
u'\U000026cf': u':pick:',
u'\U0001f416': u':pig2:',
u'\U0001f437': u':pig:',
u'\U0001f43d': u':pig_nose:',
u'\U0001f48a': u':pill:',
u'\U0001f34d': u':pineapple:',
u'\U00002653': u':pisces:',
u'\U0001f355': u':pizza:',
u'\U0001f6d0': u':place_of_worship:',
u'\U0001f447': u':point_down:',
u'\U0001f448': u':point_left:',
u'\U0001f449': u':point_right:',
u'\U0000261d': u':point_up:',
u'\U0001f446': u':point_up_2:',
u'\U0001f693': u':police_car:',
u'\U0001f6a8': u':police_cars_revolving_light:',
u'\U0001f429': u':poodle:',
u'\U0001f37f': u':popcorn:',
u'\U0001f4ef': u':postal_horn:',
u'\U0001f4ee': u':postbox:',
u'\U0001f372': u':pot_of_food:',
u'\U0001f6b0': u':potable_water:',
u'\U0001f45d': u':pouch:',
u'\U0001f357': u':poultry_leg:',
u'\U0001f63e': u':pouting_cat:',
u'\U0001f621': u':pouting_face:',
u'\U0001f4ff': u':prayer_beads:',
u'\U0001f478': u':princess:',
u'\U0001f5a8': u':printer:',
u'\U0001f49c': u':purple_heart:',
u'\U0001f45b': u':purse:',
u'\U0001f4cc': u':pushpin:',
u'\U0001f6ae': u':put_litter_in_its_place:',
u'\U0001f407': u':rabbit2:',
u'\U0001f430': u':rabbit:',
u'\U0001f3ce': u':racing_car:',
u'\U0001f3cd': u':racing_motorcycle:',
u'\U0001f4fb': u':radio:',
u'\U0001f518': u':radio_button:',
u'\U00002622': u':radioactive_sign:',
u'\U0001f683': u':railway_car:',
u'\U0001f6e4': u':railway_track:',
u'\U0001f308': u':rainbow:',
u'\U0001f590': u':raised_hand_with_fingers_splayed:',
u'\U0001f64c': u':raised_hands:',
u'\U0001f40f': u':ram:',
u'\U0001f35c': u':ramen:',
u'\U0001f400': u':rat:',
u'\U000000ae': u':registered:',
u'\U0000263a': u':relaxed:',
u'\U0001f60c': u':relieved:',
u'\U0001f397': u':reminder_ribbon:',
u'\U0001f501': u':repeat:',
u'\U0001f502': u':repeat_one:',
u'\U0001f6bb': u':restroom:',
u'\U0001f49e': u':revolving_hearts:',
u'\U0001f380': u':ribbon:',
u'\U0001f359': u':rice_ball:',
u'\U0001f358': u':rice_cracker:',
u'\U0001f5ef': u':right_anger_bubble:',
u'\U0001f48d': u':ring:',
u'\U0001f360': u':roasted_sweet_potato:',
u'\U0001f916': u':robot_face:',
u'\U0001f680': u':rocket:',
u'\U0001f5de': u':rolled-up_newspaper:',
u'\U0001f3a2': u':roller_coaster:',
u'\U0001f413': u':rooster:',
u'\U0001f339': u':rose:',
u'\U0001f3f5': u':rosette:',
u'\U0001f4cd': u':round_pushpin:',
u'\U0001f6a3': u':rowboat:',
u'\U0001f3c9': u':rugby_football:',
u'\U0001f3c3': u':runner:',
u'\U0001f3bd': u':running_shirt_with_sash:',
u'\U0001f202': u':sa:',
u'\U00002650': u':sagittarius:',
u'\U0001f376': u':sake:',
u'\U0001f461': u':sandal:',
u'\U0001f4e1': u':satellite:',
u'\U0001f6f0': u':satellite:',
u'\U0001f3b7': u':saxophone:',
u'\U00002696': u':scales:',
u'\U0001f3eb': u':school:',
u'\U0001f392': u':school_satchel:',
u'\U0001f982': u':scorpion:',
u'\U0000264f': u':scorpius:',
u'\U0001f640': u':scream_cat:',
u'\U0001f4dc': u':scroll:',
u'\U0001f4ba': u':seat:',
u'\U0001f648': u':see-no-evil_monkey:',
u'\U0001f331': u':seedling:',
u'\U00002618': u':shamrock:',
u'\U0001f367': u':shaved_ice:',
u'\U0001f411': u':sheep:',
u'\U0001f41a': u':shell:',
u'\U0001f6e1': u':shield:',
u'\U000026e9': u':shinto_shrine:',
u'\U0001f6a2': u':ship:',
u'\U0001f455': u':shirt:',
u'\U0001f320': u':shooting_star:',
u'\U0001f6cd': u':shopping_bags:',
u'\U0001f6bf': u':shower:',
u'\U0001f918': u':sign_of_the_horns:',
u'\U0001f642': u':simple_smile:',
u'\U0001f52f': u':six_pointed_star:',
u'\U0001f3bf': u':ski:',
u'\U000026f7': u':skier:',
u'\U0001f480': u':skull:',
u'\U00002620': u':skull_and_crossbones:',
u'\U0001f634': u':sleeping:',
u'\U0001f6cc': u':sleeping_accommodation:',
u'\U0001f4a4': u':sleeping_symbol:',
u'\U0001f62a': u':sleepy:',
u'\U0001f575': u':sleuth_or_spy:',
u'\U0001f641': u':slightly_frowning_face:',
u'\U0001f3b0': u':slot_machine:',
u'\U0001f6e9': u':small_airplane:',
u'\U0001f539': u':small_blue_diamond:',
u'\U0001f538': u':small_orange_diamond:',
u'\U0001f53a': u':small_red_triangle:',
u'\U0001f604': u':smile:',
u'\U0001f603': u':smiley:',
u'\U0001f63a': u':smiley_cat:',
u'\U0001f608': u':smiling_face_with_horns:',
u'\U0001f605': u':smiling_face_with_open_mouth_and_cold_sweat:',
u'\U0001f60e': u':smiling_face_with_sunglasses:',
u'\U0001f60f': u':smirk:',
u'\U0001f6ac': u':smoking:',
u'\U0001f40c': u':snail:',
u'\U0001f40d': u':snake:',
u'\U0001f3d4': u':snow_capped_mountain:',
u'\U0001f3c2': u':snowboarder:',
u'\U00002744': u':snowflake:',
u'\U00002603': u':snowman:',
u'\U000026c4': u':snowman_without_snow:',
u'\U000026bd': u':soccer:',
u'\U0001f51c': u':soon:',
u'\U0001f198': u':sos:',
u'\U0001f509': u':sound:',
u'\U0001f35d': u':spaghetti:',
u'\U00002747': u':sparkle:',
u'\U00002728': u':sparkles:',
u'\U0001f496': u':sparkling_heart:',
u'\U0001f64a': u':speak-no-evil_monkey:',
u'\U0001f508': u':speaker:',
u'\U0001f5e3': u':speaking_head_in_silhouette:',
u'\U0001f6a4': u':speedboat:',
u'\U0001f577': u':spider:',
u'\U0001f578': u':spider_web:',
u'\U0001f5d3': u':spiral_calendar_pad:',
u'\U0001f5d2': u':spiral_note_pad:',
u'\U0001f4a6': u':splashing_sweat_symbol:',
u'\U0001f596': u':spock-hand:',
u'\U0001f433': u':spouting_whale:',
u'\U0001f199': u':squared_up_with_exclamation_mark:',
u'\U0001f19a': u':squared_vs:',
u'\U0001f3df': u':stadium:',
u'\U00002b50': u':star:',
u'\U0000262a': u':star_and_crescent:',
u'\U00002721': u':star_of_david:',
u'\U0001f689': u':station:',
u'\U0001f5fd': u':statue_of_liberty:',
u'\U0001f682': u':steam_locomotive:',
u'\U000023f1': u':stopwatch:',
u'\U0001f4cf': u':straight_ruler:',
u'\U0001f353': u':strawberry:',
u'\U0001f61d': u':stuck_out_tongue_closed_eyes:',
u'\U0001f61c': u':stuck_out_tongue_winking_eye:',
u'\U0001f399': u':studio_microphone:',
u'\U0001f31e': u':sun_with_face:',
u'\U0001f33b': u':sunflower:',
u'\U0001f305': u':sunrise:',
u'\U0001f304': u':sunrise_over_mountains:',
u'\U0001f3c4': u':surfer:',
u'\U0001f363': u':sushi:',
u'\U0001f69f': u':suspension_railway:',
u'\U0001f3ca': u':swimmer:',
u'\U0001f54d': u':synagogue:',
u'\U0001f489': u':syringe:',
u'\U0001f3d3': u':table_tennis_paddle_and_ball:',
u'\U0001f32e': u':taco:',
u'\U0001f38b': u':tanabata_tree:',
u'\U0001f34a': u':tangerine:',
u'\U00002649': u':taurus:',
u'\U0001f695': u':taxi:',
u'\U0001f375': u':tea:',
u'\U0001f4de': u':telephone_receiver:',
u'\U0001f52d': u':telescope:',
u'\U0001f4fa': u':television:',
u'\U0001f3be': u':tennis:',
u'\U000026fa': u':tent:',
u'\U0001f321': u':thermometer:',
u'\U0001f914': u':thinking_face:',
u'\U0001f4ad': u':thought_balloon:',
u'\U0001f5b1': u':three_button_mouse:',
u'\U000026c8': u':thunder_cloud_and_rain:',
u'\U0001f3ab': u':ticket:',
u'\U0001f405': u':tiger2:',
u'\U0001f42f': u':tiger:',
u'\U000023f2': u':timer_clock:',
u'\U0001f62b': u':tired_face:',
u'\U00002122': u':tm:',
u'\U0001f6bd': u':toilet:',
u'\U0001f5fc': u':tokyo_tower:',
u'\U0001f345': u':tomato:',
u'\U0001f445': u':tongue:',
u'\U0001f51d': u':top:',
u'\U0001f3a9': u':top_hat:',
u'\U0001f5b2': u':trackball:',
u'\U0001f69c': u':tractor:',
u'\U0001f686': u':train2:',
u'\U0001f68b': u':train:',
u'\U0001f68a': u':tram:',
u'\U0001f6a9': u':triangular_flag_on_post:',
u'\U0001f4d0': u':triangular_ruler:',
u'\U0001f531': u':trident:',
u'\U0001f68e': u':trolleybus:',
u'\U0001f3c6': u':trophy:',
u'\U0001f379': u':tropical_drink:',
u'\U0001f420': u':tropical_fish:',
u'\U0001f3ba': u':trumpet:',
u'\U0001f337': u':tulip:',
u'\U0001f983': u':turkey:',
u'\U0001f422': u':turtle:',
u'\U0001f500': u':twisted_rightwards_arrows:',
u'\U0001f495': u':two_hearts:',
u'\U0001f46c': u':two_men_holding_hands:',
u'\U0001f46d': u':two_women_holding_hands:',
u'\U00002602': u':umbrella:',
u'\U000026f1': u':umbrella_on_ground:',
u'\U00002614': u':umbrella_with_rain_drops:',
u'\U0001f612': u':unamused:',
u'\U0001f984': u':unicorn_face:',
u'\U0001f643': u':upside-down_face:',
u'\U0000270c': u':v:',
u'\U0001f6a6': u':vertical_traffic_light:',
u'\U0001f4fc': u':vhs:',
u'\U0001f4f3': u':vibration_mode:',
u'\U0001f4f9': u':video_camera:',
u'\U0001f3ae': u':video_game:',
u'\U0001f3bb': u':violin:',
u'\U0000264d': u':virgo:',
u'\U0001f30b': u':volcano:',
u'\U0001f3d0': u':volleyball:',
u'\U0001f318': u':waning_crescent_moon:',
u'\U0001f316': u':waning_gibbous_moon:',
u'\U000026a0': u':warning:',
u'\U0001f5d1': u':wastebasket:',
u'\U0000231a': u':watch:',
u'\U0001f403': u':water_buffalo:',
u'\U0001f6be': u':water_closet:',
u'\U0001f349': u':watermelon:',
u'\U0001f44b': u':wave:',
u'\U0001f3f4': u':waving_black_flag:',
u'\U0001f3f3': u':waving_white_flag:',
u'\U00003030': u':wavy_dash:',
u'\U0001f312': u':waxing_crescent_moon:',
u'\U0001f629': u':weary:',
u'\U0001f492': u':wedding:',
u'\U0001f3cb': u':weight_lifter:',
u'\U0001f40b': u':whale2:',
u'\U00002638': u':wheel_of_dharma:',
u'\U0000267f': u':wheelchair:',
u'\U00002705': u':white_check_mark:',
u'\U0001f4ae': u':white_flower:',
u'\U00002639': u':white_frowning_face:',
u'\U00002b1c': u':white_large_square:',
u'\U000025fd': u':white_medium_small_square:',
u'\U000025fb': u':white_medium_square:',
u'\U000025ab': u':white_small_square:',
u'\U0001f533': u':white_square_button:',
u'\U0001f32c': u':wind_blowing_face:',
u'\U0001f390': u':wind_chime:',
u'\U0001f377': u':wine_glass:',
u'\U0001f609': u':wink:',
u'\U0001f43a': u':wolf:',
u'\U0001f469': u':woman:',
u'\U0001f45a': u':womans_clothes:',
u'\U0001f452': u':womans_hat:',
u'\U0001f6ba': u':womens:',
u'\U0001f5fa': u':world_map:',
u'\U0001f61f': u':worried:',
u'\U0001f527': u':wrench:',
u'\U0000270d': u':writing_hand:',
u'\U0001f49b': u':yellow_heart:',
u'\U0000262f': u':yin_yang:',
u'\U0001f910': u':zipper-mouth_face:',
}
# Expressions taken from Martijn Pieters code at
# http://stackoverflow.com/questions/26568722/remove-unicode-emoji-using-re-in-python
# Build a regex that matches runs of emoji characters.  Wide (UCS-4)
# Python builds accept \U0001FXXX ranges directly; narrow (UCS-2) builds
# reject them with re.error, so we fall back to matching the equivalent
# UTF-16 surrogate-pair sequences instead.
try:
    # Wide UCS-4 build
    ALIAS_RE = re.compile(u'['u'\U0001F300-\U0001F64F'
                          u'\U0001F680-\U0001F6FF'
                          u'\u2600-\u26FF\u2700-\u27BF]+',
                          re.DOTALL | re.UNICODE)
except re.error:
    # Narrow UCS-2 build: non-BMP emoji are stored as surrogate pairs,
    # so match the lead/trail surrogate ranges explicitly.
    ALIAS_RE = re.compile(u'('u'\ud83c[\udf00-\udfff]|'
                          u'\ud83d[\udc00-\ude4f\ude80-\udeff]|'
                          u'[\u2600-\u26FF\u2700-\u27BF])+',
                          re.DOTALL | re.UNICODE)
def convert_emoji_to_aliases(data, modifier, modifier_data, string):
    """Weechat modifier callback: rewrite emoji in *string* as :alias: text.

    *data*, *modifier* and *modifier_data* are part of the weechat
    modifier-callback signature and are unused here.
    """
    if sys.version_info < (3, ):
        string = string.decode('utf-8')
    for run in ALIAS_RE.findall(string):
        if run in EMOJI_ALIASES:
            # Single known emoji: direct lookup.
            string = string.replace(run, EMOJI_ALIASES[run])
            continue
        # Bug fix: ALIAS_RE matches *runs* of consecutive emoji as one
        # string (the trailing '+'), and a multi-emoji run never appears
        # in the dict as a whole, so such runs were silently left alone.
        # Translate them character by character instead.
        translated = u''.join(EMOJI_ALIASES.get(char, char) for char in run)
        if translated != run:
            string = string.replace(run, translated)
    return string
if __name__ == "__main__" and import_ok:
    if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
                  SCRIPT_DESC, "", "utf-8"):
        # Rewrite emoji to :aliases: on every kind of incoming IRC message.
        incoming_signals = (
            "irc_in_away",
            "irc_in_cnotice",
            "irc_in_cprivmsg",
            "irc_in_kick",
            "irc_in_knock",
            "irc_in_notice",
            "irc_in_part",
            "irc_in_privmsg",
            "irc_in_quit",
            "irc_in_wallops",
        )
        for signal in incoming_signals:
            w.hook_modifier(signal, "convert_emoji_to_aliases", "")
        # This intercepts outgoing emoji also
        # w.hook_modifier("input_text_for_buffer",
        #                 "convert_emoji_to_aliases", "")
| qguv/config | weechat/plugins/python/emoji2alias.py | Python | gpl-3.0 | 41,578 |
#!/usr/bin/env python
# coding=utf-8
# Copyright (C) 2015 Swift Navigation Inc.
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
import pprint
from crc24 import *
pp = pprint.PrettyPrinter(indent=2)
def bits2byte(bstring):
    """Convert a string of '0'/'1' characters into a list of byte values.

    The bit string is left-padded with zero bits up to a multiple of
    eight, then each 8-bit group (MSB first) becomes one integer.
    """
    # Left-pad with zeros so the total bit count is a multiple of 8.
    bit_values = [0] * (-len(bstring) % 8) + [int(c) for c in bstring]
    out = []
    for start in range(0, len(bit_values), 8):
        octet = bit_values[start:start + 8]
        value = 0
        for bit in octet:
            # MSB-first accumulation; only an exact 1 sets the bit.
            value = (value << 1) | (1 if bit == 1 else 0)
        out.append(value)
    return out
def getbitu(buff, pos, l):
    """Extract *l* bits as an unsigned int from the byte buffer *buff*.

    Bits are taken MSB first, starting at absolute bit offset *pos*.
    """
    bits = 0
    # Bug fixes vs. the original: iterate from `pos` (not 0), so leading
    # bits before the requested offset are not folded into the result,
    # and use floor division so the byte index stays an int on Python 3.
    for i in range(pos, pos + l):
        bits = (bits << 1) | ((buff[i // 8] >> (7 - i % 8)) & 1)
    return bits
class SbasMsg:
    """Base class for a 250-bit SBAS message frame.

    Frame layout (bit offsets): preamble [0:8], message type [8:14],
    payload [14:226], CRC-24 [226:250].
    """

    def __init__(self, bitstring):
        self.preamble_bitstring = bitstring[:8]
        self.type_bitstring = bitstring[8: 8 + 6]
        self.msg_bitstring = bitstring[8 + 6: 226]
        self.crc_bitstring = bitstring[226: 226 + 24]
        # Message type as an integer (decoded from the 6 type bits).
        self.type = int(bitstring[8: 8 + 6], 2)
        # Filled in (as hex strings) by check_crc() on success.
        self.crc = 0
        self.computed_crc = 0

    def check_crc(self):
        """Validate the frame CRC-24.

        Raises ValueError if the CRC field is incomplete or does not
        match the CRC computed over preamble + type + payload; returns
        True on success.
        """
        crc_bytearray = bits2byte(self.crc_bitstring)
        msg_bytearray = bits2byte(self.preamble_bitstring + self.type_bitstring + self.msg_bitstring)
        computed_crc = crc24(msg_bytearray, len(msg_bytearray))
        if len(crc_bytearray) < 3:
            raise ValueError("Msg incomplete!")
        msg_crc = getbitu(crc_bytearray, 0, 24)
        if computed_crc != msg_crc:
            raise ValueError("Msg CRC is bad!")
        self.crc = hex(msg_crc)
        self.computed_crc = hex(computed_crc)
        return True

    def bitstring(self):
        """Reconstruct the full frame bit string.

        Bug fix: the 6 type bits were previously omitted, yielding a
        244-bit frame instead of the original 250 bits.
        """
        return (self.preamble_bitstring + self.type_bitstring
                + self.msg_bitstring + self.crc_bitstring)
class SbasMsgGeo(SbasMsg):
    """GEO navigation message: position, velocity, acceleration and
    clock terms for the broadcasting GEO satellite."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'GEO Navigation Message'
        # Decoded field values, keyed by field name; filled by process().
        self.data = {}
        # (name, bit width, scale factor, two's complement?)
        self.fields = [
            ('IOD', 8, 1, False),
            ('TOA', 13, 16, False),
            ('URA', 4, 1, False),
            ('X', 30, 0.08, True),
            ('Y', 30, 0.08, True),
            ('Z', 25, 0.4, True),
            ('X - Rate of Change', 17, 0.000625, True),
            ('Y - Rate of Change', 17, 0.000625, True),
            ('Z - Rate of Change', 18, 0.004, True),
            ('X - Acceleration', 10, 0.0000125, True),
            ('Y - Acceleration', 10, 0.0000125, True),
            ('Z - Acceleration', 10, 0.0000625, True),
            ('Time Offset', 12, pow(2, -31), True),
            ('Time Drift', 8, pow(2, -40), True)
        ]

    def process(self):
        """Validate the CRC, then decode every field from the payload."""
        SbasMsg.check_crc(self)
        offset = 0
        for name, width, scale, signed in self.fields:
            raw = int(self.msg_bitstring[offset: offset + width], 2)
            # Sign-extend two's-complement fields when the MSB is set.
            if signed and raw & (1 << (width - 1)):
                raw -= 1 << width
            self.data[name] = raw * scale
            offset += width

    def __str__(self):
        return (self.full_name + " has content: \n"
                + pprint.pformat(self.data, indent=2))

    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class SbasMsgNull(SbasMsg):
    """Null (filler) message; carries no payload to decode."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Null Message'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Nothing to decode for this type; just validate the CRC.
        SbasMsg.check_crc(self)
class SbasMsgT(SbasMsg):
    """Test message; payload decoding is not implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Test Message'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgSM(SbasMsg):
    """Service message; payload decoding is not implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Service Message'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgMC(SbasMsg):
    """Mixed fast correction / GPS long-term error corrections message;
    payload decoding is not implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Mixed Fast Correction/GPS Long Term Satellite Error Corrections'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgII(SbasMsg):
    """Integrity information message; payload decoding is not implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Integrity Information'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgLTC(SbasMsg):
    """Long-term satellite error corrections message; payload decoding
    is not implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Long Term Satellite Error Corrections'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgFC(SbasMsg):
    """Fast correction message; payload decoding is not implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Fast Correction'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgFCDF(SbasMsg):
    """Fast correction degradation factor message; payload decoding is
    not implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Fast Correction Degradation Factor'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgCECM(SbasMsg):
    """Clock-ephemeris covariance matrix message; payload decoding is
    not implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Clock-Ephemeris Covariance Matrix'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgIDC(SbasMsg):
    """Ionospheric delay corrections message; payload decoding is not
    implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Ionospheric Delay Corrections'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgIGPM(SbasMsg):
    """Ionospheric grid point mask message; payload decoding is not
    implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Ionospheric Grid Point Mask'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgTime(SbasMsg):
    """SBAS network time / UTC offset parameters message."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'SBAS Network Time/UTC Offset Parameters'
        # Decoded field values, keyed by field name; filled by process().
        self.data = {}
        # (name, bit width, scale factor, two's complement?)
        self.fields = [
            ('A_1wnt', 24, pow(2, -50), True),
            ('A_0wnt', 32, pow(2, -3), False),
            ('t_0t', 8, pow(2, 12), False),
            ('WN_t', 8, 1, False),
            ('DT_ls', 8, 1, True),
            ('WN_lsf', 8, 1, False),
            ('DN', 8, 1, True),
            ('DT_lsf', 8, 1, True),
            ('UTC Standard Identifier ', 3, 1, False),
            ('GPS Time-of-Week – TOW', 20, 1, False),
            ('GPS Week Number ', 10, 1, False)
        ]

    def process(self):
        """Validate the CRC, then decode every field from the payload."""
        SbasMsg.check_crc(self)
        offset = 0
        for name, width, scale, signed in self.fields:
            raw = int(self.msg_bitstring[offset: offset + width], 2)
            # Sign-extend two's-complement fields when the MSB is set.
            if signed and raw & (1 << (width - 1)):
                raw -= 1 << width
            self.data[name] = raw * scale
            offset += width

    def __str__(self):
        return (self.full_name + " has content: \n"
                + pprint.pformat(self.data, indent=2))

    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class SbasMsgIT(SbasMsg):
    """Internal test message; payload decoding is not implemented."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Internal Test'

    def __str__(self):
        return self.full_name

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def process(self):
        # Only the CRC is validated for this type.
        SbasMsg.check_crc(self)
class SbasMsgM(SbasMsg):
    """PRN mask message: records which of PRN 1-210 are set in the mask."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'PRN Mask Assignments'
        # List of 'PRN <n>' strings for every set mask bit.
        self.data = []

    def process(self):
        """Validate the CRC, then collect every set PRN mask bit."""
        SbasMsg.check_crc(self)
        # Bit i of the payload (0-based) corresponds to PRN i + 1.
        for index, flag in enumerate(self.msg_bitstring[:210]):
            if flag == '1':
                self.data.append('PRN ' + str(index + 1))

    def __str__(self):
        return (self.full_name + " has content: \n"
                + pprint.pformat(self.data, indent=2))

    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class SbasMsgAlm(SbasMsg):
    """GEO almanac message: three almanac records plus one shared
    time-of-day field."""
    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'GEO Almanacs'
        # Decoded almanac records keyed by 'PRN <n>'; filled by process().
        self.data = {}
        # (name, bit width, scale factor, two's complement?) per field.
        # The final entry ('Time-of-Day') appears once after the three
        # almanac records rather than inside each record.
        self.fields = [
            ('Data ID', 2, 1, False),
            ('PRN', 8, 1, False),
            ('Health', 8, 1, False),
            ('X', 15, 2600, True),
            ('Y', 15, 2600, True),
            ('Z', 9, 26000, True),
            ('X - Rate of Change', 3, 10, True),
            ('Y - Rate of Change', 3, 10, True),
            ('Z - Rate of Change', 4, 40.96, True),
            ('Time-of-Day', 11, 64, False)
        ]
    def process(self):
        """Validate the CRC, then decode three back-to-back almanac
        records followed by the shared time-of-day field."""
        SbasMsg.check_crc(self)
        offset = 0
        for idx in range(1, 4):
            tmp = {}
            # All fields except the trailing 'Time-of-Day' belong to
            # each per-satellite record.
            for ix, field in enumerate(self.fields[:-1]):
                val = int(self.msg_bitstring[offset: offset + field[1]], 2)
                if field[3]: # 2's complement
                    if val & (1 << (field[1] - 1)): # MSB set
                        val = -((1 << field[1]) - val)
                tmp[field[0]] = val * field[2] # Scale factor
                offset += field[1]
            # NOTE(review): records sharing a PRN overwrite each other in
            # self.data -- confirm that is acceptable upstream.
            self.data['PRN ' + str(tmp['PRN'])] = tmp
        # Single time-of-day field after the three records.
        tod = self.fields[len(self.fields) - 1]
        offset_tod = tod[1]
        tod_val = self.msg_bitstring[offset: offset + offset_tod]
        val = int(tod_val, 2)
        # NOTE(review): the 64-second scale factor declared for
        # 'Time-of-Day' is not applied here -- confirm whether this
        # should be val * 64.
        self.data[tod[0]] = val
    def __str__(self):
        s = self.full_name + " has content: \n"
        s += pprint.pformat(self.data, indent=2)
        return s
    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class SbasMsgDegParam(SbasMsg):
    """Degradation parameters message."""

    def __init__(self, bitstring):
        SbasMsg.__init__(self, bitstring)
        self.full_name = 'Degradation Parameters'
        # Decoded field values, keyed by field name; filled by process().
        self.data = {}
        # (name, bit width, scale factor, two's complement?)
        self.fields = [
            ('B_rcc', 10, 0.002, False),
            ('C_ltc_lsb', 10, 0.002, False),
            ('C_ltc_v1', 10, 0.00005, False),
            ('I_ltc_v1', 9, 1, False),
            ('C_ltc_v0', 10, 0.002, False),
            ('I_ltc_v0', 9, 1, False),
            ('C_geo_lsb', 10, 0.0005, False),
            ('C_geo_v', 10, 0.00005, False),
            ('I_geo', 9, 1, False),
            ('C_er', 6, 0.5, False),
            ('C_iono_step', 10, 0.001, False),
            ('I_iono', 9, 1, False),
            ('C_iono_ramp', 10, 0.000005, False),
            ('RSS_UDRE', 1, 1, False),
            ('RSS_iono', 1, 1, False),
            ('C_covariance', 7, 0.1, False)
        ]

    def process(self):
        """Validate the CRC, then decode every field from the payload."""
        SbasMsg.check_crc(self)
        offset = 0
        for name, width, scale, signed in self.fields:
            raw = int(self.msg_bitstring[offset: offset + width], 2)
            # Sign-extend two's-complement fields when the MSB is set
            # (all fields here are unsigned, but the logic is kept for
            # symmetry with the other message classes).
            if signed and raw & (1 << (width - 1)):
                raw -= 1 << width
            self.data[name] = raw * scale
            offset += width

    def __str__(self):
        return (self.full_name + " has content: \n"
                + pprint.pformat(self.data, indent=2))

    def __eq__(self, other):
        return self.__dict__ == other.__dict__
| StefanD986/peregrine | peregrine/sbas_msgs.py | Python | gpl-3.0 | 11,705 |
from django.conf.urls import url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Route table for the test project.  No URLs are wired up; the entries
# below are Django's standard commented-out boilerplate examples.
urlpatterns = [
    # Examples:
    # url(r'^$', 'testproject.views.home', name='home'),
    # url(r'^testproject/', include('testproject.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
]
| CloudNcodeInc/django-phonenumber-field | testproject/testproject/urls.py | Python | mit | 541 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-21 13:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the users app.

    Drops ``user.resume``, adds a required ``phone_number`` column, and
    rewrites the choice lists on ``concentration`` and ``gender``.
    """
    dependencies = [
        ('users', '0008_auto_20170118_0955'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='user',
            name='resume',
        ),
        migrations.AddField(
            model_name='user',
            name='phone_number',
            # NOTE(review): default=0 (an int) on a CharField is odd; it
            # is only used to backfill existing rows, and
            # preserve_default=False removes it from the model afterwards.
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='concentration',
            field=models.CharField(choices=[('CS', 'Computer Science'), ('D', 'Design'), ('B', 'Business'), ('EE', 'Electrical Engineering'), ('M', 'Math'), ('MIS', 'Management Information Systems'), ('O', 'Other')], default='O', max_length=3),
        ),
        migrations.AlterField(
            model_name='user',
            name='gender',
            field=models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('N', 'Non - binary'), ('P', 'Prefer not to answer')], default='P', max_length=1),
        ),
    ]
| TexasLAN/texaslan.org | texaslan/users/migrations/0009_auto_20170221_0755.py | Python | mit | 1,209 |
"""Provide functionality to record stream."""
import logging
import os
import threading
from typing import List
import av
from homeassistant.core import callback
from .const import OUTPUT_CONTAINER_FORMAT
from .core import Segment, StreamOutput
_LOGGER = logging.getLogger(__name__)
@callback
def async_setup_recorder(hass):
    """Only here so Provider Registry works."""
    # Intentionally a no-op; the recorder needs no per-instance setup.
def recorder_save_worker(file_out: str, segments: List[Segment], container_format):
    """Handle saving stream.

    Remuxes the given stream segments into a single output file at
    ``file_out`` without re-encoding.  Runs in a worker thread (see
    RecorderOutput.save).  Packet timestamps are rebased so the output
    starts at pts 0.
    """
    if not os.path.exists(os.path.dirname(file_out)):
        os.makedirs(os.path.dirname(file_out), exist_ok=True)
    # Per-stream-type pts of the very first packet; subtracted from every
    # packet so the recording starts at time zero.
    first_pts = {"video": None, "audio": None}
    output = av.open(file_out, "w", format=container_format)
    output_v = None
    output_a = None
    # Get first_pts values from first segment
    if len(segments) > 0:
        segment = segments[0]
        source = av.open(segment.segment, "r", format=container_format)
        source_v = source.streams.video[0]
        first_pts["video"] = source_v.start_time
        if len(source.streams.audio) > 0:
            source_a = source.streams.audio[0]
            # Convert the video start time into the audio stream's time base.
            first_pts["audio"] = int(
                source_v.start_time * source_v.time_base / source_a.time_base
            )
        source.close()
    # NOTE(review): if the first segment has no audio but a later one does,
    # first_pts["audio"] stays None and the subtraction below would raise —
    # presumably all segments of one stream share the same layout; confirm.
    for segment in segments:
        # Open segment
        source = av.open(segment.segment, "r", format=container_format)
        source_v = source.streams.video[0]
        # Add output streams
        if not output_v:
            output_v = output.add_stream(template=source_v)
            context = output_v.codec_context
            context.flags |= "GLOBAL_HEADER"
        if not output_a and len(source.streams.audio) > 0:
            source_a = source.streams.audio[0]
            output_a = output.add_stream(template=source_a)
        # Remux video
        for packet in source.demux():
            if packet.dts is None:
                # Skip packets without a decode timestamp (cannot be muxed).
                continue
            packet.pts -= first_pts[packet.stream.type]
            packet.dts -= first_pts[packet.stream.type]
            packet.stream = output_v if packet.stream.type == "video" else output_a
            output.mux(packet)
        source.close()
    output.close()
class RecorderOutput(StreamOutput):
    """Represents HLS Output formats."""

    def __init__(self, hass) -> None:
        """Initialize recorder output."""
        super().__init__(hass)
        # Target path of the finished recording; set by the caller before save().
        self.video_path = None
        self._segments = []

    def _async_put(self, segment: Segment) -> None:
        """Store output."""
        self._segments.append(segment)

    def prepend(self, segments: List[Segment]) -> None:
        """Prepend segments to the existing list, skipping duplicates.

        Segments already held are identified by their sequence number.
        """
        # BUG FIX: the previous code tested ``s.sequence not in self._segments``,
        # comparing an int sequence number against Segment objects, so the
        # filter never matched and duplicate segments could be prepended.
        known_sequences = {s.sequence for s in self._segments}
        new_segments = [s for s in segments if s.sequence not in known_sequences]
        self._segments = new_segments + self._segments

    def save(self):
        """Write recording and clean up.

        Hands the collected segments to a worker thread so the (potentially
        slow) remux does not block the caller, then resets the segment list.
        """
        _LOGGER.debug("Starting recorder worker thread")
        thread = threading.Thread(
            name="recorder_save_worker",
            target=recorder_save_worker,
            args=(self.video_path, self._segments, OUTPUT_CONTAINER_FORMAT),
        )
        thread.start()
        self._segments = []
| turbokongen/home-assistant | homeassistant/components/stream/recorder.py | Python | apache-2.0 | 3,182 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import post_save
# Create your models here.
class Profile(models.Model):
    """Per-user profile with free-form bio text and an avatar image."""
    # One profile per Django auth user; deleted together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Free-form biography text; may be empty.
    profile = models.TextField(blank=True)
    # Avatar; falls back to the project-wide default image.
    picture = models.ImageField(default=settings.DEFAULT_PROFILE_IMAGE)
# Automatically attach an empty Profile to every newly created User.
def create_profile(sender, **kwargs):
    """post_save handler: create a Profile for a freshly created User."""
    saved_user = kwargs['instance']
    if kwargs['created']:
        Profile(user=saved_user).save()

# Register the handler so it fires after each User save.
post_save.connect(create_profile, sender=User)
| peromo93/instapix | accounts/models.py | Python | mit | 772 |
""" tools for handling fasta files """
from Bio import SeqIO
from contextlib import nested
import os
def count_seqio(in_file, predicate, kind):
    """ counts the records in a file that pass a predicate

    `in_file` is parsed as format `kind` (e.g. "fasta") and streamed, so
    memory use stays constant regardless of file size.
    """
    with open(in_file) as in_handle:
        # sum over a generator instead of reduce(lambda ...) over a
        # materialized filter(): same count, clearer and fully lazy.
        return sum(1 for record in SeqIO.parse(in_handle, kind)
                   if predicate(record))
def passed_seqio(in_file, predicate, kind):
    """ returns the number of records that pass a predicate
    and the total number of records as a tuple (passed, total) """
    with open(in_file) as in_handle:
        passed = total = 0
        # Single streaming pass: count every record, and separately those
        # accepted by the predicate.
        for rec in SeqIO.parse(in_handle, kind):
            total += 1
            if predicate(rec):
                passed += 1
        return (passed, total)
def apply_seqio(in_file, f, kind):
    """ apply a function f to every record in in_file """
    # List comprehension instead of map(); on Python 2 (this module's
    # target, given contextlib.nested) map() also returns a list, so the
    # result is identical.
    with open(in_file) as in_handle:
        return [f(record) for record in SeqIO.parse(in_handle, kind)]
def filter_fasta(in_file, predicate, out_file):
    """ filters a fasta by a predicate """
    # Thin wrapper over filter_seqio fixed to the fasta format;
    # filter_seqio already returns out_file.
    return filter_seqio(in_file, predicate, out_file, kind="fasta")
def filter_seqio(in_file, predicate, out_file, kind="fasta"):
    """ writes the records of in_file that pass predicate to out_file

    Skips the work entirely if out_file already exists.  Returns out_file.
    """
    # skip if the output file already exists
    if os.path.exists(out_file):
        return out_file
    # Single with-statement with two context managers replaces the
    # deprecated contextlib.nested (valid since Python 2.7).
    # "rU" keeps the original universal-newline behavior on Python 2.
    with open(in_file, "rU") as in_handle, open(out_file, "w") as out_handle:
        # Explicit loop instead of the previous map()-for-side-effects.
        for record in SeqIO.parse(in_handle, kind):
            if predicate(record):
                SeqIO.write(record, out_handle, kind)
    return out_file
| roryk/bipy | bipy/toolbox/fasta.py | Python | mit | 1,632 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an fake image service"""
import copy
import datetime
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger('nova.image.fake')
FLAGS = flags.FLAGS
class _FakeImageService(object):
    """Mock (fake) image service for unit testing.

    Keeps image metadata in ``self.images`` (id -> dict) and raw image
    bytes in ``self._imagedata`` (id -> str), pre-seeded with seven
    well-known test images.
    """
    def __init__(self):
        self.images = {}
        # NOTE(justinsb): The OpenStack API can't upload an image?
        # So, make sure we've got one..
        timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
        # NOTE(bcwaldon): was image '123456'
        image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': False,
                 'container_format': 'raw',
                 'disk_format': 'raw',
                 'properties': {'kernel_id': FLAGS.null_kernel,
                                'ramdisk_id': FLAGS.null_kernel,
                                'architecture': 'x86_64'}}
        # NOTE(bcwaldon): was image 'fake'
        image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': True,
                 'container_format': 'ami',
                 'disk_format': 'ami',
                 'properties': {'kernel_id': FLAGS.null_kernel,
                                'ramdisk_id': FLAGS.null_kernel}}
        # NOTE(bcwaldon): was image '2'
        image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': True,
                 'container_format': None,
                 'disk_format': None,
                 'properties': {'kernel_id': FLAGS.null_kernel,
                                'ramdisk_id': FLAGS.null_kernel}}
        # NOTE(bcwaldon): was image '1'
        image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': True,
                 'container_format': 'ami',
                 'disk_format': 'ami',
                 'properties': {'kernel_id': FLAGS.null_kernel,
                                'ramdisk_id': FLAGS.null_kernel}}
        # NOTE(bcwaldon): was image '3'
        image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': True,
                 'container_format': 'ami',
                 'disk_format': 'ami',
                 'properties': {'kernel_id':
                                    '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                                'ramdisk_id': None}}
        # NOTE(sirp): was image '6'
        image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
                 'name': 'fakeimage6',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': False,
                 'container_format': 'ova',
                 'disk_format': 'vhd',
                 'properties': {'kernel_id': FLAGS.null_kernel,
                                'ramdisk_id': FLAGS.null_kernel,
                                'architecture': 'x86_64',
                                'auto_disk_config': 'False'}}
        # NOTE(sirp): was image '7'
        image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
                 'name': 'fakeimage7',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': False,
                 'container_format': 'ova',
                 'disk_format': 'vhd',
                 'properties': {'kernel_id': FLAGS.null_kernel,
                                'ramdisk_id': FLAGS.null_kernel,
                                'architecture': 'x86_64',
                                'auto_disk_config': 'True'}}
        self.create(None, image1)
        self.create(None, image2)
        self.create(None, image3)
        self.create(None, image4)
        self.create(None, image5)
        self.create(None, image6)
        self.create(None, image7)
        self._imagedata = {}
        super(_FakeImageService, self).__init__()
    #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
    def index(self, context, **kwargs):
        """Returns list of images.

        Each entry carries only the 'id' and 'name' keys.
        """
        retval = []
        for img in self.images.values():
            retval += [dict([(k, v) for k, v in img.iteritems()
                             if k in ['id', 'name']])]
        return retval
    #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
    def detail(self, context, **kwargs):
        """Return list of detailed image information."""
        # Deep copy so callers cannot mutate the stored metadata.
        return copy.deepcopy(self.images.values())
    def get(self, context, image_id, data):
        # Write the stored raw image bytes into the file-like `data`
        # and return the image metadata.
        metadata = self.show(context, image_id)
        data.write(self._imagedata.get(image_id, ''))
        return metadata
    def show(self, context, image_id):
        """Get data about specified image.
        Returns a dict containing image data for the given opaque image id.
        """
        image = self.images.get(str(image_id))
        if image:
            return copy.deepcopy(image)
        LOG.warn('Unable to find image id %s.  Have images: %s',
                 image_id, self.images)
        raise exception.ImageNotFound(image_id=image_id)
    def show_by_name(self, context, name):
        """Returns a dict containing image data for the given name."""
        # Linear scan; returns the first image whose name matches.
        images = copy.deepcopy(self.images.values())
        for image in images:
            if name == image.get('name'):
                return image
        raise exception.ImageNotFound(image_id=name)
    def create(self, context, metadata, data=None):
        """Store the image data and return the new image id.
        :raises: Duplicate if the image already exist.
        """
        # Generate an id when the metadata does not supply one.
        image_id = str(metadata.get('id', utils.gen_uuid()))
        metadata['id'] = image_id
        if image_id in self.images:
            raise exception.Duplicate()
        self.images[image_id] = copy.deepcopy(metadata)
        if data:
            self._imagedata[image_id] = data.read()
        return self.images[image_id]
    def update(self, context, image_id, metadata, data=None):
        """Replace the contents of the given image with the new data.
        :raises: ImageNotFound if the image does not exist.
        """
        # NOTE(review): `data` is accepted but ignored here, unlike create().
        if not self.images.get(image_id):
            raise exception.ImageNotFound(image_id=image_id)
        self.images[image_id] = copy.deepcopy(metadata)
    def delete(self, context, image_id):
        """Delete the given image.
        :raises: ImageNotFound if the image does not exist.
        """
        removed = self.images.pop(image_id, None)
        if not removed:
            raise exception.ImageNotFound(image_id=image_id)
    def delete_all(self):
        """Clears out all images."""
        self.images.clear()
# Module-level singleton so every consumer shares one image store.
_fakeImageService = _FakeImageService()
def FakeImageService():
    # Return the shared fake image service instance.
    return _fakeImageService
def FakeImageService_reset():
    # Replace the singleton, discarding any images added or removed by tests.
    global _fakeImageService
    _fakeImageService = _FakeImageService()
| rcbops/nova-buildpackage | nova/image/fake.py | Python | apache-2.0 | 9,011 |
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
import sys
sys.path.append( '.' )
from sfepy.base.base import *
def define():
    """Define the problem to solve.

    Returns locals() so that sfepy's declarative problem-description
    machinery picks up every *_N dict defined below (fields, materials,
    regions, boundary conditions, equations, solvers).
    """
    filename_mesh = 'el3.mesh'
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    field_1 = {
        'name' : 'displacement',
        'dtype' : nm.float64,
        'shape' : (3,),
        'region' : 'Omega',
        'approx_order' : 1,
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    material_1 = {
        'name' : 'solid',
        'values' : {
            'K'  : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            'lam' : 5.769, # Lame coefficients for LE term
            'mu_le' : 3.846,
        }
    }
    # Load material: the actual traction function is injected later by
    # solve_branch() via load.set_function().
    material_2 = {
        'name' : 'load',
        'function' : 'empty'
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : ('all', {}),
        'Bottom' : ('nodes in (z < 0.1)', {}),
        'Top' : ('nodes in (z > 2.9)', {}),
    }
    # Clamp the bottom face; allow only vertical motion of the top face.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    ##
    # Balance of forces.
    integral_1 = {
        'name' : 'i1',
        'kind' : 'v',
        'quadrature' : 'gauss_o1_d3',
    }
    integral_3 = {
        'name' : 'isurf',
        'kind' : 's4',
        'quadrature' : 'gauss_o2_d2',
    }
    # One equation set per material model; solve_branch() activates them
    # one at a time.
    equations = {
        'linear' : """dw_lin_elastic_iso.i1.Omega( solid.lam, solid.mu_le, v, u )
                    = dw_surface_ltr.isurf.Top( load.val, v )""",
        'neoHookean' : """dw_tl_he_neohook.i1.Omega( solid.mu_nh, v, u )
                        + dw_tl_bulk_penalty.i1.Omega( solid.K, v, u )
                        = dw_surface_ltr.isurf.Top( load.val, v )""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i1.Omega( solid.mu_mr, v, u )
                           + dw_tl_he_mooney_rivlin.i1.Omega( solid.kappa, v, u )
                           + dw_tl_bulk_penalty.i1.Omega( solid.K, v, u )
                           = dw_surface_ltr.isurf.Top( load.val, v )""",
    }
    ##
    # Solvers etc.
    solver_0 = {
        'name' : 'ls',
        'kind' : 'ls.scipy_direct',
    }
    solver_1 = {
        'name' : 'newton',
        'kind' : 'nls.newton',
        'i_max'      : 5,
        'eps_a'      : 1e-10,
        'eps_r'      : 1.0,
        'macheps'   : 1e-16,
        'lin_red'    : 1e-2, # Linear system error < (eps_a * lin_red).
        'ls_red'     : 0.1,
        'ls_red_warp' : 0.001,
        'ls_on'      : 1.1,
        'ls_min'     : 1e-5,
        'check'     : 0,
        'delta'     : 1e-6,
        'is_plot'    : False,
        'problem'   : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
    }
    solver_2 = {
        'name' : 'ts',
        'kind' : 'ts.simple',
        't0'    : 0,
        't1'    : 1,
        'dt'    : None,
        'n_step' : 101, # has precedence over dt!
    }
    ##
    # FE assembling parameters.
    fe = {
        'chunk_size' : 1000,
        'cache_override' : False,
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, region=None, ig=None):
    """Tension traction growing linearly with the time step.

    In 'qp' mode returns ``{'val': array}`` of shape (n_coor, 1, 1);
    otherwise returns None.
    """
    if mode != 'qp':
        return None
    magnitude = 0.1 * ts.step
    return {'val' : nm.tile(magnitude, (coor.shape[0], 1, 1))}
def linear_compression(ts, coor, mode=None, region=None, ig=None):
    """Compression traction growing linearly with the time step.

    In 'qp' mode returns ``{'val': array}`` of shape (n_coor, 1, 1);
    otherwise returns None.
    """
    if mode != 'qp':
        return None
    magnitude = -0.1 * ts.step
    return {'val' : nm.tile(magnitude, (coor.shape[0], 1, 1))}
def store_top_u( displacements ):
    """Build a step hook appending the mean vertical displacement of the
    'Top' region to `displacements` after every loading step."""
    def _store( problem, ts, state ):
        region = problem.domain.regions['Top']
        u_top = problem.get_variables()['u'].get_state_in_region( region )
        displacements.append( nm.mean( u_top[:, -1] ) )
    return _store
def solve_branch( problem, options, branch_function ):
    """Solve every equation set for one loading branch.

    `branch_function` (linear_tension or linear_compression) is installed
    as the 'load' material function, then each material model's equation
    is solved in turn.  Returns {equation name : array of mean top
    displacements per time step}.
    """
    from sfepy.solvers.generic import solve_evolutionary_op
    load = problem.materials['load']
    load.set_function( branch_function )
    displacements = {}
    for key, eq in problem.conf.equations.iteritems():
        # Activate one equation set at a time.
        problem.set_equations( {key : eq} )
        out = []
        # The step hook fills `out` with one mean displacement per step.
        aux = solve_evolutionary_op( problem,
                                     save_results = False,
                                     step_hook = store_top_u( out ) )
        displacements[key] = nm.array( out, dtype = nm.float64 )
    return displacements
def main():
    """Run both loading branches and plot load vs. top displacement.

    Python 2 script (print statements, iteritems).  Falls back to printing
    raw data when matplotlib is unavailable.
    """
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.fem import ProblemDefinition
    from sfepy.base.plotutils import plt
    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file( __file__, required, other )
    # Create problem instance, but do not set equations.
    problem = ProblemDefinition.from_conf( conf,
                                           init_equations = False )
    options = Struct( output_filename_trunk = None )
    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch( problem, options, linear_tension )
    u_c = solve_branch( problem, options, linear_compression )
    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from( 0 )],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from( 0 )],
                      dtype=nm.float64).squeeze()
    # Join the branches: compression (reversed) first, then tension, so the
    # curve runs monotonically from most negative to most positive load.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]
    if plt is None:
        print 'matplotlib cannot be imported, printing raw data!'
        print displacements
        print load
    else:
        legend = []
        for key, val in displacements.iteritems():
            plt.plot( load, val )
            legend.append( key )
        plt.legend( legend, loc = 2 )
        plt.xlabel( 'tension [kPa]' )
        plt.ylabel( 'displacement [mm]' )
        plt.grid( True )
        plt.gcf().savefig( 'pressure_displacement.png' )
        plt.show()
if __name__ == '__main__':
    main()
| olivierverdier/sfepy | examples/standalone/elastic_materials/compare_elastic_materials.py | Python | bsd-3-clause | 7,019 |
import json
from django.http import HttpResponse
from django.shortcuts import render
from torrents.logic import active_torrents_info
def active(request):
    """Render the active-torrents page, or return JSON for AJAX requests."""
    if not request.is_ajax():
        return render(request, "torrents/active.html")
    payload = json.dumps({"torrents": active_torrents_info()})
    return HttpResponse(payload, content_type="application/json")
| onepesu/django_transmission | torrents/views.py | Python | mit | 371 |
__author__ = 'civa'
import os
import sys
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()


class User(Base):
    """ORM mapping for the ``app_user`` table."""
    __tablename__ = 'app_user'

    user_id = Column(Integer, primary_key=True)
    username = Column(String(50), nullable=False)
    password = Column(String(50), nullable=False)
    email_address = Column(String(100), nullable=False)
    access_token = Column(String(250), nullable=False)
    status = Column(Integer, nullable=False)
    # BUG FIX: ``Column(bool, ...)`` passed the Python builtin instead of a
    # SQLAlchemy type object and raises ArgumentError when the class is
    # defined; the correct column type is sqlalchemy.Boolean.
    isValid = Column(Boolean, nullable=False)
    logged_in = Column(Boolean, nullable=False)
# Create an engine that stores data in the local directory's
# sqlalchemy_example.db file.
engine = create_engine('sqlite:///sqlalchemy_example.db')
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.  Runs at import time as a module side effect.
Base.metadata.create_all(engine)
| Civa/Zenith | src/Backend/Distributed/hubs/auth/model/user.py | Python | gpl-3.0 | 985 |
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from MOOCworkbench.production_settings import REDIS_HOST
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MOOCworkbench.settings')
# Redis broker URL built from the host configured in production settings.
broker_host = 'redis://{0}:6379/0'.format(REDIS_HOST)
app = Celery('MOOCworkbench', broker=broker_host)
# Using a string here means the worker don't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
    """Echo the task's request context; useful for verifying worker wiring."""
    message = 'Request: {0!r}'.format(self.request)
    print(message)
| MOOCworkbench/MOOCworkbench | MOOCworkbench/celery.py | Python | mit | 860 |
import warnings
import sys
import numpy as np
from numpy.testing import *
import unittest
class _GenericTest(object):
def _test_equal(self, a, b):
self._assert_func(a, b)
def _test_not_equal(self, a, b):
try:
self._assert_func(a, b)
passed = True
except AssertionError:
pass
else:
raise AssertionError("a and b are found equal but are not")
def test_array_rank1_eq(self):
"""Test two equal array of rank 1 are found equal."""
a = np.array([1, 2])
b = np.array([1, 2])
self._test_equal(a, b)
def test_array_rank1_noteq(self):
"""Test two different array of rank 1 are found not equal."""
a = np.array([1, 2])
b = np.array([2, 2])
self._test_not_equal(a, b)
def test_array_rank2_eq(self):
"""Test two equal array of rank 2 are found equal."""
a = np.array([[1, 2], [3, 4]])
b = np.array([[1, 2], [3, 4]])
self._test_equal(a, b)
def test_array_diffshape(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array([1, 2])
b = np.array([[1, 2], [1, 2]])
self._test_not_equal(a, b)
def test_objarray(self):
"""Test object arrays."""
a = np.array([1, 1], dtype=np.object)
self._test_equal(a, 1)
class TestArrayEqual(_GenericTest, unittest.TestCase):
    """Tests for assert_array_equal; inherits the generic checks too."""
    def setUp(self):
        self._assert_func = assert_array_equal
    def test_generic_rank1(self):
        """Test rank 1 array for all dtypes."""
        def foo(t):
            # Equal filled arrays must compare equal; a zero-filled copy
            # must compare not equal.
            a = np.empty(2, t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)
        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)
        # Test strings
        for t in ['S1', 'U1']:
            foo(t)
    def test_generic_rank3(self):
        """Test rank 3 array for all dtypes."""
        def foo(t):
            a = np.empty((4, 2, 3), t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)
        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)
        # Test strings
        for t in ['S1', 'U1']:
            foo(t)
    def test_nan_array(self):
        """Test arrays with nan values in them."""
        # assert_array_equal treats nan positions as matching.
        a = np.array([1, 2, np.nan])
        b = np.array([1, 2, np.nan])
        self._test_equal(a, b)
        c = np.array([1, 2, 3])
        self._test_not_equal(c, b)
    def test_string_arrays(self):
        """Test two arrays with different shapes are found not equal."""
        a = np.array(['floupi', 'floupa'])
        b = np.array(['floupi', 'floupa'])
        self._test_equal(a, b)
        c = np.array(['floupipi', 'floupa'])
        self._test_not_equal(c, b)
    def test_recarrays(self):
        """Test record arrays."""
        # NOTE(review): np.float is a deprecated alias of the builtin float
        # (removed in numpy >= 1.24); kept here to match this file's
        # numpy-era conventions.
        a = np.empty(2, [('floupi', np.float), ('floupa', np.float)])
        a['floupi'] = [1, 2]
        a['floupa'] = [1, 2]
        b = a.copy()
        self._test_equal(a, b)
        c = np.empty(2, [('floupipi', np.float), ('floupa', np.float)])
        c['floupipi'] = a['floupi'].copy()
        c['floupa'] = a['floupa'].copy()
        self._test_not_equal(c, b)
class TestEqual(TestArrayEqual):
    """Tests for assert_equal; re-runs all TestArrayEqual cases with it."""
    def setUp(self):
        self._assert_func = assert_equal
    def test_nan_items(self):
        # Scalar nan equals scalar nan; container/scalar mixes differ.
        self._assert_func(np.nan, np.nan)
        self._assert_func([np.nan], [np.nan])
        self._test_not_equal(np.nan, [np.nan])
        self._test_not_equal(np.nan, 1)
    def test_inf_items(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func([np.inf], [np.inf])
        self._test_not_equal(np.inf, [np.inf])
    def test_non_numeric(self):
        self._assert_func('ab', 'ab')
        self._test_not_equal('ab', 'abb')
    def test_complex_item(self):
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
    def test_negative_zero(self):
        # assert_equal distinguishes +0.0 from -0.0.
        self._test_not_equal(np.PZERO, np.NZERO)
    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
    """Tests for assert_array_almost_equal (decimal-places comparison)."""
    def setUp(self):
        self._assert_func = assert_array_almost_equal
    def test_simple(self):
        # Values differing in the 4th decimal pass at decimal<=4, fail at 5.
        x = np.array([1234.2222])
        y = np.array([1234.2223])
        self._assert_func(x, y, decimal=3)
        self._assert_func(x, y, decimal=4)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(x, y, decimal=5))
    def test_nan(self):
        # nan only compares almost-equal to nan, never to finite or inf.
        anan = np.array([np.nan])
        aone = np.array([1])
        ainf = np.array([np.inf])
        self._assert_func(anan, anan)
        self.assertRaises(AssertionError,
                lambda : self._assert_func(anan, aone))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(anan, ainf))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(ainf, anan))
class TestAlmostEqual(_GenericTest, unittest.TestCase):
    """Tests for assert_almost_equal on scalars, complex values and arrays."""
    def setUp(self):
        self._assert_func = assert_almost_equal
    def test_nan_item(self):
        self._assert_func(np.nan, np.nan)
        self.assertRaises(AssertionError,
                lambda : self._assert_func(np.nan, 1))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(np.nan, np.inf))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(np.inf, np.nan))
    def test_inf_item(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func(-np.inf, -np.inf)
    def test_simple_item(self):
        self._test_not_equal(1, 2)
    def test_complex_item(self):
        # nan components must match positionally (real with real,
        # imaginary with imaginary).
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        z = np.array([complex(1, 2), complex(np.nan, 1)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
        self._test_not_equal(x, z)
class TestApproxEqual(unittest.TestCase):
    """Tests for assert_approx_equal (significant-digits comparison)."""
    def setUp(self):
        self._assert_func = assert_approx_equal
    def test_simple_arrays(self):
        # Values agreeing to 6 significant digits pass up to significant=6.
        x = np.array([1234.22])
        y = np.array([1234.23])
        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(x, y, significant=7))
    def test_simple_items(self):
        x = 1234.22
        y = 1234.23
        self._assert_func(x, y, significant=4)
        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        self.assertRaises(AssertionError,
                          lambda: self._assert_func(x, y, significant=7))
    def test_nan_array(self):
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        self.assertRaises(AssertionError,
                lambda : self._assert_func(anan, aone))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(anan, ainf))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(ainf, anan))
    def test_nan_items(self):
        # NOTE(review): this method is a byte-for-byte duplicate of
        # test_nan_array above (also uses 0-d arrays, not scalar items) —
        # looks like a copy-paste that was meant to mirror the *_items
        # pattern of the other test classes; confirm intent before changing.
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        self.assertRaises(AssertionError,
                lambda : self._assert_func(anan, aone))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(anan, ainf))
        self.assertRaises(AssertionError,
                lambda : self._assert_func(ainf, anan))
class TestRaises(unittest.TestCase):
    """Tests for the (nose-era) numpy.testing.raises decorator.

    NOTE(review): ``raises`` comes from ``from numpy.testing import *`` and
    was removed in modern numpy; the unused ``f =`` bindings below only
    exist to trigger the decorated call.
    """
    def setUp(self):
        class MyException(Exception):
            pass
        self.e = MyException
    def raises_exception(self, e):
        raise e
    def does_not_raise_exception(self):
        pass
    def test_correct_catch(self):
        # The decorator must swallow the declared exception.
        f = raises(self.e)(self.raises_exception)(self.e)
    def test_wrong_exception(self):
        # A different exception must propagate through the decorator.
        try:
            f = raises(self.e)(self.raises_exception)(RuntimeError)
        except RuntimeError:
            return
        else:
            raise AssertionError("should have caught RuntimeError")
    def test_catch_no_raise(self):
        # If nothing is raised, the decorator must fail with AssertionError.
        try:
            f = raises(self.e)(self.does_not_raise_exception)()
        except AssertionError:
            return
        else:
            raise AssertionError("should have raised an AssertionError")
class TestWarns(unittest.TestCase):
    """Tests for assert_warns, including warning-filter state preservation."""
    def test_warn(self):
        def f():
            warnings.warn("yo")
        # Snapshot the global warning filters around the call.
        before_filters = sys.modules['warnings'].filters[:]
        assert_warns(UserWarning, f)
        after_filters = sys.modules['warnings'].filters
        # Check that the warnings state is unchanged
        assert_equal(before_filters, after_filters,
                     "assert_warns does not preserver warnings state")
    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)
        failed = False
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError
                assert_warns(UserWarning, f)
                # Reached only if assert_warns failed to complain.
                failed = True
            except AssertionError:
                pass
        finally:
            # Restore the global filter list no matter what happened.
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
class TestAssertAllclose(unittest.TestCase):
    """Exercise assert_allclose on scalars and arrays with rtol/atol."""

    def test_simple(self):
        x = 1e-3
        y = 1e-9

        # Scalars: close under a loose atol, not under the defaults.
        assert_allclose(x, y, atol=1)
        with self.assertRaises(AssertionError):
            assert_allclose(x, y)

        a = np.array([x, y, x, y])
        b = np.array([x, y, x, x])

        # Arrays: same pattern with a mismatching last element.
        assert_allclose(a, b, atol=1)
        with self.assertRaises(AssertionError):
            assert_allclose(a, b)

        # A ~1e-8 relative difference passes the default rtol but not 1e-9.
        b[-1] = y * (1 + 1e-8)
        assert_allclose(a, b)
        with self.assertRaises(AssertionError):
            assert_allclose(a, b, rtol=1e-9)

        # rtol is measured relative to the second argument.
        assert_allclose(6, 10, rtol=0.5)
        with self.assertRaises(AssertionError):
            assert_allclose(10, 6, rtol=0.5)
class TestArrayAlmostEqualNulp(unittest.TestCase):
    """Tests for assert_array_almost_equal_nulp (units-in-last-place)."""
    def test_simple(self):
        # NOTE(review): randn() output is signed, so ``nulp=2*np.max(dev)``
        # can be negative or tiny when all draws are <= 0 — this test is
        # potentially flaky; confirm before relying on it.
        dev = np.random.randn(10)
        x = np.ones(10)
        y = x + dev * np.finfo(np.float64).eps
        assert_array_almost_equal_nulp(x, y, nulp=2 * np.max(dev))
    def test_simple2(self):
        # Doubling a value is far more than 1000 ulps away.
        x = np.random.randn(10)
        y = 2 * x
        def failure():
            return assert_array_almost_equal_nulp(x, y,
                                              nulp=1000)
        self.assertRaises(AssertionError, failure)
    def test_big_float32(self):
        # +1 on values of magnitude 1e10 is within 1000 ulps for float32...
        x = (1e10 * np.random.randn(10)).astype(np.float32)
        y = x + 1
        assert_array_almost_equal_nulp(x, y, nulp=1000)
    def test_big_float64(self):
        # ...but not for float64, whose ulp at 1e10 is much smaller.
        x = 1e10 * np.random.randn(10)
        y = x + 1
        def failure():
            assert_array_almost_equal_nulp(x, y, nulp=1000)
        self.assertRaises(AssertionError, failure)
    def test_complex(self):
        x = np.random.randn(10) + 1j * np.random.randn(10)
        y = x + 1
        def failure():
            assert_array_almost_equal_nulp(x, y, nulp=1000)
        self.assertRaises(AssertionError, failure)
    def test_complex2(self):
        # NOTE(review): np.complex is a deprecated alias of builtin complex
        # (removed in numpy >= 1.24); kept for this file's numpy era.
        x = np.random.randn(10)
        y = np.array(x, np.complex) + 1e-16 * np.random.randn(10)
        assert_array_almost_equal_nulp(x, y, nulp=1000)
class TestULP(unittest.TestCase):
    """Tests for assert_array_max_ulp."""
    def test_equal(self):
        # Identical arrays are 0 ulps apart.
        x = np.random.randn(10)
        assert_array_max_ulp(x, x, maxulp=0)
    def test_single(self):
        # Generate 1 + small deviation, check that adding eps gives a few UNL
        x = np.ones(10).astype(np.float32)
        x += 0.01 * np.random.randn(10).astype(np.float32)
        eps = np.finfo(np.float32).eps
        assert_array_max_ulp(x, x+eps, maxulp=20)
    def test_double(self):
        # Generate 1 + small deviation, check that adding eps gives a few UNL
        x = np.ones(10).astype(np.float64)
        x += 0.01 * np.random.randn(10).astype(np.float64)
        eps = np.finfo(np.float64).eps
        assert_array_max_ulp(x, x+eps, maxulp=200)
    def test_inf(self):
        # inf is adjacent (in ulp terms) to the largest finite value.
        for dt in [np.float32, np.float64]:
            inf = np.array([np.inf]).astype(dt)
            big = np.array([np.finfo(dt).max])
            assert_array_max_ulp(inf, big, maxulp=200)
    def test_nan(self):
        # Test that nan is 'far' from small, tiny, inf, max and min
        for dt in [np.float32, np.float64]:
            if dt == np.float32:
                maxulp = 1e6
            else:
                maxulp = 1e12
            inf = np.array([np.inf]).astype(dt)
            nan = np.array([np.nan]).astype(dt)
            big = np.array([np.finfo(dt).max])
            tiny = np.array([np.finfo(dt).tiny])
            zero = np.array([np.PZERO]).astype(dt)
            nzero = np.array([np.NZERO]).astype(dt)
            self.assertRaises(AssertionError,
                    lambda: assert_array_max_ulp(nan, inf,
                    maxulp=maxulp))
            self.assertRaises(AssertionError,
                    lambda: assert_array_max_ulp(nan, big,
                    maxulp=maxulp))
            self.assertRaises(AssertionError,
                    lambda: assert_array_max_ulp(nan, tiny,
                    maxulp=maxulp))
            self.assertRaises(AssertionError,
                    lambda: assert_array_max_ulp(nan, zero,
                    maxulp=maxulp))
            self.assertRaises(AssertionError,
                    lambda: assert_array_max_ulp(nan, nzero,
                    maxulp=maxulp))
if __name__ == '__main__':
    # run_module_suite comes from ``from numpy.testing import *`` —
    # the nose-based runner of this numpy era (removed in modern numpy).
    run_module_suite()
| qsnake/numpy | numpy/testing/tests/test_utils.py | Python | bsd-3-clause | 15,034 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Hook to make the commit command automatically close bugs when the commit
message contains `Fix #number` or `Implement #number`. Also updates the commit
message with the summary of the closed bug.
'''
import re, urllib, importlib, sys, json
from lxml import html
SENDMAIL = ('/home/kovid/work/env', 'pgp_mail')
LAUNCHPAD_BUG = 'https://bugs.launchpad.net/calibre/+bug/%s'
GITHUB_BUG = 'https://api.github.com/repos/kovidgoyal/calibre/issues/%s'
BUG_PAT = r'(Fix|Implement|Fixes|Fixed|Implemented)\s+#(\d+)'
class Bug:
    """Callable used as the ``re.sub`` replacement for BUG_PAT matches.

    For each bug number found in the commit message it fetches the bug's
    summary (Launchpad for numbers > 100000, GitHub otherwise), appends
    the summary to the matched text, and auto-closes Launchpad bugs by
    e-mail.

    NOTE: Python 2 code (``urllib.urlopen``, the ``unicode`` builtin).
    """
    def __init__(self):
        # Bug numbers already handled in this run; each bug is expanded
        # and closed at most once per commit message.
        self.seen = set()
    def __call__(self, match):
        """Return the replacement text for one BUG_PAT match."""
        action, bug = match.group(1), match.group(2)
        summary = ''
        if bug in self.seen:
            return match.group()
        self.seen.add(bug)
        if int(bug) > 100000: # Launchpad bug
            raw = urllib.urlopen(LAUNCHPAD_BUG % bug).read()
            try:
                h1 = html.fromstring(raw).xpath('//h1[@id="edit-title"]')[0]
                summary = html.tostring(h1, method='text', encoding=unicode).strip()
            except:
                # Title not parseable -- most likely a private bug page.
                summary = 'Private bug'
        else:
            summary = json.loads(urllib.urlopen(GITHUB_BUG % bug).read())['title']
        if summary:
            print ('Working on bug:', summary)
            if int(bug) > 100000:
                # Only Launchpad bugs are auto-closed (by e-mail).
                self.close_bug(bug, action)
            return match.group() + ' (%s)' % summary
        return match.group()
    def close_bug(self, bug, action):
        """Close a Launchpad bug by mailing its @bugs.launchpad.net
        address with a 'status fixreleased' command."""
        print ('Closing bug #%s'% bug)
        suffix = ('The fix will be in the next release. '
                'calibre is usually released every Friday.')
        action += 'ed'
        msg = '%s in branch %s. %s'%(action, 'master', suffix)
        # "Fixes" + "ed" produces "Fixesed"; normalize it.
        msg = msg.replace('Fixesed', 'Fixed')
        msg += '\n\n status fixreleased'
        sys.path.insert(0, SENDMAIL[0])
        sendmail = importlib.import_module(SENDMAIL[1])
        to = bug+'@bugs.launchpad.net'
        sendmail.sendmail(msg, to, 'Fixed in master')
def main():
    """Rewrite the commit-message file (last argv entry) in place,
    expanding/closing any bugs referenced via BUG_PAT."""
    path = sys.argv[-1]
    with open(path, 'r+b') as fobj:
        original = fobj.read().decode('utf-8')
        rewritten = re.sub(BUG_PAT, Bug(), original)
        if rewritten == original:
            return
        # Overwrite the file only when something actually changed.
        fobj.seek(0)
        fobj.truncate()
        fobj.write(rewritten.encode('utf-8'))
if __name__ == '__main__':
main()
| sss/calibre-at-bzr | setup/git_pre_commit_hook.py | Python | gpl-3.0 | 2,553 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'impact_functions_doc_base.ui'
#
# Created: Wed Mar 20 16:03:36 2013
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ImpactFunctionsDocBase(object):
    """pyuic4-generated UI scaffold for the InaSAFE impact-functions
    documentation dialog: a row of filter labels/combo boxes above a
    QWebView, plus a Close/Help/Reset button box.

    NOTE: generated from impact_functions_doc_base.ui -- hand edits will
    be lost on regeneration; change the .ui file instead.
    """
    def setupUi(self, ImpactFunctionsDocBase):
        """Build the widget tree and wire the default signals."""
        ImpactFunctionsDocBase.setObjectName(_fromUtf8("ImpactFunctionsDocBase"))
        ImpactFunctionsDocBase.resize(821, 733)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/inasafe/icon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        ImpactFunctionsDocBase.setWindowIcon(icon)
        self.gridLayout = QtGui.QGridLayout(ImpactFunctionsDocBase)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.myButtonBox = QtGui.QDialogButtonBox(ImpactFunctionsDocBase)
        self.myButtonBox.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.myButtonBox.setAutoFillBackground(False)
        self.myButtonBox.setOrientation(QtCore.Qt.Horizontal)
        self.myButtonBox.setStandardButtons(QtGui.QDialogButtonBox.Close|QtGui.QDialogButtonBox.Help|QtGui.QDialogButtonBox.Reset)
        self.myButtonBox.setCenterButtons(False)
        self.myButtonBox.setObjectName(_fromUtf8("myButtonBox"))
        self.gridLayout.addWidget(self.myButtonBox, 1, 1, 1, 1)
        # Inner grid: row 1 holds the header labels, row 3 the combo
        # boxes, row 4 the documentation web view.
        self.gridLayoutMain = QtGui.QGridLayout()
        self.gridLayoutMain.setHorizontalSpacing(0)
        self.gridLayoutMain.setObjectName(_fromUtf8("gridLayoutMain"))
        self.label_title = QtGui.QLabel(ImpactFunctionsDocBase)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_title.sizePolicy().hasHeightForWidth())
        self.label_title.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_title.setFont(font)
        self.label_title.setObjectName(_fromUtf8("label_title"))
        self.gridLayoutMain.addWidget(self.label_title, 1, 0, 1, 1)
        self.label_id = QtGui.QLabel(ImpactFunctionsDocBase)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_id.sizePolicy().hasHeightForWidth())
        self.label_id.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_id.setFont(font)
        self.label_id.setObjectName(_fromUtf8("label_id"))
        self.gridLayoutMain.addWidget(self.label_id, 1, 1, 1, 1)
        self.label_subcategory = QtGui.QLabel(ImpactFunctionsDocBase)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_subcategory.sizePolicy().hasHeightForWidth())
        self.label_subcategory.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_subcategory.setFont(font)
        self.label_subcategory.setObjectName(_fromUtf8("label_subcategory"))
        self.gridLayoutMain.addWidget(self.label_subcategory, 1, 3, 1, 1)
        self.label_category = QtGui.QLabel(ImpactFunctionsDocBase)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_category.sizePolicy().hasHeightForWidth())
        self.label_category.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_category.setFont(font)
        self.label_category.setObjectName(_fromUtf8("label_category"))
        self.gridLayoutMain.addWidget(self.label_category, 1, 2, 1, 1)
        self.label_layertype = QtGui.QLabel(ImpactFunctionsDocBase)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_layertype.sizePolicy().hasHeightForWidth())
        self.label_layertype.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_layertype.setFont(font)
        self.label_layertype.setObjectName(_fromUtf8("label_layertype"))
        self.gridLayoutMain.addWidget(self.label_layertype, 1, 4, 1, 1)
        self.comboBox_id = QtGui.QComboBox(ImpactFunctionsDocBase)
        self.comboBox_id.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
        self.comboBox_id.setObjectName(_fromUtf8("comboBox_id"))
        self.gridLayoutMain.addWidget(self.comboBox_id, 3, 1, 1, 1)
        self.comboBox_title = QtGui.QComboBox(ImpactFunctionsDocBase)
        self.comboBox_title.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
        self.comboBox_title.setMinimumContentsLength(0)
        self.comboBox_title.setObjectName(_fromUtf8("comboBox_title"))
        self.gridLayoutMain.addWidget(self.comboBox_title, 3, 0, 1, 1)
        self.comboBox_category = QtGui.QComboBox(ImpactFunctionsDocBase)
        self.comboBox_category.setObjectName(_fromUtf8("comboBox_category"))
        self.gridLayoutMain.addWidget(self.comboBox_category, 3, 2, 1, 1)
        self.label_unit = QtGui.QLabel(ImpactFunctionsDocBase)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_unit.sizePolicy().hasHeightForWidth())
        self.label_unit.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_unit.setFont(font)
        self.label_unit.setObjectName(_fromUtf8("label_unit"))
        self.gridLayoutMain.addWidget(self.label_unit, 1, 6, 1, 1)
        self.label_datatype = QtGui.QLabel(ImpactFunctionsDocBase)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_datatype.sizePolicy().hasHeightForWidth())
        self.label_datatype.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_datatype.setFont(font)
        self.label_datatype.setObjectName(_fromUtf8("label_datatype"))
        self.gridLayoutMain.addWidget(self.label_datatype, 1, 5, 1, 1)
        self.comboBox_subcategory = QtGui.QComboBox(ImpactFunctionsDocBase)
        self.comboBox_subcategory.setObjectName(_fromUtf8("comboBox_subcategory"))
        self.gridLayoutMain.addWidget(self.comboBox_subcategory, 3, 3, 1, 1)
        self.comboBox_layertype = QtGui.QComboBox(ImpactFunctionsDocBase)
        self.comboBox_layertype.setObjectName(_fromUtf8("comboBox_layertype"))
        self.gridLayoutMain.addWidget(self.comboBox_layertype, 3, 4, 1, 1)
        self.comboBox_datatype = QtGui.QComboBox(ImpactFunctionsDocBase)
        self.comboBox_datatype.setObjectName(_fromUtf8("comboBox_datatype"))
        self.gridLayoutMain.addWidget(self.comboBox_datatype, 3, 5, 1, 1)
        self.comboBox_unit = QtGui.QComboBox(ImpactFunctionsDocBase)
        self.comboBox_unit.setObjectName(_fromUtf8("comboBox_unit"))
        self.gridLayoutMain.addWidget(self.comboBox_unit, 3, 6, 1, 1)
        # QtWebKit is imported at the bottom of this module (a pyuic
        # artifact); that import runs at module load, before setupUi.
        self.webView = QtWebKit.QWebView(ImpactFunctionsDocBase)
        self.webView.setUrl(QtCore.QUrl(_fromUtf8("about:blank")))
        self.webView.setObjectName(_fromUtf8("webView"))
        self.gridLayoutMain.addWidget(self.webView, 4, 0, 1, 7)
        self.gridLayout.addLayout(self.gridLayoutMain, 0, 1, 1, 1)
        self.retranslateUi(ImpactFunctionsDocBase)
        QtCore.QObject.connect(self.myButtonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), ImpactFunctionsDocBase.reject)
        QtCore.QMetaObject.connectSlotsByName(ImpactFunctionsDocBase)
    def retranslateUi(self, ImpactFunctionsDocBase):
        """Apply (re)translated user-visible strings to the widgets."""
        ImpactFunctionsDocBase.setWindowTitle(QtGui.QApplication.translate("ImpactFunctionsDocBase", "InaSAFE Impact Functions", None, QtGui.QApplication.UnicodeUTF8))
        self.label_title.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Title", None, QtGui.QApplication.UnicodeUTF8))
        self.label_id.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "ID", None, QtGui.QApplication.UnicodeUTF8))
        self.label_subcategory.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Subcategory", None, QtGui.QApplication.UnicodeUTF8))
        self.label_category.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Category", None, QtGui.QApplication.UnicodeUTF8))
        self.label_layertype.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Layer Type", None, QtGui.QApplication.UnicodeUTF8))
        self.label_unit.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Unit", None, QtGui.QApplication.UnicodeUTF8))
        self.label_datatype.setText(QtGui.QApplication.translate("ImpactFunctionsDocBase", "Data Type", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import QtWebKit
import resources_rc
| rukku/inasafe | safe_qgis/impact_functions_doc_base.py | Python | gpl-3.0 | 9,760 |
__author__ = 'rich'
| pirandig/bootstrap-breadcrumbs | bootstrap_breadcrumbs/templatetags/__init__.py | Python | bsd-3-clause | 20 |
########################################################################
#
# File Name: HTMLUListElement
#
# Documentation: http://docs.4suite.com/4DOM/HTMLUListElement.html
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLUListElement(HTMLElement):
    """DOM implementation of the HTML ``UL`` (unordered list) element.

    Exposes the boolean ``compact`` attribute and the bullet-style
    ``type`` attribute defined by DOM Level 2 HTML.

    NOTE: Python 2 code (``string`` module functions, ``has_key``,
    list-returning ``filter``), as generated for 4DOM.
    """
    def __init__(self, ownerDocument, nodeName="UL"):
        HTMLElement.__init__(self, ownerDocument, nodeName)
    ### Attribute Methods ###
    def _get_compact(self):
        # Boolean attribute: presence alone means "render compactly".
        return self.hasAttribute("COMPACT")
    def _set_compact(self, value):
        if value:
            self.setAttribute("COMPACT", "COMPACT")
        else:
            self.removeAttribute("COMPACT")
    def _get_type(self):
        # Bullet style (e.g. "Disc", "Square"), capitalized per 4DOM style.
        return string.capitalize(self.getAttribute("TYPE"))
    def _set_type(self, value):
        self.setAttribute("TYPE", value)
    ### Attribute Access Mappings ###
    # Dispatch tables consumed by HTMLElement's computed-attribute
    # __getattr__/__setattr__ machinery.
    _readComputedAttrs = HTMLElement._readComputedAttrs.copy()
    _readComputedAttrs.update({
        "compact" : _get_compact,
        "type" : _get_type
        })
    _writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
    _writeComputedAttrs.update({
        "compact" : _set_compact,
        "type" : _set_type
        })
    # Anything readable but not writable is read-only.
    _readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
                     HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| iCarto/siga | extScripting/scripts/jython/Lib/xml/dom/html/HTMLUListElement.py | Python | gpl-3.0 | 1,686 |
# Copyright 2019 Ecosoft Co., Ltd. (http://ecosoft.co.th)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class Picking(models.Model):
    """Stock picking extended with Work Acceptance (WA) support.

    An incoming picking that requires a WA can only be validated when
    its done quantities exactly match the quantities accepted on the
    selected WA.
    """

    _inherit = "stock.picking"

    # True when the user must select a WA before validating this transfer.
    require_wa = fields.Boolean(compute="_compute_require_wa")
    wa_id = fields.Many2one(
        comodel_name="work.acceptance",
        string="WA Reference",
        domain="[('id', 'in', wa_ids)]",
        copy=False,
    )
    # Candidate WAs that may still be referenced by this picking.
    wa_ids = fields.Many2many(
        comodel_name="work.acceptance",
        compute="_compute_wa_ids",
    )

    def _compute_require_wa(self):
        """Require a WA on incoming pickings when the enforcing security
        group is active, unless the picking type bypasses WA."""
        for rec in self:
            if rec.picking_type_id.bypass_wa:
                rec.require_wa = False
                continue
            if rec.picking_type_code == "incoming":
                rec.require_wa = self.env.user.has_group(
                    "purchase_work_acceptance.group_enforce_wa_on_in"
                )
                continue
            rec.require_wa = False

    @api.depends("require_wa")
    def _compute_wa_ids(self):
        """Collect the WAs still valid for this picking's purchase order."""
        for picking in self:
            picking.wa_ids = (
                self.env["work.acceptance"]
                .sudo()
                ._get_valid_wa("picking", picking.purchase_id.id)
            )

    def button_validate(self):
        """Validate the transfer, enforcing WA consistency.

        :raises ValidationError: if the selected WA is used elsewhere or
            done quantities differ from the accepted quantities.
        """
        for picking in self:
            if picking.wa_id:
                # NOTE(review): uses the caller's active_id rather than
                # picking.purchase_id.id (as _compute_wa_ids does) --
                # confirm this is intended when validating outside a PO.
                order_id = self._context.get("active_id")
                wa = (
                    self.env["work.acceptance"]
                    .sudo()
                    ._get_valid_wa("picking", order_id)
                )
                wa += picking.wa_id
                # NOTE(review): after the += above, picking.wa_id is always
                # contained in `wa`, so this guard appears unreachable --
                # verify the intended "WA already used" check upstream.
                if picking.wa_id not in wa:
                    raise ValidationError(
                        _("%s was used in some picking.") % picking.wa_id.name
                    )
                # Accepted quantity per product, in the product's base UoM.
                wa_line = {}
                for line in picking.wa_id.wa_line_ids:
                    qty = line.product_uom._compute_quantity(
                        line.product_qty, line.product_id.uom_id
                    )
                    if qty > 0.0 and line.product_id.type in ["product", "consu"]:
                        if line.product_id.id in wa_line.keys():
                            qty_old = wa_line[line.product_id.id]
                            wa_line[line.product_id.id] = qty_old + qty
                        else:
                            wa_line[line.product_id.id] = qty
                # Done quantity per product, in the product's base UoM.
                move_line = {}
                for move in picking.move_ids_without_package:
                    qty = move.product_uom._compute_quantity(
                        move.quantity_done, move.product_id.uom_id
                    )
                    if qty > 0.0:
                        if move.product_id.id in move_line.keys():
                            qty_old = move_line[move.product_id.id]
                            move_line[move.product_id.id] = qty_old + qty
                        else:
                            move_line[move.product_id.id] = qty
                if wa_line != move_line:
                    raise ValidationError(
                        _(
                            "You cannot validate a transfer if done"
                            " quantity not equal accepted quantity"
                        )
                    )
        return super(Picking, self).button_validate()

    @api.onchange("wa_id")
    def _onchange_wa_id(self):
        """Pre-fill done quantities from the selected WA, spreading each
        product's accepted total over its move lines in order."""
        if self.wa_id:
            # Remaining accepted quantity per product id.
            wa_line = {}
            for line in self.wa_id.wa_line_ids:
                qty = line.product_uom._compute_quantity(
                    line.product_qty, line.product_id.uom_id
                )
                if line.product_id.id in wa_line.keys():
                    qty_old = wa_line[line.product_id.id]
                    wa_line[line.product_id.id] = qty_old + qty
                else:
                    wa_line[line.product_id.id] = qty
            for move_line in self.move_line_ids_without_package:
                if move_line.product_id.id in wa_line.keys():
                    qty = wa_line[move_line.product_id.id]
                    if move_line.product_uom_qty < qty:
                        move_line._origin.qty_done = move_line.product_uom_qty
                        # BUGFIX: decrement the remaining quantity of the
                        # product on *this* move line. The original indexed
                        # with `line` (the stale variable left over from the
                        # previous loop), corrupting the remaining totals
                        # whenever a product spans several move lines.
                        wa_line[move_line.product_id.id] = (
                            qty - move_line.product_uom_qty
                        )
                    else:
                        move_line._origin.qty_done = qty
class StockPickingType(models.Model):
    """Picking type extended with an opt-out from WA enforcement."""
    _inherit = "stock.picking.type"
    # Picking types flagged here are exempt even when the
    # "Enforce WA on Goods Receipt" group is active.
    bypass_wa = fields.Boolean(
        string="WA not required",
        help="When 'Enforce WA on Goods Receipt' is set, this option type can by pass it",
    )
| OCA/purchase-workflow | purchase_work_acceptance/models/stock_picking.py | Python | agpl-3.0 | 4,677 |
import irclib
import urllib
import BeautifulSoup
class eat:
    """IRC command module: ``.eat <name>`` looks the name up on wikiEat
    and replies with a page link when one exists.

    NOTE: Python 2 code (``urllib.urlopen``, old BeautifulSoup API),
    matching the rest of this bot.
    """
    def on_pubmsg(self, nick, connection, event):
        """Handle a public channel message; only ``.eat`` commands react."""
        message = event.arguments()[0]
        if message.startswith(".eat"):
            # Strip ".eat " (the command word plus the separating space).
            name = message[5:]
            # BUGFIX: percent-encode *all* URL-unsafe characters, not just
            # spaces (the old code only replaced " " with "%20").
            url = "http://www.wikieat.org/people/" + urllib.quote(name.title())
            page = urllib.urlopen(url)
            soup = str(BeautifulSoup.BeautifulSoup(page.read()))
            if "Nothing here yet" in soup:
                connection.privmsg(event.target(), "Found nothing")
            else:
                response = name.title() + " with food << wikiEat :: " + url
                connection.privmsg(event.target(), response)
| Oozemeister/Gypsy | modules/pubmsg/eat.py | Python | gpl-2.0 | 677 |
__all__ = ["markdown", "mdx_code", "mdx_footnotes", "mdx_lifeflow"]
| lethain/lifeflow | markdown/__init__.py | Python | mit | 68 |
#---------------------------------------------------------------------------
# Copyright 2010, 2011 Sushil J. Louis and Christopher E. Miles,
# Evolutionary Computing Systems Laboratory, Department of Computer Science
# and Engineering, University of Nevada, Reno.
#
# This file is part of OpenECSLENT
#
# OpenECSLENT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenECSLENT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenECSLENT. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
#-------------------------End Copyright Notice------------------------------
SELECTION_RADIUS_SQUARED = 10000
from engine import transform
from engine.vector import vector3
class SelectionHandler: #perhaps a manager? loading a level would change the plane to check against
    """Translates mouse clicks into entity selection.

    Casts a camera ray through the cursor onto the water plane and
    selects every entity within SELECTION_RADIUS_SQUARED of the hit.
    """
    def __init__(self, context, mouse):
        # NOTE(review): the `mouse` parameter is ignored; the handler
        # always uses the engine's input-system mouse -- confirm intent.
        self.context = context
        self.mouse = self.context.engine.inputSystem.mouse
        self.mouse.capture()
        # Seed the mouse state with the render-window size so absolute
        # coordinates can be normalized in cursorToWorld().
        ms = self.mouse.getMouseState()
        ms.height = self.context.engine.gfxSystem.renderWindow.getHeight()
        ms.width = self.context.engine.gfxSystem.renderWindow.getWidth()
        self.selectedEnts = []
    def checkAndSetSelected(self, mouseEvent):
        """Replace the current selection with whatever is under the cursor."""
        # BUGFIX: removed a leftover `import pdb; pdb.set_trace()` that
        # froze the game on every selection click.
        for ent in self.selectedEnts:
            ent.selectionCircle.hide()
        self.selectedEnts = []
        pos, ents = self.cursorToWorld(mouseEvent.get_state())
        for ent in ents:
            self.selectedEnts.append(ent)
            ent.selectionCircle.show()
    def cursorToWorld(self, ms):
        """Project the cursor onto the water plane and return
        (world position, entities near it); (None, []) on a miss."""
        mouseRay = self.context.engine.cameraSystem.camera.getCameraToViewportRay(ms.X.abs/float(ms.width), ms.Y.abs/float(ms.height))
        result = mouseRay.intersects(self.context.engine.gfxSystem.waterPlane)
        if result.first:
            pos = mouseRay.getPoint(result.second)
            return self.checkForEntsInRadius(pos, SELECTION_RADIUS_SQUARED)
        else:
            return (None, [])
    def checkForEntsInRadius(self, pos, radiusSquared):
        """Return (pos, entities whose squad has a member within radius).

        NOTE(review): distance is measured to ent.pos (not the squad
        member's position), and an entity is appended once per member in
        range -- confirm both are intended. A debug print was removed.
        """
        listOfEnts = []
        for ent in self.context.engine.entMgr.ents:
            if ent.hasSquad:
                for squadMember in ent.squad.squadMembers:
                    d = pos.squaredDistance(ent.pos)
                    if d < radiusSquared:
                        listOfEnts.append(ent)
        return (pos, listOfEnts)
| bgaunt/openEcslent | engine/gui/selectionHandler.py | Python | gpl-3.0 | 3,155 |
import datetime
import importlib
import os
import sys
from django.apps import apps
from django.db.models.fields import NOT_PROVIDED
from django.utils import timezone
from .loader import MigrationLoader
class MigrationQuestioner:
    """
    Give the autodetector responses to questions it might have.

    This base class has a built-in noninteractive mode, but the
    interactive subclass is what the command-line arguments will use.
    """

    def __init__(self, defaults=None, specified_apps=None, dry_run=None):
        self.defaults = defaults or {}
        self.specified_apps = specified_apps or set()
        self.dry_run = dry_run

    def ask_initial(self, app_label):
        """Should we create an initial migration for the app?"""
        # Apps named on the command line always get an initial migration.
        if app_label in self.specified_apps:
            return True
        fallback = self.defaults.get("ask_initial", False)
        # Otherwise, look for a migrations package that contains no
        # Python modules apart from __init__.py (the new-app template);
        # anything else (e.g. old South migrations) is skipped.
        try:
            app_config = apps.get_app_config(app_label)
        except LookupError:
            # It's a fake app.
            return fallback
        migrations_import_path, _ = MigrationLoader.migrations_module(app_config.label)
        if migrations_import_path is None:
            # Migrations are disabled for this application.
            return fallback
        try:
            migrations_module = importlib.import_module(migrations_import_path)
        except ImportError:
            return fallback
        # getattr() needed on PY36 and older (replace with attribute access).
        if getattr(migrations_module, "__file__", None):
            filenames = os.listdir(os.path.dirname(migrations_module.__file__))
        elif hasattr(migrations_module, "__path__"):
            if len(migrations_module.__path__) > 1:
                return False
            filenames = os.listdir(list(migrations_module.__path__)[0])
        return not any(name.endswith(".py") for name in filenames if name != "__init__.py")

    def ask_not_null_addition(self, field_name, model_name):
        """Adding a NOT NULL field to a model (None means quit)."""
        return None

    def ask_not_null_alteration(self, field_name, model_name):
        """Changing a NULL field to NOT NULL (None means quit)."""
        return None

    def ask_rename(self, model_name, old_name, new_name, field_instance):
        """Was this field really renamed?"""
        return self.defaults.get("ask_rename", False)

    def ask_rename_model(self, old_model_state, new_model_state):
        """Was this model really renamed?"""
        return self.defaults.get("ask_rename_model", False)

    def ask_merge(self, app_label):
        """Do you really want to merge these migrations?"""
        return self.defaults.get("ask_merge", False)

    def ask_auto_now_add_addition(self, field_name, model_name):
        """Adding an auto_now_add field to a model (None means quit)."""
        return None
class InteractiveMigrationQuestioner(MigrationQuestioner):
    """Questioner that prompts the user on the console for each decision."""
    def _boolean_input(self, question, default=None):
        """Prompt until the user answers yes/no; empty input returns
        ``default`` when one is given."""
        result = input("%s " % question)
        if not result and default is not None:
            return default
        while not result or result[0].lower() not in "yn":
            result = input("Please answer yes or no: ")
        return result[0].lower() == "y"
    def _choice_input(self, question, choices):
        """Show a numbered menu and loop until a valid 1-based choice
        is entered; return the chosen number."""
        print(question)
        for i, choice in enumerate(choices):
            print(" %s) %s" % (i + 1, choice))
        result = input("Select an option: ")
        while True:
            try:
                value = int(result)
            except ValueError:
                pass
            else:
                if 0 < value <= len(choices):
                    return value
            result = input("Please select a valid option: ")
    def _ask_default(self, default=''):
        """
        Prompt for a default value.

        The ``default`` argument allows providing a custom default value (as a
        string) which will be shown to the user and used as the return value
        if the user doesn't provide any other input.
        """
        print("Please enter the default value now, as valid Python")
        if default:
            print(
                "You can accept the default '{}' by pressing 'Enter' or you "
                "can provide another value.".format(default)
            )
        print("The datetime and django.utils.timezone modules are available, so you can do e.g. timezone.now")
        print("Type 'exit' to exit this prompt")
        while True:
            if default:
                prompt = "[default: {}] >>> ".format(default)
            else:
                prompt = ">>> "
            code = input(prompt)
            if not code and default:
                code = default
            if not code:
                print("Please enter some code, or 'exit' (with no quotes) to exit.")
            elif code == "exit":
                sys.exit(1)
            else:
                try:
                    # Evaluate the user's Python with only datetime/timezone
                    # in scope; loop again on invalid input.
                    return eval(code, {}, {'datetime': datetime, 'timezone': timezone})
                except (SyntaxError, NameError) as e:
                    print("Invalid input: %s" % e)
    def ask_not_null_addition(self, field_name, model_name):
        """Adding a NOT NULL field to a model."""
        if not self.dry_run:
            choice = self._choice_input(
                "You are trying to add a non-nullable field '%s' to %s without a default; "
                "we can't do that (the database needs something to populate existing rows).\n"
                "Please select a fix:" % (field_name, model_name),
                [
                    ("Provide a one-off default now (will be set on all existing "
                     "rows with a null value for this column)"),
                    "Quit, and let me add a default in models.py",
                ]
            )
            if choice == 2:
                sys.exit(3)
            else:
                return self._ask_default()
        return None
    def ask_not_null_alteration(self, field_name, model_name):
        """Changing a NULL field to NOT NULL."""
        if not self.dry_run:
            choice = self._choice_input(
                "You are trying to change the nullable field '%s' on %s to non-nullable "
                "without a default; we can't do that (the database needs something to "
                "populate existing rows).\n"
                "Please select a fix:" % (field_name, model_name),
                [
                    ("Provide a one-off default now (will be set on all existing "
                     "rows with a null value for this column)"),
                    ("Ignore for now, and let me handle existing rows with NULL myself "
                     "(e.g. because you added a RunPython or RunSQL operation to handle "
                     "NULL values in a previous data migration)"),
                    "Quit, and let me add a default in models.py",
                ]
            )
            if choice == 2:
                # Sentinel meaning "leave existing NULLs alone".
                return NOT_PROVIDED
            elif choice == 3:
                sys.exit(3)
            else:
                return self._ask_default()
        return None
    def ask_rename(self, model_name, old_name, new_name, field_instance):
        """Was this field really renamed?"""
        msg = "Did you rename %s.%s to %s.%s (a %s)? [y/N]"
        return self._boolean_input(msg % (model_name, old_name, model_name, new_name,
                                          field_instance.__class__.__name__), False)
    def ask_rename_model(self, old_model_state, new_model_state):
        """Was this model really renamed?"""
        msg = "Did you rename the %s.%s model to %s? [y/N]"
        return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name,
                                          new_model_state.name), False)
    def ask_merge(self, app_label):
        """Confirm merging conflicting migration branches."""
        return self._boolean_input(
            "\nMerging will only work if the operations printed above do not conflict\n" +
            "with each other (working on different fields or models)\n" +
            "Do you want to merge these migration branches? [y/N]",
            False,
        )
    def ask_auto_now_add_addition(self, field_name, model_name):
        """Adding an auto_now_add field to a model."""
        if not self.dry_run:
            choice = self._choice_input(
                "You are trying to add the field '{}' with 'auto_now_add=True' "
                "to {} without a default; the database needs something to "
                "populate existing rows.\n".format(field_name, model_name),
                [
                    "Provide a one-off default now (will be set on all "
                    "existing rows)",
                    "Quit, and let me add a default in models.py",
                ]
            )
            if choice == 2:
                sys.exit(3)
            else:
                return self._ask_default(default='timezone.now')
        return None
class NonInteractiveMigrationQuestioner(MigrationQuestioner):
    """Questioner for non-interactive runs: never prompts the user."""

    def ask_not_null_addition(self, field_name, model_name):
        """A default is required but nobody can supply one; abort."""
        sys.exit(3)

    def ask_auto_now_add_addition(self, field_name, model_name):
        """A default is required but nobody can supply one; abort."""
        sys.exit(3)

    def ask_not_null_alteration(self, field_name, model_name):
        """Leave existing NULL rows for the user to handle."""
        return NOT_PROVIDED
| sametmax/Django--an-app-at-a-time | ignore_this_directory/django/db/migrations/questioner.py | Python | mit | 9,918 |
# Copyright 2011 Jamie Norrish (jamie@artefact.org.nz)
# Copyright 2008 - 2010 Lars Heuer (heuer[at]semagia.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing functions for generating signatures for Topic
Maps constructs.
These functions can be used to detect duplicates: if two Topic Maps
constructs have the same signature, they should be merged (if they
belong to the same parent).
Neither the topic map, the parent, the reifier, nor item identifiers
are accounted for in generating the signature.
It is a port of the Java code written by Lars Heuer for the tinyTiM
project (http://tinytim.sourceforget.net/).
"""
from name import Name
def generate_association_signature(association):
    """Generates the signature for an association.

    :param association: the association to generate the signature for
    :type association: `Assocation`
    :rtype: tuple
    """
    type_part = _generate_type_signature(association)
    scope_part = _generate_scope_signature(association)
    roles_part = _generate_roles_signature(association.get_roles())
    return (type_part, scope_part, roles_part)
def generate_name_signature(name):
    """Generates the signature for the specified name.

    The parent and the variants are not taken into account.

    :param name: the name to generate the signature for
    :type name: `Name`
    :rtype: tuple
    """
    type_part = _generate_type_signature(name)
    scope_part = _generate_scope_signature(name)
    data_part = _generate_data_signature(name)
    return (type_part, scope_part, data_part)
def generate_occurrence_signature(occurrence):
    """Generates the signature for an occurrence.

    :param occurrence: the occurrence to generate the signature for
    :type occurrence: `Occurrence`
    :rtype: tuple
    """
    type_part = _generate_type_signature(occurrence)
    scope_part = _generate_scope_signature(occurrence)
    data_part = _generate_data_signature(occurrence)
    return (type_part, scope_part, data_part)
def generate_role_signature(role):
    """Generates the signature for a role.

    :param role: the role to generate the signature for
    :type role: `Role`
    :rtype: tuple
    """
    type_sig = _signature(role.get_type())
    player_sig = _signature(role.get_player())
    return (type_sig, player_sig)
def generate_variant_signature(variant):
    """Generates the signature for the specified `variant`.

    :param variant: the variant to generate the signature for
    :type variant: `Variant`
    :rtype: tuple
    """
    scope_part = _generate_scope_signature(variant)
    data_part = _generate_data_signature(variant)
    return (scope_part, data_part)
def _generate_data_signature(construct):
    """Returns the signature for a value/datatype pair.

    :param construct: the construct to generate the signature for
    :type construct: `Occurrence` or `Variant`
    :rtype: tuple
    """
    if isinstance(construct, Name):
        # Names carry no datatype.
        datatype_hash = None
    else:
        datatype_hash = hash(construct.get_datatype().to_external_form())
    value_hash = hash(construct.get_value())
    return (datatype_hash, value_hash)
def _generate_roles_signature (roles):
"""Returns the signature for the specified roles.
:param roles: the roles to generate the signature for
:type roles: `QuerySet` of `Role`s
:rtype: frozenset
"""
if len(roles) == 0:
return 0
signature = []
for role in roles:
signature.append(generate_role_signature(role))
return frozenset(signature)
def _generate_scope_signature (scoped):
"""Returns the signature for the scope of a scoped Topic Maps
construct.
This function returns the signature for the scope only. No other
properties of the scoped construct are taken into account.
:param scoped: the scoped Topic Maps construct
:type scoped: `Scoped`
:rtype: frozenset
"""
return frozenset(scoped.get_scope())
def _generate_type_signature(typed):
    """Returns the signature for the type of a typed Topic Maps construct.

    :param typed: the typed Topic Maps construct
    :type typed: `Typed`
    :rtype: integer
    """
    typing_topic = typed.get_type()
    return _signature(typing_topic)
def _signature (topic):
"""Returns the signature of the specified topic.
:param topic: the topic to generate the signature for
:type topic: `Topic`
:rtype: integer
"""
return topic.id
| ajenhl/django-tmapi | tmapi/models/signature.py | Python | apache-2.0 | 4,627 |
"""Support for myStrom Wifi bulbs."""
import logging
from pymystrom.bulb import MyStromBulb
from pymystrom.exceptions import MyStromConnectionError
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_HS_COLOR,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_EFFECT,
SUPPORT_FLASH,
LightEntity,
)
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Entity name used when the user does not configure one.
DEFAULT_NAME = "myStrom bulb"
# Feature bitmask advertised to Home Assistant for this light.
SUPPORT_MYSTROM = SUPPORT_BRIGHTNESS | SUPPORT_EFFECT | SUPPORT_FLASH | SUPPORT_COLOR
# Built-in bulb effects exposed in the effect list.
EFFECT_RAINBOW = "rainbow"
EFFECT_SUNRISE = "sunrise"
MYSTROM_EFFECT_LIST = [EFFECT_RAINBOW, EFFECT_SUNRISE]
# YAML platform config: host and MAC are mandatory, name optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_MAC): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the myStrom light integration.

    Verifies the configured device is reachable and is actually an RGB
    bulb before adding the entity; unreachable devices trigger a retry
    via PlatformNotReady.
    """
    host = config.get(CONF_HOST)
    mac = config.get(CONF_MAC)
    name = config.get(CONF_NAME)

    bulb = MyStromBulb(host, mac)
    try:
        await bulb.get_state()
        if bulb.bulb_type != "rgblamp":
            _LOGGER.error("Device %s (%s) is not a myStrom bulb", host, mac)
            return
    except MyStromConnectionError as err:
        _LOGGER.warning("No route to myStrom bulb: %s", host)
        # Chain the original error so the root cause is not masked
        # (PEP 3134 / flake8-bugbear B904).
        raise PlatformNotReady() from err

    async_add_entities([MyStromLight(bulb, name, mac)], True)
class MyStromLight(LightEntity):
    """Representation of the myStrom WiFi bulb."""
    def __init__(self, bulb, name, mac):
        """Initialize the light."""
        self._bulb = bulb
        self._name = name
        self._state = None  # last known on/off state; None until first update
        self._available = False  # True once the bulb has answered
        self._brightness = 0
        self._color_h = 0  # hue component of the HS color
        self._color_s = 0  # saturation component of the HS color
        self._mac = mac
    @property
    def name(self):
        """Return the display name of this light."""
        return self._name
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._mac
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_MYSTROM
    @property
    def brightness(self):
        """Return the brightness of the light."""
        return self._brightness
    @property
    def hs_color(self):
        """Return the color of the light."""
        return self._color_h, self._color_s
    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._available
    @property
    def effect_list(self):
        """Return the list of supported effects."""
        return MYSTROM_EFFECT_LIST
    @property
    def is_on(self):
        """Return true if light is on."""
        return self._state
    async def async_turn_on(self, **kwargs):
        """Turn on the light."""
        brightness = kwargs.get(ATTR_BRIGHTNESS, 255)
        effect = kwargs.get(ATTR_EFFECT)
        if ATTR_HS_COLOR in kwargs:
            color_h, color_s = kwargs[ATTR_HS_COLOR]
        elif ATTR_BRIGHTNESS in kwargs:
            # Brightness update, keep color
            color_h, color_s = self._color_h, self._color_s
        else:
            color_h, color_s = 0, 0  # Back to white
        try:
            if not self.is_on:
                await self._bulb.set_on()
            # NOTE(review): brightness defaults to 255 above, so this
            # None-check can never be False -- confirm intent.
            if brightness is not None:
                # Bulb expects value in 0..100, HA supplies 0..255.
                await self._bulb.set_color_hsv(
                    int(color_h), int(color_s), round(brightness * 100 / 255)
                )
            if effect == EFFECT_SUNRISE:
                await self._bulb.set_sunrise(30)
            if effect == EFFECT_RAINBOW:
                await self._bulb.set_rainbow(30)
        except MyStromConnectionError:
            _LOGGER.warning("No route to myStrom bulb")
    async def async_turn_off(self, **kwargs):
        """Turn off the bulb."""
        try:
            await self._bulb.set_off()
        except MyStromConnectionError:
            _LOGGER.warning("myStrom bulb not online")
    async def async_update(self):
        """Fetch new state data for this light."""
        try:
            await self._bulb.get_state()
            self._state = self._bulb.state
            colors = self._bulb.color
            # The bulb reports either "h;s;v" or just "s;v" (no hue).
            try:
                color_h, color_s, color_v = colors.split(";")
            except ValueError:
                color_s, color_v = colors.split(";")
                color_h = 0
            self._color_h = int(color_h)
            self._color_s = int(color_s)
            # NOTE(review): this yields a float in 0..255 -- confirm a
            # non-integer brightness is acceptable to Home Assistant.
            self._brightness = int(color_v) * 255 / 100
            self._available = True
        except MyStromConnectionError:
            _LOGGER.warning("No route to myStrom bulb")
            self._available = False
| nkgilley/home-assistant | homeassistant/components/mystrom/light.py | Python | apache-2.0 | 4,976 |
import pickle
from qanta.guesser.abstract import AbstractGuesser
from qanta.guesser.dan import DanGuesser
from qanta.util.constants import BUZZER_TRAIN_FOLD, BUZZER_DEV_FOLD
# Locate the trained DAN guesser and reload it from disk.
guesser_directory = AbstractGuesser.output_path(
    "qanta.guesser.dan", "DanGuesser", 0, ""
)
guesser = DanGuesser.load(guesser_directory)  # type: AbstractGuesser

# Shrink the batch size for guess generation.  Use integer floor division
# (the original `/= 8` yields a float under Python 3 true division, and a
# batch size must stay an int); clamp to at least 1.
guesser.batch_size = max(1, guesser.batch_size // 8)
word_skip = 2
folds = [BUZZER_TRAIN_FOLD, BUZZER_DEV_FOLD]

# Generate top-1 guesses for each buzzer fold and pickle the resulting
# guess DataFrame alongside the guesser model.
for fold in folds:
    df = guesser.generate_guesses(1, [fold], word_skip=word_skip)
    output_path = AbstractGuesser.guess_path(guesser_directory, fold)
    with open(output_path, "wb") as f:
        pickle.dump(df, f)
| Pinafore/qb | generate_guesses.py | Python | mit | 646 |
import deepdish as dd
class Foo(dd.util.SaveableRegistry):
    """Base saveable class; subclasses register themselves under a name.

    Demonstrates the SaveableRegistry protocol: `save_to_dict` captures
    state as a plain dict, `load_from_dict` reconstructs an instance.
    """

    def __init__(self, x):
        self.x = x

    @classmethod
    def load_from_dict(cls, d):
        """Reconstruct a Foo from a dict produced by save_to_dict.

        Note: the first parameter of a classmethod is the class, so it is
        named `cls` (the original misleadingly called it `self`).
        """
        obj = Foo(d['x'])
        return obj

    def save_to_dict(self):
        """Return a picklable dict capturing this object's state."""
        return {'x': self.x}
@Foo.register('bar')
class Bar(Foo):
    """Subclass of Foo registered under the name 'bar'.

    Registration lets a saved file record which concrete class to
    reconstruct on load.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    @classmethod
    def load_from_dict(cls, d):
        """Reconstruct a Bar from a dict produced by save_to_dict.

        First parameter renamed to `cls`: a classmethod receives the class,
        not an instance.
        """
        obj = Bar(d['x'], d['y'])
        return obj

    def save_to_dict(self):
        """Return a picklable dict capturing this object's state."""
        return {'x': self.x, 'y': self.y}
| agarbuno/deepdish | doc/source/codefiles/saveable_example.py | Python | bsd-3-clause | 542 |
from bpy_extras.view3d_utils import location_3d_to_region_2d
def render_get_resolution_(r):
    """Return the effective (x, y) render size in pixels.

    Applies the render percentage scale (resolution_percentage) to the
    configured resolution and truncates to whole pixels.
    """
    width, height = (
        int(dim * r.resolution_percentage * 0.01)
        for dim in (r.resolution_x, r.resolution_y)
    )
    return width, height
def render_get_aspect_(r, camera=None, x=-1, y=-1):
    """Compute (xaspect, yaspect, aspectratio) for render settings *r*.

    Explicit pixel dimensions may be supplied via *x*/*y*; if either is
    left at -1 the effective render resolution is used instead.  For a
    perspective *camera*, its sensor-fit mode decides which axis carries
    the aspect ratio; otherwise AUTO fit picks the larger axis.
    """
    if x == -1 or y == -1:
        x, y = render_get_resolution_(r)
    xratio = x * r.pixel_aspect_x / 200.0
    yratio = y * r.pixel_aspect_y / 200.0

    # Only a perspective camera contributes a sensor-fit preference.
    if camera is not None and camera.type == 'PERSP':
        fit = camera.sensor_fit
    else:
        fit = 'AUTO'

    if fit == 'HORIZONTAL' or (fit == 'AUTO' and xratio > yratio):
        aspectratio = xratio / yratio
        xaspect, yaspect = aspectratio, 1.0
    elif fit == 'VERTICAL' or (fit == 'AUTO' and yratio > xratio):
        aspectratio = yratio / xratio
        xaspect, yaspect = 1.0, aspectratio
    else:
        xaspect = yaspect = aspectratio = 1.0
    return xaspect, yaspect, aspectratio
def get_viewport_cam_borders(ob, render, region, region_data, scene):
    """Return the render-border rectangle of camera *ob* in viewport pixels.

    The camera frame is projected into region (pixel) space, and the
    normalized render border fractions (render.border_min/max_x/y) are
    mapped onto the projected frame's bounding box.

    Returns (x0, x1, y0, y1) in region pixel coordinates.

    Code reference:
    https://blender.stackexchange.com/questions/6377/coordinates-of-corners-of-camera-view-border
    """
    cam = ob.data
    frame = cam.view_frame(scene=scene)
    # move from object-space into world-space
    frame = [ob.matrix_world @ v for v in frame]
    # move into pixelspace
    frame_px = [location_3d_to_region_2d(region, region_data, v) for v in frame]

    # Bounding box of the projected camera frame.  min()/max() replaces the
    # original sentinel-based scan, which broke when a corner coordinate was
    # exactly -1 (the value used to mean "unset").
    xs = [v[0] for v in frame_px]
    ys = [v[1] for v in frame_px]
    min_x, max_x = min(xs), max(xs)
    min_y, max_y = min(ys), max(ys)

    cam_width = max_x - min_x
    cam_height = max_y - min_y

    # Map the normalized render-border fractions into pixel coordinates.
    x0 = min_x + render.border_min_x * cam_width
    x1 = min_x + render.border_max_x * cam_width
    y0 = min_y + render.border_min_y * cam_height
    y1 = min_y + render.border_max_y * cam_height
    return (x0, x1, y0, y1)
#!/usr/bin/python
"""
Module to test RO manager RDF report creation function
"""
import os, os.path
import sys
import re
import shutil
import logging
import datetime
import StringIO
import json
import unittest
log = logging.getLogger(__name__)

if __name__ == "__main__":
    # Add main project directory and ro manager directories at start of python path
    sys.path.insert(0, "../..")
    sys.path.insert(0, "..")
    # sys.path.insert(0, "/usr/workspace/github-rdfextras")
    # sys.path.insert(0, "/usr/workspace/github-rdflib")

import rdflib

from MiscUtils import TestUtils

from rocommand.ro_namespaces import RDF, DCTERMS, RO, AO, ORE
from iaeval.ro_minim import MINIM

import RdfReport
import TrafficLightReports

# Base directory for RO tests in this module
testbase = os.path.dirname(os.path.abspath(__file__))

# RDF fixture files used by the tests below.
simple_test_data = testbase+"/data/simple-test-data.rdf"
trafficlight_test_data = testbase+"/data/trafficlight-test-data.rdf"

# Shorthand constructors for RDF literal and URI reference nodes.
def LIT(l): return rdflib.Literal(l)
def REF(u): return rdflib.URIRef(u)

# Common SPARQL prefix declarations prepended to every query in the tests.
prefixes = """
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX xml: <http://www.w3.org/XML/1998/namespace>
    PREFIX rdfg: <http://www.w3.org/2004/03/trix/rdfg-1/>
    PREFIX ore: <http://www.openarchives.org/ore/terms/>
    PREFIX ao: <http://purl.org/ao/>
    PREFIX dcterms: <http://purl.org/dc/terms/>
    PREFIX foaf: <http://xmlns.com/foaf/0.1/>
    PREFIX ro: <http://purl.org/wf4ever/ro#>
    PREFIX wfprov: <http://purl.org/wf4ever/wfprov#>
    PREFIX wfdesc: <http://purl.org/wf4ever/wfdesc#>
    PREFIX wf4ever: <http://purl.org/wf4ever/wf4ever#>
    PREFIX minim: <http://purl.org/minim/minim#>
    PREFIX result: <http://www.w3.org/2001/sw/DataAccess/tests/result-set#>
    PREFIX ex: <http://example.org/terms/>
    """
class TestRdfReport(unittest.TestCase):
    """
    Test RDF report generator.

    (Python 2 code: uses StringIO, unichr and print statements.)

    Report definition structure:

    report-defn    = { 'report': template-item }    // May add more later

    template-item  = sequence | query-template      // Bindings to date are fed
                                                    // down into template-item
                                                    // processing

    sequence       = [ template-item, ... ]

    query-template = { 'query':  sparql-query [None],
                       'max':    integer [1],
                       'output': python-format-string [None],
                       'report': template-item [None],
                       'alt':    python-format-string [None],
                     }
    """

    def setUp(self):
        # No fixture state beyond the base TestCase.
        super(TestRdfReport, self).setUp()
        return

    def tearDown(self):
        super(TestRdfReport, self).tearDown()
        return

    # Actual tests follow

    def testNull(self):
        assert True, 'Null test failed'

    def testEscapeJSON(self):
        # Build a string containing all 128 7-bit characters, escape it for
        # JSON, then check both the exact escaped form and that json.loads
        # round-trips back to the original string.
        s = []
        for i in range(0,128):
            s.append(unichr(i))
        s = "".join(s)
        s_esc = RdfReport.escape_json(s)
        e_esc = ( u'\\u0000\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007'+
                  u'\\b\\t\\n\\u000b\\f\\r\\u000e\\u000f'+
                  u'\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017'+
                  u'\\u0018\\u0019\\u001a\\u001b\\u001c\\u001d\\u001e\\u001f'+
                  u' !\\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`'+
                  u'abcdefghijklmnopqrstuvwxyz{|}~\\u007f')
        # print "----"
        # print repr(s_esc)
        # print repr(e_esc)
        self.assertEqual(s_esc, e_esc)
        s_loads = json.loads('"'+s_esc+'"')
        self.assertEqual(s_loads, s)
        return

    def testHelloWorld(self):
        """
        Test just about the simplest possible report: constant output,
        no query.
        """
        report = (
            { 'report':
              { 'output': "Hello world" }
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr)
        self.assertEqual("Hello world", outstr.getvalue())
        return

    def testSimpleQuery(self):
        """
        Test a simple query report: query binding substituted into output.
        """
        report = (
            { 'report':
              { 'query': prefixes+"SELECT * WHERE { ?s dcterms:creator ?creator }"
              , 'output': "Hello %(creator)s" }
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr)
        self.assertEqual("Hello Graham", outstr.getvalue())
        return

    def testSimpleQuotedJson(self):
        """
        Test JSON quoting in simple report (the `_esc` binding suffix
        selects the escaped form of the query variable).
        """
        report = (
            { 'report':
              { 'query': prefixes+"SELECT * WHERE { ?s ex:quoteme ?quoteme }"
              , 'output': "Hello %(quoteme_esc)s" }
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr, RdfReport.escape_json)
        self.assertEqual("""Hello <\\\"Graham\\">""", outstr.getvalue())
        return

    def testSimpleQuotedHtml(self):
        """
        Test HTML quoting in simple report.
        """
        report = (
            { 'report':
              { 'query': prefixes+"SELECT * WHERE { ?s ex:quoteme ?quoteme }"
              , 'output': "Hello %(quoteme_esc)s" }
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr, RdfReport.escape_html)
        # NOTE(review): the expected value below appears to have lost its
        # HTML entity escaping in transit -- verify against escape_html.
        self.assertEqual("""Hello <"Graham">""", outstr.getvalue())
        return

    def testQueryResultMerge(self):
        """
        Test a simple query merged with existing results
        """
        report = (
            { 'report':
              { 'query': prefixes+"SELECT * WHERE { ?s dcterms:creator ?creator }"
              , 'output': "Hello %(creator)s and %(name)s"
              }
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {'name': 'anyone'}, outstr)
        self.assertEqual("Hello Graham and anyone", outstr.getvalue())
        return

    def testQueryResultPreBinding(self):
        """
        Test a simple query with pre-bound result value
        """
        report = (
            { 'report':
              { 'query': prefixes+"SELECT * WHERE { ?s dcterms:creator ?creator; rdfs:label ?label }"
              , 'output': "Hello %(creator)s"
              }
            })
        # First run without any pre-bound values...
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr)
        self.assertEqual("Hello Graham", outstr.getvalue())
        # ...then with ?label pre-bound; the result should be unchanged.
        outstr = StringIO.StringIO()
        RdfReport.generate_report(report, rdfgraph, {'label': rdflib.Literal('simple-test-data')}, outstr)
        self.assertEqual("Hello Graham", outstr.getvalue())
        return

    def testSequence(self):
        """
        Test a sequence report
        """
        report = (
            { 'report':
              [ { 'output': "Foreword: " }
              , { 'query': prefixes+"SELECT * WHERE { ?s dcterms:creator ?creator }"
                , 'output': "Hello %(creator)s; "
                }
              , { 'output': "afterword." }
              ]
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr)
        self.assertEqual("Foreword: Hello Graham; afterword.", outstr.getvalue())
        return

    def testAlternative(self):
        """
        Test an alterantive (query not matched) report: 'alt' text is
        emitted when the query yields no results.
        """
        report = (
            { 'report':
              [ { 'output': "Foreword: " }
              , { 'query': prefixes+"SELECT * WHERE { ?s ex:notfound ?creator }"
                , 'output': "Hello %(creator)s; "
                , 'alt': "Is %(name)s there? "
                }
              , { 'output': "afterword." }
              ]
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {'name': "anyone"}, outstr)
        self.assertEqual("Foreword: Is anyone there? afterword.", outstr.getvalue())
        return

    def testAlternativeMissing(self):
        """
        Test an alterantive (query not matched) report with no alternative
        given: the unmatched template contributes nothing.
        """
        report = (
            { 'report':
              [ { 'output': "Foreword: " }
              , { 'query': prefixes+"SELECT * WHERE { ?s ex:notfound ?creator }"
                , 'output': "Hello %(creator)s; "
                }
              , { 'output': "afterword." }
              ]
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {'name': "anyone"}, outstr)
        self.assertEqual("Foreword: afterword.", outstr.getvalue())
        return

    def testRepetition(self):
        """
        Test a report with a repeated match, joined by 'sep'.
        """
        report = (
            { 'report':
              [ { 'output': "Tags: " }
              , { 'query': prefixes+"SELECT * WHERE { ?s dcterms:creator ?creator; ex:tag ?tag } ORDER BY ?tag"
                , 'output': "%(tag)s"
                , 'sep': ", "
                }
              , { 'output': "." }
              ]
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr)
        self.assertEqual("Tags: tag1, tag2, tag3, tag4.", outstr.getvalue())
        return

    def testRepetitionMax(self):
        """
        Test a report with a repeated match, limited by 'max'.
        """
        report = (
            { 'report':
              [ { 'output': "Tags: " }
              , { 'query': prefixes+"SELECT * WHERE { ?s dcterms:creator ?creator; ex:tag ?tag } ORDER BY ?tag"
                , 'output': "%(tag)s"
                , 'sep': ", "
                , 'max': 2
                }
              , { 'output': "." }
              ]
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr)
        self.assertEqual("Tags: tag1, tag2.", outstr.getvalue())
        return

    def testRepetitionAlt(self):
        """
        Test a repeated match falling back to 'alt' when nothing matches.
        """
        report = (
            { 'report':
              [ { 'output': "Tags: " }
              , { 'query': prefixes+"SELECT * WHERE { ?s ex:notag ?tag } ORDER BY ?tag"
                , 'output': "%(tag)s"
                , 'sep': ", "
                , 'alt': "none"
                }
              , { 'output': "." }
              ]
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr)
        self.assertEqual("Tags: none.", outstr.getvalue())
        return

    def testNesting(self):
        """
        Test a report with a nested sub-report: each outer match's
        bindings are fed into the inner template.
        """
        report = (
            { 'report':
              [ { 'query': prefixes+
                    "SELECT ?s ?title ?label WHERE { ?s dcterms:title ?title; rdfs:label ?label } "+
                    "ORDER BY DESC(?label)"
                , 'output': "\nFound %(title)s:"
                , 'report':
                  [ {'output': "\nTags for %(label)s: " }
                  , { 'query': prefixes+"SELECT * WHERE { ?s ex:tag ?tag } ORDER BY ?tag"
                    , 'output': "%(tag)s"
                    , 'sep': ", "
                    , 'alt': "none"
                    }
                  , { 'output': "." }
                  , { 'output': "\nFinished %(title)s." }
                  ]
                }
              , { 'output': "\nFinished all." }
              ]
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(simple_test_data)
        RdfReport.generate_report(report, rdfgraph, {}, outstr)
        expected = ("\n"+
            "Found Simple test data:\n"+
            "Tags for simple-test-data: tag1, tag2, tag3, tag4.\n"+
            "Finished Simple test data.\n"+
            "Found More test data:\n"+
            "Tags for more-test-data: more1, more2.\n"+
            "Finished More test data.\n"+
            "Finished all."+
            "")
        result = outstr.getvalue()
        # print "\n-----"
        # print result
        # print "-----"
        self.assertEqual(expected, result)
        return

    def testQueryForNesting(self):
        # Sanity-check rdflib's handling of initBindings with an ORDER BY
        # query -- the mechanism the nested-report support relies on.
        testdata = """<?xml version="1.0" encoding="UTF-8"?>
            <rdf:RDF
               xmlns:xml="http://www.w3.org/XML/1998/namespace"
               xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
               xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
               xmlns:xsd="http://www.w3.org/2001/XMLSchema#"
               xmlns:dct="http://purl.org/dc/terms/"
               xmlns:ex="http://example.org/terms/"
            >
              <rdf:Description rdf:about="simple-test-data.rdf">
                <rdfs:label>Label</rdfs:label>
                <dct:title>Title</dct:title>
              </rdf:Description>
            </rdf:RDF>
            """
        teststream = StringIO.StringIO(testdata)
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(teststream)
        query = """
            PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
            PREFIX dct: <http://purl.org/dc/terms/>
            SELECT * WHERE { ?s dct:title ?title; rdfs:label ?label } ORDER BY DESC(?label)
            """
        resp = rdfgraph.query(query, initBindings={ 'title': "foo" })
        self.assertEqual(resp.type, 'SELECT')
        bindings = resp.bindings
        count = 0
        for b in bindings:
            count += 1
            print "\nResult bindings %d:"%count
            for k in b:
                print "%s: %s"%(str(k), str(b[k]))
            # Pre-bound ?title must survive; ?label comes from the data.
            self.assertEquals(b['title'], "foo")
            self.assertEquals(b['label'], "Label")
        return

    def testReportEvalResultUri(self):
        """
        Test report that selects one of the following test result status URIs from:
          http://purl.org/minim/minim#fullySatifies
          http://purl.org/minim/minim#nominallySatifies
          http://purl.org/minim/minim#minimallySatifies
          http://purl.org/minim/minim#potentiallySatisfies
        """
        rouristr = "file:///usr/workspace/wf4ever-ro-catalogue/v0.1/simple-requirements/"
        checklist = "file:///usr/workspace/wf4ever-ro-manager/Checklists/runnable-wf-trafficlight/checklist.rdf"
        initvars = (
            { 'target': rdflib.URIRef(rouristr)
            , 'minim': rdflib.URIRef(checklist+"#Runnable_model")
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(trafficlight_test_data)
        RdfReport.generate_report(TrafficLightReports.EvalTargetResultUri, rdfgraph, initvars, outstr)
        expected = "http://purl.org/minim/minim#minimallySatisfies"
        result = outstr.getvalue()
        # print "\n-----"
        # print result
        # print "-----"
        self.assertEqual(expected, result)
        return

    def testReportEvalResultLabel(self):
        """
        Test report that selects a result label string
        """
        rouristr = "file:///usr/workspace/wf4ever-ro-catalogue/v0.1/simple-requirements/"
        checklist = "file:///usr/workspace/wf4ever-ro-manager/Checklists/runnable-wf-trafficlight/checklist.rdf"
        initvars = (
            { 'target': rdflib.URIRef(rouristr)
            , 'minim': rdflib.URIRef(checklist+"#Runnable_model")
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(trafficlight_test_data)
        RdfReport.generate_report(TrafficLightReports.EvalTargetResultLabel, rdfgraph, initvars, outstr)
        expected = "minimally satisfies"
        result = outstr.getvalue()
        # print "\n-----"
        # print result
        # print "-----"
        self.assertEqual(expected, result)
        return

    def testReportEvalResultClass(self):
        """
        Test report of a textual status summary (CSS class list) of a
        checklist match
        """
        rouristr = "file:///usr/workspace/wf4ever-ro-catalogue/v0.1/simple-requirements/"
        checklist = "file:///usr/workspace/wf4ever-ro-manager/Checklists/runnable-wf-trafficlight/checklist.rdf"
        initvars = (
            { 'target': rdflib.URIRef(rouristr)
            , 'minim': rdflib.URIRef(checklist+"#Runnable_model")
            })
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(trafficlight_test_data)
        RdfReport.generate_report(TrafficLightReports.EvalTargetResultClass, rdfgraph, initvars, outstr)
        expected = '"fail", "should"'
        result = outstr.getvalue()
        # print "\n-----"
        # print result
        # print "-----"
        self.assertEqual(expected, result)
        return

    def testReportEvalItemJSON(self):
        """
        Test JSON rendering of a single checklist item result.
        """
        rouristr = "file:///usr/workspace/wf4ever-ro-catalogue/v0.1/simple-requirements/"
        checklist = "file:///usr/workspace/wf4ever-ro-manager/Checklists/runnable-wf-trafficlight/checklist.rdf"
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(trafficlight_test_data)
        initvars = (
            { 'result': rdfgraph.value(predicate=RDF.type, object=MINIM.Result)
            , 'rouri': rdflib.URIRef(rouristr)
            , 'modeluri': rdflib.URIRef(checklist+"#Runnable_model")
            , 'itemuri': rdflib.URIRef(checklist+"#workflow_inputs_accessible")
            , 'itemlevel': rdflib.URIRef("http://purl.org/minim/minim#missingShould")
            })
        outstr = StringIO.StringIO()
        RdfReport.generate_report(TrafficLightReports.EvalItemJson, rdfgraph, initvars, outstr)
        expected = (
            [ ''
            , '''{ "itemuri": "%s#workflow_inputs_accessible"'''%(checklist)
            , ''', "itemlabel": '''+
              '''"Workflow %sdocs/mkjson.sh input %sdata/UserRequirements-bio.ods is not accessible"'''%
              (rouristr, rouristr)
            , ''', "itemlevel": "http://purl.org/minim/minim#missingShould"'''
            , ''', "itemsatisfied": false'''
            , ''', "itemclass": ["fail", "should"]'''
            ])
        result = outstr.getvalue()
        # print "\n-----"
        # print result
        # print "-----"
        # Compare line by line, ignoring leading/trailing whitespace.
        resultlines = result.split('\n')
        for i in range(len(expected)):
            self.assertEqual(expected[i], resultlines[i].strip())
        return

    def testTrafficlightJSON(self):
        """
        Test report generating traffic-light JSON (per data/mockup.json)
        """
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(trafficlight_test_data)
        RdfReport.generate_report(TrafficLightReports.EvalChecklistJson, rdfgraph, {}, outstr)
        # Test the non-item output only.  The previous test checks itemized output.
        expected = (
            [ ''''''
            , '''{ "rouri": "file:///usr/workspace/wf4ever-ro-catalogue/v0.1/simple-requirements/"'''
            , ''', "roid": "simple-requirements"'''
            , ''', "title": "A simple test RO"'''
            , ''', "description": "A simple RO used for testing traffic light display."'''
            , ''', "checklisturi": "file:///usr/workspace/wf4ever-ro-manager/Checklists/runnable-wf-trafficlight/checklist.rdf#Runnable_model"'''
            , ''', "checklistpurpose": "Runnable"'''
            , ''', "checklisttarget": "file:///usr/workspace/wf4ever-ro-catalogue/v0.1/simple-requirements/"'''
            , ''', "checklisttargetid": "simple-requirements"'''
            , ''', "checklisttargetlabel": "simple-requirements"'''
            , ''', "evalresult": "http://purl.org/minim/minim#minimallySatisfies"'''
            , ''', "evalresultlabel": "minimally satisfies"'''
            , ''', "evalresultclass": ["fail", "should"]'''
            ])
        result = outstr.getvalue()
        log.debug("---- JSON result\n%s\n----"%(result))
        resultlines = result.split('\n')
        for i in range(len(expected)):
            self.assertEqual(expected[i], resultlines[i].strip())
        # Check that output is valid JSON
        resultdict = json.loads(result)
        self.assertEqual(resultdict['rouri'],
            "file:///usr/workspace/wf4ever-ro-catalogue/v0.1/simple-requirements/")
        return

    def testReportEvalItemHTML(self):
        """
        Test HTML rendering of a single checklist item result.
        """
        rouristr = "file:///usr/workspace/wf4ever-ro-catalogue/v0.1/simple-requirements/"
        checklist = "file:///usr/workspace/wf4ever-ro-manager/Checklists/runnable-wf-trafficlight/checklist.rdf"
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(trafficlight_test_data)
        initvars = (
            { 'result': rdfgraph.value(predicate=RDF.type, object=MINIM.Result)
            , 'rouri': rdflib.URIRef(rouristr)
            , 'modeluri': rdflib.URIRef(checklist+"#Runnable_model")
            , 'itemuri': rdflib.URIRef(checklist+"#workflow_inputs_accessible")
            , 'itemlevel': rdflib.URIRef("http://purl.org/minim/minim#missingShould")
            })
        outstr = StringIO.StringIO()
        RdfReport.generate_report(TrafficLightReports.EvalItemHtml, rdfgraph, initvars, outstr)
        expected = (
            [ ''
            , '''<tr class="sub_result">'''
            , '''<td></td>'''
            , '''<td class="trafficlight small fail should"><div/></td>'''
            , '''<td>Workflow %sdocs/mkjson.sh input %sdata/UserRequirements-bio.ods is not accessible</td>'''%
              (rouristr, rouristr)
            , '''</tr>'''
            ])
        result = outstr.getvalue()
        #print "\n-----"
        #print result
        #print "-----"
        resultlines = result.split('\n')
        for i in range(len(expected)):
            self.assertEqual(expected[i], resultlines[i].strip())
        return

    def testTrafficlightHTML(self):
        """
        Test report generating traffic-light HTML (per data/mockup.html)
        """
        rouristr = "file:///usr/workspace/wf4ever-ro-catalogue/v0.1/simple-requirements/"
        outstr = StringIO.StringIO()
        rdfgraph = rdflib.Graph()
        rdfgraph.parse(trafficlight_test_data)
        RdfReport.generate_report(TrafficLightReports.EvalChecklistHtml, rdfgraph, {}, outstr)
        # Test the non-item output only.  The previous test checks itemized output.
        expected = (
            [ '''<title>Research Object Runnable evaluation - simple-requirements</title>'''
            , '''</head>'''
            , '''<body>'''
            , '''<div class="Container">'''
            , '''<div class="header">'''
            , '''A simple test RO'''
            , '''</div>'''
            , '''<div class="content">'''
            , '''<div class="sub_header">A simple RO used for testing traffic light display.</div>'''
            , '''<div class="body">'''
            , '''<table>'''
            , '''<thead>'''
            , '''<tr class="main_result">'''
            , '''<th class="trafficlight large fail should"><div/></th>'''
            , '''<th colspan="2">Target <span class="target">'''
            , '''<a href="%s">simple-requirements</a></span>'''%(rouristr)
            , '''<span class="testresult">minimally satisfies</span> checklist for'''
            , '''<span class="testpurpose">Runnable</span>.'''
            # , '''<p>This Research Object @@TODO.</p>'''
            , '''</th>'''
            , '''</tr>'''
            ])
        result = outstr.getvalue()
        #print "\n-----"
        #print result
        #print "-----"
        resultlines = result.split('\n')
        for i in range(len(expected)):
            # Skip 1st 8 lines of generated HTML:
            self.assertEqual(expected[i], resultlines[i+8].strip())
        return

    # Sentinel/placeholder tests

    def testUnits(self):
        assert (True)

    def testComponents(self):
        assert (True)

    def testIntegration(self):
        assert (True)

    def testPending(self):
        # Deliberately failing marker: selected only by the "pending" suite.
        assert (False), "Pending tests follow"
# Assemble test suite
def getTestSuite(select="unit"):
    """
    Get test suite

    select  is one of the following:
            "unit"      return suite of unit tests only
            "component" return suite of unit and component tests
            "all"       return suite of unit, component and integration tests
            "pending"   return suite of pending tests
            name        a single named test to be run
    """
    # Maps suite category -> list of test method names on TestRdfReport.
    testdict = {
        "unit":
            [ "testUnits"
            , "testNull"
            , "testEscapeJSON"
            , "testHelloWorld"
            , "testSimpleQuery"
            , "testSimpleQuotedJson"
            , "testSimpleQuotedHtml"
            , "testQueryResultMerge"
            , "testQueryResultPreBinding"
            , "testSequence"
            , "testAlternative"
            , "testAlternativeMissing"
            , "testRepetition"
            , "testRepetitionMax"
            , "testRepetitionAlt"
            , "testQueryForNesting"
            , "testNesting"
            , "testReportEvalResultUri"
            , "testReportEvalResultLabel"
            , "testReportEvalResultClass"
            , "testReportEvalItemJSON"
            , "testTrafficlightJSON"
            , "testReportEvalItemHTML"
            , "testTrafficlightHTML"
            ],
        "component":
            [ "testComponents"
            ],
        "integration":
            [ "testIntegration"
            ],
        "pending":
            [ "testPending"
            ]
        }
    return TestUtils.getTestSuite(TestRdfReport, testdict, select=select)
# Run the selected test suite (per command-line args) when executed as a script.
if __name__ == "__main__":
    TestUtils.runTests("TestRdfReport.log", getTestSuite, sys.argv)
# End.
| wf4ever/ro-manager | src/roweb/test/TestRdfReport.py | Python | mit | 27,177 |
# -*- coding: utf-8 -*-
#
# pysimplelog documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 12 22:54:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
#import os
#import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysimplelog'
copyright = u'2016, Bachir Aoun'
author = u'Bachir Aoun'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
try:
# get pysimplelog version
from pysimplelog import __version__ as VER
from pysimplelog import SimpleLog
SimpleLog.__doc__ = SimpleLog.__doc__.replace("%AUTO_VERSION", VER)
except:
VER = "unknown"
version = VER
# The full version, including alpha/beta/rc tags.
release = VER
rst_epilog = """
.. |VERSION| replace:: %s
"""%(VER,)
# -- General configuration ------------------------------------------------

# Content language; None lets Sphinx default to English. Set "language"
# from the command line when doing gettext-based content translation.
language = None

# Patterns (relative to the source directory) of files and directories to
# ignore when looking for sources. Also affects html_static_path and
# html_extra_path.
exclude_patterns = []

# Pygments (syntax highlighting) style.
pygments_style = 'sphinx'

# `todo` and `todoList` directives produce no output.
todo_include_todos = False

# Keep autodoc members in the order they appear in the source file.
autodoc_member_order = 'bysource'

# -- Options for HTML output ----------------------------------------------

# Built-in theme used for HTML and HTML Help pages.
html_theme = 'alabaster'

# Theme-specific (alabaster) look-and-feel tweaks.
html_theme_options = {
    'github_user': "bachiraoun",
    'github_repo': "pysimplelog",
    'github_banner': True,
    'show_powered_by':False,
}

# Document title; VER is defined earlier in this file.
html_title = u'PYthon SIMPLE LOGger package. pysimplelog v%s'%VER
html_short_title = html_title
project_name = html_title

# Custom static files; copied after the builtin static files, so e.g. a
# custom "default.css" overrides the builtin one.
html_static_path = ['_static']

# Custom sidebar templates, mapping document names to template names.
html_sidebars = {
    '**': [],
    'using/windows': [],
}

# Hide "view source" links and the "Created using Sphinx" footer line;
# keep the copyright notice.
html_show_sourcelink = False
html_show_sphinx = False
html_show_copyright = True

# Output file base name for the HTML help builder.
htmlhelp_basename = 'pysimplelogdoc'

# -- Options for LaTeX output ---------------------------------------------

# All LaTeX settings (paper size, point size, preamble, figure alignment)
# are left at their Sphinx defaults.
latex_elements = {
}

# Grouping of the document tree into LaTeX files:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'pysimplelog.tex', u'pysimplelog Documentation',
     u'Bachir Aoun', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pysimplelog', u'pysimplelog Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping of the document tree into Texinfo files:
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    (master_doc, 'pysimplelog', u'pysimplelog Documentation',
     author, 'pysimplelog', 'One line description of project.',
     'Miscellaneous'),
]
| bachiraoun/pysimplelog | docs/source/conf.py | Python | agpl-3.0 | 10,451 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from io import BytesIO
from django.core.exceptions import ValidationError
from django.core.validators import BaseValidator
from django.utils.translation import ugettext_lazy as _
from PIL import Image
class BaseSizeValidator(BaseValidator):
    """Base validator that validates the dimensions (width, height) of an image.

    Subclasses override ``compare(img_size, limit_value)`` to return True
    when the image size violates the limit, which raises a
    ``ValidationError`` built from the class ``message``/``code``.
    """

    def __init__(self, width, height):
        # BaseValidator.__init__ is intentionally bypassed; only the
        # (width, height) limit tuple is stored.
        self.limit_value = width, height

    def compare(self, img_size, limit_value):
        # Bug fix: this method previously took a single argument while
        # __call__ always invokes compare() with two, so using the base
        # class directly raised TypeError instead of validating.
        # The fail-by-default behavior (True == invalid) is preserved.
        return True

    def __call__(self, value):
        """Validate *value* (an uploaded image file); raise ValidationError on failure."""
        cleaned = self.clean(value)
        if self.compare(cleaned, self.limit_value):
            # NOTE: 'with' is a historical typo for 'width', but the
            # subclass message templates interpolate %(with)s, so the key
            # must stay in sync with them.
            params = {
                'with': self.limit_value[0],
                'height': self.limit_value[1],
            }
            raise ValidationError(self.message, code=self.code, params=params)

    @staticmethod
    def clean(value):
        """Return the (width, height) tuple of the uploaded image file."""
        value.seek(0)  # the file may already have been read by another validator
        stream = BytesIO(value.read())
        img = Image.open(stream)
        return img.size
class MaxSizeValidator(BaseSizeValidator):
    """
    ImageField validator to validate the max with and height of an image.

    You may use float("inf") as an infinite boundary.
    """
    message = _('The image you uploaded is too large.'
                ' The required maximum resolution is:'
                ' %(with)sx%(height)s px.')
    code = 'max_resolution'

    def compare(self, img_size, max_size):
        # Invalid as soon as either dimension exceeds its limit.
        return any(actual > limit for actual, limit in zip(img_size, max_size))
class MinSizeValidator(BaseSizeValidator):
    """
    ImageField validator to validate the min with and height of an image.

    You may use float("inf") as an infinite boundary.
    """
    message = _('The image you uploaded is too small.'
                ' The required minimum resolution is:'
                ' %(with)sx%(height)s px.')

    def compare(self, img_size, min_size):
        # Invalid as soon as either dimension falls below its limit.
        return any(actual < limit for actual, limit in zip(img_size, min_size))
| epowers/django-stdimage | stdimage/validators.py | Python | mit | 1,947 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test host- and service-downtimes.
#
from shinken_test import *
import os
import sys
import re
import subprocess
import shutil
import time
import random
import copy
from shinken.brok import Brok
from shinken.objects.timeperiod import Timeperiod
from shinken.objects.module import Module
from shinken.objects.service import Service
from shinken.modules.livestatus_broker.mapping import Logline
from shinken.modules.logstore_sqlite import LiveStatusLogStoreSqlite
from shinken.comment import Comment
# Raise the interpreter's thread-switch check interval: this test suite is
# effectively single-threaded, so fewer GIL checks speed it up.
# NOTE(review): sys.setcheckinterval() is a Python 2 API (removed in 3.9).
sys.setcheckinterval(10000)
class TestConfig(ShinkenTest):
    """Common helpers shared by the livestatus sqlite-logstore test cases below."""
    def contains_line(self, text, pattern):
        """Return True if any line of *text* matches the regex *pattern*."""
        regex = re.compile(pattern)
        for line in text.splitlines():
            if re.search(regex, line):
                return True
        return False
    def update_broker(self, dodeepcopy=False):
        """Feed every pending scheduler brok to the livestatus broker, in id order,
        then clear the scheduler's brok queue."""
        # The brok should be manage in the good order
        ids = self.sched.brokers['Default-Broker']['broks'].keys()
        ids.sort()
        for brok_id in ids:
            brok = self.sched.brokers['Default-Broker']['broks'][brok_id]
            #print "Managing a brok type", brok.type, "of id", brok_id
            #if brok.type == 'update_service_status':
            # print "Problem?", brok.data['is_problem']
            if dodeepcopy:
                # isolate the broker from later in-place mutations of the brok
                brok = copy.deepcopy(brok)
            brok.prepare()
            self.livestatus_broker.manage_brok(brok)
        self.sched.brokers['Default-Broker']['broks'] = {}
    def tearDown(self):
        """Close the sqlite log store and remove every file this test created."""
        self.livestatus_broker.db.commit()
        self.livestatus_broker.db.close()
        if os.path.exists(self.livelogs):
            os.remove(self.livelogs)
        if os.path.exists(self.livelogs + "-journal"):
            os.remove(self.livelogs + "-journal")
        if os.path.exists("tmp/archives"):
            for db in os.listdir("tmp/archives"):
                print "cleanup", db
                os.remove(os.path.join("tmp/archives", db))
        if os.path.exists('var/nagios.log'):
            os.remove('var/nagios.log')
        if os.path.exists('var/retention.dat'):
            os.remove('var/retention.dat')
        if os.path.exists('var/status.dat'):
            os.remove('var/status.dat')
        self.livestatus_broker = None
class TestConfigSmall(TestConfig):
    """Log-store tests against a small (1 realm / 1 host / 1 service) config."""
    def setUp(self):
        """Load the small config, start livestatus and flush the initial broks."""
        self.setup_with_file('etc/nagios_1r_1h_1s.cfg')
        Comment.id = 1
        self.testid = str(os.getpid() + random.randint(1, 1000))
        self.init_livestatus()
        print "Cleaning old broks?"
        self.sched.conf.skip_initial_broks = False
        self.sched.brokers['Default-Broker'] = {'broks' : {}, 'has_full_broks' : False}
        self.sched.fill_initial_broks('Default-Broker')
        self.update_broker()
        self.nagios_path = None
        self.livestatus_path = None
        self.nagios_config = None
        # add use_aggressive_host_checking so we can mix exit codes 1 and 2
        # but still get DOWN state
        host = self.sched.hosts.find_by_name("test_host_0")
        host.__class__.use_aggressive_host_checking = 1
    def write_logs(self, host, loops=0):
        """Emit *loops* DOWN/UP alert-log pairs for *host* and push them to the broker."""
        for loop in range(0, loops):
            host.state = 'DOWN'
            host.state_type = 'SOFT'
            host.attempt = 1
            host.output = "i am down"
            host.raise_alert_log_entry()
            host.state = 'UP'
            host.state_type = 'HARD'
            host.attempt = 1
            host.output = "i am down"
            host.raise_alert_log_entry()
            self.update_broker()
    def test_hostsbygroup(self):
        """Smoke test: a GET hostsbygroup query is answered without error."""
        self.print_header()
        now = time.time()
        objlist = []
        print "-------------------------------------------"
        print "Service.lsm_host_name", Service.lsm_host_name
        print "Logline.lsm_current_host_name", Logline.lsm_current_host_name
        print "-------------------------------------------"
        for host in self.sched.hosts:
            objlist.append([host, 0, 'UP'])
        for service in self.sched.services:
            objlist.append([service, 0, 'OK'])
        self.scheduler_loop(1, objlist)
        self.update_broker()
        request = """GET hostsbygroup
ColumnHeaders: on
Columns: host_name hostgroup_name
Filter: groups >= allhosts
OutputFormat: csv
KeepAlive: on
ResponseHeader: fixed16
"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response
    def test_one_log(self):
        """Write one DOWN and one UP alert (an hour apart, time-warped into the
        past), archive, and query them back through GET log."""
        self.print_header()
        host = self.sched.hosts.find_by_name("test_host_0")
        now = time.time()
        time_warp(-3600)
        num_logs = 0
        host.state = 'DOWN'
        host.state_type = 'SOFT'
        host.attempt = 1
        host.output = "i am down"
        host.raise_alert_log_entry()
        time.sleep(3600)
        host.state = 'UP'
        host.state_type = 'HARD'
        host.attempt = 1
        host.output = "i am up"
        host.raise_alert_log_entry()
        time.sleep(3600)
        self.update_broker()
        print "-------------------------------------------"
        print "Service.lsm_host_name", Service.lsm_host_name
        print "Logline.lsm_current_host_name", Logline.lsm_current_host_name
        print "-------------------------------------------"
        self.livestatus_broker.db.log_db_do_archive()
        print "request logs from", int(now - 3600), int(now + 3600)
        print "request logs from", time.asctime(time.localtime(int(now - 3600))), time.asctime(time.localtime(int(now + 3600)))
        request = """GET log
Filter: time >= """ + str(int(now - 3600)) + """
Filter: time <= """ + str(int(now + 3600)) + """
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response
        print "next next_log_db_rotate", time.asctime(time.localtime(self.livestatus_broker.db.next_log_db_rotate))
        result = self.livestatus_broker.db.log_db_historic_contents()
        for day in result:
            print "file is", day[0]
            print "start is", time.asctime(time.localtime(day[3]))
            print "stop is", time.asctime(time.localtime(day[4]))
            print "archive is", day[2]
            print "handle is", day[1]
        print self.livestatus_broker.db.log_db_relevant_files(now - 3600, now + 3600)
    def test_num_logs(self):
        """Generate a week of hourly DOWN/UP alerts, archive, and query a
        two-day window in the middle."""
        self.print_header()
        host = self.sched.hosts.find_by_name("test_host_0")
        now = time.time()
        time_warp(-1 * 3600 * 24 * 7)
        num_logs = 0
        while time.time() < now:
            host.state = 'DOWN'
            host.state_type = 'SOFT'
            host.attempt = 1
            host.output = "i am down"
            host.raise_alert_log_entry()
            num_logs += 1
            time.sleep(3600)
            host.state = 'UP'
            host.state_type = 'HARD'
            host.attempt = 1
            host.output = "i am up"
            host.raise_alert_log_entry()
            num_logs += 1
            time.sleep(3600)
        self.update_broker()
        self.livestatus_broker.db.log_db_do_archive()
        print "request logs from", int(now - 3600 * 24 * 5), int(now - 3600 * 24 * 3)
        print "request logs from", time.asctime(time.localtime(int(now - 3600 * 24 * 5))), time.asctime(time.localtime(int(now - 3600 * 24 * 3)))
        request = """GET log
Filter: time >= """ + str(int(now - 3600 * 24 * 5)) + """
Filter: time <= """ + str(int(now - 3600 * 24 * 3)) + """
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response
        print "next next_log_db_rotate", time.asctime(time.localtime(self.livestatus_broker.db.next_log_db_rotate))
        result = self.livestatus_broker.db.log_db_historic_contents()
        for day in result:
            print "file is", day[0]
            print "start is", time.asctime(time.localtime(day[3]))
            print "stop is", time.asctime(time.localtime(day[4]))
            print "archive is", day[2]
            print "handle is", day[1]
        print self.livestatus_broker.db.log_db_relevant_files(now - 3 * 24 * 3600, now)
    def test_split_database(self):
        """Write logs over four warped days, archive, and verify one sqlite
        archive file per day with the expected record counts; then repeat
        starting from the original wall-clock time and verify the counts
        doubled (records appended to the same per-day archives)."""
        #
        # after daylight-saving time has begun or ended,
        # this test may fail for some days
        #
        #os.removedirs("var/archives")
        self.print_header()
        host = self.sched.hosts.find_by_name("test_host_0")
        save_now = time.time()
        today = datetime.datetime.fromtimestamp(time.time())
        today_noon = datetime.datetime(today.year, today.month, today.day, 12, 0, 0)
        today_morning = datetime.datetime(today.year, today.month, today.day, 0, 0, 0)
        back2days_noon = today_noon - datetime.timedelta(days=2)
        back2days_morning = today_morning - datetime.timedelta(days=2)
        back4days_noon = today_noon - datetime.timedelta(days=4)
        back4days_morning = today_morning - datetime.timedelta(days=4)
        today_noon = int(time.mktime(today_noon.timetuple()))
        today_morning = int(time.mktime(today_morning.timetuple()))
        back2days_noon = int(time.mktime(back2days_noon.timetuple()))
        back2days_morning = int(time.mktime(back2days_morning.timetuple()))
        back4days_noon = int(time.mktime(back4days_noon.timetuple()))
        back4days_morning = int(time.mktime(back4days_morning.timetuple()))
        now = time.time()
        time_warp(-1 * (now - back4days_noon))
        now = time.time()
        print "4t is", time.asctime(time.localtime(int(now)))
        logs = 0
        for day in range(1, 5):
            print "day", day
            # at 12:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(3600)
            # at 13:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(36000)
            # at 23:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(3600)
            # at 00:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(43200)
        # day 1: 1 * (2 + 2 + 2)
        # day 2: 2 * (2 + 2 + 2) + 1 * 2 (from last loop)
        # day 3: 3 * (2 + 2 + 2) + 2 * 2 (from last loop)
        # day 4: 4 * (2 + 2 + 2) + 3 * 2 (from last loop)
        # today: 4 * 2 (from last loop)
        # 6 + 14 + 22 + 30 + 8 = 80
        now = time.time()
        print "0t is", time.asctime(time.localtime(int(now)))
        request = """GET log
OutputFormat: python
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response
        pyresponse = eval(response)
        # ignore these internal logs
        pyresponse = [l for l in pyresponse if l[1].strip() not in ["Warning", "Info", "Debug"]]
        print "Raw pyresponse", pyresponse
        print "pyresponse", len(pyresponse)
        print "expect", logs
        self.assert_(len(pyresponse) == logs)
        self.livestatus_broker.db.log_db_do_archive()
        self.assert_(os.path.exists("tmp/archives"))
        # one archive db per past day (today's records stay in the live db)
        self.assert_(len([d for d in os.listdir("tmp/archives") if not d.endswith("journal")]) == 4)
        lengths = []
        for db in sorted([d for d in os.listdir("tmp/archives") if not d.endswith("journal")]):
            dbmodconf = Module({'module_name': 'LogStore',
                'module_type': 'logstore_sqlite',
                'use_aggressive_sql': '0',
                'database_file': "tmp/archives/" + db,
                'max_logs_age': '0',
            })
            tmpconn = LiveStatusLogStoreSqlite(dbmodconf)
            tmpconn.open()
            numlogs = tmpconn.execute("SELECT COUNT(*) FROM logs")
            lengths.append(numlogs[0][0])
            print "db entries", db, numlogs
            tmpconn.close()
        print "lengths is", lengths
        self.assert_(lengths == [6, 14, 22, 30])
        request = """GET log
Filter: time >= """ + str(int(back4days_morning)) + """
Filter: time <= """ + str(int(back2days_noon)) + """
OutputFormat: python
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response
        pyresponse = eval(response)
        self.assert_(len(pyresponse) == 30)
        print "pyresponse", len(pyresponse)
        print "expect", logs
        # a second archive run must be a no-op: same query, same result
        self.livestatus_broker.db.log_db_do_archive()
        request = """GET log
Filter: time >= """ + str(int(back4days_morning)) + """
Filter: time <= """ + str(int(back2days_noon)) + """
OutputFormat: python
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response
        pyresponse = eval(response)
        self.assert_(len(pyresponse) == 30)
        print "pyresponse", len(pyresponse)
        print "expect", logs
        self.livestatus_broker.db.log_db_do_archive()
        # shrinking the window by one second drops the 6 records at noon
        request = """GET log
Filter: time >= """ + str(int(back4days_morning)) + """
Filter: time <= """ + str(int(back2days_noon) - 1) + """
OutputFormat: python
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response
        pyresponse = eval(response)
        self.assert_(len(pyresponse) == 24)
        print "pyresponse", len(pyresponse)
        print "expect", logs
        # now warp to the time when we entered this test
        time_warp(-1 * (time.time() - save_now))
        # and now start the same logging
        today = datetime.datetime.fromtimestamp(time.time())
        today_noon = datetime.datetime(today.year, today.month, today.day, 12, 0, 0)
        today_morning = datetime.datetime(today.year, today.month, today.day, 0, 0, 0)
        back2days_noon = today_noon - datetime.timedelta(days=2)
        back2days_morning = today_morning - datetime.timedelta(days=2)
        back4days_noon = today_noon - datetime.timedelta(days=4)
        back4days_morning = today_morning - datetime.timedelta(days=4)
        today_noon = int(time.mktime(today_noon.timetuple()))
        today_morning = int(time.mktime(today_morning.timetuple()))
        back2days_noon = int(time.mktime(back2days_noon.timetuple()))
        back2days_morning = int(time.mktime(back2days_morning.timetuple()))
        back4days_noon = int(time.mktime(back4days_noon.timetuple()))
        back4days_morning = int(time.mktime(back4days_morning.timetuple()))
        now = time.time()
        time_warp(-1 * (now - back4days_noon))
        now = time.time()
        time.sleep(5)
        print "4t is", time.asctime(time.localtime(int(now)))
        logs = 0
        for day in range(1, 5):
            print "day", day
            # at 12:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(3600)
            # at 13:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(36000)
            # at 23:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(3600)
            # at 00:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(43200)
        # day 1: 1 * (2 + 2 + 2)
        # day 2: 2 * (2 + 2 + 2) + 1 * 2 (from last loop)
        # day 3: 3 * (2 + 2 + 2) + 2 * 2 (from last loop)
        # day 4: 4 * (2 + 2 + 2) + 3 * 2 (from last loop)
        # today: 4 * 2 (from last loop)
        # 6 + 14 + 22 + 30 + 8 = 80
        self.livestatus_broker.db.log_db_do_archive()
        self.assert_(os.path.exists("tmp/archives"))
        self.assert_(len([d for d in os.listdir("tmp/archives") if not d.endswith("journal")]) == 4)
        lengths = []
        for db in sorted([d for d in os.listdir("tmp/archives") if not d.endswith("journal")]):
            dbmodconf = Module({'module_name': 'LogStore',
                'module_type': 'logstore_sqlite',
                'use_aggressive_sql': '0',
                'database_file': "tmp/archives/" + db,
                'max_logs_age': '0',
            })
            tmpconn = LiveStatusLogStoreSqlite(dbmodconf)
            tmpconn.open()
            numlogs = tmpconn.execute("SELECT COUNT(*) FROM logs")
            lengths.append(numlogs[0][0])
            print "db entries", db, numlogs
            tmpconn.close()
        print "lengths is", lengths
        # each per-day archive now holds both rounds of records
        self.assert_(lengths == [12, 28, 44, 60])
    def xtest_david_database(self):
        """Disabled (x-prefixed) ad-hoc benchmark against a pre-existing database.

        NOTE(review): LiveStatusDb is not imported in this module; this method
        would fail with NameError if renamed back to test_*.
        """
        #os.removedirs("var/archives")
        self.print_header()
        lengths = []
        dbh = LiveStatusDb("tmp/livestatus.db", "tmp/archives", 3600)
        numlogs = dbh.execute("SELECT COUNT(*) FROM logs")
        lengths.append(numlogs[0][0])
        print "db main entries", numlogs
        dbh.close()
        start = time.time()
        os.system("date")
        dbh = LiveStatusDb("tmp/livestatus.db", "tmp/archives", 3600)
        dbh.log_db_do_archive()
        dbh.close()
        os.system("date")
        stop = time.time()
        for db in sorted(os.listdir("tmp/archives")):
            dbh = LiveStatusDb("tmp/archives/" + db, "tmp", 3600)
            numlogs = dbh.execute("SELECT COUNT(*) FROM logs")
            lengths.append(numlogs[0][0])
            print "db entries", db, numlogs
            dbh.close()
        print "lengths is", lengths
    def test_archives_path(self):
        """Show how the archive directory is derived from the db file's directory."""
        #os.removedirs("var/archives")
        self.print_header()
        lengths = []
        database_file = "dotlivestatus.db"
        archives_path = os.path.join(os.path.dirname(database_file), 'archives')
        print "archive is", archives_path
    def test_sven(self):
        """A single DOWN/UP pair must yield exactly two rows for a combined
        host/service/timeperiod HARD-state filter query."""
        self.print_header()
        host = self.sched.hosts.find_by_name("test_host_0")
        now = time.time()
        num_logs = 0
        host.state = 'DOWN'
        host.state_type = 'SOFT'
        host.attempt = 1
        host.output = "i am down"
        host.raise_alert_log_entry()
        time.sleep(60)
        host.state = 'UP'
        host.state_type = 'HARD'
        host.attempt = 1
        host.output = "i am up"
        host.raise_alert_log_entry()
        time.sleep(60)
        self.show_logs()
        self.update_broker()
        self.livestatus_broker.db.log_db_do_archive()
        query_end = time.time() + 3600
        query_start = query_end - 3600 * 24 * 21
        request = """GET log
Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
And: 2
Filter: host_name = test_host_0
Filter: type = HOST ALERT
Filter: options ~ ;HARD;
Filter: type = INITIAL HOST STATE
Filter: options ~ ;HARD;
Filter: type = CURRENT HOST STATE
Filter: options ~ ;HARD;
Filter: type = HOST DOWNTIME ALERT
Or: 7
And: 2
Filter: host_name = test_host_0
Filter: type = SERVICE ALERT
Filter: options ~ ;HARD;
Filter: type = INITIAL SERVICE STATE
Filter: options ~ ;HARD;
Filter: type = CURRENT SERVICE STATE
Filter: options ~ ;HARD;
Filter: type = SERVICE DOWNTIME ALERT
Or: 7
And: 2
Filter: class = 2
Filter: type ~~ TIMEPERIOD TRANSITION
Or: 4
OutputFormat: json
ResponseHeader: fixed16
"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print request
        print response
        # skip the fixed16 response header line before parsing
        pyresponse = eval(response.splitlines()[1])
        pyresponse = [l for l in pyresponse if l[2].strip() not in ["Warning", "Info", "Debug"]]
        print pyresponse
        self.assert_(len(pyresponse) == 2)
class TestConfigBig(TestConfig):
    """Log-store tests against a big (5 realms / 100 hosts / 2000 services) config."""
    def setUp(self):
        """Load the big configuration, start livestatus and flush initial broks."""
        start_setUp = time.time()
        self.setup_with_file('etc/nagios_5r_100h_2000s.cfg')
        Comment.id = 1
        self.testid = str(os.getpid() + random.randint(1, 1000))
        self.init_livestatus()
        print "Cleaning old broks?"
        self.sched.conf.skip_initial_broks = False
        self.sched.brokers['Default-Broker'] = {'broks' : {}, 'has_full_broks' : False}
        self.sched.fill_initial_broks('Default-Broker')
        self.update_broker()
        print "************* Overall Setup:", time.time() - start_setUp
        # add use_aggressive_host_checking so we can mix exit codes 1 and 2
        # but still get DOWN state
        host = self.sched.hosts.find_by_name("test_host_000")
        host.__class__.use_aggressive_host_checking = 1
    def test_a_long_history(self):
        """Simulate several time-warped days of checks, counting how many
        SERVICE ALERTs for test_host_099/test_ok_01 fall into the query
        window, then verify GET log filtering and the Negate: operator
        against that count (filtered + negated == unfiltered)."""
        #return
        test_host_005 = self.sched.hosts.find_by_name("test_host_005")
        test_host_099 = self.sched.hosts.find_by_name("test_host_099")
        test_ok_00 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_00")
        test_ok_01 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_01")
        test_ok_04 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_04")
        test_ok_16 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_16")
        test_ok_99 = self.sched.services.find_srv_by_name_and_hostname("test_host_099", "test_ok_01")
        days = 4
        etime = time.time()
        print "now it is", time.ctime(etime)
        print "now it is", time.gmtime(etime)
        etime_midnight = (etime - (etime % 86400)) + time.altzone
        print "midnight was", time.ctime(etime_midnight)
        print "midnight was", time.gmtime(etime_midnight)
        query_start = etime_midnight - (days - 1) * 86400
        query_end = etime_midnight
        print "query_start", time.ctime(query_start)
        print "query_end ", time.ctime(query_end)
        # |----------|----------|----------|----------|----------|---x
        #                                                             etime
        #                                                  etime_midnight
        #     ---x------
        #        etime - 4 days
        #            |---
        #            query_start
        #
        #            ............................................
        #            events in the log database ranging till now
        #
        #            |________________________________|
        #            events which will be read from db
        #
        loops = int(86400 / 192)
        time_warp(-1 * days * 86400)
        print "warp back to", time.ctime(time.time())
        # run silently
        old_stdout = sys.stdout
        sys.stdout = open(os.devnull, "w")
        should_be = 0
        for day in xrange(days):
            sys.stderr.write("day %d now it is %s i run %d loops\n" % (day, time.ctime(time.time()), loops))
            self.scheduler_loop(2, [
                [test_ok_00, 0, "OK"],
                [test_ok_01, 0, "OK"],
                [test_ok_04, 0, "OK"],
                [test_ok_16, 0, "OK"],
                [test_ok_99, 0, "OK"],
            ])
            self.update_broker()
            #for i in xrange(3600 * 24 * 7):
            for i in xrange(loops):
                if i % 10000 == 0:
                    sys.stderr.write(str(i))
                if i % 399 == 0:
                    self.scheduler_loop(3, [
                        [test_ok_00, 1, "WARN"],
                        [test_ok_01, 2, "CRIT"],
                        [test_ok_04, 3, "UNKN"],
                        [test_ok_16, 1, "WARN"],
                        [test_ok_99, 2, "CRIT"],
                    ])
                    # 3 SOFT->HARD alerts for test_ok_99 land in the window
                    if int(time.time()) >= query_start and int(time.time()) <= query_end:
                        should_be += 3
                        sys.stderr.write("now it should be %s\n" % should_be)
                time.sleep(62)
                if i % 399 == 0:
                    self.scheduler_loop(1, [
                        [test_ok_00, 0, "OK"],
                        [test_ok_01, 0, "OK"],
                        [test_ok_04, 0, "OK"],
                        [test_ok_16, 0, "OK"],
                        [test_ok_99, 0, "OK"],
                    ])
                    # plus 1 recovery alert
                    if int(time.time()) >= query_start and int(time.time()) <= query_end:
                        should_be += 1
                        sys.stderr.write("now it should be %s\n" % should_be)
                time.sleep(2)
                if i % 17 == 0:
                    self.scheduler_loop(3, [
                        [test_ok_00, 1, "WARN"],
                        [test_ok_01, 2, "CRIT"],
                    ])
                time.sleep(62)
                if i % 17 == 0:
                    self.scheduler_loop(1, [
                        [test_ok_00, 0, "OK"],
                        [test_ok_01, 0, "OK"],
                    ])
                time.sleep(2)
                if i % 14 == 0:
                    self.scheduler_loop(3, [
                        [test_host_005, 2, "DOWN"],
                    ])
                if i % 12 == 0:
                    self.scheduler_loop(3, [
                        [test_host_099, 2, "DOWN"],
                    ])
                time.sleep(62)
                if i % 14 == 0:
                    self.scheduler_loop(3, [
                        [test_host_005, 0, "UP"],
                    ])
                if i % 12 == 0:
                    self.scheduler_loop(3, [
                        [test_host_099, 0, "UP"],
                    ])
                time.sleep(2)
                self.update_broker()
                if i % 1000 == 0:
                    self.livestatus_broker.db.commit()
            endtime = time.time()
            self.livestatus_broker.db.commit()
            sys.stderr.write("day %d end it is %s\n" % (day, time.ctime(time.time())))
        sys.stdout.close()
        sys.stdout = old_stdout
        self.livestatus_broker.db.commit_and_rotate_log_db()
        numlogs = self.livestatus_broker.db.execute("SELECT COUNT(*) FROM logs")
        print "numlogs is", numlogs
        # now we have a lot of events
        # find type = HOST ALERT for test_host_005
        request = """GET log
Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 5
OutputFormat: json"""
        # switch back to realtime. we want to know how long it takes
        fake_time_time = time.time
        fake_time_sleep = time.sleep
        time.time = original_time_time
        time.sleep = original_time_sleep
        print request
        print "query 1 --------------------------------------------------"
        tic = time.time()
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        tac = time.time()
        pyresponse = eval(response)
        print "number of records with test_ok_01", len(pyresponse)
        self.assert_(len(pyresponse) == should_be)
        # and now test Negate:
        request = """GET log
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 2
Negate:
And: 2
OutputFormat: json"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print "got response with true instead of negate"
        notpyresponse = eval(response)
        print "number of records without test_ok_01", len(notpyresponse)
        request = """GET log
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
OutputFormat: json"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        allpyresponse = eval(response)
        print "all records", len(allpyresponse)
        self.assert_(len(allpyresponse) == len(notpyresponse) + len(pyresponse))
        # the numlogs above only counts records in the currently attached db
        numlogs = self.livestatus_broker.db.execute("SELECT COUNT(*) FROM logs WHERE time >= %d AND time <= %d" % (int(query_start), int(query_end)))
        print "numlogs is", numlogs
        # restore the warped clock for the remaining teardown
        time.time = fake_time_time
        time.sleep = fake_time_sleep
class TestConfigNoLogstore(TestConfig):
    """Variant of TestConfig that deliberately does NOT attach a LogStore
    sub-module to the LiveStatus module configuration (see init_livestatus).

    The broker is expected to fall back to an implicitly created sqlite
    logstore (add_compatibility_sqlite_module); test_has_implicit_module
    verifies exactly that.
    """
    def setUp(self):
        # Bring up a minimal scheduler + livestatus broker environment
        # (1 realm / 1 host / 1 service) and push the initial broks
        # through so the broker's regenerator holds a full state.
        start_setUp = time.time()
        self.setup_with_file('etc/nagios_1r_1h_1s.cfg')
        Comment.id = 1
        # unique suffix so parallel test runs do not collide on temp files
        self.testid = str(os.getpid() + random.randint(1, 1000))
        self.init_livestatus()
        print "Cleaning old broks?"
        self.sched.conf.skip_initial_broks = False
        self.sched.brokers['Default-Broker'] = {'broks' : {}, 'has_full_broks' : False}
        self.sched.fill_initial_broks('Default-Broker')
        self.update_broker()
        print "************* Overall Setup:", time.time() - start_setUp
        # add use_aggressive_host_checking so we can mix exit codes 1 and 2
        # but still get DOWN state
        host = self.sched.hosts.find_by_name("test_host_0")
        host.__class__.use_aggressive_host_checking = 1
    def tearDown(self):
        # Flush and close the sqlite log database, then remove every file
        # this test may have created so the next test starts clean.
        self.livestatus_broker.db.commit()
        self.livestatus_broker.db.close()
        if os.path.exists(self.livelogs):
            os.remove(self.livelogs)
        # sqlite keeps a rollback journal next to the database file
        if os.path.exists(self.livelogs + "-journal"):
            os.remove(self.livelogs + "-journal")
        if os.path.exists(self.livestatus_broker.pnp_path):
            shutil.rmtree(self.livestatus_broker.pnp_path)
        if os.path.exists('var/nagios.log'):
            os.remove('var/nagios.log')
        if os.path.exists('var/retention.dat'):
            os.remove('var/retention.dat')
        if os.path.exists('var/status.dat'):
            os.remove('var/status.dat')
        self.livestatus_broker = None
    def init_livestatus(self):
        """Set up a LiveStatus broker module WITHOUT an explicit logstore
        sub-module; the broker must create the compatibility sqlite
        logstore on its own."""
        self.livelogs = 'tmp/livelogs.db' + self.testid
        modconf = Module({'module_name': 'LiveStatus',
            'module_type': 'livestatus',
            'port': str(50000 + os.getpid()),
            'pnp_path': 'tmp/pnp4nagios_test' + self.testid,
            'host': '127.0.0.1',
            'socket': 'live',
            'name': 'test', #?
            'database_file': self.livelogs,
        })
        # dbmodconf is built here but intentionally NOT attached (see below)
        dbmodconf = Module({'module_name': 'LogStore',
            'module_type': 'logstore_sqlite',
            'use_aggressive_sql': "0",
            'database_file': self.livelogs,
            'archive_path': os.path.join(os.path.dirname(self.livelogs), 'archives'),
        })
        ####################################
        # !NOT! modconf.modules = [dbmodconf]
        ####################################
        self.livestatus_broker = LiveStatus_broker(modconf)
        self.livestatus_broker.create_queues()
        self.livestatus_broker.init()
        # The following mirrors what livestatus_broker.main/do_main/
        # manage_lql_thread would do in production, inlined for the test.
        #--- livestatus_broker.main
        self.livestatus_broker.log = logger
        # this seems to damage the logger so that the scheduler can't use it
        #self.livestatus_broker.log.load_obj(self.livestatus_broker)
        self.livestatus_broker.debug_output = []
        self.livestatus_broker.modules_manager = ModulesManager('livestatus', self.livestatus_broker.find_modules_path(), [])
        self.livestatus_broker.modules_manager.set_modules(self.livestatus_broker.modules)
        # We can now output some previouly silented debug ouput
        self.livestatus_broker.do_load_modules()
        for inst in self.livestatus_broker.modules_manager.instances:
            if inst.properties["type"].startswith('logstore'):
                f = getattr(inst, 'load', None)
                if f and callable(f):
                    f(self.livestatus_broker) # !!! NOT self here !!!!
                break
        for s in self.livestatus_broker.debug_output:
            print "errors during load", s
        del self.livestatus_broker.debug_output
        # no explicit logstore was configured, so ask the broker to add
        # the sqlite compatibility module itself
        self.livestatus_broker.add_compatibility_sqlite_module()
        self.livestatus_broker.rg = LiveStatusRegenerator()
        self.livestatus_broker.datamgr = datamgr
        datamgr.load(self.livestatus_broker.rg)
        self.livestatus_broker.query_cache = LiveStatusQueryCache()
        self.livestatus_broker.query_cache.disable()
        self.livestatus_broker.rg.register_cache(self.livestatus_broker.query_cache)
        #--- livestatus_broker.main
        #--- livestatus_broker.do_main
        self.livestatus_broker.db = self.livestatus_broker.modules_manager.instances[0]
        self.livestatus_broker.db.open()
        #--- livestatus_broker.do_main
        #--- livestatus_broker.manage_lql_thread
        self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.datamgr, self.livestatus_broker.query_cache, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
        #--- livestatus_broker.manage_lql_thread
    def test_has_implicit_module(self):
        # the implicitly added sqlite logstore must be the first (and only)
        # module instance and must point at the configured database file
        self.assert_(self.livestatus_broker.modules_manager.instances[0].properties['type'] == 'logstore_sqlite')
        self.assert_(self.livestatus_broker.modules_manager.instances[0].__class__.__name__ == 'LiveStatusLogStoreSqlite')
        self.assert_(self.livestatus_broker.db.database_file == self.livelogs)
if __name__ == '__main__':
    #import cProfile
    # 'command' is only consumed by the commented-out cProfile call below;
    # it is kept so profiling can be re-enabled by swapping the two lines.
    command = """unittest.main()"""
    unittest.main()
    #cProfile.runctx( command, globals(), locals(), filename="/tmp/livestatus.profile" )
| wbsavage/shinken | test/test_livestatus_db.py | Python | agpl-3.0 | 36,115 |
"""
This module provides a set of utilities for writing TSV files.
.. autoclass:: mysqltsv.writer.Writer
:members:
.. autofunction:: mysqltsv.functions.write
"""
import logging
from .util import write_row
logger = logging.getLogger(__name__)
class Writer:
    """
    Writes rows to a TSV file.

    :Parameters:
        f : `file`
            A file pointer to write rows to
        headers : `list` ( `str` )
            If a list of `str` is provided, use those strings as headers and
            write them out as the first row.  Otherwise, no headers are
            written.
        none_string : `str`
            The string to write for `None` values.  (Defaults to "NULL")
    """
    def __init__(self, f, headers=None, none_string="NULL"):
        self.f = f
        self.none_string = none_string
        # PEP 8: compare against None with `is not`, never `!=` (an
        # equality check can be hijacked by a custom __eq__)
        if headers is not None:
            write_row(headers, self.f, none_string=self.none_string)
        self.headers = headers

    def write(self, row):
        """
        Writes a row to the output file.

        :Parameters:
            row : `list` | `dict` | :class:`~mysqltsv.row_type.AbstractRow`
                Datastructure representing the row to write
        """
        write_row(row, self.f, headers=self.headers,
                  none_string=self.none_string)
| halfak/mysqltsv | mysqltsv/writer.py | Python | mit | 1,278 |
"""The tests for the pushbullet notification platform."""
from http import HTTPStatus
import json
from unittest.mock import patch
from pushbullet import PushBullet
import pytest
import homeassistant.components.notify as notify
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, load_fixture
@pytest.fixture
def mock_pushbullet():
    """Mock pushbullet.

    Patches PushBullet._get_data to return the canned device list from
    the pushbullet_devices.json fixture, so platform setup performs no
    network I/O.
    """
    with patch.object(
        PushBullet,
        "_get_data",
        return_value=json.loads(load_fixture("pushbullet_devices.json")),
    ):
        yield
async def test_pushbullet_config(hass, mock_pushbullet):
    """A minimal, valid pushbullet notify config sets up one platform."""
    platform_conf = {
        "name": "test",
        "platform": "pushbullet",
        "api_key": "MYFAKEKEY",
    }
    with assert_setup_component(1) as loaded:
        assert await async_setup_component(
            hass, notify.DOMAIN, {notify.DOMAIN: platform_conf}
        )
        await hass.async_block_till_done()
    assert loaded[notify.DOMAIN]
async def test_pushbullet_config_bad(hass):
    """A config without the required api_key loads zero platforms."""
    with assert_setup_component(0) as loaded:
        assert await async_setup_component(
            hass, notify.DOMAIN, {notify.DOMAIN: {"platform": "pushbullet"}}
        )
        await hass.async_block_till_done()
    assert not loaded[notify.DOMAIN]
async def test_pushbullet_push_default(hass, requests_mock, mock_pushbullet):
    """A push without an explicit target goes out as one broadcast note."""
    platform_conf = {
        "name": "test",
        "platform": "pushbullet",
        "api_key": "MYFAKEKEY",
    }
    with assert_setup_component(1) as loaded:
        assert await async_setup_component(
            hass, notify.DOMAIN, {notify.DOMAIN: platform_conf}
        )
        await hass.async_block_till_done()
    assert loaded[notify.DOMAIN]
    requests_mock.register_uri(
        "POST",
        "https://api.pushbullet.com/v2/pushes",
        status_code=HTTPStatus.OK,
        json={"mock_response": "Ok"},
    )
    await hass.services.async_call(
        notify.DOMAIN, "test", {"title": "Test Title", "message": "Test Message"}
    )
    await hass.async_block_till_done()
    assert requests_mock.called
    assert requests_mock.call_count == 1
    assert requests_mock.last_request.json() == {
        "type": "note",
        "title": "Test Title",
        "body": "Test Message",
    }
async def test_pushbullet_push_device(hass, requests_mock, mock_pushbullet):
    """A push targeted at one device carries that device's iden."""
    platform_conf = {
        "name": "test",
        "platform": "pushbullet",
        "api_key": "MYFAKEKEY",
    }
    with assert_setup_component(1) as loaded:
        assert await async_setup_component(
            hass, notify.DOMAIN, {notify.DOMAIN: platform_conf}
        )
        await hass.async_block_till_done()
    assert loaded[notify.DOMAIN]
    requests_mock.register_uri(
        "POST",
        "https://api.pushbullet.com/v2/pushes",
        status_code=HTTPStatus.OK,
        json={"mock_response": "Ok"},
    )
    await hass.services.async_call(
        notify.DOMAIN,
        "test",
        {
            "title": "Test Title",
            "message": "Test Message",
            "target": ["device/DESKTOP"],
        },
    )
    await hass.async_block_till_done()
    assert requests_mock.called
    assert requests_mock.call_count == 1
    assert requests_mock.last_request.json() == {
        "type": "note",
        "title": "Test Title",
        "body": "Test Message",
        "device_iden": "identity1",
    }
async def test_pushbullet_push_devices(hass, requests_mock, mock_pushbullet):
    """A push targeted at two devices results in one request per device."""
    platform_conf = {
        "name": "test",
        "platform": "pushbullet",
        "api_key": "MYFAKEKEY",
    }
    with assert_setup_component(1) as loaded:
        assert await async_setup_component(
            hass, notify.DOMAIN, {notify.DOMAIN: platform_conf}
        )
        await hass.async_block_till_done()
    assert loaded[notify.DOMAIN]
    requests_mock.register_uri(
        "POST",
        "https://api.pushbullet.com/v2/pushes",
        status_code=HTTPStatus.OK,
        json={"mock_response": "Ok"},
    )
    await hass.services.async_call(
        notify.DOMAIN,
        "test",
        {
            "title": "Test Title",
            "message": "Test Message",
            "target": ["device/DESKTOP", "device/My iPhone"],
        },
    )
    await hass.async_block_till_done()
    assert requests_mock.called
    assert requests_mock.call_count == 2
    assert len(requests_mock.request_history) == 2
    # one request per targeted device, in target order
    for request, iden in zip(
        requests_mock.request_history, ("identity1", "identity2")
    ):
        assert request.json() == {
            "type": "note",
            "title": "Test Title",
            "body": "Test Message",
            "device_iden": iden,
        }
async def test_pushbullet_push_email(hass, requests_mock, mock_pushbullet):
    """A push targeted at an email address carries the email field."""
    platform_conf = {
        "name": "test",
        "platform": "pushbullet",
        "api_key": "MYFAKEKEY",
    }
    with assert_setup_component(1) as loaded:
        assert await async_setup_component(
            hass, notify.DOMAIN, {notify.DOMAIN: platform_conf}
        )
        await hass.async_block_till_done()
    assert loaded[notify.DOMAIN]
    requests_mock.register_uri(
        "POST",
        "https://api.pushbullet.com/v2/pushes",
        status_code=HTTPStatus.OK,
        json={"mock_response": "Ok"},
    )
    await hass.services.async_call(
        notify.DOMAIN,
        "test",
        {
            "title": "Test Title",
            "message": "Test Message",
            "target": ["email/user@host.net"],
        },
    )
    await hass.async_block_till_done()
    assert requests_mock.called
    assert requests_mock.call_count == 1
    assert len(requests_mock.request_history) == 1
    assert requests_mock.request_history[0].json() == {
        "type": "note",
        "title": "Test Title",
        "body": "Test Message",
        "email": "user@host.net",
    }
async def test_pushbullet_push_mixed(hass, requests_mock, mock_pushbullet):
    """Mixed device and email targets yield one request per target."""
    platform_conf = {
        "name": "test",
        "platform": "pushbullet",
        "api_key": "MYFAKEKEY",
    }
    with assert_setup_component(1) as loaded:
        assert await async_setup_component(
            hass, notify.DOMAIN, {notify.DOMAIN: platform_conf}
        )
        await hass.async_block_till_done()
    assert loaded[notify.DOMAIN]
    requests_mock.register_uri(
        "POST",
        "https://api.pushbullet.com/v2/pushes",
        status_code=HTTPStatus.OK,
        json={"mock_response": "Ok"},
    )
    await hass.services.async_call(
        notify.DOMAIN,
        "test",
        {
            "title": "Test Title",
            "message": "Test Message",
            "target": ["device/DESKTOP", "email/user@host.net"],
        },
    )
    await hass.async_block_till_done()
    assert requests_mock.called
    assert requests_mock.call_count == 2
    assert len(requests_mock.request_history) == 2
    # first request addresses the device, second one the email recipient
    assert requests_mock.request_history[0].json() == {
        "type": "note",
        "title": "Test Title",
        "body": "Test Message",
        "device_iden": "identity1",
    }
    assert requests_mock.request_history[1].json() == {
        "type": "note",
        "title": "Test Title",
        "body": "Test Message",
        "email": "user@host.net",
    }
async def test_pushbullet_push_no_file(hass, requests_mock, mock_pushbullet):
    """A push referencing a nonexistent file fails the service call."""
    platform_conf = {
        "name": "test",
        "platform": "pushbullet",
        "api_key": "MYFAKEKEY",
    }
    with assert_setup_component(1) as loaded:
        assert await async_setup_component(
            hass, notify.DOMAIN, {notify.DOMAIN: platform_conf}
        )
        await hass.async_block_till_done()
    assert loaded[notify.DOMAIN]
    requests_mock.register_uri(
        "POST",
        "https://api.pushbullet.com/v2/pushes",
        status_code=HTTPStatus.OK,
        json={"mock_response": "Ok"},
    )
    service_data = {
        "title": "Test Title",
        "message": "Test Message",
        "target": ["device/DESKTOP", "device/My iPhone"],
        "data": {"file": "not_a_file"},
    }
    # the bogus file path makes the underlying notify call fail
    assert not await hass.services.async_call(notify.DOMAIN, "test", service_data)
    await hass.async_block_till_done()
| aronsky/home-assistant | tests/components/pushbullet/test_notify.py | Python | apache-2.0 | 8,810 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Adjust CheckIn.answers to an M2M through question.AnswerSet."""

    dependencies = [
        ('question', '0006_auto_20171121_1107'),
    ]

    operations = [
        migrations.AlterField(
            model_name='checkin',
            name='answers',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (Django check fields.W340); it could be dropped from the
            # model and a fresh migration generated.
            field=models.ManyToManyField(null=True, through='question.AnswerSet', to='question.Answers'),
        ),
    ]
| SchoolIdolTomodachi/CinderellaProducers | cpro/migrations/0008_auto_20160928_2326.py | Python | apache-2.0 | 1,097 |
"""
Build control worker: cruise through build steps and dispatch
tasks to perform the actual build
Copyright (c) 2013 Heikki Nousiainen, F-Secure
See LICENSE for details
"""
import time
import tempfile
import copy
from distci.worker import worker_base, task_base
from distci import distcilib
class BuildControlWorker(worker_base.WorkerBase):
    """ Build control worker

    Drives a complete build: fetches the job configuration, creates an
    empty workspace, spawns one sub-task per configured build step,
    polls the sub-tasks until completion, and finally reports the build
    result (triggering downstream jobs on success).
    """
    def __init__(self, config):
        worker_base.WorkerBase.__init__(self, config)
        self.worker_config['capabilities'] = ['build_control_v1']
        # per-build state machines, keyed by the id of the control task
        self.build_states = {}
        self.distci_client = distcilib.DistCIClient(config)

    def update_build_state(self, task_key):
        """ Push the current build state to the server, with retries.

        Returns True on success, False after exhausting the retries. """
        self.build_states[task_key]['last_updated'] = int(time.time())
        for _ in range(self.worker_config.get('retry_count', 10)):
            if self.distci_client.builds.state.put(self.build_states[task_key]['job_id'], self.build_states[task_key]['build_number'], self.build_states[task_key]['build_state']) is not None:
                return True
        self.log.error('Failed to update build state, job %s build %s' % (self.build_states[task_key]['job_id'], self.build_states[task_key]['build_number']))
        return False

    def get_job_config(self, task_key):
        """ Fetch the job configuration (with retries) and advance the
        state machine to 'create-workspace'. """
        self.log.debug('Get job config %s', task_key)
        if self.build_states[task_key].get('job_config') is None:
            for _ in range(self.worker_config.get('retry_count', 10)):
                config = self.distci_client.jobs.get(self.build_states[task_key]['job_id'])
                if config is not None and config.get('config') is not None:
                    self.build_states[task_key]['job_config'] = config['config']
                    self.build_states[task_key]['build_state']['config'] = config['config']
                    break
            if self.build_states[task_key].get('job_config') is None:
                self.log.error('Failed to fetch job config')
                return
        # update build state
        if self.update_build_state(task_key) == True:
            self.build_states[task_key]['state'] = 'create-workspace'

    def create_workspace(self, task_key):
        """ Upload an empty workspace for the build and advance the state
        machine to 'running'. """
        self.log.debug('Create workspace for %s', task_key)
        # create and store empty workspace
        self.log.debug('Creating workspace')
        tmp_dir = tempfile.mkdtemp()
        if self.send_workspace(self.build_states[task_key]['job_id'], self.build_states[task_key]['build_number'], tmp_dir) == False:
            self.log.error('Failed to store empty workspace')
            self.delete_workspace(tmp_dir)
            return
        self.delete_workspace(tmp_dir)
        # update build state
        if self.update_build_state(task_key) == True:
            self.build_states[task_key]['state'] = 'running'

    def spawn_subtask(self, task_key, subtask_index):
        """ Post a new task for build step `subtask_index`.

        Returns True when the step was dispatched (or immediately marked
        failed for an unknown step type), False when posting failed. """
        subtask_config = self.build_states[task_key]['job_config']['tasks'][subtask_index]
        if subtask_config['type'] == 'git-checkout':
            capabilities = [ 'git_checkout_v1' ]
        elif subtask_config['type'] == 'copy-artifacts':
            capabilities = [ 'copy_artifacts_v1' ]
        elif subtask_config['type'] == 'execute-shell':
            capabilities = [ 'execute_shell_v1' ]
            for label in subtask_config['params'].get('nodelabels', []):
                capabilities.append('nodelabel_%s' % label)
        elif subtask_config['type'] == 'publish-artifacts':
            capabilities = [ 'publish_artifacts_v1' ]
        else:
            # BUGFIX: this branch used to *call* the tasks dict entry
            # (raising TypeError/KeyError) instead of assigning to it.
            # Record a synthetic failed subtask; 'id': None keeps the
            # polling and cleanup code from trying to look it up.
            self.build_states[task_key]['build_state']['tasks'][subtask_index] = {
                'id': None,
                'status': 'complete',
                'result': 'error',
                'error_message': 'Unknown subtask type %s' % subtask_config['type'] }
            return True
        task_descr = { 'status': 'pending',
                       'job_id': self.build_states[task_key]['job_id'],
                       'build_number': self.build_states[task_key]['build_number'],
                       'capabilities': capabilities,
                       'params': copy.deepcopy(self.build_states[task_key]['job_config']['tasks'][subtask_index]['params']) }
        task_obj = task_base.GenericTask(task_descr, None)
        task_obj = self.post_new_task(task_obj)
        if task_obj is None:
            self.log.error('Failed to post new task for job %s build %s', self.build_states[task_key]['job_id'], self.build_states[task_key]['build_number'])
            return False
        self.build_states[task_key]['build_state']['tasks'][subtask_index] = task_obj.config
        self.build_states[task_key]['build_state']['tasks'][subtask_index]['id'] = task_obj.id
        return True

    def update_state_after_subtask_completion(self, task_key, subtask_index):
        """ Merge artifacts produced by a finished subtask into the build
        state and push the updated state to the server. """
        artifacts = self.build_states[task_key]['build_state']['tasks'][subtask_index].get('artifacts')
        if artifacts is not None:
            for artifact_id, path in artifacts.iteritems():
                self.build_states[task_key]['build_state']['artifacts'][artifact_id] = path
        if self.update_build_state(task_key) == False:
            return False
        return True

    def check_status_and_issue_tasks(self, task_key):
        """ Walk the build steps in order: spawn the next pending subtask,
        poll running ones, and mark the build complete on failure or
        when every step succeeded. """
        self.log.debug('Checking status for %s', task_key)
        for subtask_index in range(len(self.build_states[task_key]['job_config']['tasks'])):
            subtask_desc = self.build_states[task_key]['build_state']['tasks'].get(subtask_index)
            if subtask_desc is None:
                if self.spawn_subtask(task_key, subtask_index) == False:
                    return
                subtask_desc = self.build_states[task_key]['build_state']['tasks'][subtask_index]
            if subtask_desc['status'] != 'complete':
                # refresh the subtask state from the server
                subtask = self.get_task(subtask_desc['id'])
                if subtask is not None:
                    self.build_states[task_key]['build_state']['tasks'][subtask_index].update(subtask.config)
                    if self.update_build_state(task_key) == False:
                        return
                if subtask_desc['status'] != 'complete':
                    # still running; try again on the next poll cycle
                    return
            if subtask_desc['result'] != 'success':
                # a failed step aborts the whole build
                self.build_states[task_key]['state'] = 'complete'
                self.build_states[task_key]['build_state']['status'] = 'complete'
                self.build_states[task_key]['build_state']['result'] = 'failure'
                return
            if self.update_state_after_subtask_completion(task_key, subtask_index) == False:
                return
        # every configured step completed successfully
        self.build_states[task_key]['state'] = 'complete'
        self.build_states[task_key]['build_state']['status'] = 'complete'
        self.build_states[task_key]['build_state']['result'] = 'success'

    def report_complete_status(self, task_key):
        """ Final bookkeeping for a finished build: delete the workspace,
        clean up subtasks and the control task, and trigger downstream
        jobs on success.  Each step retries; the method simply returns
        on persistent failure and is re-entered on the next poll. """
        # delete workspace
        for _ in range(self.worker_config.get('retry_count', 10)):
            if self.distci_client.builds.workspace.delete(self.build_states[task_key]['job_id'], self.build_states[task_key]['build_number']) == True:
                break
        if self.update_build_state(task_key) == False:
            return
        # clean up subtasks
        for _, subtask_data in self.build_states[task_key]['build_state']['tasks'].iteritems():
            if subtask_data['id'] is not None:
                for _ in range(self.worker_config.get('retry_count', 10)):
                    if self.distci_client.tasks.delete(subtask_data['id']) == True:
                        subtask_data['id'] = None
                        break
                if subtask_data['id'] is not None:
                    return
        # delete our main task
        task = self.build_states[task_key]['task']
        for _ in range(self.worker_config.get('retry_count', 10)):
            if self.distci_client.tasks.delete(task.id) == True:
                task.id = None
                break
        if task.id is not None:
            return
        # trigger downstream jobs
        if self.build_states[task_key]['build_state'].get('result') == 'success':
            for job in self.build_states[task_key]['job_config'].get('downstream_jobs', []):
                self.distci_client.builds.trigger(job)
        self.build_states[task_key]['state'] = 'reported'

    def start(self):
        """ Main loop: accept new build_control tasks and advance every
        tracked build through its state machine. """
        while True:
            new_task = self.fetch_task(timeout=10)
            if new_task is not None:
                self.build_states[new_task.id] = {
                    'state': 'get-job-config',
                    'last_updated': int(time.time()),
                    'job_id': new_task.config.get('job_id'),
                    'build_number': new_task.config.get('build_number'),
                    'build_state': {
                        'status': 'running',
                        'controller': self.uuid,
                        'tasks': {},
                        'artifacts': {} },
                    'task': new_task }
            for task_key in self.build_states.keys():
                if self.build_states[task_key]['state'] == 'get-job-config':
                    self.get_job_config(task_key)
                if self.build_states[task_key]['state'] == 'create-workspace':
                    self.create_workspace(task_key)
                if self.build_states[task_key]['state'] == 'running':
                    self.check_status_and_issue_tasks(task_key)
                if self.build_states[task_key]['state'] == 'complete':
                    self.report_complete_status(task_key)
                if self.build_states[task_key]['state'] == 'reported':
                    del self.build_states[task_key]
| F-Secure/distci | src/distci/worker/build_control/build_control.py | Python | apache-2.0 | 9,652 |
from heppy.framework.analyzer import Analyzer
from heppy.framework.event import Event
from heppy_fcc.particles.tlv.jet import Jet
from heppy_fcc.particles.jet import JetConstituents
import os
from ROOT import gSystem
CCJetClusterizer = None
if os.environ.get('ANALYSISCPP'):
gSystem.Load("libanalysiscpp-tools")
from ROOT import JetClusterizer as CCJetClusterizer
elif os.environ.get('CMSSW_BASE'):
gSystem.Load("libColinPFSim")
from ROOT import heppy
CCJetClusterizer = heppy.JetClusterizer
import math
class JetClusterizer(Analyzer):
    '''Jet clusterizer.
    Makes use of the JetClusterizer class compiled in the analysis-cpp package.
    Example configuration:
    papas_jets = cfg.Analyzer(
      JetClusterizer,
      instance_label = 'papas',
      particles = 'papas_rec_particles'
    )
    particles: Name of the input particle collection.
    The output jet collection name is built from the instance_label,
    in this case "papas_jets".
    '''
    def __init__(self, *args, **kwargs):
        super(JetClusterizer, self).__init__(*args, **kwargs)
        # minimum energy threshold handed to the C++ clusterizer
        min_e = 0.
        self.clusterizer = CCJetClusterizer(min_e)
    def validate(self, jet):
        """Sanity-check a clustered jet; drops into pdb on violation."""
        constits = jet.constituents
        keys = set(jet.constituents.keys())
        # pdg ids accepted as jet constituents — presumably charged
        # hadrons (211), photons (22), K0L (130), e (11), mu (13) and
        # papas pseudo-particles (1, 2); TODO confirm
        all_possible = set([211, 22, 130, 11, 13, 1, 2])
        if not keys.issubset(all_possible):
            print constits
            assert(False)
        sume = 0.
        for component in jet.constituents.values():
            # no single constituent class may carry more energy than the jet
            if component.e() - jet.e() > 1e-5:
                import pdb; pdb.set_trace()
            sume += component.e()
        # energy conservation: constituents must add up to the jet energy
        if jet.e() - sume > 1e-5:
            import pdb; pdb.set_trace()
    def process(self, event):
        """Cluster the configured particle collection into jets and attach
        the resulting jet list to the event under self.instance_label."""
        particles = getattr(event, self.cfg_ana.particles)
        # removing neutrinos
        particles = [ptc for ptc in particles if abs(ptc.pdgid()) not in [12,14,16]]
        self.clusterizer.clear();
        for ptc in particles:
            self.clusterizer.add_p4( ptc.p4() )
        self.clusterizer.clusterize()
        jets = []
        for jeti in range(self.clusterizer.n_jets()):
            jet = Jet( self.clusterizer.jet(jeti) )
            jet.constituents = JetConstituents()
            jets.append( jet )
            # map the clusterizer's constituent indices back to the
            # original (neutrino-filtered) particle list
            for consti in range(self.clusterizer.n_constituents(jeti)):
                constituent_index = self.clusterizer.constituent_index(jeti, consti)
                constituent = particles[constituent_index]
                jet.constituents.append(constituent)
            jet.constituents.sort()
            self.validate(jet)
        setattr(event, self.instance_label, jets)
| semkiv/heppy_fcc | analyzers/JetClusterizer.py | Python | gpl-3.0 | 2,690 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-11-21 16:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Move card favourites from accounts to users: add an `owner` FK,
    enforce one favourite per (owner, card), and drop the old
    account-based field plus its cache columns."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('cpro', '0007_auto_20160928_2324'),
    ]
    operations = [
        migrations.AddField(
            model_name='favoritecard',
            name='owner',
            # default=1 backfills existing rows with user id 1
            field=models.ForeignKey(related_name='favoritecards', default=1, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterUniqueTogether(
            name='favoritecard',
            unique_together=set([('owner', 'card')]),
        ),
        migrations.RemoveField(
            model_name='favoritecard',
            name='account',
        ),
        migrations.RemoveField(
            model_name='favoritecard',
            name='_cache_account_owner_id',
        ),
        migrations.RemoveField(
            model_name='favoritecard',
            name='_cache_account_last_update',
        ),
    ]
| airportmarc/the416life | src/apps/question/migrations/0007_auto_20171121_1110.py | Python | mit | 509 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an fake image service"""
import copy
import datetime
import random
from nova import exception
from nova import flags
from nova import log as logging
LOG = logging.getLogger('nova.image.fake')
FLAGS = flags.FLAGS
class _FakeImageService(object):
    """Mock (fake) image service for unit testing.

    Stores image metadata dicts in memory, keyed by string image id.
    """

    def __init__(self):
        self.images = {}
        # NOTE(justinsb): The OpenStack API can't upload an image?
        # So, make sure we've got one..
        timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)

        # The canned images share the same boilerplate; they only differ
        # in id, visibility and (for the first one) architecture.
        def base_image(image_id, is_public):
            return {'id': image_id,
                    'name': 'fakeimage123456',
                    'created_at': timestamp,
                    'updated_at': timestamp,
                    'deleted_at': None,
                    'deleted': False,
                    'status': 'active',
                    'is_public': is_public,
                    'properties': {'kernel_id': FLAGS.null_kernel,
                                   'ramdisk_id': FLAGS.null_kernel}}

        # the one private image additionally declares an architecture
        image1 = base_image('123456', False)
        image1['properties']['architecture'] = 'x86_64'
        self.create(None, image1)
        for image_id in ('fake', '2', '1', '3'):
            self.create(None, base_image(image_id, True))
        super(_FakeImageService, self).__init__()

    def index(self, context, filters=None, marker=None, limit=None):
        """Returns list of images, reduced to id and name."""
        retval = []
        for img in self.images.values():
            retval += [dict([(k, v) for k, v in img.iteritems()
                             if k in ['id', 'name']])]
        return retval

    def detail(self, context, filters=None, marker=None, limit=None):
        """Return list of detailed image information (deep copies)."""
        return copy.deepcopy(self.images.values())

    def show(self, context, image_id):
        """Get data about specified image.

        Returns a dict containing image data for the given opaque image id.

        :raises: ImageNotFound if the image does not exist.
        """
        # NOTE(review): show() coerces the id to str, but update()/delete()
        # below use the raw key — integer ids stored via create() with an
        # explicit int 'id' would only be visible here.  TODO confirm.
        image = self.images.get(str(image_id))
        if image:
            return copy.deepcopy(image)
        LOG.warn('Unable to find image id %s.  Have images: %s',
                 image_id, self.images)
        raise exception.ImageNotFound(image_id=image_id)

    def show_by_name(self, context, name):
        """Returns a dict containing image data for the given name.

        :raises: ImageNotFound if no image carries that name.
        """
        images = copy.deepcopy(self.images.values())
        for image in images:
            if name == image.get('name'):
                return image
        raise exception.ImageNotFound(image_id=name)

    def create(self, context, metadata, data=None):
        """Store the image data and return the new image id.

        If metadata carries no 'id', a random unused one is generated.

        :raises: Duplicate if the image already exist.
        """
        try:
            image_id = metadata['id']
        except KeyError:
            while True:
                image_id = random.randint(0, 2 ** 31 - 1)
                if not self.images.get(str(image_id)):
                    break
            image_id = str(image_id)
        if self.images.get(image_id):
            raise exception.Duplicate()
        metadata['id'] = image_id
        # deep copy so later mutation of the caller's dict has no effect
        self.images[image_id] = copy.deepcopy(metadata)
        return self.images[image_id]

    def update(self, context, image_id, metadata, data=None):
        """Replace the contents of the given image with the new data.

        :raises: ImageNotFound if the image does not exist.
        """
        if not self.images.get(image_id):
            raise exception.ImageNotFound(image_id=image_id)
        self.images[image_id] = copy.deepcopy(metadata)

    def delete(self, context, image_id):
        """Delete the given image.

        :raises: ImageNotFound if the image does not exist.
        """
        removed = self.images.pop(image_id, None)
        if not removed:
            raise exception.ImageNotFound(image_id=image_id)

    def delete_all(self):
        """Clears out all images."""
        self.images.clear()
# module-level singleton shared by every caller of FakeImageService()
_fakeImageService = _FakeImageService()
def FakeImageService():
    """Return the shared fake image service singleton."""
    return _fakeImageService
def FakeImageService_reset():
    """Replace the singleton with a fresh instance (test isolation)."""
    global _fakeImageService
    _fakeImageService = _FakeImageService()
| 30loops/nova | nova/image/fake.py | Python | apache-2.0 | 6,986 |
# coding=utf-8
import abc
import logging
import tempfile
from bireus.client.download_service import AbstractDownloadService
from bireus.client.notification_service import NotificationService
from bireus.shared import *
from bireus.shared.diff_head import DiffHead
from bireus.shared.diff_item import DiffItem
from bireus.shared.repository import ProtocolException
logger = logging.getLogger(__name__)
class PatchTask(abc.ABC):
    """Base class for protocol-version-specific patch appliers.

    Concrete subclasses are discovered via ``__subclasses__`` in
    :meth:`get_factory` and selected by the patch's protocol number.
    """
    # lazily built map: protocol version -> subclass factory (see get_factory)
    _patch_tasks = None

    def __init__(self, notification_service: NotificationService, download_service: AbstractDownloadService,
                 repository_url: str, repo_path: Path, patch_file: Path):
        self._notification_service = notification_service
        self._download_service = download_service
        self._url = repository_url
        self._repo_path = repo_path
        self._patch_file = patch_file
        self._target_version = None

    def run(self) -> None:
        """Unpack the patch archive and apply it to the repository in place."""
        # unpack the patch into a temp folder
        temp_root = self._repo_path.joinpath(".bireus").joinpath("__temp__")
        temp_root.mkdir(parents=True, exist_ok=True)
        tempdir = tempfile.TemporaryDirectory(dir=str(temp_root))
        unpack_archive(self._patch_file, tempdir.name)
        diff_head = DiffHead.load_json_file(Path(tempdir.name).joinpath('.bireus'))
        if diff_head.protocol != self.get_version():
            # build the message once instead of formatting it three times
            error_message = ".bireus protocol version %s doesn't match patcher task version %s" \
                            % (diff_head.protocol, self.get_version())
            logger.error(error_message)
            self._notification_service.error(error_message)
            raise Exception(error_message)
        self._target_version = diff_head.target_version
        # begin the patching recursion
        # note: a DiffHead's first and only item is the top folder itself
        self.patch(diff_head.items[0], self._repo_path, Path(tempdir.name), False)
        intermediate_folder = Path(self._repo_path.parent.joinpath(self._repo_path.name + ".patched"))
        relative_temp_folder = Path(tempdir.name).relative_to(self._repo_path)
        # swap the patched tree into place: move the old repo aside, then
        # move the patched temp folder (which lived inside it) back
        move_file(self._repo_path, intermediate_folder)
        try:
            move_file(intermediate_folder.joinpath(relative_temp_folder), self._repo_path)
            self._repo_path.joinpath(".bireus").unlink()  # remove the patch descriptor
            move_file(intermediate_folder.joinpath(".bireus"), self._repo_path.joinpath(".bireus"))
        finally:
            remove_folder(intermediate_folder)

    @classmethod
    def get_factory(cls, protocol: int):
        """Return the factory callable for the patcher supporting `protocol`.

        :raises ProtocolException: if no registered subclass supports it
        """
        if cls._patch_tasks is None:
            cls._patch_tasks = dict()
            for patch_task_version in PatchTask.__subclasses__():
                cls._patch_tasks[patch_task_version.get_version()] = patch_task_version.create
        if protocol in cls._patch_tasks:
            return cls._patch_tasks[protocol]
        else:
            # BUGFIX: the format argument was previously passed to the
            # exception constructor logging-style, so the message was
            # never interpolated
            raise ProtocolException("Protocol version `%s` is not supported in this client version" % protocol)

    # abc.abstractclassmethod is deprecated since Python 3.3; stacking
    # classmethod over abstractmethod is the supported equivalent
    @classmethod
    @abc.abstractmethod
    def get_version(cls) -> int:
        """Protocol version implemented by this patcher."""
        pass

    @classmethod
    @abc.abstractmethod
    def create(cls, notification_service: NotificationService, download_service: AbstractDownloadService, repository_url: str, repo_path: Path,
               patch_file: Path) -> 'PatchTask':
        """
        Abstract factory function for dynamic patcher initialization
        same params as in constructor!
        """
        pass

    @abc.abstractmethod
    def patch(self, diff: DiffItem, base_path: Path, patch_path: Path, inside_zip: bool = False) -> None:
        """Apply one DiffItem recursively beneath base_path."""
        pass
| Brutus5000/BiReUS | bireus/client/patch_tasks/base.py | Python | mit | 3,803 |
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QLabel, QLineEdit, QFrame, QDialogButtonBox, QApplication, QComboBox
from PyQt5.QtCore import pyqtSignal
from domestic.core import ReaderDb, isFeed, feedInfo
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.parse import urljoin
import ssl
if hasattr(ssl, '_create_unverified_context'):
    # NOTE(review): this globally disables HTTPS certificate verification for
    # every urlopen() call in the process -- a security risk. Presumably done so
    # feeds behind self-signed certificates still load; confirm this is intended.
    ssl._create_default_https_context = ssl._create_unverified_context
class FeedAddDialog(QDialog):
    """Dialog for subscribing to a new feed.

    The user enters a feed (or site) URL; :meth:`feedControl` validates it and,
    for an unknown feed, reveals the category selector plus the Save button;
    :meth:`feedAdd` then stores the feed (with its favicon) in the database.
    """

    def __init__(self, parent=None):
        # NOTE(review): `super(QDialog, self)` skips QDialog.__init__ in the MRO;
        # the conventional form is `super(FeedAddDialog, self)` -- confirm before changing.
        super(QDialog, self).__init__(parent)
        self.parent = parent
        self.resize(400, 150)
        self.vLayout = QVBoxLayout(self)
        self.vLayout.setSpacing(5)
        self.vLayout.setContentsMargins(5, 5, 5, 5)
        self.labelTitle = QLabel(self)
        self.vLayout.addWidget(self.labelTitle)
        self.labelRSS = QLabel(self)
        self.vLayout.addWidget(self.labelRSS)
        self.lineEditURI = QLineEdit(self)
        self.vLayout.addWidget(self.lineEditURI)
        # Warning label and category chooser stay hidden until validation.
        self.labelWarning = QLabel(self)
        self.labelWarning.hide()
        self.vLayout.addWidget(self.labelWarning)
        self.labelCategory = QLabel(self)
        self.labelCategory.setText(self.tr("Category:"))
        self.labelCategory.hide()
        self.vLayout.addWidget(self.labelCategory)
        self.comboBox = QComboBox(self)
        self.comboBox.addItem(self.tr("All Feeds"))
        self.comboBox.hide()
        self.vLayout.addWidget(self.comboBox)
        self.line = QFrame(self)
        self.line.setFrameShape(QFrame.HLine)
        self.line.setFrameShadow(QFrame.Sunken)
        self.vLayout.addWidget(self.line)
        # Ok validates the URL; Save (hidden until validation) stores the feed.
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Save|QDialogButtonBox.Ok)
        self.buttonBox.button(QDialogButtonBox.Cancel).setText(self.tr("Cancel"))
        self.buttonBox.button(QDialogButtonBox.Save).setText(self.tr("Save"))
        self.buttonBox.button(QDialogButtonBox.Save).clicked.connect(self.feedAdd)
        self.buttonBox.button(QDialogButtonBox.Save).hide()
        self.buttonBox.button(QDialogButtonBox.Ok).setText(self.tr("Ok"))
        self.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(self.feedControl)
        self.buttonBox.button(QDialogButtonBox.Cancel).clicked.connect(self.reject)
        self.vLayout.addWidget(self.buttonBox)
        self.lineEditURI.textChanged.connect(self.changeText)
        self.setWindowTitle(self.tr("New Feed"))
        self.labelTitle.setText(self.tr("<span style='font-size:16pt; font-weight:bold;'>Add New Feed</span>"))
        self.labelRSS.setText(self.tr("Enter link or source of feed:"))
        # Pre-fill the URL field from the clipboard when it already holds a link.
        url = QApplication.clipboard().text()
        if url.startswith("http://") or url.startswith("https://"):
            self.lineEditURI.setText(url)
        else: self.lineEditURI.setText("http://")
        self.lineEditURI.selectAll()

    def changeText(self):
        """Reset the dialog to its pre-validation state whenever the URL changes."""
        self.labelWarning.hide()
        self.labelCategory.hide()
        self.comboBox.hide()
        self.buttonBox.button(QDialogButtonBox.Ok).show()
        self.buttonBox.button(QDialogButtonBox.Save).hide()
        self.resize(400, 150)

    def faviconUrl(self, url):
        """Return the absolute favicon URL declared by the page at *url*, or None.

        Any network or parse failure yields None so the feed is simply stored
        without a favicon.
        """
        try:
            with urlopen(url) as page:
                html = BeautifulSoup(page.read())
            # Prefer rel="shortcut icon", fall back to rel="icon".
            icon = html.find(rel="shortcut icon") or html.find(rel="icon")
            if icon is None or not icon.get("href"):
                # BUGFIX: previously `favicon_url` could be left unbound here,
                # raising a NameError that a bare `except` silently swallowed.
                return None
            return urljoin(url, icon["href"])
        except Exception:
            return None

    def getFavicon(self, url):
        """Download *url* and return its bytes as an sqlite BLOB, or None."""
        import sqlite3 as sql
        if url is not None:
            with urlopen(url) as favicon:
                return sql.Binary(favicon.read())
        else: return None

    def feedControl(self):
        """Validate the entered URL and prepare the dialog for saving."""
        feed = isFeed(self.lineEditURI.text())
        if feed:
            data = feedInfo(self.lineEditURI.text())
            db = ReaderDb()
            db.execute("select * from folders where feed_url=?", (data["feedlink"],))
            if not db.cursor.fetchone():
                # Unknown feed: offer the category list and the Save button.
                db.execute("select * from folders where type='folder'")
                for category in db.cursor.fetchall():
                    self.comboBox.addItem(category["title"])
                self.labelCategory.show()
                self.comboBox.show()
                self.buttonBox.button(QDialogButtonBox.Save).show()
                self.buttonBox.button(QDialogButtonBox.Ok).hide()
            else:
                self.labelWarning.setText(self.tr("<span style='color:red; font-size:15px; font-weight:bold;'>That feed is already exist!</span>"))
                self.labelWarning.show()
        else:
            self.labelWarning.setText(self.tr("<span style='color:red; font-size:15px; font-weight:bold;'>Wrong link name!</span>"))
            self.labelWarning.show()

    # Emitted with the feed URL after a successful save.
    feedAddFinished = pyqtSignal(str)

    def feedAdd(self):
        """Store the validated feed in the database and notify listeners."""
        data = feedInfo(self.lineEditURI.text())
        db = ReaderDb()
        fav = self.faviconUrl(data["sitelink"])
        db.execute("select id from folders where type='folder' and title=?", (self.comboBox.currentText(),))
        folder = db.cursor.fetchone()
        if folder:
            category = folder["id"]
        else: category = 0
        db.execute("insert into folders (title, parent, type, feed_url, site_url, description, favicon) values (?, ?, 'feed', ?, ?, ?, ?)",
                   (data["title"], category, data["feedlink"], data["sitelink"], data["description"], self.getFavicon(fav)))
        db.commit()
        db.close()
        self.feedAddFinished.emit(self.lineEditURI.text())
        self.parent.syncSignal.emit()
        self.parent.categorySync()
        self.close()
self.close() | mthnzbk/domestic | domestic/dialogs/feedadd.py | Python | gpl-3.0 | 5,865 |
# -*- coding:utf-8 -*-
import datetime
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.views.generic.base import TemplateView
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.utils import timezone
from django.db.models import Q
from django.views.decorators.cache import never_cache
from django.utils.translation import ugettext as _
from .models import SCDoc, RefillingCart
from index.models import CartridgeItemName, City
from index.helpers import str2int
from .forms.add_doc import AddDoc
from .forms.edit_name import EditName
from .forms.add_city import CityF
from .forms.edit_city import CityE
from common.cbv import GridListView
from common.helpers import date_to_localize_string
from accounts.decorators import permissions
class handbook(TemplateView):
    """Render the static reference-book (handbook) page."""
    template_name = 'docs/handbook.html'

    @method_decorator(permissions('dict'))
    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        # Require the 'dict' permission and disable caching for every HTTP method.
        return super(handbook, self).dispatch(*args, **kwargs)
@permissions('dict')
@never_cache
def delivery(request):
    """List/create/edit/delete supply and service contracts for consumables.

    GET parameters drive the sub-modes: ``select`` edits a document,
    ``delete`` removes one, ``show`` displays a single contract.
    POST creates a new document, or saves an edited one when ``select`` is set.
    """
    context = {}
    # select only service (doc_type=1) and supply (doc_type=2) contracts
    docs = SCDoc.objects.filter(departament=request.user.departament).filter( Q(doc_type=1) | Q(doc_type=2)).order_by('-pk')
    context['docs'] = docs
    if request.method == 'POST':
        form = AddDoc(request.POST)
        if form.is_valid():
            data_in_post = form.cleaned_data
            if request.GET.get('select', ''):
                # the user is editing and saving an existing document
                doc_id = request.GET.get('select', '')
                doc_id = str2int(doc_id)
                try:
                    doc = SCDoc.objects.get(pk=doc_id)
                except SCDoc.DoesNotExist:
                    raise Http404
                # persist the changes
                doc.number = data_in_post.get('number','')
                doc.date_of_signing = data_in_post.get('date','')
                doc.firm = data_in_post.get('firm','')
                doc.title = data_in_post.get('title','')
                doc.short_cont = data_in_post.get('short_cont','')
                # BUGFIX: previously assigned to a local `doc_type`, so the
                # document type was silently dropped when editing.
                doc.doc_type = data_in_post.get('doc_type', '')
                doc.money = data_in_post.get('money','')
                doc.save()
                messages.success(request, _('%(doc_num)s success saved.') % {'doc_num': doc.number})
            else:
                # the user is simply creating a new document
                m1 = SCDoc.objects.create(
                    number = data_in_post.get('number',''),
                    date_of_signing = data_in_post.get('date', 0),
                    date_created = timezone.now(),
                    firm = data_in_post.get('firm',''),
                    title = data_in_post.get('title',''),
                    short_cont = data_in_post.get('short_cont',''),
                    money = data_in_post.get('money',''),
                    doc_type = data_in_post.get('doc_type', ''),
                    departament = request.user.departament
                )
                messages.success(request, _('New %(doc_num)s success created.') % {'doc_num': data_in_post.get('number','')})
            # POST/redirect/GET on success
            return HttpResponseRedirect(request.path)
        else:
            # re-render the bound form with its validation errors
            context['form'] = form
    elif request.method == 'GET':
        if request.GET.get('select', ''):
            # pre-fill the form for editing an existing document
            context['edit'] = True
            doc_id = request.GET.get('select', '')
            doc_id = str2int(doc_id)
            try:
                doc = SCDoc.objects.get(pk=doc_id)
            except SCDoc.DoesNotExist:
                raise Http404
            if doc.date_of_signing:
                date = date_to_localize_string(day=doc.date_of_signing.day, \
                                               month=doc.date_of_signing.month, \
                                               year=doc.date_of_signing.year)
            else:
                date = ''
            # money is stored in minor units (kopecks/cents); display as major units
            money = doc.money if doc.money else 0
            money /= 100
            form = AddDoc(initial={
                            'number': doc.number,
                            'title': doc.title,
                            'money': money,
                            'short_cont': doc.short_cont,
                            'firm': doc.firm,
                            'doc_type': doc.doc_type,
                            'date': date })
            context['form'] = form
        elif request.GET.get('delete', ''):
            # document deletion branch
            doc_id = request.GET.get('delete', '')
            try:
                doc_id = int(doc_id)
            except ValueError:
                doc_id = 0
            try:
                doc = SCDoc.objects.get(pk=doc_id)
            except SCDoc.DoesNotExist:
                raise Http404
            doc_number = doc.number
            doc.delete()
            messages.error(request, _('Document %(doc_number)s deleted!') % {'doc_number': doc_number})
            return HttpResponseRedirect(reverse('docs:delivery'))
        elif request.GET.get('show', ''):
            # branch for viewing one specific contract
            doc_id = request.GET.get('show', '')
            try:
                doc_id = int(doc_id)
            except ValueError:
                doc_id = 0
            # NOTE: filter() never raises DoesNotExist; an unknown id simply
            # yields an empty queryset here.
            try:
                doc = SCDoc.objects.filter(departament=request.user.departament).filter(pk=doc_id)
            except SCDoc.DoesNotExist:
                raise Http404
            context['not_show_form'] = True
            context['docs'] = doc
        else:
            form = AddDoc()
            context['form'] = form
    else:
        # HTTP method not supported
        pass
    return render(request, 'docs/delivery.html', context)
@permissions('dict')
@never_cache
def edit_name(request):
    """Edit the name of a cartridge (consumable).

    The item to edit is selected via the ``id`` GET parameter; raises Http404
    when it does not exist.
    """
    context = dict()
    name_id = request.GET.get('id', 0)
    name_id = str2int(name_id)
    try:
        m1 = CartridgeItemName.objects.get(pk=name_id)
    except CartridgeItemName.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        form = EditName(request.POST)
        if form.is_valid():
            data_in_post = form.cleaned_data
            cartName = data_in_post.get('cartName','')
            cartType = data_in_post.get('cartType','')
            comment = data_in_post.get('comment','')
            manufacturer = data_in_post.get('manufacturer')
            # persist the changes to the DB
            m1.cart_itm_name = cartName
            m1.cart_itm_type = cartType
            m1.comment = comment
            m1.manufacturer = manufacturer
            m1.save()
            return HttpResponseRedirect(reverse('docs:view_names'))
        else:
            # the submitted data contains errors; re-render the bound form
            context['form'] = form
    else:
        # the user arrived via a GET request: pre-fill the form
        form = EditName(initial={ 'cartName': m1.cart_itm_name,
                                  'cartType': m1.cart_itm_type,
                                  'comment': m1.comment, 'manufacturer': m1.manufacturer })
        context['form'] = form
    return render(request, 'docs/edit_name.html', context)
class ViewSendActs(GridListView):
    """Paginated list of the acts of handing cartridges over for refilling
    (``doc_type=1``), restricted to the current user's department.
    """

    @method_decorator(permissions('dict'))
    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        return super(ViewSendActs, self).dispatch(*args, **kwargs)

    def get(self, request, **kwargs):
        acts = RefillingCart.objects.filter(
            departament=request.user.departament).filter(doc_type=1).order_by('-pk')
        per_page = self.items_per_page()
        self.context['page_size'] = per_page
        self.context['docs'] = self.pagination(acts, per_page)
        return render(request, 'docs/acts_list.html', self.context)
class ViewReturnActs(GridListView):
    # Original docstring was copy-pasted from ViewSendActs; this class lists
    # the *return* acts (doc_type=2), not the hand-over acts.
    """Paginated list of the acts of cartridges returned from refilling
    (``doc_type=2``), restricted to the current user's department.
    """

    @method_decorator(permissions('dict'))
    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        return super(ViewReturnActs, self).dispatch(*args, **kwargs)

    def get(self, request, **kwargs):
        all_acts = RefillingCart.objects.filter(departament=request.user.departament).filter(doc_type=2).order_by('-pk')
        page_size = self.items_per_page()
        self.context['page_size'] = page_size
        self.context['docs'] = self.pagination(all_acts, page_size)
        return render(request, 'docs/return_acts_list.html', self.context)
@permissions('dict')
@never_cache
def add_city(request):
    """Add a city to the reference book.

    GET renders an empty form; a valid POST stores the new city and redirects
    back to the same page (POST/redirect/GET).
    """
    if request.method == 'POST':
        form_obj = CityF(request.POST)
        if form_obj.is_valid():
            data_in_post = form_obj.cleaned_data
            m1 = City(city_name=data_in_post['city_name'])
            m1.save()
            messages.success(request, _('City "%(city)s" success added.') % {'city': data_in_post['city_name']})
            return redirect(reverse('docs:add_city'))
        # invalid: fall through and re-render the already-bound form with its
        # errors (previously it was pointlessly re-instantiated here)
    else:
        form_obj = CityF()
    return render(request, 'docs/add_city.html', {'form': form_obj})
@permissions('dict')
@never_cache
def edit_city(request):
    """Edit a city name in the reference book.

    The city to edit is selected via the ``select`` GET parameter; raises
    Http404 when it does not exist.
    """
    context = dict()
    # tolerate non-numeric input the same way edit_name() does
    select = str2int(request.GET.get('select', 0))
    try:
        m1 = City.objects.get(pk=select)
    except City.DoesNotExist:
        # BUGFIX: was `return Http404`, which returns the exception class as
        # the view's "response" instead of raising a 404.
        raise Http404
    if request.method == 'POST':
        form = CityE(request.POST)
        if form.is_valid():
            data_in_post = form.cleaned_data
            m1.city_name = data_in_post['city_name']
            m1.save()
            messages.success(request, _('City "%(city)s" success edited.') % {'city': data_in_post['city_name']})
            return redirect(reverse('docs:edit_city') + '?select=' + str(select))
        # invalid: re-render the bound form with its validation errors
        context['form'] = form
    else:
        context['form'] = CityE(initial={'city_name': m1.city_name,})
    return render(request, 'docs/edit_city.html', context)
| sfcl/severcart | docs/views.py | Python | gpl-2.0 | 11,197 |
# python/example2.py -- Python version of an example application that shows
# how to use the form helper class. For a C++ implementation, see
# '../src/example2.cpp'.
#
# NanoGUI was developed by Wenzel Jakob <wenzel@inf.ethz.ch>.
# The widget drawing code is based on the NanoVG demo application
# by Mikko Mononen.
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE.txt file.
import nanogui
import math
import gc
from nanogui import Screen, FormHelper
# Demo state mutated through the FormHelper widgets created below.
bvar = True
ivar = 12345678
dvar = math.pi
strvar = "A string"
enumvar = 1
colvar = nanogui.Color(.5, .5, .7, 1)
def make_accessors(name):
    """Return a ``(setter, getter)`` pair bound to the module global *name*."""
    module_vars = globals()

    def write(value):
        module_vars[name] = value

    def read():
        return module_vars[name]

    return write, read
# Initialize the NanoGUI backend before any widget is created.
nanogui.init()

use_gl_4_1 = False  # Set to True to create an OpenGL 4.1 context.

if use_gl_4_1:
    # NanoGUI presents many options for you to utilize at your discretion.
    # See include/nanogui/screen.h for what all of the options are.
    screen = Screen((500, 700), "NanoGUI test [GL 4.1]", glMajor=4, glMinor=1)
else:
    screen = Screen((500, 700), "NanoGUI test")

# Build the form: each variable is exposed through a (setter, getter) pair.
gui = FormHelper(screen)
window = gui.addWindow((10, 10), "Form helper example")
gui.addGroup("Basic types")
gui.addBoolVariable("bool", *make_accessors("bvar"))
gui.addStringVariable("string", *make_accessors("strvar"))
gui.addGroup("Validating fields")
gui.addIntVariable("int", *make_accessors("ivar"))
gui.addDoubleVariable("double", *make_accessors("dvar"))
gui.addGroup("Complex types")
gui.addEnumVariable("Enumeration", *make_accessors("enumvar")) \
   .setItems(["Item 1", "Item 2", "Item 3"])
gui.addColorVariable("Color", *make_accessors("colvar"))
gui.addGroup("Other widgets")

def cb():
    # Button callback: just log to stdout.
    print("Button pressed.")
gui.addButton("A button", cb)

screen.setVisible(True)
screen.performLayout()
window.center()

# Blocks until the window is closed.
nanogui.mainloop()

# Drop references and collect so native widgets are destroyed before shutdown.
screen = gui = window = None
gc.collect()
nanogui.shutdown()
| LiveAsynchronousVisualizedArchitecture/lava | nuklear/nanogui/python/example2.py | Python | apache-2.0 | 1,982 |
from osim.env import L2RunEnv
import numpy as np
import unittest
class ActivationsTest(unittest.TestCase):
    """Check that muscle activations passed to step() are clipped to [0, 1]."""

    def test_clipping(self):
        environment = L2RunEnv(visualize=False)
        environment.reset()
        num_muscles = 18

        # Activations above 1.0 must be clipped down, so the sum is at most 18.
        environment.step(np.array([5.0] * num_muscles))
        self.assertLessEqual(np.sum(environment.osim_model.last_action), 18.1)

        # Activations below 0.0 must be clipped up, so the sum is at least 0.
        environment.step(np.array([-1.0] * num_muscles))
        self.assertGreaterEqual(np.sum(environment.osim_model.last_action), -0.1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| stanfordnmbl/osim-rl | tests/test.clipping.py | Python | mit | 498 |
# -*- coding: utf-8 -*-
from odoo.tests import common
class TestGroupOnSelection(common.TransactionCase):
    """read_group() grouped by the ``state`` selection field.

    Each test checks the returned groups (counts, aggregated ``value`` and
    ``__domain``) for records that cover none, some, or all selection values.
    """

    def setUp(self):
        super(TestGroupOnSelection, self).setUp()
        self.Model = self.env['test_read_group.on_selection']

    def test_none(self):
        # no record has `state` set: every selection value still yields an
        # (empty) group, and the False group aggregates all records
        self.Model.create({'value': 1})
        self.Model.create({'value': 2})
        self.Model.create({'value': 3})
        groups = self.Model.read_group([], fields=['state', 'value'], groupby=['state'])
        self.assertEqual(groups, [
            {
                'state': 'a',
                'state_count': 0,
                'value': False,
                '__domain': [('state', '=', 'a')],
            },
            {
                'state': 'b',
                'state_count': 0,
                'value': False,
                '__domain': [('state', '=', 'b')],
            },
            {
                'state': False,
                'state_count': 3,
                'value': 6,
                '__domain': [('state', '=', False)],
            },
        ])

    def test_partial(self):
        # only some records carry a state: 'b' stays empty, unset records fall
        # into the False group
        self.Model.create({'state': 'a', 'value': 1})
        self.Model.create({'state': 'a', 'value': 2})
        self.Model.create({'value': 3})
        groups = self.Model.read_group([], fields=['state', 'value'], groupby=['state'])
        self.assertEqual(groups, [
            {
                'state': 'a',
                'state_count': 2,
                'value': 3,
                '__domain': [('state', '=', 'a')],
            },
            {
                'state': 'b',
                'state_count': 0,
                'value': False,
                '__domain': [('state', '=', 'b')],
            },
            {
                'state': False,
                'state_count': 1,
                'value': 3,
                '__domain': [('state', '=', False)],
            },
        ])

    def test_full(self):
        # every selection value is used once, plus one record without a state
        self.Model.create({'state': 'a', 'value': 1})
        self.Model.create({'state': 'b', 'value': 2})
        self.Model.create({'value': 3})
        groups = self.Model.read_group([], fields=['state', 'value'], groupby=['state'])
        self.assertEqual(groups, [
            {
                'state': 'a',
                'state_count': 1,
                'value': 1,
                '__domain': [('state', '=', 'a')],
            },
            {
                'state': 'b',
                'state_count': 1,
                'value': 2,
                '__domain': [('state', '=', 'b')],
            },
            {
                'state': False,
                'state_count': 1,
                'value': 3,
                '__domain': [('state', '=', False)],
            },
        ])
@common.tagged("test_read_group_selection")
class TestSelectionReadGroup(common.TransactionCase):
    """read_group() over Selection fields with and without ``group_expand``."""

    def setUp(self):
        super().setUp()
        self.Model = self.env['test_read_group.on_selection']

    def test_static_group_expand(self):
        # this test verifies that the following happens when grouping by a Selection field with
        # group_expand=True:
        # - the order of the returned groups is the same as the order in which the
        #   options are declared in the field definition.
        # - the groups returned include the empty groups, i.e. all groups, even those
        #   that have no records assigned to them, this is a (wanted) side-effect of the
        #   implementation.
        # - the false group, i.e. records without the Selection field set, is last.
        self.Model.create([
            {"value": 1, "static_expand": "a"},
            {"value": 2, "static_expand": "c"},
            {"value": 3},
        ])
        groups = self.Model.read_group(
            [],
            fields=["static_expand", "value"],
            groupby=["static_expand"],
        )
        self.assertEqual(groups, [
            {
                'static_expand': 'c',
                'static_expand_count': 1,
                'value': 2,
                '__domain': [('static_expand', '=', 'c')],
            },
            {
                'static_expand': 'b',
                'static_expand_count': 0,
                'value': 0,
                '__domain': [('static_expand', '=', 'b')],
            },
            {
                'static_expand': 'a',
                'static_expand_count': 1,
                'value': 1,
                '__domain': [('static_expand', '=', 'a')],
            },
            {
                'static_expand': False,
                'static_expand_count': 1,
                'value': 3,
                '__domain': [('static_expand', '=', False)],
            },
        ])

    def test_dynamic_group_expand(self):
        # this test tests the same as the above test but with a Selection field whose
        # options are dynamic, this means that the result of read_group when grouping by this
        # field can change from one call to another.
        self.Model.create([
            {"value": 1, "dynamic_expand": "a"},
            {"value": 2, "dynamic_expand": "c"},
            {"value": 3},
        ])
        groups = self.Model.read_group(
            [],
            fields=["dynamic_expand", "value"],
            groupby=["dynamic_expand"],
        )
        self.assertEqual(groups, [
            {
                'dynamic_expand': 'c',
                'dynamic_expand_count': 1,
                'value': 2,
                '__domain': [('dynamic_expand', '=', 'c')],
            },
            {
                'dynamic_expand': 'b',
                'dynamic_expand_count': 0,
                'value': 0,
                '__domain': [('dynamic_expand', '=', 'b')],
            },
            {
                'dynamic_expand': 'a',
                'dynamic_expand_count': 1,
                'value': 1,
                '__domain': [('dynamic_expand', '=', 'a')],
            },
            {
                'dynamic_expand': False,
                'dynamic_expand_count': 1,
                'value': 3,
                '__domain': [('dynamic_expand', '=', False)],
            },
        ])

    def test_no_group_expand(self):
        # if group_expand is not defined on a Selection field, it should return only the necessary
        # groups and in alphabetical order (PostgreSQL ordering)
        self.Model.create([
            {"value": 1, "no_expand": "a"},
            {"value": 2, "no_expand": "c"},
            {"value": 3},
        ])
        groups = self.Model.read_group(
            [],
            fields=["no_expand", "value"],
            groupby=["no_expand"],
        )
        self.assertEqual(groups, [
            {
                'no_expand': 'a',
                'no_expand_count': 1,
                'value': 1,
                '__domain': [('no_expand', '=', 'a')],
            },
            {
                'no_expand': 'c',
                'no_expand_count': 1,
                'value': 2,
                '__domain': [('no_expand', '=', 'c')],
            },
            {
                'no_expand': False,
                'no_expand_count': 1,
                'value': 3,
                '__domain': [('no_expand', '=', False)],
            },
        ])
| jeremiahyan/odoo | odoo/addons/test_read_group/tests/test_group_expand.py | Python | gpl-3.0 | 7,222 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from google.cloud._testing import _GAXBaseAPI
# Shared fixture data used by the mutation-building tests below.
TABLE_NAME = 'citizens'
COLUMNS = ['email', 'first_name', 'last_name', 'age']
VALUES = [
    ['phred@exammple.com', 'Phred', 'Phlyntstone', 32],
    ['bharney@example.com', 'Bharney', 'Rhubble', 31],
]
class TestTransaction(unittest.TestCase):
    """Unit tests for ``google.cloud.spanner.transaction.Transaction``.

    The Spanner API is replaced by ``_FauxSpannerAPI`` (defined below) which
    records call arguments and returns canned responses, so each test checks
    both the transaction's state transitions and the exact API call made.
    """

    PROJECT_ID = 'project-id'
    INSTANCE_ID = 'instance-id'
    INSTANCE_NAME = 'projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID
    DATABASE_ID = 'database-id'
    DATABASE_NAME = INSTANCE_NAME + '/databases/' + DATABASE_ID
    SESSION_ID = 'session-id'
    SESSION_NAME = DATABASE_NAME + '/sessions/' + SESSION_ID
    TRANSACTION_ID = b'DEADBEEF'

    def _getTargetClass(self):
        from google.cloud.spanner.transaction import Transaction

        return Transaction

    def _make_one(self, session, *args, **kwargs):
        # Build a transaction and bind it to the (stub) session, as the real
        # Session.transaction() factory would.
        transaction = self._getTargetClass()(session, *args, **kwargs)
        session._transaction = transaction
        return transaction

    def test_ctor_defaults(self):
        session = _Session()
        transaction = self._make_one(session)
        self.assertIs(transaction._session, session)
        self.assertIsNone(transaction._transaction_id)
        self.assertIsNone(transaction.committed)
        self.assertFalse(transaction._rolled_back)
        self.assertTrue(transaction._multi_use)

    # --- _check_state() state-machine guards ---

    def test__check_state_not_begun(self):
        session = _Session()
        transaction = self._make_one(session)
        with self.assertRaises(ValueError):
            transaction._check_state()

    def test__check_state_already_committed(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction.committed = object()
        with self.assertRaises(ValueError):
            transaction._check_state()

    def test__check_state_already_rolled_back(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction._rolled_back = True
        with self.assertRaises(ValueError):
            transaction._check_state()

    def test__check_state_ok(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction._check_state()  # does not raise

    def test__make_txn_selector(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        selector = transaction._make_txn_selector()
        self.assertEqual(selector.id, self.TRANSACTION_ID)

    # --- begin() ---

    def test_begin_already_begun(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        with self.assertRaises(ValueError):
            transaction.begin()

    def test_begin_already_rolled_back(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._rolled_back = True
        with self.assertRaises(ValueError):
            transaction.begin()

    def test_begin_already_committed(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction.committed = object()
        with self.assertRaises(ValueError):
            transaction.begin()

    def test_begin_w_gax_error(self):
        from google.gax.errors import GaxError

        database = _Database()
        api = database.spanner_api = _FauxSpannerAPI(
            _random_gax_error=True)
        session = _Session(database)
        transaction = self._make_one(session)
        with self.assertRaises(GaxError):
            transaction.begin()
        # the API call was still attempted with the expected arguments
        session_id, txn_options, options = api._begun
        self.assertEqual(session_id, session.name)
        self.assertTrue(txn_options.HasField('read_write'))
        self.assertEqual(options.kwargs['metadata'],
                         [('google-cloud-resource-prefix', database.name)])

    def test_begin_ok(self):
        from google.cloud.proto.spanner.v1.transaction_pb2 import (
            Transaction as TransactionPB)

        transaction_pb = TransactionPB(id=self.TRANSACTION_ID)
        database = _Database()
        api = database.spanner_api = _FauxSpannerAPI(
            _begin_transaction_response=transaction_pb)
        session = _Session(database)
        transaction = self._make_one(session)
        txn_id = transaction.begin()
        self.assertEqual(txn_id, self.TRANSACTION_ID)
        self.assertEqual(transaction._transaction_id, self.TRANSACTION_ID)
        session_id, txn_options, options = api._begun
        self.assertEqual(session_id, session.name)
        self.assertTrue(txn_options.HasField('read_write'))
        self.assertEqual(options.kwargs['metadata'],
                         [('google-cloud-resource-prefix', database.name)])

    # --- rollback() ---

    def test_rollback_not_begun(self):
        session = _Session()
        transaction = self._make_one(session)
        with self.assertRaises(ValueError):
            transaction.rollback()

    def test_rollback_already_committed(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction.committed = object()
        with self.assertRaises(ValueError):
            transaction.rollback()

    def test_rollback_already_rolled_back(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction._rolled_back = True
        with self.assertRaises(ValueError):
            transaction.rollback()

    def test_rollback_w_gax_error(self):
        from google.gax.errors import GaxError

        database = _Database()
        api = database.spanner_api = _FauxSpannerAPI(
            _random_gax_error=True)
        session = _Session(database)
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction.insert(TABLE_NAME, COLUMNS, VALUES)
        with self.assertRaises(GaxError):
            transaction.rollback()
        # a failed rollback must not mark the transaction as rolled back
        self.assertFalse(transaction._rolled_back)
        session_id, txn_id, options = api._rolled_back
        self.assertEqual(session_id, session.name)
        self.assertEqual(txn_id, self.TRANSACTION_ID)
        self.assertEqual(options.kwargs['metadata'],
                         [('google-cloud-resource-prefix', database.name)])

    def test_rollback_ok(self):
        from google.protobuf.empty_pb2 import Empty

        empty_pb = Empty()
        database = _Database()
        api = database.spanner_api = _FauxSpannerAPI(
            _rollback_response=empty_pb)
        session = _Session(database)
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction.replace(TABLE_NAME, COLUMNS, VALUES)
        transaction.rollback()
        # rollback marks the transaction done and detaches it from the session
        self.assertTrue(transaction._rolled_back)
        self.assertIsNone(session._transaction)
        session_id, txn_id, options = api._rolled_back
        self.assertEqual(session_id, session.name)
        self.assertEqual(txn_id, self.TRANSACTION_ID)
        self.assertEqual(options.kwargs['metadata'],
                         [('google-cloud-resource-prefix', database.name)])

    # --- commit() ---

    def test_commit_not_begun(self):
        session = _Session()
        transaction = self._make_one(session)
        with self.assertRaises(ValueError):
            transaction.commit()

    def test_commit_already_committed(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction.committed = object()
        with self.assertRaises(ValueError):
            transaction.commit()

    def test_commit_already_rolled_back(self):
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction._rolled_back = True
        with self.assertRaises(ValueError):
            transaction.commit()

    def test_commit_no_mutations(self):
        # committing an empty mutation list is rejected client-side
        session = _Session()
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        with self.assertRaises(ValueError):
            transaction.commit()

    def test_commit_w_gax_error(self):
        from google.gax.errors import GaxError

        database = _Database()
        api = database.spanner_api = _FauxSpannerAPI(
            _random_gax_error=True)
        session = _Session(database)
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction.replace(TABLE_NAME, COLUMNS, VALUES)
        with self.assertRaises(GaxError):
            transaction.commit()
        # a failed commit leaves the transaction uncommitted
        self.assertIsNone(transaction.committed)
        session_id, mutations, txn_id, options = api._committed
        self.assertEqual(session_id, session.name)
        self.assertEqual(txn_id, self.TRANSACTION_ID)
        self.assertEqual(mutations, transaction._mutations)
        self.assertEqual(options.kwargs['metadata'],
                         [('google-cloud-resource-prefix', database.name)])

    def test_commit_ok(self):
        import datetime
        from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
        from google.cloud.spanner.keyset import KeySet
        from google.cloud._helpers import UTC
        from google.cloud._helpers import _datetime_to_pb_timestamp

        now = datetime.datetime.utcnow().replace(tzinfo=UTC)
        now_pb = _datetime_to_pb_timestamp(now)
        keys = [[0], [1], [2]]
        keyset = KeySet(keys=keys)
        response = CommitResponse(commit_timestamp=now_pb)
        database = _Database()
        api = database.spanner_api = _FauxSpannerAPI(
            _commit_response=response)
        session = _Session(database)
        transaction = self._make_one(session)
        transaction._transaction_id = self.TRANSACTION_ID
        transaction.delete(TABLE_NAME, keyset)
        transaction.commit()
        # commit records the server timestamp and detaches from the session
        self.assertEqual(transaction.committed, now)
        self.assertIsNone(session._transaction)
        session_id, mutations, txn_id, options = api._committed
        self.assertEqual(session_id, session.name)
        self.assertEqual(txn_id, self.TRANSACTION_ID)
        self.assertEqual(mutations, transaction._mutations)
        self.assertEqual(options.kwargs['metadata'],
                         [('google-cloud-resource-prefix', database.name)])

    # --- context-manager protocol ---

    def test_context_mgr_success(self):
        import datetime
        from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
        from google.cloud.proto.spanner.v1.transaction_pb2 import (
            Transaction as TransactionPB)
        from google.cloud._helpers import UTC
        from google.cloud._helpers import _datetime_to_pb_timestamp

        transaction_pb = TransactionPB(id=self.TRANSACTION_ID)
        database = _Database()
        now = datetime.datetime.utcnow().replace(tzinfo=UTC)
        now_pb = _datetime_to_pb_timestamp(now)
        response = CommitResponse(commit_timestamp=now_pb)
        database = _Database()
        api = database.spanner_api = _FauxSpannerAPI(
            _begin_transaction_response=transaction_pb,
            _commit_response=response)
        session = _Session(database)
        transaction = self._make_one(session)
        # leaving the `with` block normally commits the transaction
        with transaction:
            transaction.insert(TABLE_NAME, COLUMNS, VALUES)
        self.assertEqual(transaction.committed, now)
        session_id, mutations, txn_id, options = api._committed
        self.assertEqual(session_id, self.SESSION_NAME)
        self.assertEqual(txn_id, self.TRANSACTION_ID)
        self.assertEqual(mutations, transaction._mutations)
        self.assertEqual(options.kwargs['metadata'],
                         [('google-cloud-resource-prefix', database.name)])

    def test_context_mgr_failure(self):
        from google.protobuf.empty_pb2 import Empty

        empty_pb = Empty()
        from google.cloud.proto.spanner.v1.transaction_pb2 import (
            Transaction as TransactionPB)

        transaction_pb = TransactionPB(id=self.TRANSACTION_ID)
        database = _Database()
        api = database.spanner_api = _FauxSpannerAPI(
            _begin_transaction_response=transaction_pb,
            _rollback_response=empty_pb)
        session = _Session(database)
        transaction = self._make_one(session)
        # an exception inside the `with` block rolls the transaction back
        with self.assertRaises(Exception):
            with transaction:
                transaction.insert(TABLE_NAME, COLUMNS, VALUES)
                raise Exception("bail out")
        self.assertEqual(transaction.committed, None)
        self.assertTrue(transaction._rolled_back)
        self.assertEqual(len(transaction._mutations), 1)
        self.assertEqual(api._committed, None)
        session_id, txn_id, options = api._rolled_back
        self.assertEqual(session_id, session.name)
        self.assertEqual(txn_id, self.TRANSACTION_ID)
        self.assertEqual(options.kwargs['metadata'],
                         [('google-cloud-resource-prefix', database.name)])
class _Database(object):
    # Minimal stand-in for a spanner Database: the tests above only read
    # ``name`` (used in the google-cloud-resource-prefix metadata) and
    # assign ``spanner_api`` onto instances.
    name = 'testing'
class _Session(object):
    # Stand-in for a spanner Session.  ``_transaction`` defaults to None;
    # the success-path tests assert it is cleared after commit.
    _transaction = None

    def __init__(self, database=None, name=TestTransaction.SESSION_NAME):
        self._database = database
        self.name = name
class _FauxSpannerAPI(_GAXBaseAPI):
    # Test double for the generated GAX Spanner API.  Each method records
    # its arguments on the instance, optionally raises GaxError (when the
    # base class's ``_random_gax_error`` flag is set), and returns the
    # canned ``_*_response`` passed to the constructor.

    _committed = None

    def begin_transaction(self, session, options_, options=None):
        from google.gax.errors import GaxError
        self._begun = (session, options_, options)
        if self._random_gax_error:
            raise GaxError('error')
        return self._begin_transaction_response

    def rollback(self, session, transaction_id, options=None):
        from google.gax.errors import GaxError
        self._rolled_back = (session, transaction_id, options)
        if self._random_gax_error:
            raise GaxError('error')
        return self._rollback_response

    def commit(self, session, mutations,
               transaction_id='', single_use_transaction=None, options=None):
        from google.gax.errors import GaxError
        # Transactional commits never supply a single-use transaction.
        assert single_use_transaction is None
        self._committed = (session, mutations, transaction_id, options)
        if self._random_gax_error:
            raise GaxError('error')
        return self._commit_response
| tartavull/google-cloud-python | spanner/tests/unit/test_transaction.py | Python | apache-2.0 | 15,202 |
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2013 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Document
Provides a ly.document.Document api for a QTextDocument (or: more specifically
a Frescobaldi document.Document).
This can be used to perform operations from the ly module on a loaded
Frescobaldi document.
You don't need to save a Document instance. Just create it and use it, then
discard it.
"""
from PyQt5.QtGui import QTextCursor
import ly.document
import tokeniter
import highlighter
def cursor(cursor, select_all=False):
    """Return a Cursor for the specified QTextCursor.

    The ly Cursor is instantiated with a Document proxying for the
    original cursor's document, so all operations from the ly module
    work on a Frescobaldi document (a QTextDocument subclass).

    If select_all is True and the original cursor has no selection,
    the returned Cursor selects the whole document.

    """
    if select_all and not cursor.hasSelection():
        start, end = 0, None
    else:
        start, end = cursor.selectionStart(), cursor.selectionEnd()
    return Cursor(Document(cursor.document()), start, end)
class Cursor(ly.document.Cursor):
    """A ly.document.Cursor with an extra cursor() method."""
    def cursor(self):
        """Return a QTextCursor covering the same selection."""
        qc = QTextCursor(self.document.document)
        # end is None means "through the end of the document".
        if self.end is None:
            qc.movePosition(QTextCursor.End)
        else:
            qc.setPosition(self.end)
        qc.setPosition(self.start, QTextCursor.KeepAnchor)
        return qc
class Document(ly.document.DocumentBase):
    """Document proxies a loaded Frescobaldi document (QTextDocument).

    This is used to let the tools in the ly module operate on Frescobaldi
    documents.

    Creating a Document is very fast, you do not need to save it. When
    applying the changes, Document starts an editblock, so that the
    operations appears as one undo-item.

    It is recommended to not nest calls to QTextCursor.beginEditBlock(), as
    the highlighter is not called to update the tokens until the last
    endEditBlock() is called.

    Therefore Document provides a simple mechanism for combining several
    change operations via the combine_undo attribute.

    If combine_undo is None (the default), the first time changes are applied
    QTextCursor.beginEditBlock() will be called, but subsequent times
    QTextCursor.joinPreviousEditBlock() will be used. So the highlighter
    updates the tokens between the operations, but they will appear as one
    undo-item.

    If you want to combine the very first operation already with an earlier
    change, set combine_undo to True before the changes are applied (e.g.
    before entering or exiting the context).

    If you do not want to combine operations into a single undo-item at all,
    set combine_undo to False.

    (Of course you can nest calls to QTextCursor.beginEditBlock(), but in
    that case the tokens will not be updated between your operations. If
    your operations do not depend on the tokens, it is no problem
    whatsoever. The tokens *are* updated after the last call to
    QTextCursor.endEditBlock().)

    """
    def __init__(self, document):
        self._d = document
        super(Document, self).__init__()
        # None: start a fresh edit block on the first apply_changes(),
        # then join subsequent ones (see class docstring).
        self.combine_undo = None

    def __len__(self):
        """Return the number of blocks"""
        return self._d.blockCount()

    def __getitem__(self, index):
        """Return the block at the specified index."""
        return self._d.findBlockByNumber(index)

    @property
    def document(self):
        """Return the QTextDocument we were instantiated with."""
        return self._d

    @property
    def filename(self):
        """Return the document's local filename, if any."""
        return self.document.url().toLocalFile()

    def plaintext(self):
        """The document contents as a plain text string."""
        return self._d.toPlainText()

    def setplaintext(self, text):
        """Sets the document contents to the text string."""
        self._d.setPlainText(text)

    def size(self):
        """Return the number of characters in the document."""
        return self._d.characterCount()

    def block(self, position):
        """Return the text block at the specified character position.

        The text block itself has no methods, but it can be used as an
        argument to other methods of this class.

        (Blocks do have to support the '==' operator.)

        """
        return self._d.findBlock(position)

    def index(self, block):
        """Return the linenumber of the block (starting with 0)."""
        return block.blockNumber()

    def position(self, block):
        """Return the position of the specified block."""
        return block.position()

    def text(self, block):
        """Return the text of the specified block."""
        return block.text()

    def next_block(self, block):
        """Return the next block, which may be invalid."""
        return block.next()

    def previous_block(self, block):
        """Return the previous block, which may be invalid."""
        return block.previous()

    def isvalid(self, block):
        """Return True if the block is a valid block."""
        return block.isValid()

    def apply_changes(self):
        """Apply the changes and update the tokens."""
        c = QTextCursor(self._d)
        # record a sensible position for undo
        c.setPosition(self._changes_list[-1][0])
        c.joinPreviousEditBlock() if self.combine_undo else c.beginEditBlock()
        try:
            # NOTE(review): changes are applied in the order stored by
            # ly.document; presumably they are sorted so earlier edits do
            # not shift later positions -- confirm in DocumentBase.
            for start, end, text in self._changes_list:
                # Select [start, end) (end None = document end) by setting
                # the position, then extending back to start with the
                # anchor kept; insertText() then replaces the selection.
                c.movePosition(QTextCursor.End) if end is None else c.setPosition(end)
                c.setPosition(start, QTextCursor.KeepAnchor)
                c.insertText(text)
        finally:
            c.endEditBlock()
        if self.combine_undo is None:
            self.combine_undo = True

    def tokens(self, block):
        """Return the tuple of tokens of the specified block."""
        return tokeniter.tokens(block)

    def initial_state(self):
        """Return the state at the beginning of the document."""
        return highlighter.highlighter(self._d).initialState()

    def state(self, block):
        """Return the state at the start of the specified block."""
        return tokeniter.state(block)

    def state_end(self, block):
        """Return the state at the end of the specified block."""
        return tokeniter.state_end(block)
class Runner(ly.document.Runner):
    """A Runner that adds a cursor() method, returning a QTextCursor."""
    def cursor(self, start=0, end=None):
        """Returns a QTextCursor for the last token.

        start and end are offsets from the beginning of the token;
        start defaults to 0, end defaults to the token's length.

        """
        if end is None:
            end = len(self.token())
        pos = self.position()
        qc = QTextCursor(self.document.document)
        qc.setPosition(pos + start)
        qc.setPosition(pos + end, QTextCursor.KeepAnchor)
        return qc
class Source(ly.document.Source):
    """A Source that adds a cursor() method, returning a QTextCursor."""
    def cursor(self, token, start=0, end=None):
        """Returns a QTextCursor for the specified token.

        start and end are offsets from the beginning of the token;
        start defaults to 0, end defaults to the token's length.

        """
        if end is None:
            end = len(token)
        base = self.position(token)
        begin_pos = base + start
        end_pos = base + end
        qc = QTextCursor(self.document.document)
        qc.setPosition(begin_pos)
        qc.setPosition(end_pos, QTextCursor.KeepAnchor)
        return qc
| brownian/frescobaldi | frescobaldi_app/lydocument.py | Python | gpl-2.0 | 8,870 |
from django.conf.urls import patterns, url
# Route POST/GET on /sql/ to the SQL-parsing view.
# NOTE(review): ``patterns()`` and string view references were deprecated
# in Django 1.8 and removed in 1.10 -- when upgrading, migrate to a plain
# list of url()/path() entries with an imported callable view.  TODO:
# confirm the Django version this project targets.
urlpatterns = patterns('',
    url(r'^sql/$', 'sqlparser.views.parse_sql'),
)
| slack-sqlbot/slack-sqlbot | slack_sqlbot/urls.py | Python | mit | 164 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.