| repo_name (string) | path (string) | copies (class) | size (string) | content (string) | license (class) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
amenonsen/ansible
|
lib/ansible/modules/network/fortios/fortios_system_external_resource.py
|
1
|
10703
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_external_resource
short_description: Configure external resource in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS device by allowing the
user to set and modify system feature and external_resource category.
Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures that the FortiGate certificate is verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
choices:
- present
- absent
system_external_resource:
description:
- Configure external resource.
default: null
type: dict
suboptions:
category:
description:
- User resource category.
type: int
comments:
description:
- Comment.
type: str
name:
description:
- External resource name.
required: true
type: str
refresh_rate:
description:
- Time interval to refresh external resource (1 - 43200 min, default = 5 min).
type: int
resource:
description:
- URI of external resource.
type: str
status:
description:
- Enable/disable user resource.
type: str
choices:
- enable
- disable
type:
description:
- User resource type.
type: str
choices:
- category
- address
- domain
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure external resource.
fortios_system_external_resource:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_external_resource:
category: "3"
comments: "<your_own_value>"
name: "default_name_5"
refresh_rate: "6"
resource: "<your_own_value>"
status: "enable"
type: "category"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_external_resource_data(json):
option_list = ['category', 'comments', 'name',
'refresh_rate', 'resource', 'status',
'type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
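# Illustrative note (editorial addition): underscore_to_hyphen rewrites dict
# keys so that Ansible-style option names match the hyphenated FortiOS API
# fields, e.g. a hypothetical {'refresh_rate': 5} becomes {'refresh-rate': 5}.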
def system_external_resource(data, fos):
vdom = data['vdom']
state = data['state']
system_external_resource_data = data['system_external_resource']
filtered_data = underscore_to_hyphen(filter_system_external_resource_data(system_external_resource_data))
if state == "present":
return fos.set('system',
'external-resource',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system',
'external-resource',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_external_resource']:
resp = system_external_resource(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_external_resource": {
"required": False, "type": "dict", "default": None,
"options": {
"category": {"required": False, "type": "int"},
"comments": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"refresh_rate": {"required": False, "type": "int"},
"resource": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"type": {"required": False, "type": "str",
"choices": ["category", "address", "domain"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
gpl-3.0
| 169,487,628,331,970,240
| 29.234463
| 109
| 0.579464
| false
| 4.350813
| false
| false
| false
|
pinballwizard/horse
|
horse/settings.py
|
1
|
6060
|
"""
Django settings for horse project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+(k#-e2yrqo%^f!ga0ka7f!yy_cv0)_uj-h$avn-tgah%9umzg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# 'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
# 'chart_tools',
'base',
'flatlease',
'car_leasing',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'horse.urls'
LOGIN_URL = 'login'
ADMINS = (
('Vasiliy', 'menstenebris@gmail.com'),
)
MANAGERS = (
('Vasiliy', 'menstenebris@gmail.com'),
('Vera', 'securicula@gmail.com'),
)
SERVER_EMAIL = 'horse@django' # Email sender address
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'horse.wsgi.application'
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'formatters': {
# 'verbose': {
# 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
# },
# 'simple': {
# 'format': '%(levelname)s %(message)s'
# },
# },
# 'handlers': {
# # 'file': {
# # 'level': 'DEBUG',
# # 'class': 'logging.handlers.TimedRotatingFileHandler',
# # 'when': 'd',
# # # 'interval': '1',
# # 'encoding': 'UTF8',
# # 'formatter': 'simple',
# # 'filename': '/var/log/horse/debug.log',
# # },
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'encoding': 'UTF8',
# 'formatter': 'verbose',
# 'filename': '/var/log/horse/debug.log',
# },
# 'null': {
# 'level': 'DEBUG',
# 'class': 'logging.NullHandler',
# },
# 'console': {
# 'level': 'INFO',
# 'class': 'logging.StreamHandler',
# 'formatter': 'simple'
# },
# 'mail_admins': {
# 'level': 'ERROR',
# 'class': 'django.utils.log.AdminEmailHandler',
# 'include_html': True,
# }
# },
# 'loggers': {
# 'django': {
# 'handlers': ['console', 'file'],
# 'propagate': True,
# 'level': 'INFO',
# },
# 'django.request': {
# 'handlers': ['mail_admins', 'console', 'file'],
# 'level': 'INFO',
# 'propagate': False,
# },
# 'flatlease': {
# 'handlers': ['console', 'file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# 'django.db.backends': {
# 'handlers': ['mail_admins'],
# 'level': 'ERROR',
# 'propagate': False,
# },
# 'django.security.DisallowedHost': {
# 'handlers': ['mail_admins'],
# 'propagate': False,
# },
# },
# }
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '54.93.55.209',
'PORT': '5432',
'NAME': 'horse',
'USER': 'django',
'PASSWORD': '14875264',
},
'flat': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '54.93.55.209',
'PORT': '5432',
'NAME': 'flat',
'USER': 'django',
'PASSWORD': '14875264',
},
'car': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '54.93.55.209',
'PORT': '5432',
'NAME': 'car',
'USER': 'django',
'PASSWORD': '14875264',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Asia/Krasnoyarsk'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.2006', '25.10.06'
'%d-%m-%Y', '%d/%m/%Y', '%d/%m/%y', # '25-10-2006', '25/10/2006', '25/10/06'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006',
'%d.%m.%Y',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = 'media/'
|
gpl-3.0
| 4,460,702,238,426,681,000
| 26.207207
| 97
| 0.539493
| false
| 3.213944
| false
| false
| false
|
MightyPork/stm32-asm-examples
|
registers/gen/to_asm_f30x.py
|
1
|
4316
|
from cmsis_svd.parser import SVDParser
import json
import re
# ------------------------------------
svd_name = 'STM32F30x.svd'
want_ofs = True
want_len = True
# Do not print peripheral field definitions (same as first instance)
no_print_fields = [
'GPIOB',
'GPIOC',
'GPIOD',
'GPIOE',
'GPIOF',
'GPIOG',
'USART2',
'USART3',
'ADC2',
'ADC3',
'ADC4',
'ADC34',
'I2C2',
'I2C3',
'SPI2',
'SPI3',
]
# Same registers as... (points to first instance)
same_regs_as = {
'GPIOB': 'GPIOA',
'GPIOC': 'GPIOA',
'GPIOD': 'GPIOA',
'GPIOE': 'GPIOA',
'GPIOF': 'GPIOA',
'GPIOG': 'GPIOG',
'GPIOH': 'GPIOH',
'USART2': 'USART1',
'USART3': 'USART1',
'TIM4': 'TIM3',
'DAC2': 'DAC1',
'ADC2': 'ADC1',
'ADC3': 'ADC1',
'ADC4': 'ADC1',
'ADC34': 'ADC12',
'I2C2': 'I2C1',
'I2C3': 'I2C1',
'SPI2': 'SPI1',
'SPI3': 'SPI1',
}
# Rename peripheral when building field definitions
# Used for multiple instances (build fields only for the first)
periph_rename_for_field = {
'GPIOA': 'GPIO',
'USART1': 'USART',
'DAC1': 'DAC',
'ADC12': 'ADCC',
'I2C1': 'I2C'
}
# Rename peripheral when generating (bad name in SVD)
periph_rename = {
'ADC1_2': 'ADC12',
'ADC3_4': 'ADC34',
'Flash': 'FLASH'
}
# ------------------------------------
base_line = "{0:<30} EQU {1:#x}"
reg_line = "{0:<30} EQU ({1}_BASE + {2:#x})"
field_line = "{0:<30} EQU {1:#010x}"
field_ofs_line = "{0:<30} EQU {1:#d}"
field_len_line = field_ofs_line
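# Illustrative examples of the rendered output (addresses and names are
# hypothetical, shown only to document the templates above):
#   base_line  -> "GPIOA_BASE                     EQU 0x48000000"
#   reg_line   -> "GPIOA_MODER                    EQU (GPIOA_BASE + 0x0)"
#   field_line -> "GPIO_MODER_MODER0              EQU 0x00000003"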
def comment_str(x):
if x is None:
return ''
return '; %s' % re.sub(r"[\s\n]+", ' ', x.replace('\n',' '))
def comment(x):
print(comment_str(x))
def banner(x):
comment('==== {:=<55}'.format("%s " % x))
def caption(x):
print()
comment('---- {:-<55}'.format("%s " % x))
# ------------------------------------
parser = SVDParser.for_packaged_svd('STMicro', svd_name)
device = parser.get_device()
print()
banner('%s PERIPHERALS' % device.name)
comment('')
comment('CTU Prague, FEL, Department of Measurement')
comment('')
comment('-' * 60)
comment('')
comment('Generated from "%s"' % svd_name)
comment('')
comment('SVD parsing library (c) Paul Osborne, 2015-2016')
comment(' https://github.com/posborne/cmsis-svd')
comment('ASM building script (c) Ondrej Hruska, 2016')
comment('')
comment('=' * 60)
print()
# periph registers
def print_registers(peripheral, pname=None):
if pname is None:
pname = periph_rename.get(peripheral.name, peripheral.name)
for register in peripheral.registers:
print(reg_line.format("%s_%s" % (pname, register.name), pname, register.address_offset), end=' ')
comment(register.description)
# periph fields
def print_fields(peripheral, pname=None):
if pname is None:
pname = periph_rename.get(peripheral.name, peripheral.name)
for register in peripheral.registers:
print()
comment('%s_%s fields:' % (pname, register.name))
print()
for field in register.fields:
mask = ((1 << field.bit_width) - 1) << field.bit_offset
f_pname = periph_rename_for_field.get(pname, pname)
print(field_line.format("%s_%s_%s" % (f_pname, register.name, field.name), mask), end=' ')
comment(field.description)
if want_ofs:
print(field_ofs_line.format("%s_%s_%s_ofs" % (f_pname, register.name, field.name), field.bit_offset))
if want_len:
print(field_len_line.format("%s_%s_%s_len" % (f_pname, register.name, field.name), field.bit_width))
print()
# Print the list
periph_dict = {}
for peripheral in device.peripherals:
periph_name = periph_rename.get(peripheral.name, peripheral.name)
# add to a dict for referencing by name
periph_dict[periph_name] = peripheral
# -----
caption(periph_name)
comment('Desc: %s' % peripheral.description)
print()
comment('%s base address:' % periph_name)
print(base_line.format("%s_BASE" % periph_name, peripheral.base_address))
print()
comment('%s registers:' % periph_name)
print()
# Registers
if periph_name in same_regs_as:
print_registers(periph_dict[same_regs_as[periph_name]], pname=periph_name)
else:
print_registers(peripheral)
if periph_name in no_print_fields:
comment('Fields the same as in the first instance.')
continue
# Fields
if periph_name in same_regs_as:
print_fields(periph_dict[same_regs_as[periph_name]], pname=periph_name)
else:
print_fields(peripheral)
print(' END\n')
|
mit
| -8,582,438,179,664,634,000
| 19.951456
| 105
| 0.63114
| false
| 2.541814
| false
| false
| false
|
alfredodeza/remoto
|
remoto/backends/__init__.py
|
1
|
11528
|
import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,
detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None # wait for ever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# Error will come as a string of a traceback, remove everything
# up to the actual exception since we do get garbage otherwise
# that points to non-existent lines in the compiled code
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class makes it possible to ship Python code over to the
remote node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
# check python interpreter
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Determine whether ``hostname`` refers to a remote host (and therefore needs an
SSH connection) by comparing it against the local hostname and FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
def get_python_executable(conn):
"""
Try to determine the remote Python version so that it can be used
when executing. Avoids the problem of different Python versions, or distros
that do not use ``python`` but do ``python3``
"""
# executables in order of preference:
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug('trying to determine remote python executable with %s' % executable)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' % executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
# if all fails, we just return whatever the main connection had
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
return conn.interpreter
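# --- Illustrative usage sketch (editorial addition, not part of remoto) ---
# A minimal example of how the pieces above fit together, assuming a
# hypothetical host 'node1' that is reachable over SSH and has Python
# installed; remoto.process.check (imported above) returns stdout, stderr
# and the exit code of the remote command.
if __name__ == '__main__':
    conn = BaseConnection('node1', use_ssh=True)
    out, err, code = check(conn, ['uname', '-r'])
    print(out)
    conn.exit()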
|
mit
| -5,670,547,759,678,942,000
| 34.470769
| 111
| 0.581974
| false
| 4.438968
| false
| false
| false
|
fergalmoran/robotopro
|
promotions/mixins.py
|
1
|
1179
|
import json
from django.http import HttpResponse
class AjaxableResponseMixin(object):
"""
Mixin to add AJAX support to a form.
Must be used with an object-based FormView (e.g. CreateView)
"""
def render_to_json_response(self, context, **response_kwargs):
data = json.dumps(context)
response_kwargs['content_type'] = 'application/json'
return HttpResponse(data, **response_kwargs)
def form_invalid(self, form):
response = super(AjaxableResponseMixin, self).form_invalid(form)
if self.request.is_ajax():
return self.render_to_json_response(form.errors, status=400)
else:
return response
def form_valid(self, form):
# We make sure to call the parent's form_valid() method because
# it might do some processing (in the case of CreateView, it will
# call form.save() for example).
response = super(AjaxableResponseMixin, self).form_valid(form)
if self.request.is_ajax():
data = {
'pk': self.object.pk,
}
return self.render_to_json_response(data)
else:
return response
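# Illustrative usage (editorial sketch, not part of the original module): mix
# AjaxableResponseMixin into an object-based FormView so that AJAX submissions
# receive JSON while normal requests keep the default redirect behaviour.
# ``Promotion`` is a hypothetical model used only for illustration.
#
# from django.views.generic.edit import CreateView
# from promotions.models import Promotion
#
# class PromotionCreateView(AjaxableResponseMixin, CreateView):
#     model = Promotion
#     fields = ['name']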
|
apache-2.0
| -6,176,625,241,103,926,000
| 34.727273
| 73
| 0.622561
| false
| 4.023891
| false
| false
| false
|
melqkiades/yelp
|
source/python/perfomancetest/context_recommender_tests.py
|
1
|
16102
|
import cPickle as pickle
import copy
import time
import itertools
import numpy
from etl import ETLUtils
from evaluation import precision_in_top_n
from recommenders.context.baseline.simple_user_baseline_calculator import \
SimpleUserBaselineCalculator
from recommenders.context.baseline.user_baseline_calculator import \
UserBaselineCalculator
from recommenders.context.contextual_knn import ContextualKNN
from recommenders.context.neighbour_contribution.neighbour_contribution_calculator import \
NeighbourContributionCalculator
from recommenders.context.neighbour_contribution.context_nc_calculator import \
ContextNCCalculator
from recommenders.context.neighbourhood.context_neighbourhood_calculator import \
ContextNeighbourhoodCalculator
from recommenders.context.neighbourhood.context_hybrid_neighbourhood_calculator import \
ContextHybridNeighbourhoodCalculator
from recommenders.context.neighbourhood.simple_neighbourhood_calculator import \
SimpleNeighbourhoodCalculator
from recommenders.context.similarity.cbc_similarity_calculator import \
CBCSimilarityCalculator
from recommenders.context.similarity.cosine_similarity_calculator import \
CosineSimilarityCalculator
from recommenders.context.similarity.pbc_similarity_calculator import \
PBCSimilarityCalculator
from recommenders.context.similarity.pearson_similarity_calculator import \
PearsonSimilarityCalculator
from topicmodeling.context import lda_context_utils
from topicmodeling.context.lda_based_context import LdaBasedContext
from tripadvisor.fourcity import recommender_evaluator
from tripadvisor.fourcity import extractor
__author__ = 'fpena'
RMSE_HEADERS = [
'dataset',
'cache_reviews',
'num_records',
'reviews_type',
'cross_validation_folds',
'RMSE',
'MAE',
'coverage',
'time',
'name',
'neighbourhood_calculator',
'neighbour_contribution_calculator',
'user_baseline_calculator',
'user_similarity_calculator',
'num_neighbours',
'num_topics',
'threshold1',
'threshold2',
'threshold3',
'threshold4'
]
TOPN_HEADERS = [
'dataset',
'cache_reviews',
'num_records',
'reviews_type',
'cross_validation_folds',
'min_like_score',
'top_n',
'recall',
'coverage',
'time',
'name',
'neighbourhood_calculator',
'neighbour_contribution_calculator',
'user_baseline_calculator',
'user_similarity_calculator',
'num_neighbours',
'num_topics',
'threshold1',
'threshold2',
'threshold3',
'threshold4'
]
def get_knn_recommender_info(recommender):
recommender_name = recommender.__class__.__name__
nc_name = recommender.neighbourhood_calculator.__class__.__name__
ncc_name = recommender.neighbour_contribution_calculator.__class__.__name__
ubc_name = recommender.user_baseline_calculator.__class__.__name__
usc_name = recommender.user_similarity_calculator.__class__.__name__
recommender_info_map = {}
recommender_info_map['name'] = recommender_name
recommender_info_map['neighbourhood_calculator'] = nc_name
recommender_info_map['neighbour_contribution_calculator'] = ncc_name
recommender_info_map['user_baseline_calculator'] = ubc_name
recommender_info_map['user_similarity_calculator'] = usc_name
recommender_info_map['num_neighbours'] = recommender.num_neighbours
recommender_info_map['num_topics'] = recommender.num_topics
recommender_info_map['threshold1'] = recommender.threshold1
recommender_info_map['threshold2'] = recommender.threshold2
recommender_info_map['threshold3'] = recommender.threshold3
recommender_info_map['threshold4'] = recommender.threshold4
return recommender_info_map
def load_records(json_file):
records = ETLUtils.load_json_file(json_file)
fields = ['user_id', 'business_id', 'stars', 'text']
records = ETLUtils.select_fields(fields, records)
# We rename the 'stars' field to 'overall_rating' to take advantage of the
# function extractor.get_user_average_overall_rating
for record in records:
record['overall_rating'] = record.pop('stars')
record['offering_id'] = record.pop('business_id')
return records
def run_rmse_test(
records_file, recommenders, binary_reviews_file, reviews_type=None):
records = load_records(records_file)
# records = extractor.remove_users_with_low_reviews(records, 2)
with open(binary_reviews_file, 'rb') as read_file:
binary_reviews = pickle.load(read_file)
if len(records) != len(binary_reviews):
raise ValueError("The records and reviews should have the same length")
num_folds = 5
dataset_info_map = {}
dataset_info_map['dataset'] = records_file.split('/')[-1]
dataset_info_map['cache_reviews'] = binary_reviews_file.split('/')[-1]
dataset_info_map['num_records'] = len(records)
dataset_info_map['reviews_type'] = reviews_type
dataset_info_map['cross_validation_folds'] = num_folds
results_list = []
results_log_list = []
count = 0
print('Total recommenders: %d' % (len(recommenders)))
for recommender in recommenders:
print('\n**************\n%d/%d\n**************' %
(count, len(recommenders)))
results = recommender_evaluator.perform_cross_validation(
records, recommender, num_folds, binary_reviews, reviews_type)
results_list.append(results)
remaining_time = results['Execution time'] * (len(recommenders) - count)
remaining_time /= 3600
print('Estimated remaining time: %.2f hours' % remaining_time)
count += 1
for recommender, results in zip(recommenders, results_list):
results_log_list.append(process_rmse_results(recommender, results, dataset_info_map))
timestamp = time.strftime("%Y%m%d-%H%M%S")
file_name = 'recommender-rmse-results' + timestamp
ETLUtils.save_csv_file(file_name + '.csv', results_log_list, RMSE_HEADERS, '\t')
def run_top_n_test(
records_file, recommenders, binary_reviews_file, reviews_type=None):
records = load_records(records_file)
# records = extractor.remove_users_with_low_reviews(records, 2)
with open(binary_reviews_file, 'rb') as read_file:
binary_reviews = pickle.load(read_file)
if len(records) != len(binary_reviews):
raise ValueError("The records and reviews should have the same length")
num_folds = 5
split = 0.986
min_like_score = 5.0
top_n = 10
dataset_info_map = {}
dataset_info_map['dataset'] = records_file.split('/')[-1]
dataset_info_map['cache_reviews'] = binary_reviews_file.split('/')[-1]
dataset_info_map['num_records'] = len(records)
dataset_info_map['reviews_type'] = reviews_type
dataset_info_map['cross_validation_folds'] = num_folds
dataset_info_map['min_like_score'] = min_like_score
dataset_info_map['top_n'] = top_n
results_list = []
results_log_list = []
count = 0
print('Total recommenders: %d' % (len(recommenders)))
for recommender in recommenders:
print('\n**************\nProgress: %d/%d\n**************' %
(count, len(recommenders)))
print(get_knn_recommender_info(recommender))
results = precision_in_top_n.calculate_recall_in_top_n(
records, recommender, top_n, num_folds, split, min_like_score,
binary_reviews, reviews_type)
results_list.append(results)
remaining_time = results['Execution time'] * (len(recommenders) - count)
remaining_time /= 3600
print('Estimated remaining time: %.2f hours' % remaining_time)
count += 1
for recommender, results in zip(recommenders, results_list):
results_log_list.append(process_topn_results(recommender, results, dataset_info_map))
timestamp = time.strftime("%Y%m%d-%H%M%S")
file_name = 'recommender-topn-results' + timestamp
ETLUtils.save_csv_file(file_name + '.csv', results_log_list, TOPN_HEADERS, '\t')
def process_rmse_results(recommender, results, dataset_info):
log = dataset_info.copy()
log.update(get_knn_recommender_info(recommender))
log['MAE'] = results['MAE']
log['RMSE'] = results['RMSE']
log['coverage'] = results['Coverage']
log['time'] = results['Execution time']
return log
def process_topn_results(recommender, results, dataset_info):
log = dataset_info.copy()
log.update(get_knn_recommender_info(recommender))
log['recall'] = results['Top N']
log['coverage'] = results['Coverage']
log['time'] = results['Execution time']
return log
def combine_recommenders(
neighbourhood_calculators,
neighbour_contribution_calculators,
baseline_calculators,
similarity_calculators,
num_neighbours_list,
thresholds,
num_topics_list):
combined_recommenders = []
for neighbourhood_calculator,\
neighbour_contribution_calculator,\
baseline_calculator,\
similarity_calculator,\
num_neighbours,\
threshold,\
num_topics\
in itertools.product(
neighbourhood_calculators,
neighbour_contribution_calculators,
baseline_calculators,
similarity_calculators,
num_neighbours_list,
thresholds,
num_topics_list):
recommender = ContextualKNN(
None, None, None, None, None, has_context=True)
recommender.neighbourhood_calculator = neighbourhood_calculator
recommender.neighbour_contribution_calculator =\
neighbour_contribution_calculator
recommender.user_baseline_calculator = baseline_calculator
recommender.user_similarity_calculator = similarity_calculator
recommender.num_neighbours = num_neighbours
recommender.threshold1 = threshold
recommender.threshold2 = threshold
recommender.threshold3 = threshold
recommender.threshold4 = threshold
recommender.num_topics = num_topics
combined_recommenders.append(recommender)
return combined_recommenders
def get_recommenders_set():
# nc = ContextNeighbourhoodCalculator()
# ncc = NeighbourContributionCalculator()
# ubc = UserBaselineCalculator()
# usc = PBCSimilarityCalculator()
# cosine_usc = CBCSimilarityCalculator()
# Similarity calculators
cosine_sc = CosineSimilarityCalculator()
pearson_sc = PearsonSimilarityCalculator()
pbc_sc = PBCSimilarityCalculator()
cbu_sc = CBCSimilarityCalculator()
similarity_calculators = [
cosine_sc,
pearson_sc,
pbc_sc,
cbu_sc
]
# Neighbourhood calculators
simple_nc = SimpleNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
context_nc = ContextNeighbourhoodCalculator()
# hybrid_nc0 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
# hybrid_nc0.weight = 0.0
hybrid_nc02 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
hybrid_nc02.weight = 0.2
hybrid_nc05 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
hybrid_nc05.weight = 0.5
hybrid_nc08 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
hybrid_nc08.weight = 0.8
# hybrid_nc1 = ContextHybridNeighbourhoodCalculator(copy.deepcopy(pearson_sc))
# hybrid_nc1.weight = 1.0
neighbourhood_calculators = [
simple_nc,
context_nc,
# hybrid_nc0,
# hybrid_nc02,
hybrid_nc05,
# hybrid_nc08,
# hybrid_nc1
]
# Baseline calculators
simple_ubc = SimpleUserBaselineCalculator()
ubc = UserBaselineCalculator()
baseline_calculators = [
ubc,
simple_ubc
]
# Neighbour contribution calculators
ncc = NeighbourContributionCalculator()
context_ncc = ContextNCCalculator()
neighbour_contribution_calculators = [
ncc,
# context_ncc
]
num_topics = 150
# num_neighbours = None
numpy.random.seed(0)
basic_cosine_knn = ContextualKNN(num_topics, simple_nc, ncc, simple_ubc, cosine_sc, has_context=False)
basic_pearson_knn = ContextualKNN(num_topics, simple_nc, ncc, simple_ubc, pearson_sc, has_context=False)
contextual_knn = ContextualKNN(num_topics, context_nc, ncc, ubc, pbc_sc, has_context=True)
# get_knn_recommender_info(contextual_knn1)
# ocelma_recommender = OcelmaRecommender()
recommenders = [
# basic_cosine_knn,
# basic_pearson_knn,
contextual_knn
# ocelma_recommender
]
num_neighbours_list = [None]
# num_neighbours_list = [None, 3, 6, 10, 15, 20]
threshold_list = [0.0, 0.5, 0.9]
# threshold_list = [0.0]
# num_topics_list = [10, 50, 150, 300, 500]
num_topics_list = [150]
# combined_recommenders = []
# for recommender, num_neighbours in itertools.product(recommenders, num_neighbours_list):
# new_recommender = copy.deepcopy(recommender)
# new_recommender.num_neighbours = num_neighbours
# combined_recommenders.append(new_recommender)
# threshold_list = [None]
#
# combined_recommenders = []
# for recommender, threshold in itertools.product(recommenders, threshold_list):
# new_recommender = copy.deepcopy(recommender)
# new_recommender.threshold1 = threshold
# new_recommender.threshold2 = threshold
# new_recommender.threshold3 = threshold
# new_recommender.threshold4 = threshold
# combined_recommenders.append(new_recommender)
# num_threshold_list = [0.2, 0.5, 0.7]
combined_recommenders = combine_recommenders(
neighbourhood_calculators,
neighbour_contribution_calculators,
baseline_calculators,
similarity_calculators,
num_neighbours_list,
threshold_list,
num_topics_list
)
baseline_recommender = ContextualKNN(num_topics, simple_nc, ncc, simple_ubc, pearson_sc, has_context=True)
best_recommender = ContextualKNN(num_topics, hybrid_nc05, ncc, simple_ubc, pbc_sc, has_context=True)
# best_recommender = ContextualKNN(num_topics, simple_nc, ncc, ubc, cosine_sc, has_context=True)
best_recommender.threshold1 = 0.9
best_recommender.threshold2 = 0.9
best_recommender.threshold3 = 0.9
best_recommender.threshold4 = 0.9
my_recommenders = [
# baseline_recommender,
best_recommender
]
return my_recommenders
# return combined_recommenders
def main():
print('Process start: %s' % time.strftime("%Y/%d/%m-%H:%M:%S"))
folder = '/Users/fpena/UCC/Thesis/datasets/context/'
my_records_file = folder + 'yelp_training_set_review_hotels_shuffled.json'
# my_records_file = folder + 'yelp_training_set_review_restaurants_shuffled.json'
# my_records_file = folder + 'yelp_training_set_review_spas_shuffled.json'
# my_binary_reviews_file = folder + 'reviews_restaurant_shuffled.pkl'
my_binary_reviews_file = folder + 'reviews_hotel_shuffled.pkl'
# my_binary_reviews_file = folder + 'reviews_restaurant_shuffled_20.pkl'
# my_binary_reviews_file = folder + 'reviews_spa_shuffled_2.pkl'
# my_binary_reviews_file = folder + 'reviews_context_hotel_2.pkl'
combined_recommenders = get_recommenders_set()
run_rmse_test(my_records_file, combined_recommenders, my_binary_reviews_file)
# run_top_n_test(my_records_file, combined_recommenders, my_binary_reviews_file)
# run_rmse_test(my_records_file, combined_recommenders[47:], my_binary_reviews_file, 'specific')
# run_top_n_test(my_records_file, combined_recommenders, my_binary_reviews_file, 'specific')
# run_rmse_test(my_records_file, combined_recommenders[47:], my_binary_reviews_file, 'generic')
# run_top_n_test(my_records_file, combined_recommenders, my_binary_reviews_file, 'generic')
start = time.time()
main()
end = time.time()
total_time = end - start
# print("Total time = %f seconds" % total_time)
|
lgpl-2.1
| 8,918,031,726,753,243,000
| 34.234136
| 110
| 0.679232
| false
| 3.41361
| true
| false
| false
|
pyfidelity/zfsbackup
|
zfsbackup/zfs.py
|
1
|
6372
|
#
# Copyright (c) 2010, Mij <mij@sshguard.net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and
# the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# See http://mij.oltrelinux.com/devel/zfsbackup/
# Bitch to mij@sshguard.net
#
# module zfs
import os
import subprocess
ZFS_DEFAULT_SNAPSHOT_DIR='/.zfs/snapshot'
def pass_zfs_pool(f):
"""Decorator to pass the appropriate ZFS pool parameter at runtime, if none specified.
Calls f(original args, zpool=value)."""
def _decorator(*args, **kwargs):
if 'zpool' not in kwargs.keys() or not kwargs['zpool']:
# default to first zpool
kwargs.update({'zpool': get_default_pool()})
return f(*args, **kwargs)
return _decorator
def get_pools():
"""Return a list of ZFS pools available on the system"""
command = 'zpool list -H'
try:
p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
except OSError:
raise Exception('No ZFS tools found!')
zpout, zperr = p.communicate()
if p.returncode:
raise Exception("Error executing '%s': %d" % (command, p.returncode))
return [line.split('\t', 1)[0] for line in zpout.split('\n') if line]
def get_default_pool():
"""Return the primary ZFS pool configured in the system"""
return os.environ.get('ZFS_POOL', get_pools()[0])
@pass_zfs_pool
def get_datasets(zpool=None, strip_poolname=True):
"""Return a list of ZFS datasets available in a specific pool, or in all.
The root dataset is returned as an empty string."""
if zpool and zpool not in get_pools():
raise Exception("Pool '%s' is not available on this system!" % zpool)
command = 'zfs list -t filesystem -H'
try:
p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
except OSError:
raise Exception("zfs not found. Cannot execute '%s'" % command)
zfsout, zfserr = p.communicate()
if p.returncode:
print "Error executing '%s': %d" % (command, p.returncode)
return []
datasets = []
for line in zfsout.split('\n'):
dsname = line.split('\t', 1)[0]
if not dsname: continue
dspool, sep, mountpoint = dsname.partition('/')
if zpool and dspool != zpool:
continue
if strip_poolname:
# produce '/my/mountpoint' for children and '' for root dataset
datasets.append(sep + mountpoint)
else:
datasets.append(dsname)
return datasets
@pass_zfs_pool
def destroy_snapshot(snapname, dataset='', recursive=True, zpool=None):
"""Remove a snapshot, from root or in a specific dataset.
If dataset is not specified, the snapshot is destroyed from the root.
If a zpool is specified, remove from there; else remove from the default zpool."""
fullsnapname = "%s%s@%s" % (zpool, dataset, snapname)
print "Destroying snapshot '%s'" % fullsnapname
if recursive:
command = 'zfs destroy -r %s' % fullsnapname
else:
command = 'zfs destroy %s' % fullsnapname
#print "Exec '%s'" % command
assert command.find('@') != -1 # we are not destroying datasets, only snapshots
p = subprocess.Popen(command.split(' '))
p.wait()
if p.returncode != 0 and p.returncode != 1: # 1 = snapshot did not exist. We can stand that
raise Exception("Error executing '%s': %d" % (command, p.returncode))
@pass_zfs_pool
def take_snapshot(snapname, restrictdatasets=None, nodatasets=None, recursive=True, zpool=None):
"""Take a recursive snapshot with the given name, possibly excluding some datasets.
restrictdatasets and nodatasets are optional lists of datasets to include or exclude
from the recursive snapshot."""
# take recursive snapshot of all datasets...
fullsnapname = '%s@%s' % (zpool, snapname)
print "Taking snapshot '%s'" % fullsnapname
if restrictdatasets:
restrictdatasets = [ds.rstrip('/') for ds in restrictdatasets]
print "Restricting to:", str(restrictdatasets)
print "Excluding:", str(nodatasets)
if recursive:
command = 'zfs snapshot -r %s' % fullsnapname
else:
command = 'zfs snapshot %s' % fullsnapname
#print "Exec '%s'" % command
p = subprocess.Popen(command.split(' '))
p.wait()
if p.returncode:
raise Exception("Error executing '%s': %d" % (command, p.returncode))
# ... then prune away undesired datasets if necessary
if restrictdatasets:
# remove whatever is not required, under ours
for ds in get_datasets():
# do not remove /usr/foo if there is any wanted dataset starting with /usr
if not filter(lambda x: ds.startswith(x), restrictdatasets):
destroy_snapshot(snapname, ds, recursive=False)
if nodatasets:
# remove whatever is explicitly excluded
for ds in get_datasets():
if ds in nodatasets:
destroy_snapshot(snapname, dataset=ds, recursive=True)
def get_snapshots(dataset=''):
"""Return the list of snapshots order by increasing timestamp"""
# filter my tags
return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)
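# --- Illustrative usage (editorial sketch, not part of the original module) ---
# These helpers shell out to the zfs/zpool tools, so this only works on a host
# with ZFS available; the snapshot name and excluded dataset are hypothetical.
if __name__ == '__main__':
    print get_pools()
    print get_datasets(strip_poolname=False)
    take_snapshot('zfsbackup-demo', nodatasets=['/tmp'])
    destroy_snapshot('zfsbackup-demo', recursive=True)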
|
bsd-2-clause
| 4,368,549,535,226,458,600
| 39.075472
| 96
| 0.672003
| false
| 3.950403
| false
| false
| false
|
mnjy/critters
|
CrittersProto/generator/boxworm_algorithm.py
|
1
|
1821
|
#####################################################################
#
# hummingloop_algorithm.py
#
# Copyright (c) 2015, Nick Benson
# Modifications by benchan
#
# Released under the MIT License (http://opensource.org/licenses/MIT)
#
#####################################################################
import random as r
def choose_notes():
notes = []
pattern = r.choice(PATTERNS)
subpat_dict = {}
# Generate subpattern dictionary
for mapping in pattern[1]:
# Generate subpattern
new_subpat = []
subpat_probs = r.choice(HIT_PROBS)
for i in range(mapping[1]):
if r.random() < subpat_probs[i]:
new_subpat.append(r.choice(HITS))
else:
new_subpat.append(-1)
subpat_dict[mapping[0]] = new_subpat
# Generate notes based on pattern
for char in pattern[0]:
notes += subpat_dict[char]
# Late-pass mutation: Ensure first-note hit
notes[0] = r.choice(HITS)
# Late-pass mutation: Alternate rapid sequence hits
cur_hit = -1
for i in range(len(notes)):
if notes[i] == cur_hit:
notes[i] = ALT_HITS[notes[i]]
cur_hit = notes[i]
print "Notes: " + str(notes)
return notes
# Rhythm patterns
PATTERN_1 = ("ABABABAC", [("A", 8), ("B", 8), ("C", 8)])
PATTERN_2 = ("AABAABAABAAC", [("A", 4), ("B", 8), ("C", 8)])
PATTERN_3 = ("ABABABACC", [("A", 8), ("B", 8), ("C", 4)])
PATTERNS = [PATTERN_1, PATTERN_2, PATTERN_3]
# 16th slot hit probabilities
HIT_PROB_1 = [0.6, 0.4, 0.5, 0.4]*4
HIT_PROB_2 = [0.8, 0.3, 0.7, 0.3]*4
HIT_PROB_3 = [0.3, 0.8, 0.5, 0.6]*4
HIT_PROBS = [HIT_PROB_1, HIT_PROB_2, HIT_PROB_3]
# Possible hits
HITS = [48, 45, 42, 35]
ALT_HITS = {-1:-1, 48:50, 45:47, 42:-1, 35:36}
# Interaction configuration
NOTE_VELOCITY_MULT = 0.5
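# Illustrative note (editorial addition): choose_notes() picks one PATTERN and
# expands each letter into a 4- or 8-slot subpattern, so a hypothetical run of
# PATTERN_1 ("ABABABAC") yields 8 letters x 8 slots = 64 slots, each holding
# either a value from HITS or -1 for a rest.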
|
mit
| -1,726,495,276,093,130,500
| 27.46875
| 69
| 0.533773
| false
| 2.918269
| false
| false
| false
|
warner/magic-wormhole
|
src/wormhole/journal.py
|
1
|
1179
|
from __future__ import absolute_import, print_function, unicode_literals
import contextlib
from zope.interface import implementer
from ._interfaces import IJournal
@implementer(IJournal)
class Journal(object):
def __init__(self, save_checkpoint):
self._save_checkpoint = save_checkpoint
self._outbound_queue = []
self._processing = False
def queue_outbound(self, fn, *args, **kwargs):
assert self._processing
self._outbound_queue.append((fn, args, kwargs))
@contextlib.contextmanager
def process(self):
assert not self._processing
assert not self._outbound_queue
self._processing = True
yield # process inbound messages, change state, queue outbound
self._save_checkpoint()
for (fn, args, kwargs) in self._outbound_queue:
fn(*args, **kwargs)
self._outbound_queue[:] = []
self._processing = False
@implementer(IJournal)
class ImmediateJournal(object):
def __init__(self):
pass
def queue_outbound(self, fn, *args, **kwargs):
fn(*args, **kwargs)
@contextlib.contextmanager
def process(self):
yield
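# --- Illustrative usage (editorial sketch, not part of the original module) ---
# Outbound side effects queued during process() are only released after the
# checkpoint callback has run; save_state below is a hypothetical callback.
if __name__ == '__main__':
    def save_state():
        print("state checkpointed")

    def send(msg):
        print(msg)

    j = Journal(save_state)
    with j.process():
        j.queue_outbound(send, "sent after the checkpoint")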
|
mit
| -7,589,996,619,548,008,000
| 25.795455
| 72
| 0.64207
| false
| 4.122378
| false
| false
| false
|
jhogsett/linkit
|
python/simon5.py
|
1
|
5584
|
#!/usr/bin/python
import serial
import time
import random
import sys
s = None
num_leds = 93
play_time = 0.0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
# print cmd_text
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s, ticks, play_time
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
choose_colors()
command(":::pau")
command("rst:clr:pau")
if len(sys.argv) > 1:
command(sys.argv[1])
if len(sys.argv) > 2:
play_time = float(sys.argv[2])
command("6:zon:red:7:rep:grn:7:rep:org:7:rep:blu:7:rep")
command("5:zon:red:5:rep:grn:5:rep:org:5:rep:blu:5:rep")
command("4:zon:red:3:rep:grn:3:rep:org:3:rep:blu:3:rep")
command("3:zon:red:2:rep:grn:2:rep:org:2:rep:blu:2:rep")
command("2:zon:red:1:rep:grn:1:rep:org:1:rep:blu:1:rep")
command("1:zon:wht")
num_colors = 12
colors = [ "red", "orange", "yellow", "ltgreen", "green", "seafoam", "cyan", "ltblue", "blue", "purple", "magenta", "pink", "black", "white", "gray", "dkgray" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
chosen_colors = [0,1,2,3,4,5]
def random_color():
r = random.randrange(0, num_colors)
return colors[r]
def choose_colors():
global chosen_colors
for i in range(0, 6):
chosen_colors[i] = random_color()
def shift_colors():
global chosen_colors
for i in xrange(5, 0, -1):
chosen_colors[i] = chosen_colors[i-1]
def clear_colors():
for j in range(0,6):
chosen_colors[j] = "black"
def place_color(zone, color):
command(str(zone) + ":zone:" + color + ":blink" + str(zone) + ":flood")
def place_colors():
place_color(6, chosen_colors[0])
place_color(5, chosen_colors[1])
place_color(4, chosen_colors[2])
place_color(3, chosen_colors[3])
place_color(2, chosen_colors[4])
place_color(1, chosen_colors[5])
def display():
place_colors()
command("flush")
def do_zone(zone):
command(str(zone) + ":zone:rotate")
def do_zones():
for i in range(2, 7):
do_zone(i)
command("flush")
global idx, color, rotation_count, change_count
idx = -1
color = 'pink'
rotation_count = 0
past_colors = ['', '', '', '', '', '', 'red', 'green', 'orange', 'blue']
change_count = 0
def loop():
global idx, rotation_count, color, change_count
do_flush = False
idx = idx + 1
if (idx % 3 == 0):
command("6:zon:" + color)
do_flush = True
if (idx % 4 == 0):
command("5:zon:" + color)
do_flush = True
if (idx % 6 == 0):
command("4:zon:" + color)
do_flush = True
if (idx % 8 == 0):
command("3:zon:" + color)
do_flush = True
if (idx % 12 == 0):
command("2:zon:" + color)
do_flush = True
if do_flush == True:
command("flu")
rotation_count += 1
if(rotation_count == 24):
change_count = (change_count + 1) % 2
if(change_count == 0):
color = "black"
else:
color = random_color()
while(color in past_colors):
color = random_color()
past_colors.pop(0)
past_colors.append(color)
rotation_count = 0
time.sleep(play_time)
if __name__ == '__main__':
setup()
while True:
loop()
|
mit
| 4,013,220,484,216,460
| 37.777778
| 179
| 0.344914
| false
| 4.452951
| false
| false
| false
|
emccode/HeliosBurn
|
heliosburn/proxy/modules/server_overload.py
|
1
|
4841
|
import datetime
import random
from injectors import ExponentialInjector
from injectors import PlateauInjector
from module import AbstractModule
from twisted.python import log
from module_decorators import SkipHandler
from models import SOProfileModel
# ultimately pull from settings file
injector_map = {
"exponential": ExponentialInjector,
"plateau": PlateauInjector
}
class ResponseTrigger(object):
def __init__(self, min_load, max_load, probability):
self.min_load = min_load
self.max_load = max_load
self.probability = probability
self.metrics = {}
self.delay = 0
def match(self, load):
matched = False
if load >= self.min_load and load <= self.max_load:
if random.random() <= self.probability/100:
matched = True
if matched:
self.metrics[self.__class__.__name__] += 1
return matched
def get_response(self):
pass
class SimulatedResponseTrigger(ResponseTrigger):
def __init__(self, min_load, max_load, probability, response):
ResponseTrigger.__init__(self, min_load, max_load, probability)
self.response = response
def get_response(self):
return self.response
class DelayedResponseTrigger(ResponseTrigger):
def __init__(self, min_load, max_load, probability, delay):
ResponseTrigger.__init__(self, min_load, max_load, probability)
self.delay = delay
def get_response(self):
return None
class ServerOverload(AbstractModule):
triggers = []
injectors = []
response = None
def __init__(self):
AbstractModule.__init__(self)
self.redis_host = '127.0.0.1'
self.redis_port = 6379
self.redis_db = 0
self.mongo_host = 'heliosburn.traffic'
self.mongo_port = '127.0.0.1'
self.mongo_db = 'heliosburn'
self.stats['ServerOverload'] = []
self.response_code = None
def configure(self, **configs):
pass
@SkipHandler
def handle_request(self, request):
for injector in self.injectors:
load = injector.execute()
log.msg("Load:" + str(load))
self.stats['ServerOverload'].append(injector.metrics)
log.msg("about to trigger:")
for trigger in self.triggers:
log.msg("checking triggers:")
if trigger.match(load):
self.stats['ServerOverload'].append(trigger.metrics)
self.response_code = trigger.get_response()
request.delay += trigger.delay
log.msg("ServerOverload request: " + str(request))
return request
@SkipHandler
def handle_response(self, response):
if self.response_code:
response.code = self.response_code
return response
def _set_triggers(self):
for trigger in self.profile['response_triggers']:
min_load = trigger['fromLoad']
max_load = trigger['toLoad']
for action in trigger['actions']:
if action['type'] == "response":
response = action['value']
prob = action['percentage']
sr_trigger = SimulatedResponseTrigger(min_load,
max_load,
prob,
response
)
self.triggers.append(sr_trigger)
if action['type'] == "delay":
response = action['value']
prob = action['percentage']
d_trigger = DelayedResponseTrigger(min_load,
max_load,
prob,
response
)
self.triggers.append(d_trigger)
def start(self, **params):
self.session_id = params['session_id']
self.profile_id = params['profile_id']
self.profile = SOProfileModel({"_id": self.profile_id})
self.state = "running"
self.status = str(datetime.datetime.now())
self._set_triggers()
injector_type = self.profile['function']['type']
self.injectors.append(injector_map[injector_type](self.profile))
log.msg("Server Overload module started at: " + self.status)
def stop(self, **params):
self.state = "stopped"
self.profile = None
self.status = str(datetime.datetime.now())
self.injectors = []
log.msg("Server Overload module stopped at: " + self.status)
so = ServerOverload()
|
mit
| -6,316,699,127,163,408,000
| 31.059603
| 72
| 0.543689
| false
| 4.528531
| false
| false
| false
|
robofab-developers/fontParts
|
Lib/fontParts/test/test_bPoint.py
|
1
|
33013
|
import unittest
import collections
from fontTools.misc.py23 import basestring
from fontParts.base import FontPartsError
class TestBPoint(unittest.TestCase):
def getBPoint_corner(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
return bPoint
def getBPoint_corner_with_bcpOut(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((133, 212), "offcurve")
contour.appendPoint((0, 0), "offcurve")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
return bPoint
def getBPoint_corner_with_bcpIn(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((0, 0), "offcurve")
contour.appendPoint((61, 190), "offcurve")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
return bPoint
def getContour(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((19, 121), "offcurve")
contour.appendPoint((61, 190), "offcurve")
contour.appendPoint((101, 202), "curve", smooth=True)
contour.appendPoint((133, 212), "offcurve")
contour.appendPoint((155, 147), "offcurve")
contour.appendPoint((255, 147), "curve")
return contour
def getBPoint_curve(self):
contour = self.getContour()
bPoint = contour.bPoints[1]
return bPoint
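    # For the contour built in getContour() above, bPoints[1] has anchor
    # (101, 202), bcpIn (-40, -12) and bcpOut (32, 10) relative to the anchor
    # (these values are exercised by the attribute tests below).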
def getBPoint_curve_firstPointOpenContour(self):
contour = self.getContour()
bPoint = contour.bPoints[0]
return bPoint
def getBPoint_curve_lastPointOpenContour(self):
contour = self.getContour()
bPoint = contour.bPoints[-1]
return bPoint
def getBPoint_withName(self):
bPoint = self.getBPoint_corner()
bPoint.name = "BP"
return bPoint
# ----
# repr
# ----
def test_reprContents(self):
bPoint = self.getBPoint_corner()
value = bPoint._reprContents()
self.assertIsInstance(value, list)
for i in value:
self.assertIsInstance(i, basestring)
def test_reprContents_noContour(self):
point, _ = self.objectGenerator("point")
value = point._reprContents()
self.assertIsInstance(value, list)
for i in value:
self.assertIsInstance(i, basestring)
# -------
# Parents
# -------
def test_get_parent_font(self):
font, _ = self.objectGenerator("font")
layer = font.newLayer("L")
glyph = layer.newGlyph("X")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint.font)
self.assertEqual(
bPoint.font,
font
)
def test_get_parent_noFont(self):
layer, _ = self.objectGenerator("layer")
glyph = layer.newGlyph("X")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNone(bPoint.font)
def test_get_parent_layer(self):
layer, _ = self.objectGenerator("layer")
glyph = layer.newGlyph("X")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint.layer)
self.assertEqual(
bPoint.layer,
layer
)
def test_get_parent_noLayer(self):
glyph, _ = self.objectGenerator("glyph")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNone(bPoint.font)
self.assertIsNone(bPoint.layer)
def test_get_parent_glyph(self):
glyph, _ = self.objectGenerator("glyph")
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
glyph.appendContour(contour)
contour = glyph.contours[0]
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint.glyph)
self.assertEqual(
bPoint.glyph,
glyph
)
def test_get_parent_noGlyph(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
self.assertIsNone(bPoint.glyph)
def test_get_parent_contour(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint.contour)
self.assertEqual(
bPoint.contour,
contour
)
def test_get_parent_noContour(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint.contour)
def test_get_parent_segment(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
self.assertIsNotNone(bPoint._segment)
# def test_get_parent_noSegment(self):
# bPoint, _ = self.objectGenerator("bPoint")
# self.assertIsNone(bPoint._segment)
def test_get_parent_nextSegment(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[2]
self.assertIsNotNone(bPoint._nextSegment)
def test_get_parent_noNextSegment(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint._nextSegment)
# get segment/nosegment
def test_set_parent_contour(self):
contour, _ = self.objectGenerator("contour")
bPoint, _ = self.objectGenerator("bPoint")
bPoint.contour = contour
self.assertIsNotNone(bPoint.contour)
self.assertEqual(
bPoint.contour,
contour
)
def test_set_already_set_parent_contour(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((101, 202), "line")
contour.appendPoint((303, 0), "line")
bPoint = contour.bPoints[1]
contourOther, _ = self.objectGenerator("contour")
with self.assertRaises(AssertionError):
bPoint.contour = contourOther
def test_set_parent_contour_none(self):
bPoint, _ = self.objectGenerator("bPoint")
bPoint.contour = None
self.assertIsNone(bPoint.contour)
def test_get_parent_glyph_noContour(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint.glyph)
def test_get_parent_layer_noContour(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint.layer)
def test_get_parent_font_noContour(self):
bPoint, _ = self.objectGenerator("bPoint")
self.assertIsNone(bPoint.font)
# ----
# Attributes
# ----
# type
def test_get_type_corner(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.type,
"corner"
)
def test_get_type_curve(self):
bPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.type,
"curve"
)
def test_set_type_corner(self):
bPoint = self.getBPoint_curve()
bPoint.type = "corner"
self.assertEqual(
bPoint.type,
"corner"
)
def test_set_type_curve(self):
bPoint = self.getBPoint_corner()
bPoint.type = "curve"
self.assertEqual(
bPoint.type,
"curve"
)
def test_type_not_equal(self):
bPoint = self.getBPoint_corner()
self.assertNotEqual(
bPoint.type,
"curve"
)
def test_set_bcpOutIn_type_change(self):
bPoint = self.getBPoint_curve()
bPoint.bcpOut = (0, 0)
bPoint.bcpIn = (0, 0)
self.assertEqual(
bPoint.type,
"corner"
)
def test_set_bcpInOut_type_change(self):
bPoint = self.getBPoint_curve()
bPoint.bcpIn = (0, 0)
bPoint.bcpOut = (0, 0)
self.assertEqual(
bPoint.type,
"corner"
)
# anchor
def test_get_anchor(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.anchor,
(101, 202)
)
def test_set_anchor_valid_tuple(self):
bPoint = self.getBPoint_corner()
bPoint.anchor = (51, 45)
self.assertEqual(
bPoint.anchor,
(51, 45)
)
def test_set_anchor_valid_list(self):
bPoint = self.getBPoint_corner()
bPoint.anchor = [51, 45]
self.assertEqual(
bPoint.anchor,
(51, 45)
)
def test_set_anchor_invalid_too_many_items(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.anchor = (51, 45, 67)
def test_set_anchor_invalid_single_item_list(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.anchor = [51]
def test_set_anchor_invalid_single_item_tuple(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.anchor = (51,)
def test_set_anchor_invalidType_int(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.anchor = 51
def test_set_anchor_invalidType_None(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.anchor = None
# bcp in
def test_get_bcpIn_corner(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.bcpIn,
(0, 0)
)
def test_get_bcpIn_curve(self):
bPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.bcpIn,
(-40, -12)
)
def test_set_bcpIn_corner_valid_tuple(self):
bPoint = self.getBPoint_corner()
bPoint.bcpIn = (51, 45)
self.assertEqual(
bPoint.bcpIn,
(51, 45)
)
def test_set_bcpIn_corner_with_bcpOut(self):
bPoint = self.getBPoint_corner_with_bcpOut()
bPoint.bcpIn = (51, 45)
self.assertEqual(
bPoint.bcpIn,
(51, 45)
)
def test_set_bcpIn_curve_valid_tuple(self):
bPoint = self.getBPoint_curve()
bPoint.bcpIn = (51, 45)
self.assertEqual(
bPoint.bcpIn,
(51, 45)
)
def test_set_bcpIn_curve_firstPointOpenContour(self):
bPoint = self.getBPoint_curve_firstPointOpenContour()
with self.assertRaises(FontPartsError):
bPoint.bcpIn = (10, 20)
def test_set_bcpIn_valid_list(self):
bPoint = self.getBPoint_corner()
bPoint.bcpIn = [51, 45]
self.assertEqual(
bPoint.bcpIn,
(51, 45)
)
def test_set_bcpIn_invalid_too_many_items(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.bcpIn = [51, 45, 67]
def test_set_bcpIn_invalid_single_item_list(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.bcpIn = [51]
def test_set_bcpIn_invalid_single_item_tuple(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpIn = (51)
def test_set_bcpIn_invalidType_int(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpIn = 51
def test_set_bcpIn_invalidType_None(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpIn = None
# bcp out
def test_get_bcpOut_corner(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.bcpOut,
(0, 0)
)
def test_get_bcpOut_curve(self):
bPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.bcpOut,
(32, 10)
)
def test_set_bcpOut_corner_valid_tuple(self):
bPoint = self.getBPoint_corner()
bPoint.bcpOut = (51, 45)
self.assertEqual(
bPoint.bcpOut,
(51, 45)
)
def test_set_bcpOut_corner_with_bcpIn(self):
bPoint = self.getBPoint_corner_with_bcpIn()
bPoint.bcpOut = (51, 45)
self.assertEqual(
bPoint.bcpOut,
(51, 45)
)
def test_set_bcpOut_curve_valid_tuple(self):
bPoint = self.getBPoint_curve()
bPoint.bcpOut = (51, 45)
self.assertEqual(
bPoint.bcpOut,
(51, 45)
)
def test_set_bcpOut_valid_list(self):
bPoint = self.getBPoint_curve()
bPoint.bcpOut = [51, 45]
self.assertEqual(
bPoint.bcpOut,
(51, 45)
)
def test_set_bcpOut_curve_lastPointOpenContour(self):
bPoint = self.getBPoint_curve_lastPointOpenContour()
with self.assertRaises(FontPartsError):
bPoint.bcpOut = (10, 20)
def test_set_bcpOut_invalid_too_many_items(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.bcpOut = [51, 45, 67]
def test_set_bcpOut_invalid_single_item_list(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(ValueError):
bPoint.bcpOut = [51]
def test_set_bcpOut_invalid_single_item_tuple(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpOut = (51)
def test_set_bcpOut_invalidType_int(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpOut = 51
def test_set_bcpOut_invalidType_None(self):
bPoint = self.getBPoint_corner()
with self.assertRaises(TypeError):
bPoint.bcpOut = None
# --------------
# Identification
# --------------
# index
def getBPoint_noParentContour(self):
bPoint, _ = self.objectGenerator("bPoint")
bPoint.anchor = (101, 202)
bPoint.bcpIn = (-40, 0)
bPoint.bcpOut = (50, 0)
bPoint.type = "curve"
return bPoint
def test_get_index(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
bPoint.index,
1
)
# def test_get_index_noParentContour(self):
# bPoint = self.getBPoint_noParentContour()
# self.assertEqual(
# bPoint.index,
# None
# )
def test_set_index(self):
point = self.getBPoint_corner()
with self.assertRaises(FontPartsError):
point.index = 0
# identifier
def test_identifier_get_none(self):
bPoint = self.getBPoint_corner()
self.assertIsNone(bPoint.identifier)
def test_identifier_generated_type(self):
bPoint = self.getBPoint_corner()
bPoint.generateIdentifier()
self.assertIsInstance(bPoint.identifier, basestring)
def test_identifier_consistency(self):
bPoint = self.getBPoint_corner()
bPoint.generateIdentifier()
# get: twice to test consistency
self.assertEqual(bPoint.identifier, bPoint.identifier)
def test_identifier_cannot_set(self):
# identifier is a read-only property
bPoint = self.getBPoint_corner()
with self.assertRaises(FontPartsError):
bPoint.identifier = "ABC"
# def test_getIdentifer_no_contour(self):
# bPoint, _ = self.objectGenerator("bPoint")
# with self.assertRaises(FontPartsError):
# bPoint.getIdentifier()
def test_getIdentifer_consistency(self):
bPoint = self.getBPoint_corner()
bPoint.getIdentifier()
self.assertEqual(bPoint.identifier, bPoint.getIdentifier())
# ----
# Hash
# ----
def test_hash(self):
bPoint = self.getBPoint_corner()
self.assertEqual(
isinstance(bPoint, collections.Hashable),
False
)
# --------
# Equality
# --------
def test_object_equal_self(self):
bPoint_one = self.getBPoint_corner()
self.assertEqual(
bPoint_one,
bPoint_one
)
def test_object_not_equal_other(self):
bPoint_one = self.getBPoint_corner()
bPoint_two = self.getBPoint_corner()
self.assertNotEqual(
bPoint_one,
bPoint_two
)
def test_object_equal_self_variable_assignment(self):
bPoint_one = self.getBPoint_corner()
a = bPoint_one
a.anchor = (51, 45)
self.assertEqual(
bPoint_one,
a
)
def test_object_not_equal_other_variable_assignment(self):
bPoint_one = self.getBPoint_corner()
bPoint_two = self.getBPoint_corner()
a = bPoint_one
self.assertNotEqual(
bPoint_two,
a
)
# ---------
# Selection
# ---------
def test_selected_true(self):
bPoint = self.getBPoint_corner()
try:
bPoint.selected = False
except NotImplementedError:
return
bPoint.selected = True
self.assertEqual(
bPoint.selected,
True
)
def test_selected_false(self):
bPoint = self.getBPoint_corner()
try:
bPoint.selected = False
except NotImplementedError:
return
bPoint.selected = False
self.assertEqual(
bPoint.selected,
False
)
# ----
# Copy
# ----
    def test_copy_separate_objects(self):
bPoint = self.getBPoint_corner()
copied = bPoint.copy()
self.assertIsNot(
bPoint,
copied
)
def test_copy_different_contour(self):
bPoint = self.getBPoint_corner()
copied = bPoint.copy()
self.assertIsNot(
bPoint.contour,
copied.contour
)
def test_copy_none_contour(self):
bPoint = self.getBPoint_corner()
copied = bPoint.copy()
self.assertEqual(
copied.contour,
None
)
# def test_copy_same_type(self):
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.type,
# copied.type
# )
# def test_copy_same_anchor(self):
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.anchor,
# copied.anchor
# )
# def test_copy_same_bcpIn(self):
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.bcpIn,
# copied.bcpIn
# )
# def test_copy_same_bcpOut(self):
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.bcpOut,
# copied.bcpOut
# )
# def test_copy_same_identifier_None(self):
# bPoint = self.getBPoint_corner()
# bPoint.identifer = None
# copied = bPoint.copy()
# self.assertEqual(
# bPoint.identifier,
# copied.identifier,
# )
# def test_copy_different_identifier(self):
# bPoint = self.getBPoint_corner()
# bPoint.generateIdentifier()
# copied = bPoint.copy()
# self.assertNotEqual(
# bPoint.identifier,
# copied.identifier,
# )
# def test_copy_generated_identifier_different(self):
# otherContour, _ = self.objectGenerator("contour")
# bPoint = self.getBPoint_corner()
# copied = bPoint.copy()
# copied.contour = otherContour
# bPoint.generateIdentifier()
# copied.generateIdentifier()
# self.assertNotEqual(
# bPoint.identifier,
# copied.identifier
# )
# def test_copyData_type(self):
# bPoint = self.getBPoint_corner()
# bPointOther, _ = self.objectGenerator("bPoint")
# bPointOther.copyData(bPoint)
# self.assertEqual(
# bPoint.type,
# bPointOther.type,
# )
# def test_copyData_anchor(self):
# bPoint = self.getBPoint_corner()
# bPointOther, _ = self.objectGenerator("bPoint")
# bPointOther.copyData(bPoint)
# self.assertEqual(
# bPoint.anchor,
# bPointOther.anchor,
# )
# def test_copyData_bcpIn(self):
# bPoint = self.getBPoint_corner()
# bPointOther, _ = self.objectGenerator("bPoint")
# bPointOther.copyData(bPoint)
# self.assertEqual(
# bPoint.bcpIn,
# bPointOther.bcpIn,
# )
# def test_copyData_bcpOut(self):
# bPoint = self.getBPoint_corner()
# bPointOther, _ = self.objectGenerator("bPoint")
# bPointOther.copyData(bPoint)
# self.assertEqual(
# bPoint.bcpOut,
# bPointOther.bcpOut,
# )
# --------------
# Transformation
# --------------
# transformBy
def test_transformBy_valid_no_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 3, -3, 2))
self.assertEqual(
bPoint.anchor,
(199.0, 608.0)
)
def test_transformBy_valid_no_origin_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 3, -3, 2))
self.assertEqual(
bPoint.bcpIn,
(-80.0, -36.0)
)
def test_transformBy_valid_no_origin_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 3, -3, 2))
self.assertEqual(
bPoint.bcpOut,
(64.0, 30.0)
)
def test_transformBy_valid_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
self.assertEqual(
bPoint.anchor,
(201.0, 402.0)
)
def test_transformBy_valid_origin_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
self.assertEqual(
bPoint.bcpIn,
(-80.0, -24.0)
)
def test_transformBy_valid_origin_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
self.assertEqual(
bPoint.bcpOut,
(64.0, 20.0)
)
def test_transformBy_invalid_one_string_value(self):
point = self.getBPoint_curve()
with self.assertRaises(TypeError):
point.transformBy((1, 0, 0, 1, 0, "0"))
def test_transformBy_invalid_all_string_values(self):
point = self.getBPoint_curve()
with self.assertRaises(TypeError):
point.transformBy("1, 0, 0, 1, 0, 0")
def test_transformBy_invalid_int_value(self):
point = self.getBPoint_curve()
with self.assertRaises(TypeError):
point.transformBy(123)
# moveBy
def test_moveBy_valid_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.moveBy((-1, 2))
self.assertEqual(
bPoint.anchor,
(100.0, 204.0)
)
def test_moveBy_noChange_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.moveBy((-1, 2))
otherBPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.bcpIn,
otherBPoint.bcpIn
)
def test_moveBy_noChange_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.moveBy((-1, 2))
otherBPoint = self.getBPoint_curve()
self.assertEqual(
bPoint.bcpOut,
otherBPoint.bcpOut
)
def test_moveBy_invalid_one_string_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.moveBy((-1, "2"))
def test_moveBy_invalid_all_strings_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.moveBy("-1, 2")
def test_moveBy_invalid_int_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.moveBy(1)
# scaleBy
def test_scaleBy_valid_one_value_no_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2))
self.assertEqual(
bPoint.anchor,
(-202.0, -404.0)
)
def test_scaleBy_valid_two_values_no_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2, 3))
self.assertEqual(
bPoint.anchor,
(-202.0, 606.0)
)
def test_scaleBy_valid_two_values_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2, 3), origin=(1, 2))
self.assertEqual(
bPoint.anchor,
(-199.0, 602.0)
)
def test_scaleBy_valid_two_values_origin_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2, 3), origin=(1, 2))
self.assertEqual(
bPoint.bcpIn,
(80.0, -36.0)
)
def test_scaleBy_valid_two_values_origin_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.scaleBy((-2, 3), origin=(1, 2))
self.assertEqual(
bPoint.bcpOut,
(-64.0, 30.0)
)
def test_invalid_one_string_value_scaleBy(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.scaleBy((-1, "2"))
def test_invalid_two_string_values_scaleBy(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.scaleBy("-1, 2")
def test_invalid_tuple_too_many_values_scaleBy(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.scaleBy((-1, 2, -3))
# rotateBy
def test_rotateBy_valid_no_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.rotateBy(45)
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-71.418, 214.253]
)
def test_rotateBy_valid_origin_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.rotateBy(45, origin=(1, 2))
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-69.711, 214.132]
)
def test_rotateBy_valid_origin_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.rotateBy(45, origin=(1, 2))
self.assertEqual(
[(round(bPoint.bcpIn[0], 3)), (round(bPoint.bcpIn[1], 3))],
[-19.799, -36.77]
)
def test_rotateBy_valid_origin_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.rotateBy(45, origin=(1, 2))
self.assertEqual(
[(round(bPoint.bcpOut[0], 3)), (round(bPoint.bcpOut[1], 3))],
[15.556, 29.698]
)
def test_rotateBy_invalid_string_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.rotateBy("45")
def test_rotateBy_invalid_too_large_value_positive(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.rotateBy(361)
def test_rotateBy_invalid_too_large_value_negative(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.rotateBy(-361)
# skewBy
def test_skewBy_valid_no_origin_one_value_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy(100)
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-1044.599, 202.0]
)
def test_skewBy_valid_no_origin_two_values_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy((100, 200))
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-1044.599, 238.761]
)
def test_skewBy_valid_origin_one_value_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy(100, origin=(1, 2))
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-1033.256, 202.0]
)
def test_skewBy_valid_origin_two_values_anchor(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy((100, 200), origin=(1, 2))
self.assertEqual(
[(round(bPoint.anchor[0], 3)), (round(bPoint.anchor[1], 3))],
[-1033.256, 238.397]
)
def test_skewBy_valid_origin_two_values_bcpIn(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy((100, 200), origin=(1, 2))
self.assertEqual(
[(round(bPoint.bcpIn[0], 3)), (round(bPoint.bcpIn[1], 3))],
[28.055, -26.559]
)
def test_skewBy_valid_origin_two_values_bcpOut(self):
bPoint = self.getBPoint_curve()
bPoint.skewBy((100, 200), origin=(1, 2))
self.assertEqual(
[(round(bPoint.bcpOut[0], 3)), (round(bPoint.bcpOut[1], 3))],
[-24.713, 21.647]
)
def test_skewBy_invalid_string_value(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(TypeError):
bPoint.skewBy("45")
def test_skewBy_invalid_too_large_value_positive(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.skewBy(361)
def test_skewBy_invalid_too_large_value_negative(self):
bPoint = self.getBPoint_curve()
with self.assertRaises(ValueError):
bPoint.skewBy(-361)
# -------------
# Normalization
# -------------
# round
def getBPoint_curve_float(self):
contour, _ = self.objectGenerator("contour")
contour.appendPoint((0, 0), "move")
contour.appendPoint((19.231, 121.291), "offcurve")
contour.appendPoint((61.193, 190.942), "offcurve")
contour.appendPoint((101.529, 202.249), "curve", smooth=True)
contour.appendPoint((133.948, 212.193), "offcurve")
contour.appendPoint((155.491, 147.314), "offcurve")
contour.appendPoint((255.295, 147.314), "curve")
bPoint = contour.bPoints[1]
return bPoint
def test_round_anchor(self):
bPoint = self.getBPoint_curve_float()
bPoint.round()
self.assertEqual(
bPoint.anchor,
(102.0, 202.0)
)
def test_round_bcpIn(self):
bPoint = self.getBPoint_curve_float()
bPoint.round()
self.assertEqual(
bPoint.bcpIn,
(-40.0, -11.0)
)
def test_round_bcpOut(self):
bPoint = self.getBPoint_curve_float()
bPoint.round()
self.assertEqual(
bPoint.bcpOut,
(32.0, 10.0)
)
|
mit
| -2,513,112,687,653,909,500
| 28.822042
| 73
| 0.565111
| false
| 3.549022
| true
| false
| false
|
ayepezv/GAD_ERP
|
addons/account/wizard/account_invoice_refund.py
|
1
|
6614
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.tools.safe_eval import safe_eval
from odoo.exceptions import UserError
class AccountInvoiceRefund(models.TransientModel):
"""Refunds invoice"""
_name = "account.invoice.refund"
_description = "Invoice Refund"
@api.model
def _get_reason(self):
context = dict(self._context or {})
active_id = context.get('active_id', False)
if active_id:
inv = self.env['account.invoice'].browse(active_id)
return inv.name
return ''
date_invoice = fields.Date(string='Refund Date', default=fields.Date.context_today, required=True)
date = fields.Date(string='Accounting Date')
description = fields.Char(string='Reason', required=True, default=_get_reason)
refund_only = fields.Boolean(string='Technical field to hide filter_refund in case invoice is partially paid', compute='_get_refund_only')
filter_refund = fields.Selection([('refund', 'Create a draft refund'), ('cancel', 'Cancel: create refund and reconcile'), ('modify', 'Modify: create refund, reconcile and create a new draft invoice')],
default='refund', string='Refund Method', required=True, help='Refund base on this type. You can not Modify and Cancel if the invoice is already reconciled')
@api.depends('date_invoice')
@api.one
def _get_refund_only(self):
invoice_id = self.env['account.invoice'].browse(self._context.get('active_id',False))
if len(invoice_id.payment_move_line_ids) != 0 and invoice_id.state != 'paid':
self.refund_only = True
else:
self.refund_only = False
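    # filter_refund modes (as described in the selection help above):
    #   'refund' - create a draft refund only
    #   'cancel' - create the refund and reconcile it against the original invoice
    #   'modify' - as 'cancel', then recreate a new draft copy of the invoice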
@api.multi
def compute_refund(self, mode='refund'):
inv_obj = self.env['account.invoice']
inv_tax_obj = self.env['account.invoice.tax']
inv_line_obj = self.env['account.invoice.line']
context = dict(self._context or {})
xml_id = False
for form in self:
created_inv = []
date = False
description = False
for inv in inv_obj.browse(context.get('active_ids')):
if inv.state in ['draft', 'proforma2', 'cancel']:
raise UserError(_('Cannot refund draft/proforma/cancelled invoice.'))
if inv.reconciled and mode in ('cancel', 'modify'):
raise UserError(_('Cannot refund invoice which is already reconciled, invoice should be unreconciled first. You can only refund this invoice.'))
date = form.date or False
description = form.description or inv.name
refund = inv.refund(form.date_invoice, date, description, inv.journal_id.id)
refund.compute_taxes()
created_inv.append(refund.id)
if mode in ('cancel', 'modify'):
movelines = inv.move_id.line_ids
to_reconcile_ids = {}
to_reconcile_lines = self.env['account.move.line']
for line in movelines:
if line.account_id.id == inv.account_id.id:
to_reconcile_lines += line
to_reconcile_ids.setdefault(line.account_id.id, []).append(line.id)
if line.reconciled:
line.remove_move_reconcile()
refund.signal_workflow('invoice_open')
for tmpline in refund.move_id.line_ids:
if tmpline.account_id.id == inv.account_id.id:
to_reconcile_lines += tmpline
to_reconcile_lines.reconcile()
if mode == 'modify':
invoice = inv.read(
['name', 'type', 'number', 'reference',
'comment', 'date_due', 'partner_id',
'partner_insite', 'partner_contact',
'partner_ref', 'payment_term_id', 'account_id',
'currency_id', 'invoice_line_ids', 'tax_line_ids',
'journal_id', 'date'])
invoice = invoice[0]
del invoice['id']
invoice_lines = inv_line_obj.browse(invoice['invoice_line_ids'])
invoice_lines = inv_obj.with_context(mode='modify')._refund_cleanup_lines(invoice_lines)
tax_lines = inv_tax_obj.browse(invoice['tax_line_ids'])
tax_lines = inv_obj._refund_cleanup_lines(tax_lines)
invoice.update({
'type': inv.type,
'date_invoice': form.date_invoice,
'state': 'draft',
'number': False,
'invoice_line_ids': invoice_lines,
'tax_line_ids': tax_lines,
'date': date,
'name': description,
'origin': inv.origin,
'fiscal_position': inv.fiscal_position.id,
})
for field in ('partner_id', 'account_id', 'currency_id',
'payment_term_id', 'journal_id'):
invoice[field] = invoice[field] and invoice[field][0]
inv_refund = inv_obj.create(invoice)
if inv_refund.payment_term_id.id:
inv_refund._onchange_payment_term_date_invoice()
created_inv.append(inv_refund.id)
xml_id = (inv.type in ['out_refund', 'out_invoice']) and 'action_invoice_tree1' or \
(inv.type in ['in_refund', 'in_invoice']) and 'action_invoice_tree2'
# Put the reason in the chatter
subject = _("Invoice refund")
body = description
refund.message_post(body=body, subject=subject)
if xml_id:
result = self.env.ref('account.%s' % (xml_id)).read()[0]
invoice_domain = safe_eval(result['domain'])
invoice_domain.append(('id', 'in', created_inv))
result['domain'] = invoice_domain
return result
return True
@api.multi
def invoice_refund(self):
data_refund = self.read(['filter_refund'])[0]['filter_refund']
return self.compute_refund(data_refund)
|
gpl-3.0
| -2,647,333,404,067,818,500
| 50.271318
| 205
| 0.518597
| false
| 4.456873
| false
| false
| false
|
akx/shoop
|
shoop/core/fields/tagged_json.py
|
1
|
3243
|
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
"""
"Tagged JSON" encoder/decoder.
Objects that are normally not unambiguously representable via JSON
are encoded into special objects of the form `{tag: val}`; the encoding
and decoding process can be customized however necessary.
"""
from __future__ import unicode_literals
import datetime
import decimal
from enum import Enum
import django.utils.dateparse as dateparse
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from jsonfield.encoder import JSONEncoder
from six import text_type
from shoop.utils.importing import load
from shoop.utils.iterables import first
def isoformat(obj):
return obj.isoformat()
def encode_enum(enum_val):
enum_cls = enum_val.__class__
spec = "%s:%s" % (enum_cls.__module__, enum_cls.__name__)
try:
if load(spec) != enum_cls:
raise ImproperlyConfigured("That's not the same class!")
except ImproperlyConfigured: # Also raised by `load`
return enum_val.value # Fall back to the bare value.
return [spec, enum_val.value]
def decode_enum(val):
spec, value = val
cls = load(spec)
if issubclass(cls, Enum):
return cls(value)
return value # Fall back to the bare value. Not optimal, I know.
class TagRegistry(object):
def __init__(self):
self.tags = {}
def register(self, tag, classes, encoder=text_type, decoder=None):
if decoder is None:
if isinstance(classes, (list, tuple)):
decoder = classes[0]
else:
decoder = classes
if not callable(decoder):
raise ValueError("Decoder %r for tag %r is not callable" % (decoder, tag))
if not callable(encoder):
raise ValueError("Encoder %r for tag %r is not callable" % (encoder, tag))
self.tags[tag] = {
"classes": classes,
"encoder": encoder,
"decoder": decoder
}
def encode(self, obj, default):
for tag, info in six.iteritems(self.tags):
if isinstance(obj, info["classes"]):
return {tag: info["encoder"](obj)}
return default(obj)
def decode(self, obj):
if len(obj) == 1:
tag, val = first(obj.items())
info = self.tags.get(tag)
if info:
return info["decoder"](val)
return obj
#: The default tag registry.
tag_registry = TagRegistry()
tag_registry.register("$datetime", datetime.datetime, encoder=isoformat, decoder=dateparse.parse_datetime)
tag_registry.register("$date", datetime.date, encoder=isoformat, decoder=dateparse.parse_date)
tag_registry.register("$time", datetime.time, encoder=isoformat, decoder=dateparse.parse_time)
tag_registry.register("$dec", decimal.Decimal)
tag_registry.register("$enum", Enum, encoder=encode_enum, decoder=decode_enum)
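# Illustrative sketch (not part of the original module; values are assumptions):
# with the default registry above,
#
#   tag_registry.encode(decimal.Decimal("1.50"), default=str)
#   # -> {"$dec": "1.50"}
#   tag_registry.decode({"$datetime": "2016-01-01T12:00:00"})
#   # -> datetime.datetime(2016, 1, 1, 12, 0)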
class TaggedJSONEncoder(JSONEncoder):
registry = tag_registry
def default(self, obj):
return self.registry.encode(obj, super(JSONEncoder, self).default)
|
agpl-3.0
| -1,473,840,330,059,314,200
| 30.485437
| 106
| 0.659574
| false
| 3.959707
| false
| false
| false
|
alsgregory/quasi_geostrophic_model
|
demos/demo_two_level_variance_reduction.py
|
1
|
2299
|
""" sample variance decay of two level QG system """
from __future__ import division
from __future__ import absolute_import
from firedrake import *
from quasi_geostrophic_model import *
import numpy as np
import matplotlib.pyplot as plot
# define mesh hierarchy
mesh = UnitSquareMesh(5, 5)
L = 4
mesh_hierarchy = MeshHierarchy(mesh, L)
# define sample size
n = 10
# define variance
variance = 0.125
# define initial condition function
def ic(mesh, xp):
x = SpatialCoordinate(mesh)
ufl_expression = (exp(-(pow(x[0] - 0.5 + xp, 2) / (2 * pow(0.25, 2)) +
pow(x[1] - 0.7, 2) / (2 * pow(0.1, 2)))) -
exp(-(pow(x[0] - 0.5 + xp, 2) / (2 * pow(0.25, 2)) +
pow(x[1] - 0.3, 2) / (2 * pow(0.1, 2)))))
return ufl_expression
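# The initial condition above is the difference of two Gaussian bumps centred
# at y = 0.7 and y = 0.3 (a dipole), optionally shifted in x by xp.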
sample_variances_difference = np.zeros(L)
finest_fs = FunctionSpace(mesh_hierarchy[-1], 'CG', 1)
for l in range(L):
print 'level: ', l
meshc = mesh_hierarchy[l]
meshf = mesh_hierarchy[l + 1]
# define fs
dg_fs_c = FunctionSpace(meshc, 'DG', 1)
cg_fs_c = FunctionSpace(meshc, 'CG', 1)
dg_fs_f = FunctionSpace(meshf, 'DG', 1)
cg_fs_f = FunctionSpace(meshf, 'CG', 1)
m = Function(finest_fs)
sq = Function(finest_fs)
for j in range(n):
print 'sample: ', j
# set-up system
QG = two_level_quasi_geostrophic(dg_fs_c, cg_fs_c, dg_fs_f, cg_fs_f, variance)
# fixed ic
xp = 0
QG.initial_condition(ic(meshc, xp), ic(meshf, xp))
# time-step
QG.timestepper(3.0)
# prolong coarse and fine
comp_c = Function(finest_fs)
comp_f = Function(finest_fs)
prolong(QG.psi_[0], comp_c)
if l < L - 1:
prolong(QG.psi_[1], comp_f)
else:
comp_f.assign(QG.psi_[1])
m += assemble((comp_f - comp_c) * (1.0 / n))
sq += assemble(((comp_f - comp_c) ** 2) * (1.0 / n))
ff = Function(finest_fs).assign((sq - (m ** 2)))
sample_variances_difference[l] = assemble(ff * dx)
dxf = 1.0 / 2 ** (np.linspace(1, L, L))
plot.loglog(dxf, sample_variances_difference)
plot.loglog(dxf, 1e-9 * dxf ** (4), 'k--')
plot.xlabel('normalized dx of coarse level')
plot.ylabel('sample variance difference')
plot.show()
|
mit
| 1,268,599,031,481,539,800
| 22.701031
| 86
| 0.564158
| false
| 2.803659
| false
| false
| false
|
alphagov/notifications-admin
|
app/navigation.py
|
1
|
10999
|
from itertools import chain
from flask import request
class Navigation:
mapping = {}
selected_class = "selected"
def __init__(self):
self.mapping = {
navigation: {
# if not specified, assume endpoints are all in the `main` blueprint.
self.get_endpoint_with_blueprint(endpoint) for endpoint in endpoints
} for navigation, endpoints in self.mapping.items()
}
@property
def endpoints_with_navigation(self):
return tuple(chain.from_iterable((
endpoints
for navigation_item, endpoints in self.mapping.items()
)))
def is_selected(self, navigation_item):
if request.endpoint in self.mapping[navigation_item]:
return " " + self.selected_class
return ''
@staticmethod
def get_endpoint_with_blueprint(endpoint):
return endpoint if '.' in endpoint else 'main.{}'.format(endpoint)
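# Illustrative note (assumption, not from the original file): subclasses only
# override `mapping`; e.g. MainNavigation().is_selected('templates') returns
# " selected" while handling a request routed to `main.view_template`, and ''
# otherwise.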
class HeaderNavigation(Navigation):
mapping = {
'support': {
'bat_phone',
'feedback',
'support',
'support_public',
'thanks',
'triage',
},
'features': {
'features',
'features_email',
'features_letters',
'features_sms',
'message_status',
'roadmap',
'security',
'terms',
'trial_mode_new',
'using_notify',
},
'pricing': {
'pricing',
'how_to_pay',
},
'documentation': {
'documentation',
'integration_testing',
},
'user-profile': {
'user_profile',
'user_profile_email',
'user_profile_email_authenticate',
'user_profile_email_confirm',
'user_profile_mobile_number',
'user_profile_mobile_number_authenticate',
'user_profile_mobile_number_confirm',
'user_profile_name',
'user_profile_password',
'user_profile_disable_platform_admin_view',
},
'platform-admin': {
'archive_user',
'clear_cache',
'create_email_branding',
'create_letter_branding',
'edit_sms_provider_ratio',
'email_branding',
'find_services_by_name',
'find_users_by_email',
'letter_branding',
'live_services',
'live_services_csv',
'notifications_sent_by_service',
'get_billing_report',
'organisations',
'platform_admin',
'platform_admin_list_complaints',
'platform_admin_reports',
'platform_admin_returned_letters',
'platform_admin_splash_page',
'suspend_service',
'trial_services',
'update_email_branding',
'update_letter_branding',
'user_information',
'view_provider',
'view_providers',
},
'sign-in': {
'revalidate_email_sent',
'sign_in',
'two_factor_sms',
'two_factor_email',
'two_factor_email_sent',
'two_factor_email_interstitial',
'two_factor_webauthn',
'verify',
'verify_email',
},
}
# header HTML now comes from GOVUK Frontend so requires a boolean, not an attribute
def is_selected(self, navigation_item):
return request.endpoint in self.mapping[navigation_item]
class MainNavigation(Navigation):
mapping = {
'dashboard': {
'broadcast_tour',
'conversation',
'inbox',
'monthly',
'returned_letter_summary',
'returned_letters',
'service_dashboard',
'template_usage',
'view_notification',
'view_notifications',
},
'current-broadcasts': {
'broadcast_dashboard',
'broadcast_dashboard_updates',
'view_current_broadcast',
'new_broadcast',
'write_new_broadcast',
},
'previous-broadcasts': {
'broadcast_dashboard_previous',
'view_previous_broadcast',
},
'rejected-broadcasts': {
'broadcast_dashboard_rejected',
'view_rejected_broadcast',
},
'templates': {
'action_blocked',
'add_service_template',
'check_messages',
'check_notification',
'choose_from_contact_list',
'choose_template',
'choose_template_to_copy',
'confirm_redact_template',
'conversation_reply',
'copy_template',
'delete_service_template',
'edit_service_template',
'edit_template_postage',
'manage_template_folder',
'send_messages',
'send_one_off',
'send_one_off_letter_address',
'send_one_off_step',
'send_one_off_to_myself',
'no_cookie.send_test_preview',
'set_sender',
'set_template_sender',
'view_template',
'view_template_version',
'view_template_versions',
'broadcast',
'preview_broadcast_areas',
'choose_broadcast_library',
'choose_broadcast_area',
'choose_broadcast_sub_area',
'remove_broadcast_area',
'preview_broadcast_message',
'approve_broadcast_message',
'reject_broadcast_message',
'cancel_broadcast_message',
},
'uploads': {
'upload_contact_list',
'check_contact_list',
'save_contact_list',
'contact_list',
'delete_contact_list',
'upload_letter',
'uploaded_letter_preview',
'uploaded_letters',
'uploads',
'view_job',
'view_jobs',
},
'team-members': {
'confirm_edit_user_email',
'confirm_edit_user_mobile_number',
'edit_user_email',
'edit_user_mobile_number',
'edit_user_permissions',
'invite_user',
'manage_users',
'remove_user_from_service',
},
'usage': {
'usage',
},
'settings': {
'add_organisation_from_gp_service',
'add_organisation_from_nhs_local_service',
'branding_request',
'estimate_usage',
'link_service_to_organisation',
'request_to_go_live',
'service_add_email_reply_to',
'service_add_letter_contact',
'service_add_sms_sender',
'service_agreement',
'service_accept_agreement',
'service_confirm_agreement',
'service_confirm_delete_email_reply_to',
'service_confirm_delete_letter_contact',
'service_confirm_delete_sms_sender',
'service_edit_email_reply_to',
'service_edit_letter_contact',
'service_edit_sms_sender',
'service_email_reply_to',
'service_letter_contact_details',
'service_make_blank_default_letter_contact',
'service_name_change',
'service_name_change_confirm',
'service_preview_email_branding',
'service_preview_letter_branding',
'service_set_auth_type',
'service_set_channel',
'send_files_by_email_contact_details',
'service_confirm_broadcast_account_type',
'service_set_broadcast_channel',
'service_set_broadcast_network',
'service_set_email_branding',
'service_set_inbound_number',
'service_set_inbound_sms',
'service_set_international_letters',
'service_set_international_sms',
'service_set_letters',
'service_set_reply_to_email',
'service_set_sms_prefix',
'service_verify_reply_to_address',
'service_verify_reply_to_address_updates',
'service_settings',
'service_sms_senders',
'set_free_sms_allowance',
'set_message_limit',
'set_rate_limit',
'service_set_letter_branding',
'submit_request_to_go_live',
},
'api-integration': {
'api_callbacks',
'api_documentation',
'api_integration',
'api_keys',
'create_api_key',
'delivery_status_callback',
'received_text_messages_callback',
'revoke_api_key',
'guest_list',
'old_guest_list',
},
}
class CaseworkNavigation(Navigation):
mapping = {
'dashboard': {
'broadcast_tour',
'broadcast_dashboard',
'broadcast_dashboard_previous',
'broadcast_dashboard_updates',
},
'send-one-off': {
'choose_from_contact_list',
'choose_template',
'send_one_off',
'send_one_off_letter_address',
'send_one_off_step',
'send_one_off_to_myself',
},
'sent-messages': {
'view_notifications',
'view_notification',
},
'uploads': {
'view_jobs',
'view_job',
'upload_contact_list',
'check_contact_list',
'save_contact_list',
'contact_list',
'delete_contact_list',
'upload_letter',
'uploaded_letter_preview',
'uploaded_letters',
'uploads',
},
}
class OrgNavigation(Navigation):
mapping = {
'dashboard': {
'organisation_dashboard',
},
'settings': {
'confirm_edit_organisation_name',
'edit_organisation_agreement',
'edit_organisation_billing_details',
'edit_organisation_crown_status',
'edit_organisation_domains',
'edit_organisation_email_branding',
'edit_organisation_letter_branding',
'edit_organisation_go_live_notes',
'edit_organisation_name',
'edit_organisation_notes',
'edit_organisation_type',
'organisation_preview_email_branding',
'organisation_preview_letter_branding',
'organisation_settings',
},
'team-members': {
'edit_user_org_permissions',
'invite_org_user',
'manage_org_users',
'remove_user_from_organisation',
},
'trial-services': {
'organisation_trial_mode_services',
}
}
|
mit
| -5,444,905,363,476,817,000
| 30.15864
| 87
| 0.500864
| false
| 4.404886
| false
| false
| false
|
adowaconan/Spindle_by_Graphical_Features
|
duplicate/Generate_Features (adowaconan).py
|
1
|
5970
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 17 12:35:28 2017
@author: ning
"""
#import mne
import numpy as np
import pandas as pd
import os
from time import time
#import networkx as nx
from collections import Counter
os.chdir('D:\\NING - spindle\\Spindle_by_Graphical_Features')
channelList = ['F3','F4','C3','C4','O1','O2']
import eegPipelineFunctions
raw_dir = 'D:\\NING - spindle\\training set\\'
# get EEG files that have corresponding annotations
raw_files = []
for file in [f for f in os.listdir(raw_dir) if ('txt' in f)]:
sub = int(file.split('_')[0][3:])
if sub < 11:
day = file.split('_')[1][1]
day_for_load = file.split('_')[1][:2]
else:
day = file.split('_')[2][-1]
day_for_load = file.split('_')[2]
raw_file = [f for f in os.listdir(raw_dir) if (file.split('_')[0] in f) and (day_for_load in f) and ('fif' in f)]
if len(raw_file) != 0:
raw_files.append([raw_dir + raw_file[0],raw_dir + file])
# directory for storing all the feature files
raw_dir = 'D:\\NING - spindle\\training set\\road_trip\\'
if not os.path.exists(raw_dir):
os.makedirs(raw_dir)
# initialize the range of the parameters we want to compute based on
epoch_lengths = np.arange(1.,5.,0.2) # 1. to 5 seconds with 0.2 stepsize
plv_thresholds = np.arange(0.6, 0.85, 0.05) # 0.6 to 0.8 with .05
pli_thresholds = np.arange(0.05,0.30, 0.05) # 0.05 to 0.25 with 0.05
cc_thresholds = np.arange(0.7, 0.95,0.05) # 0.7 to 0.9 with 0.05
# make sub-directories based on epoch length
first_level_directory = []
for epoch_length in epoch_lengths:
directory_1 = raw_dir + 'epoch_length '+str(epoch_length)+'\\'
if not os.path.exists(directory_1):
os.makedirs(directory_1)
first_level_directory.append(directory_1)
os.chdir(directory_1)
#print(os.getcwd())
for files in raw_files:
raw_file, annotation_file = files
temp_anno = annotation_file.split('\\')[-1]
sub = int(temp_anno.split('_')[0][3:])
if sub < 11:
day = temp_anno.split('_')[1][1]
day_for_load = temp_anno.split('_')[1][:2]
else:
day = temp_anno.split('_')[2][-1]
day_for_load = temp_anno.split('_')[2]
directory_2 = directory_1 + 'sub' + str(sub) + 'day' + day + '\\'
if not os.path.exists(directory_2):
#print(directory_2)
os.makedirs(directory_2)
os.chdir(directory_2)
# epoch the data
epochs,label,_ = eegPipelineFunctions.get_data_ready(raw_file,channelList,
annotation_file,
epoch_length=epoch_length)
print(Counter(label))
# extract signal features
ssssss = time()
print('extracting signal features ......')
epochFeature = eegPipelineFunctions.featureExtraction(epochs,)
epochFeature = pd.DataFrame(epochFeature)
epochFeature['label']=label
epochFeature.to_csv('sub'+str(sub)+'day'+day+'_'+str(epoch_length)+'_'+'epoch_features.csv',index=False)
# compute adjasency matrices based on epochs
connectivity = eegPipelineFunctions.connectivity(epochs)
connectivity = np.array(connectivity)
plv, pli, cc = connectivity[0,:,:,:],connectivity[1,:,:,:],connectivity[2,:,:,:]
# pre-thresholding graph features
print('extracting graph features of plv ........')
plv_pre_threshold = eegPipelineFunctions.extractGraphFeatures(plv)
plv_pre_threshold['label']=label
print('extracting graph features of pli ........')
pli_pre_threshold = eegPipelineFunctions.extractGraphFeatures(pli)
pli_pre_threshold['label']=label
print('extracting graph features of cc .........')
cc_pre_threshold = eegPipelineFunctions.extractGraphFeatures(cc )
cc_pre_threshold['label']=label
plv_pre_threshold.to_csv('sub'+str(sub)+'day'+day+'plv_features.csv',index=False)
pli_pre_threshold.to_csv('sub'+str(sub)+'day'+day+'pli_features.csv',index=False)
cc_pre_threshold.to_csv('sub'+str(sub)+'day'+day+'cc_features.csv',index=False)
eeeeee = time()
print('done signal, plv, pli, and cc, cost time: %d s'%(eeeeee - ssssss))
# print('start thresholding')
# # extract graph features
# for t_plv,t_pli,t_cc in zip(plv_thresholds,pli_thresholds,cc_thresholds):
# # convert adjasency matrices to binary adjasency matrices
# adj_plv = eegPipelineFunctions.thresholding(t_plv,plv)
# adj_pli = eegPipelineFunctions.thresholding(t_pli,pli)
# adj_cc = eegPipelineFunctions.thresholding(t_cc, cc )
# # this is how we extract graph features
# graphFeature_plv = eegPipelineFunctions.extractGraphFeatures(adj_plv)
# graphFeature_pli = eegPipelineFunctions.extractGraphFeatures(adj_pli)
# graphFeature_cc = eegPipelineFunctions.extractGraphFeatures(adj_cc )
# # prepare the sub-directories for storing feature files
# plv_dir = directory_2 + 'plv_' + str(t_plv) + '\\'
# pli_dir = directory_2 + 'pli_' + str(t_pli) + '\\'
# cc_dir = directory_2 + 'cc_' + str(t_cc ) + '\\'
# if not os.path.exists(plv_dir):
# os.makedirs(plv_dir)
# if not os.path.exists(pli_dir):
# os.makedirs(pli_dir)
# if not os.path.exists(cc_dir):
# os.makedirs(cc_dir)
# # saving csvs
# pd.concat([epochFeature,graphFeature_plv],axis=1).to_csv(plv_dir + 'plv_' + str(t_plv) + '.csv',index=False)
# pd.concat([epochFeature,graphFeature_pli],axis=1).to_csv(pli_dir + 'pli_' + str(t_pli) + '.csv',index=False)
# pd.concat([epochFeature,graphFeature_cc ],axis=1).to_csv(cc_dir + 'cc_' + str(t_cc ) + '.csv',index=False)
|
mit
| 4,144,034,402,099,314,700
| 46.388889
| 121
| 0.597655
| false
| 3.256956
| false
| false
| false
|
zeekay/elemental
|
elemental/js.py
|
1
|
3175
|
from sys import modules
from core import Element as Element
class js(Element):
tag = 'js'
def __init__(self, script='', url=''):
if script:
self.format = '<script type="text/javascript">%s</script>' % script
elif url:
self.format = '<script type="text/javascript" src="%s"></script>' % url
super(js, self).__init__()
def render_this(self):
return self.format
class js_lib(Element):
tag = 'js_lib'
url = '/js/{version}/app.js'
version = '0.1'
def __init__(self, url='', version=''):
if url:
self.url = url
if version:
self.version = version
super(js_lib, self).__init__()
@property
def format(self):
return ''.join(['<script src="', self.url, '"></script>'])
def render_this(self):
return self.format.format(version=self.version)
class jquery(js_lib):
tag = 'jquery'
url = '//ajax.googleapis.com/ajax/libs/jquery/{version}/jquery.min.js'
version = '1.6.2'
_cdnjs = [x.split() for x in """
xuijs 2.0.0 xui.min.js
css3finalize 1.43 jquery.css3finalize.min.js
processing.js 1.2.1 processing-api.min.js
prototype 1.7.0.0 prototype.js
camanjs 2.2 caman.full.min.js
noisy 1.0 jquery.noisy.min.js
modernizr 2.0.6 modernizr.min.js
string_score 0.1.10 string_score.min.js
mustache.js 0.3.0 mustache.min.js
dojo 1.6.0 dojo.xd.js
ext-core 3.1.0 ext-core.js
sizzle 1.4.4 sizzle.min.js
graphael 0.4.1 g.raphael-min.js
ocanvas 1.0 ocanvas.min.js
jqueryui 1.8.13 jquery-ui.min.js
spinejs 0.0.4 spine.min.js
galleria 1.2.3 galleria.min.js
less.js 1.1.3 less-1.1.3.min.js
underscore.js 1.1.7 underscore-min.js
highcharts 2.1.6 highcharts.js
flexie 1.0.0 flexie.min.js
waypoints 1.1 waypoints.min.js
yepnope 1.0.1 yepnope.min.js
mootools 1.3.2 mootools-yui-compressed.js
script.js 1.3 script.min.js
handlebars.js 1.0.0.beta2 handlebars.min.js
json2 20110223 json2.js
cufon 1.09i cufon-yui.js
zepto 0.6 zepto.min.js
chrome-frame 1.0.2 CFInstall.min.js
selectivizr 1.0.2 selectivizr-min.js
sammy.js 0.6.3 sammy.min.js
es5-shim 1.2.4 es5-shim.min.js
js-signals 0.6.1 js-signals.min.js
raphael 1.5.2 raphael-min.js
yui 3.3.0 yui-min.js
underscore.string 1.1.4 underscore.string.min.js
labjs 2.0 LAB.min.js
pubnub 3.1.2 pubnub.min.js
backbone.js 0.5.1 backbone-min.js
twitterlib.js 0.9.0 twitterlib.min.js
scriptaculous 1.8.3 scriptaculous.js
headjs 0.96 head.min.js
webfont 1.0.19 webfont.js
require.js 0.24.0 require.min.js
socket.io 0.7.0 socket.io.min.js
knockout 1.2.1 knockout-min.js
""".splitlines() if x]
_cdnjs_url = '//cdnjs.cloudflare.com/ajax/libs/%s/{version}/%s'
for _name, _version, _filename in _cdnjs:
_tag = _name.replace('.','')
_dict = {'tag': _tag,
'url': _cdnjs_url % (_name, _filename),
'version': _version}
setattr(modules[__name__], _tag, type(_tag, (js_lib,), _dict))
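# Illustrative note (assumption, not part of the original file): each generated
# class behaves like the hand-written `jquery` above; e.g. the entry
# "underscore.js 1.1.7 underscore-min.js" produces a class named `underscorejs`
# whose rendered output would be
#   <script src="//cdnjs.cloudflare.com/ajax/libs/underscore.js/1.1.7/underscore-min.js"></script>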
def _get_latest_cdnjs():
import requests
import json
    # requests responses expose .text / .json(), not .read()
    data = requests.get('http://www.cdnjs.com/packages.json').text
    packages = json.loads(data)['packages']
for n, v, f in [(x['name'], x['version'], x['filename']) for x in packages if x]:
print n, v, f
|
mit
| -3,511,169,699,096,084,000
| 28.95283
| 85
| 0.654488
| false
| 2.606732
| false
| false
| false
|
visipedia/tf_classification
|
preprocessing/inputs.py
|
1
|
29376
|
# Some of this code came from the https://github.com/tensorflow/models/tree/master/slim
# directory, so let's keep the Google license around for now.
#
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from easydict import EasyDict
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from preprocessing.decode_example import decode_serialized_example
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
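# Illustrative usage sketch (assumption, not from the original file):
#
#   rotated = apply_with_random_selector(
#       image,
#       lambda x, k: tf.image.rot90(x, k),
#       num_cases=4)
#
# applies a rotation by a randomly selected multiple of 90 degrees.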
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
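# Illustrative sketch, not part of the original file: distort_color is meant to
# be driven through apply_with_random_selector so each preprocessing thread gets
# a distinct color ordering, mirroring how it is used further down in this file:
#
#   distorted = apply_with_random_selector(
#       image,
#       lambda x, ordering: distort_color(x, ordering, fast_mode=True),
#       num_cases=4)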
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return tf.tuple([cropped_image, distort_bbox])
def _largest_size_at_most(height, width, largest_side):
"""Computes new shape with the largest side equal to `largest_side`.
Computes new shape with the largest side equal to `largest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
largest_side: A python integer or scalar `Tensor` indicating the size of
the largest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
"""
largest_side = tf.convert_to_tensor(largest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
largest_side = tf.to_float(largest_side)
scale = tf.cond(tf.greater(height, width),
lambda: largest_side / height,
lambda: largest_side / width)
new_height = tf.to_int32(height * scale)
new_width = tf.to_int32(width * scale)
return new_height, new_width
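# Worked example, not part of the original file: for an image with height=600,
# width=800 and largest_side=300, the scale is 300/800 = 0.375, so the function
# returns new_height=225 and new_width=300, preserving the 3:4 aspect ratio.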
class DistortedInputs():
def __init__(self, cfg, add_summaries):
self.cfg = cfg
self.add_summaries = add_summaries
def apply(self, original_image, bboxes, distorted_inputs, image_summaries, current_index):
cfg = self.cfg
add_summaries = self.add_summaries
image_shape = tf.shape(original_image)
image_height = tf.cast(image_shape[0], dtype=tf.float32) # cast so that we can multiply them by the bbox coords
image_width = tf.cast(image_shape[1], dtype=tf.float32)
# First thing we need to do is crop out the bbox region from the image
bbox = bboxes[current_index]
xmin = tf.cast(bbox[0] * image_width, tf.int32)
ymin = tf.cast(bbox[1] * image_height, tf.int32)
xmax = tf.cast(bbox[2] * image_width, tf.int32)
ymax = tf.cast(bbox[3] * image_height, tf.int32)
bbox_width = xmax - xmin
bbox_height = ymax - ymin
image = tf.image.crop_to_bounding_box(
image=original_image,
offset_height=ymin,
offset_width=xmin,
target_height=bbox_height,
target_width=bbox_width
)
image_height = bbox_height
image_width = bbox_width
# Convert the pixel values to be in the range [0,1]
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Add a summary of the original data
if add_summaries:
new_height, new_width = _largest_size_at_most(image_height, image_width, cfg.INPUT_SIZE)
resized_original_image = tf.image.resize_bilinear(tf.expand_dims(image, 0), [new_height, new_width])
resized_original_image = tf.squeeze(resized_original_image)
resized_original_image = tf.image.pad_to_bounding_box(resized_original_image, 0, 0, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
# If there are multiple boxes for an image, we only want to write to the TensorArray once.
#image_summaries = image_summaries.write(0, tf.expand_dims(resized_original_image, 0))
image_summaries = tf.cond(tf.equal(current_index, 0),
lambda: image_summaries.write(0, tf.expand_dims(resized_original_image, 0)),
lambda: image_summaries.identity()
)
# Extract a distorted bbox
if cfg.DO_RANDOM_CROP > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_crop = tf.less(r, cfg.DO_RANDOM_CROP)
rc_cfg = cfg.RANDOM_CROP_CFG
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
distorted_image, distorted_bbox = tf.cond(do_crop,
lambda: distorted_bounding_box_crop(image, bbox,
aspect_ratio_range=(rc_cfg.MIN_ASPECT_RATIO, rc_cfg.MAX_ASPECT_RATIO),
area_range=(rc_cfg.MIN_AREA, rc_cfg.MAX_AREA),
max_attempts=rc_cfg.MAX_ATTEMPTS),
lambda: tf.tuple([image, bbox])
)
else:
distorted_image = tf.identity(image)
distorted_bbox = tf.constant([[[0.0, 0.0, 1.0, 1.0]]]) # ymin, xmin, ymax, xmax
if cfg.DO_CENTRAL_CROP > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_crop = tf.less(r, cfg.DO_CENTRAL_CROP)
distorted_image = tf.cond(do_crop,
lambda: tf.image.central_crop(distorted_image, cfg.CENTRAL_CROP_FRACTION),
lambda: tf.identity(distorted_image)
)
distorted_image.set_shape([None, None, 3])
# Add a summary
if add_summaries:
image_with_bbox = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), distorted_bbox)
new_height, new_width = _largest_size_at_most(image_height, image_width, cfg.INPUT_SIZE)
resized_image_with_bbox = tf.image.resize_bilinear(image_with_bbox, [new_height, new_width])
resized_image_with_bbox = tf.squeeze(resized_image_with_bbox)
resized_image_with_bbox = tf.image.pad_to_bounding_box(resized_image_with_bbox, 0, 0, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
#image_summaries = image_summaries.write(1, tf.expand_dims(resized_image_with_bbox, 0))
image_summaries = tf.cond(tf.equal(current_index, 0),
lambda: image_summaries.write(1, tf.expand_dims(resized_image_with_bbox, 0)),
lambda: image_summaries.identity()
)
# Resize the distorted image to the correct dimensions for the network
if cfg.MAINTAIN_ASPECT_RATIO:
shape = tf.shape(distorted_image)
height = shape[0]
width = shape[1]
new_height, new_width = _largest_size_at_most(height, width, cfg.INPUT_SIZE)
else:
new_height = cfg.INPUT_SIZE
new_width = cfg.INPUT_SIZE
num_resize_cases = 1 if cfg.RESIZE_FAST else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [new_height, new_width], method=method),
num_cases=num_resize_cases)
distorted_image = tf.image.pad_to_bounding_box(distorted_image, 0, 0, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
if add_summaries:
#image_summaries = image_summaries.write(2, tf.expand_dims(distorted_image, 0))
image_summaries = tf.cond(tf.equal(current_index, 0),
lambda: image_summaries.write(2, tf.expand_dims(distorted_image, 0)),
lambda: image_summaries.identity()
)
# Randomly flip the image:
if cfg.DO_RANDOM_FLIP_LEFT_RIGHT > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_flip = tf.less(r, 0.5)
distorted_image = tf.cond(do_flip, lambda: tf.image.flip_left_right(distorted_image), lambda: tf.identity(distorted_image))
# TODO: Can this be changed so that we don't always distort the colors?
# Distort the colors
if cfg.DO_COLOR_DISTORTION > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_color_distortion = tf.less(r, cfg.DO_COLOR_DISTORTION)
num_color_cases = 1 if cfg.COLOR_DISTORT_FAST else 4
distorted_color_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode=cfg.COLOR_DISTORT_FAST),
num_cases=num_color_cases)
distorted_image = tf.cond(do_color_distortion, lambda: tf.identity(distorted_color_image), lambda: tf.identity(distorted_image))
distorted_image.set_shape([cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
# Add a summary
if add_summaries:
#image_summaries = image_summaries.write(3, tf.expand_dims(distorted_image, 0))
image_summaries = tf.cond(tf.equal(current_index, 0),
lambda: image_summaries.write(3, tf.expand_dims(distorted_image, 0)),
lambda: image_summaries.identity()
)
# Add the distorted image to the TensorArray
distorted_inputs = distorted_inputs.write(current_index, tf.expand_dims(distorted_image, 0))
return [original_image, bboxes, distorted_inputs, image_summaries, current_index + 1]
def check_normalized_box_values(xmin, ymin, xmax, ymax, maximum_normalized_coordinate=1.01, prefix=""):
""" Make sure the normalized coordinates are less than 1
"""
xmin_maximum = tf.reduce_max(xmin)
xmin_assert = tf.Assert(
      tf.greater_equal(maximum_normalized_coordinate, xmin_maximum),
['%s, maximum xmin coordinate value is larger '
'than %f: ' % (prefix, maximum_normalized_coordinate), xmin_maximum])
with tf.control_dependencies([xmin_assert]):
xmin = tf.identity(xmin)
ymin_maximum = tf.reduce_max(ymin)
ymin_assert = tf.Assert(
      tf.greater_equal(maximum_normalized_coordinate, ymin_maximum),
['%s, maximum ymin coordinate value is larger '
'than %f: ' % (prefix, maximum_normalized_coordinate), ymin_maximum])
with tf.control_dependencies([ymin_assert]):
ymin = tf.identity(ymin)
xmax_maximum = tf.reduce_max(xmax)
xmax_assert = tf.Assert(
      tf.greater_equal(maximum_normalized_coordinate, xmax_maximum),
['%s, maximum xmax coordinate value is larger '
'than %f: ' % (prefix, maximum_normalized_coordinate), xmax_maximum])
with tf.control_dependencies([xmax_assert]):
xmax = tf.identity(xmax)
ymax_maximum = tf.reduce_max(ymax)
ymax_assert = tf.Assert(
      tf.greater_equal(maximum_normalized_coordinate, ymax_maximum),
['%s, maximum ymax coordinate value is larger '
'than %f: ' % (prefix, maximum_normalized_coordinate), ymax_maximum])
with tf.control_dependencies([ymax_assert]):
ymax = tf.identity(ymax)
return xmin, ymin, xmax, ymax
def expand_bboxes(xmin, xmax, ymin, ymax, cfg):
"""
Expand the bboxes.
"""
w = xmax - xmin
h = ymax - ymin
w = w * cfg.WIDTH_EXPANSION_FACTOR
h = h * cfg.HEIGHT_EXPANSION_FACTOR
half_w = w / 2.
half_h = h / 2.
xmin = tf.clip_by_value(xmin - half_w, 0, 1)
xmax = tf.clip_by_value(xmax + half_w, 0, 1)
ymin = tf.clip_by_value(ymin - half_h, 0, 1)
ymax = tf.clip_by_value(ymax + half_h, 0, 1)
return tf.tuple([xmin, xmax, ymin, ymax])
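# Worked example, not part of the original file: with WIDTH_EXPANSION_FACTOR=0.5,
# a box with xmin=0.4 and xmax=0.6 has w=0.2*0.5=0.1, so half_w=0.05 is added on
# each side, giving xmin=0.35 and xmax=0.65 (values near the border are clipped
# to [0, 1]); the height is expanded analogously with HEIGHT_EXPANSION_FACTOR.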
def get_region_data(serialized_example, cfg, fetch_ids=True, fetch_labels=True, fetch_text_labels=True, read_filename=False):
"""
Return the image, an array of bounding boxes, and an array of ids.
"""
feature_dict = {}
if cfg.REGION_TYPE == 'bbox':
bbox_cfg = cfg.BBOX_CFG
features_to_extract = [('image/object/bbox/xmin', 'xmin'),
('image/object/bbox/xmax', 'xmax'),
('image/object/bbox/ymin', 'ymin'),
                           ('image/object/bbox/ymax', 'ymax')]
if read_filename:
features_to_extract.append(('image/filename', 'filename'))
else:
features_to_extract.append(('image/encoded', 'image'))
if fetch_ids:
features_to_extract.append(('image/object/id', 'id'))
if fetch_labels:
features_to_extract.append(('image/object/bbox/label', 'label'))
if fetch_text_labels:
features_to_extract.append(('image/object/bbox/text', 'text'))
features = decode_serialized_example(serialized_example, features_to_extract)
if read_filename:
image_buffer = tf.read_file(features['filename'])
image = tf.image.decode_jpeg(image_buffer, channels=3)
else:
image = features['image']
feature_dict['image'] = image
xmin = tf.expand_dims(features['xmin'], 0)
ymin = tf.expand_dims(features['ymin'], 0)
xmax = tf.expand_dims(features['xmax'], 0)
ymax = tf.expand_dims(features['ymax'], 0)
xmin, ymin, xmax, ymax = check_normalized_box_values(xmin, ymin, xmax, ymax, prefix="From tfrecords ")
if 'DO_EXPANSION' in bbox_cfg and bbox_cfg.DO_EXPANSION > 0:
r = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
do_expansion = tf.less(r, bbox_cfg.DO_EXPANSION)
xmin, xmax, ymin, ymax = tf.cond(do_expansion,
lambda: expand_bboxes(xmin, xmax, ymin, ymax, bbox_cfg.EXPANSION_CFG),
lambda: tf.tuple([xmin, xmax, ymin, ymax])
)
xmin, ymin, xmax, ymax = check_normalized_box_values(xmin, ymin, xmax, ymax, prefix="After expansion ")
# combine the bounding boxes
bboxes = tf.concat(values=[xmin, ymin, xmax, ymax], axis=0)
# order the bboxes so that they have the shape: [num_bboxes, bbox_coords]
bboxes = tf.transpose(bboxes, [1, 0])
feature_dict['bboxes'] = bboxes
if fetch_ids:
ids = features['id']
feature_dict['ids'] = ids
if fetch_labels:
labels = features['label']
feature_dict['labels'] = labels
if fetch_text_labels:
text = features['text']
feature_dict['text'] = text
elif cfg.REGION_TYPE == 'image':
features_to_extract = []
if read_filename:
features_to_extract.append(('image/filename', 'filename'))
else:
features_to_extract.append(('image/encoded', 'image'))
if fetch_ids:
features_to_extract.append(('image/id', 'id'))
if fetch_labels:
features_to_extract.append(('image/class/label', 'label'))
if fetch_text_labels:
features_to_extract.append(('image/class/text', 'text'))
features = decode_serialized_example(serialized_example, features_to_extract)
if read_filename:
image_buffer = tf.read_file(features['filename'])
image = tf.image.decode_jpeg(image_buffer, channels=3)
else:
image = features['image']
feature_dict['image'] = image
bboxes = tf.constant([[0.0, 0.0, 1.0, 1.0]])
feature_dict['bboxes'] = bboxes
if fetch_ids:
ids = [features['id']]
feature_dict['ids'] = ids
if fetch_labels:
labels = [features['label']]
feature_dict['labels'] = labels
if fetch_text_labels:
text = [features['text']]
feature_dict['text'] = text
else:
raise ValueError("Unknown REGION_TYPE: %s" % (cfg.REGION_TYPE,))
return feature_dict
def bbox_crop_loop_cond(original_image, bboxes, distorted_inputs, image_summaries, current_index):
num_bboxes = tf.shape(bboxes)[0]
return current_index < num_bboxes
def get_distorted_inputs(original_image, bboxes, cfg, add_summaries):
distorter = DistortedInputs(cfg, add_summaries)
num_bboxes = tf.shape(bboxes)[0]
distorted_inputs = tf.TensorArray(
dtype=tf.float32,
size=num_bboxes,
element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
)
if add_summaries:
image_summaries = tf.TensorArray(
dtype=tf.float32,
size=4,
element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
)
else:
image_summaries = tf.constant([])
current_index = tf.constant(0, dtype=tf.int32)
loop_vars = [original_image, bboxes, distorted_inputs, image_summaries, current_index]
original_image, bboxes, distorted_inputs, image_summaries, current_index = tf.while_loop(
cond=bbox_crop_loop_cond,
body=distorter.apply,
loop_vars=loop_vars,
parallel_iterations=10, back_prop=False, swap_memory=False
)
distorted_inputs = distorted_inputs.concat()
if add_summaries:
tf.summary.image('0.original_image', image_summaries.read(0))
tf.summary.image('1.image_with_random_crop', image_summaries.read(1))
tf.summary.image('2.cropped_resized_image', image_summaries.read(2))
tf.summary.image('3.final_distorted_image', image_summaries.read(3))
return distorted_inputs
def create_training_batch(serialized_example, cfg, add_summaries, read_filenames=False):
features = get_region_data(serialized_example, cfg, fetch_ids=False,
fetch_labels=True, fetch_text_labels=False, read_filename=read_filenames)
original_image = features['image']
bboxes = features['bboxes']
labels = features['labels']
distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)
distorted_inputs = tf.subtract(distorted_inputs, 0.5)
distorted_inputs = tf.multiply(distorted_inputs, 2.0)
names = ('inputs', 'labels')
tensors = [distorted_inputs, labels]
return [names, tensors]
def create_visualization_batch(serialized_example, cfg, add_summaries, fetch_text_labels=False, read_filenames=False):
features = get_region_data(serialized_example, cfg, fetch_ids=True,
fetch_labels=True, fetch_text_labels=fetch_text_labels, read_filename=read_filenames)
original_image = features['image']
ids = features['ids']
bboxes = features['bboxes']
labels = features['labels']
if fetch_text_labels:
text_labels = features['text']
cpy_original_image = tf.identity(original_image)
distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)
original_image = cpy_original_image
# Resize the original image
if original_image.dtype != tf.float32:
original_image = tf.image.convert_image_dtype(original_image, dtype=tf.float32)
shape = tf.shape(original_image)
height = shape[0]
width = shape[1]
new_height, new_width = _largest_size_at_most(height, width, cfg.INPUT_SIZE)
original_image = tf.image.resize_images(original_image, [new_height, new_width], method=0)
original_image = tf.image.pad_to_bounding_box(original_image, 0, 0, cfg.INPUT_SIZE, cfg.INPUT_SIZE)
original_image = tf.image.convert_image_dtype(original_image, dtype=tf.uint8)
# make a copy of the original image for each bounding box
num_bboxes = tf.shape(bboxes)[0]
expanded_original_image = tf.expand_dims(original_image, 0)
concatenated_original_images = tf.tile(expanded_original_image, [num_bboxes, 1, 1, 1])
names = ['original_inputs', 'inputs', 'ids', 'labels']
tensors = [concatenated_original_images, distorted_inputs, ids, labels]
if fetch_text_labels:
names.append('text_labels')
tensors.append(text_labels)
return [names, tensors]
def create_classification_batch(serialized_example, cfg, add_summaries, read_filenames=False):
features = get_region_data(serialized_example, cfg, fetch_ids=True,
fetch_labels=False, fetch_text_labels=False, read_filename=read_filenames)
original_image = features['image']
bboxes = features['bboxes']
ids = features['ids']
distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)
distorted_inputs = tf.subtract(distorted_inputs, 0.5)
distorted_inputs = tf.multiply(distorted_inputs, 2.0)
names = ('inputs', 'ids')
tensors = [distorted_inputs, ids]
return [names, tensors]
def input_nodes(tfrecords, cfg, num_epochs=None, batch_size=32, num_threads=2,
shuffle_batch = True, random_seed=1, capacity = 1000, min_after_dequeue = 96,
add_summaries=True, input_type='train', fetch_text_labels=False,
read_filenames=False):
"""
Args:
    tfrecords: list of paths to the tfrecord files to read.
    cfg: configuration object holding the preprocessing parameters.
    num_epochs: number of times to read the tfrecords
    batch_size: number of regions per batch.
    num_threads: number of threads used to fill the batching queue.
    shuffle_batch: whether to use tf.train.shuffle_batch rather than tf.train.batch.
    capacity: capacity of the batching queue.
    min_after_dequeue: minimum number of examples kept in the queue after a
      dequeue, only used when shuffle_batch is True.
add_summaries: Add tensorboard summaries of the images
input_type: 'train', 'visualize', 'test', 'classification'
"""
with tf.name_scope('inputs'):
# A producer to generate tfrecord file paths
filename_queue = tf.train.string_input_producer(
tfrecords,
num_epochs=num_epochs
)
# Construct a Reader to read examples from the tfrecords file
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
if input_type=='train' or input_type=='test':
batch_keys, data_to_batch = create_training_batch(serialized_example, cfg, add_summaries, read_filenames)
elif input_type=='visualize':
batch_keys, data_to_batch = create_visualization_batch(serialized_example, cfg, add_summaries, fetch_text_labels, read_filenames)
elif input_type=='classification':
batch_keys, data_to_batch = create_classification_batch(serialized_example, cfg, add_summaries, read_filenames)
else:
raise ValueError("Unknown input type: %s. Options are `train`, `test`, " \
"`visualize`, and `classification`." % (input_type,))
if shuffle_batch:
batch = tf.train.shuffle_batch(
data_to_batch,
batch_size=batch_size,
num_threads=num_threads,
capacity= capacity,
min_after_dequeue= min_after_dequeue,
seed = random_seed,
enqueue_many=True
)
else:
batch = tf.train.batch(
data_to_batch,
batch_size=batch_size,
num_threads=num_threads,
capacity= capacity,
enqueue_many=True
)
batch_dict = {k : v for k, v in zip(batch_keys, batch)}
return batch_dict
|
mit
| 5,512,424,368,205,709,000
| 41.027182
| 141
| 0.625919
| false
| 3.655095
| false
| false
| false
|
sunqm/pyscf
|
pyscf/lo/ibo.py
|
1
|
16558
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Paul J. Robinson <pjrobinson@ucla.edu>
# Qiming Sun <osirpt.sun@gmail.com>
#
'''
Intrinsic Bonding Orbitals
ref. JCTC, 9, 4834
Below here is work done by Paul Robinson.
much of the code below is adapted from code published freely on the website of Gerald Knizia
Ref: JCTC, 2013, 9, 4834-4843
'''
from functools import reduce
import numpy
from pyscf.lib import logger
from pyscf.lo import iao
from pyscf.lo import orth, pipek
from pyscf import __config__
MINAO = getattr(__config__, 'lo_iao_minao', 'minao')
def ibo(mol, orbocc, locmethod='IBO', iaos=None, s=None,
exponent=4, grad_tol=1e-8, max_iter=200, minao=MINAO, verbose=logger.NOTE):
'''Intrinsic Bonding Orbitals
This function serves as a wrapper to the underlying localization functions
ibo_loc and PipekMezey to create IBOs.
Args:
mol : the molecule or cell object
orbocc : occupied molecular orbital coefficients
Kwargs:
locmethod : string
the localization method 'PM' for Pipek Mezey localization or 'IBO' for the IBO localization
iaos : 2D array
the array of IAOs
s : 2D array
the overlap array in the ao basis
Returns:
IBOs in the basis defined in mol object.
'''
if s is None:
if getattr(mol, 'pbc_intor', None): # whether mol object is a cell
if isinstance(orbocc, numpy.ndarray) and orbocc.ndim == 2:
s = mol.pbc_intor('int1e_ovlp', hermi=1)
else:
raise NotImplementedError('k-points crystal orbitals')
else:
s = mol.intor_symmetric('int1e_ovlp')
if iaos is None:
iaos = iao.iao(mol, orbocc)
locmethod = locmethod.strip().upper()
if locmethod == 'PM':
EXPONENT = getattr(__config__, 'lo_ibo_PipekMezey_exponent', exponent)
ibos = PipekMezey(mol, orbocc, iaos, s, exponent=EXPONENT, minao=minao)
del(EXPONENT)
else:
ibos = ibo_loc(mol, orbocc, iaos, s, exponent=exponent,
grad_tol=grad_tol, max_iter=max_iter,
minao=minao, verbose=verbose)
return ibos
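# Illustrative usage sketch (adapted from the PipekMezey docstring below; the
# molecule and basis are arbitrary choices, not part of the original file):
#   >>> from pyscf import gto, scf
#   >>> from pyscf.lo import ibo
#   >>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='cc-pvdz')
#   >>> mf = scf.RHF(mol).run()
#   >>> ibos = ibo.ibo(mol, mf.mo_coeff[:, mf.mo_occ > 0], locmethod='IBO')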
def ibo_loc(mol, orbocc, iaos, s, exponent, grad_tol, max_iter,
minao=MINAO, verbose=logger.NOTE):
'''Intrinsic Bonding Orbitals. [Ref. JCTC, 9, 4834]
    This implementation follows Knizia's implementation, except that the
resultant IBOs are symmetrically orthogonalized. Note the IBOs of this
implementation do not strictly maximize the IAO Mulliken charges.
IBOs can also be generated by another implementation (see function
pyscf.lo.ibo.PM). In that function, PySCF builtin Pipek-Mezey localization
module was used to maximize the IAO Mulliken charges.
Args:
mol : the molecule or cell object
orbocc : 2D array or a list of 2D array
occupied molecular orbitals or crystal orbitals for each k-point
Kwargs:
iaos : 2D array
the array of IAOs
exponent : integer
Localization power in PM scheme
grad_tol : float
convergence tolerance for norm of gradients
Returns:
IBOs in the big basis (the basis defined in mol object).
'''
log = logger.new_logger(mol, verbose)
assert(exponent in (2, 4))
# Symmetrically orthogonalization of the IAO orbitals as Knizia's
# implementation. The IAO returned by iao.iao function is not orthogonal.
iaos = orth.vec_lowdin(iaos, s)
#static variables
StartTime = logger.perf_counter()
L = 0 # initialize a value of the localization function for safety
#max_iter = 20000 #for some reason the convergence of solid is slower
#fGradConv = 1e-10 #this ought to be pumped up to about 1e-8 but for testing purposes it's fine
swapGradTolerance = 1e-12
#dynamic variables
Converged = False
# render Atoms list without ghost atoms
iao_mol = iao.reference_mol(mol, minao=minao)
Atoms = [iao_mol.atom_pure_symbol(i) for i in range(iao_mol.natm)]
#generates the parameters we need about the atomic structure
nAtoms = len(Atoms)
AtomOffsets = MakeAtomIbOffsets(Atoms)[0]
iAtSl = [slice(AtomOffsets[A],AtomOffsets[A+1]) for A in range(nAtoms)]
#converts the occupied MOs to the IAO basis
CIb = reduce(numpy.dot, (iaos.T, s , orbocc))
numOccOrbitals = CIb.shape[1]
log.debug(" {0:^5s} {1:^14s} {2:^11s} {3:^8s}"
.format("ITER.","LOC(Orbital)","GRADIENT", "TIME"))
for it in range(max_iter):
fGrad = 0.00
#calculate L for convergence checking
L = 0.
for A in range(nAtoms):
for i in range(numOccOrbitals):
CAi = CIb[iAtSl[A],i]
L += numpy.dot(CAi,CAi)**exponent
# loop over the occupied orbitals pairs i,j
for i in range(numOccOrbitals):
for j in range(i):
                # I experimented with exponentially falling off random noise
Aij = 0.0 #numpy.random.random() * numpy.exp(-1*it)
Bij = 0.0 #numpy.random.random() * numpy.exp(-1*it)
for k in range(nAtoms):
CIbA = CIb[iAtSl[k],:]
Cii = numpy.dot(CIbA[:,i], CIbA[:,i])
Cij = numpy.dot(CIbA[:,i], CIbA[:,j])
Cjj = numpy.dot(CIbA[:,j], CIbA[:,j])
#now I calculate Aij and Bij for the gradient search
if exponent == 2:
Aij += 4.*Cij**2 - (Cii - Cjj)**2
Bij += 4.*Cij*(Cii - Cjj)
else:
Bij += 4.*Cij*(Cii**3-Cjj**3)
Aij += -Cii**4 - Cjj**4 + 6*(Cii**2 + Cjj**2)*Cij**2 + Cii**3 * Cjj + Cii*Cjj**3
if (Aij**2 + Bij**2 < swapGradTolerance) and False:
continue
#this saves us from replacing already fine orbitals
else:
                    #THE BELOW IS TAKEN DIRECTLY FROM KNIZIA's FREE CODE
# Calculate 2x2 rotation angle phi.
# This correspond to [2] (12)-(15), re-arranged and simplified.
phi = .25*numpy.arctan2(Bij,-Aij)
fGrad += Bij**2
# ^- Bij is the actual gradient. Aij is effectively
# the second derivative at phi=0.
                    # 2x2 rotation form; that's what PM suggest. It works
# fine, but I don't like the asymmetry.
cs = numpy.cos(phi)
ss = numpy.sin(phi)
Ci = 1. * CIb[:,i]
Cj = 1. * CIb[:,j]
CIb[:,i] = cs * Ci + ss * Cj
CIb[:,j] = -ss * Ci + cs * Cj
fGrad = fGrad**.5
log.debug(" {0:5d} {1:12.8f} {2:11.2e} {3:8.2f}"
.format(it+1, L**(1./exponent), fGrad, logger.perf_counter()-StartTime))
if fGrad < grad_tol:
Converged = True
break
Note = "IB/P%i/2x2, %i iter; Final gradient %.2e" % (exponent, it+1, fGrad)
if not Converged:
log.note("\nWARNING: Iterative localization failed to converge!"
"\n %s", Note)
else:
log.note(" Iterative localization: %s", Note)
log.debug(" Localized orbitals deviation from orthogonality: %8.2e",
numpy.linalg.norm(numpy.dot(CIb.T, CIb) - numpy.eye(numOccOrbitals)))
    # Note CIb is not a unitary matrix (although it is very close to unitary)
    # because the projection <IAO|OccOrb> does not give a unitary matrix.
return numpy.dot(iaos, (orth.vec_lowdin(CIb)))
def PipekMezey(mol, orbocc, iaos, s, exponent, minao=MINAO):
'''
    Note this localization is slightly different from Knizia's implementation.
    The localization here preserves orthonormality during the optimization.
    Orbitals are projected to the IAO basis first and the Mulliken population is
    calculated based on the IAO basis (in function atomic_pops). A series of
    unitary matrices are generated and applied on the input orbitals. The
    intermediate orbitals in the optimization and the finally localized orbitals
    are all orthonormal.
Examples:
>>> from pyscf import gto, scf
>>> from pyscf.lo import ibo
    >>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='unc-sto3g')
>>> mf = scf.RHF(mol).run()
>>> pm = ibo.PM(mol, mf.mo_coeff[:,mf.mo_occ>0])
>>> loc_orb = pm.kernel()
'''
# Note: PM with Lowdin-orth IAOs is implemented in pipek.PM class
    # TODO: Merge the implementation here into pipek.PM
cs = numpy.dot(iaos.T.conj(), s)
s_iao = numpy.dot(cs, iaos)
iao_inv = numpy.linalg.solve(s_iao, cs)
iao_mol = iao.reference_mol(mol, minao=minao)
# Define the mulliken population of each atom based on IAO basis.
# proj[i].trace is the mulliken population of atom i.
def atomic_pops(mol, mo_coeff, method=None):
nmo = mo_coeff.shape[1]
proj = numpy.empty((mol.natm,nmo,nmo))
orb_in_iao = reduce(numpy.dot, (iao_inv, mo_coeff))
for i, (b0, b1, p0, p1) in enumerate(iao_mol.offset_nr_by_atom()):
csc = reduce(numpy.dot, (orb_in_iao[p0:p1].T, s_iao[p0:p1],
orb_in_iao))
proj[i] = (csc + csc.T) * .5
return proj
pm = pipek.PM(mol, orbocc)
pm.atomic_pops = atomic_pops
pm.exponent = exponent
return pm
PM = Pipek = PipekMezey
def shell_str(l, n_cor, n_val):
'''
    Helper function to define core and valence shells for shells with different l
'''
cor_shell = [
"[{n}s]", "[{n}px] [{n}py] [{n}pz]",
"[{n}d0] [{n}d2-] [{n}d1+] [{n}d2+] [{n}d1-]",
"[{n}f1+] [{n}f1-] [{n}f0] [{n}f3+] [{n}f2-] [{n}f3-] [{n}f2+]"]
val_shell = [
l_str.replace('[', '').replace(']', '') for l_str in cor_shell]
l_str = ' '.join(
[cor_shell[l].format(n=i) for i in range(l + 1, l + 1 + n_cor)] +
[val_shell[l].format(n=i) for i in range(l + 1 + n_cor,
l + 1 + n_cor + n_val)])
return l_str
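# Worked example, not part of the original file: shell_str(0, 4, 1) yields
# "[1s] [2s] [3s] [4s] 5s", i.e. four bracketed core s shells followed by one
# valence s shell, matching the [xx]-marks-core convention used below.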
'''
These are parameters for selecting the valence space correctly.
The parameters are taken from in G. Knizia's free code
https://sites.psu.edu/knizia/software/
'''
def MakeAtomInfos():
nCoreX = {"H": 0, "He": 0}
for At in "Li Be B C O N F Ne".split(): nCoreX[At] = 1
for At in "Na Mg Al Si P S Cl Ar".split(): nCoreX[At] = 5
for At in "Na Mg Al Si P S Cl Ar".split(): nCoreX[At] = 5
for At in "K Ca".split(): nCoreX[At] = 18/2
for At in "Sc Ti V Cr Mn Fe Co Ni Cu Zn".split(): nCoreX[At] = 18/2
for At in "Ga Ge As Se Br Kr".split(): nCoreX[At] = 18/2+5 # [Ar] and the 5 d orbitals.
nAoX = {"H": 1, "He": 1}
for At in "Li Be".split(): nAoX[At] = 2
for At in "B C O N F Ne".split(): nAoX[At] = 5
for At in "Na Mg".split(): nAoX[At] = 3*1 + 1*3
for At in "Al Si P S Cl Ar".split(): nAoX[At] = 3*1 + 2*3
for At in "K Ca".split(): nAoX[At] = 18/2+1
for At in "Sc Ti V Cr Mn Fe Co Ni Cu Zn".split(): nAoX[At] = 18/2+1+5 # 4s, 3d
for At in "Ga Ge As Se Br Kr".split(): nAoX[At] = 18/2+1+5+3
AoLabels = {}
def SetAo(At, AoDecl):
Labels = AoDecl.split()
AoLabels[At] = Labels
assert(len(Labels) == nAoX[At])
nCore = len([o for o in Labels if o.startswith('[')])
assert(nCore == nCoreX[At])
# atomic orbitals in the MINAO basis: [xx] denotes core orbitals.
for At in "H He".split(): SetAo(At, "1s")
for At in "Li Be".split(): SetAo(At, "[1s] 2s")
for At in "B C O N F Ne".split(): SetAo(At, "[1s] 2s 2px 2py 2pz")
for At in "Na Mg".split(): SetAo(At, "[1s] [2s] 3s [2px] [2py] [2pz]")
for At in "Al Si P S Cl Ar".split(): SetAo(At, "[1s] [2s] 3s [2px] [2py] [2pz] 3px 3py 3pz")
for At in "K Ca".split(): SetAo(At, "[1s] [2s] [3s] 4s [2px] [2py] [2pz] [3px] [3py] [3pz]")
for At in "Sc Ti V Cr Mn Fe Co Ni Cu Zn".split(): SetAo(At, "[1s] [2s] [3s] 4s [2px] [2py] [2pz] [3px] [3py] [3pz] 3d0 3d2- 3d1+ 3d2+ 3d1-")
for At in "Ga Ge As Se Br Kr".split(): SetAo(At, "[1s] [2s] [3s] 4s [2px] [2py] [2pz] [3px] [3py] [3pz] 4px 4py 4pz [3d0] [3d2-] [3d1+] [3d2+] [3d1-]")
for At in "Rb Sr".split():
nCoreX[At] = 36/2
nAoX[At] = nCoreX[At] + 1
SetAo(At, ' '.join ([shell_str(0,4,1),
shell_str(1,3,0),
shell_str(2,1,0)]))
for At in "Y Zr Nb Mo Tc Ru Rh Pd Ag Cd".split():
nCoreX[At] = 36/2
nAoX[At] = nCoreX[At] + 1 + 5
SetAo(At, ' '.join ([shell_str(0,4,1),
shell_str(1,3,0),
shell_str(2,1,1)]))
for At in "In Sn Sb Te I Xe".split():
nCoreX[At] = 36/2 + 5
nAoX[At] = nCoreX[At] + 1 + 3
SetAo(At, ' '.join ([shell_str(0,4,1),
shell_str(1,3,1),
shell_str(2,2,0)]))
for At in "Cs Ba".split():
nCoreX[At] = 54/2
nAoX[At] = nCoreX[At] + 1
SetAo(At, ' '.join ([shell_str(0,5,1),
shell_str(1,4,0),
shell_str(2,2,0)]))
for At in "Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu".split():
nCoreX[At] = 54/2
nAoX[At] = nCoreX[At] + 1 + 5 + 7
SetAo(At, ' '.join ([shell_str(0,5,1),
shell_str(1,4,0),
shell_str(2,2,1),
shell_str(3,0,1)]))
for At in "La Hf Ta W Re Os Ir Pt Au Hg".split():
nCoreX[At] = 54/2 + 7
nAoX[At] = nCoreX[At] + 1 + 5
SetAo(At, ' '.join ([shell_str(0,5,1),
shell_str(1,4,0),
shell_str(2,2,1),
shell_str(3,1,0)]))
for At in "Tl Pb Bi Po At Rn".split():
nCoreX[At] = 54/2 + 7 + 5
nAoX[At] = nCoreX[At] + 1 + 3
SetAo(At, ' '.join ([shell_str(0,5,1),
shell_str(1,4,1),
shell_str(2,3,0),
shell_str(3,1,0)]))
for At in "Fr Ra".split():
nCoreX[At] = 86/2
nAoX[At] = nCoreX[At] + 1
SetAo(At, ' '.join ([shell_str(0,6,1),
shell_str(1,5,0),
shell_str(2,3,0),
shell_str(3,1,0)]))
for At in "Th Pa U Np Pu Am Cm Bk Cf Es Fm Md No".split():
nCoreX[At] = 86/2
nAoX[At] = nCoreX[At] + 1 + 5 + 7
SetAo(At, ' '.join ([shell_str(0,6,1),
shell_str(1,5,0),
shell_str(2,3,1),
shell_str(3,1,1)]))
for At in "Ac Lr Rf Db Sg Bh Hs Mt Ds Rg Cn".split():
nCoreX[At] = 86/2 + 7
nAoX[At] = nCoreX[At] + 1 + 5
SetAo(At, ' '.join ([shell_str(0,6,1),
shell_str(1,5,0),
shell_str(2,3,1),
shell_str(3,2,0)]))
for At in "Nh Fl Mc Lv Ts Og".split():
nCoreX[At] = 86/2 + 7 + 5
nAoX[At] = nCoreX[At] + 1 + 3
SetAo(At, ' '.join ([shell_str(0,6,1),
shell_str(1,5,1),
shell_str(2,4,0),
shell_str(3,2,0)]))
# note: f order is '4f1+','4f1-','4f0','4f3+','4f2-','4f3-','4f2+',
return nCoreX, nAoX, AoLabels
def MakeAtomIbOffsets(Atoms):
"""calcualte offset of first orbital of individual atoms
in the valence minimal basis (IB)"""
nCoreX, nAoX, AoLabels = MakeAtomInfos()
iBfAt = [0]
for Atom in Atoms:
Atom = ''.join(char for char in Atom if char.isalpha())
iBfAt.append(iBfAt[-1] + nAoX[Atom])
return iBfAt, nCoreX, nAoX, AoLabels
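# Worked example, not part of the original file: for Atoms = ['O', 'H', 'H'] the
# minimal-basis AO counts are 5, 1 and 1, so iBfAt = [0, 5, 6, 7] and atom i owns
# the IAO slice iBfAt[i]:iBfAt[i+1].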
del(MINAO)
|
apache-2.0
| 5,620,724,558,578,377,000
| 39.583333
| 155
| 0.543967
| false
| 2.981812
| false
| false
| false
|
raxod502/straight.el
|
watcher/straight_watch_callback.py
|
1
|
2215
|
#!/usr/bin/env -S python3 -u
import os
import pathlib
import sys
WATCHEXEC_VAR_COMMON = "WATCHEXEC_COMMON_PATH"
WATCHEXEC_VARS = [
"WATCHEXEC_CREATED_PATH",
"WATCHEXEC_REMOVED_PATH",
"WATCHEXEC_RENAMED_PATH",
"WATCHEXEC_WRITTEN_PATH",
"WATCHEXEC_META_CHANGED_PATH",
]
def die(message):
print(message, file=sys.stderr)
sys.exit(1)
def usage():
return "usage: python -m straight_watch_callback <repos-dir> <modified-dir>"
def path_contains(parent, child):
parent = pathlib.Path(parent).resolve()
child = pathlib.Path(child).resolve()
return parent in child.parents
def path_strip(parent, child):
parent = pathlib.Path(parent).parts
child = pathlib.Path(child).parts
return child[len(parent)]
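# Illustrative sketch, not part of the original file (the paths are assumptions
# and resolve without symlinks): with parent="/repos" and
# child="/repos/straight.el/straight.el", path_contains(parent, child) is True
# and path_strip(parent, child) returns "straight.el", the first path component
# below the parent directory.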
def main(args):
if len(args) != 2:
die(usage())
repos_dir, modified_dir = args
repos_dir = pathlib.Path(repos_dir).resolve()
modified_dir = pathlib.Path(modified_dir).resolve()
paths = []
for var in WATCHEXEC_VARS:
if var in os.environ:
for path in os.environ[var].split(os.pathsep):
paths.append(path)
if not paths:
die("straight_watch_callback.py: watchexec gave no modified files")
if WATCHEXEC_VAR_COMMON in os.environ:
common = os.environ[WATCHEXEC_VAR_COMMON]
        # Yes, string concatenation. For some reason when a common
# prefix is used, the individual paths start with a slash even
# though they're actually relative to the prefix.
paths = [common + path for path in paths]
paths = [pathlib.Path(path).resolve() for path in paths]
paths = sorted(set(paths))
repos = set()
for path in paths:
print("detect modification: {}".format(path), file=sys.stderr)
if repos_dir in path.parents:
repo = path.relative_to(repos_dir).parts[0]
repos.add(repo)
if repos:
modified_dir.mkdir(parents=True, exist_ok=True)
repos = sorted(repos)
for repo in repos:
print("--> mark for rebuild: {}".format(repo), file=sys.stderr)
with open(modified_dir / repo, "w"):
pass
if __name__ == "__main__":
main(sys.argv[1:])
|
mit
| -3,666,085,764,388,796,400
| 28.144737
| 80
| 0.628894
| false
| 3.49921
| false
| false
| false
|
Onager/plaso
|
plaso/cli/status_view.py
|
1
|
18865
|
# -*- coding: utf-8 -*-
"""The status view."""
import ctypes
import sys
import time
try:
import win32api
import win32console
except ImportError:
win32console = None
from dfvfs.lib import definitions as dfvfs_definitions
import plaso
from plaso.cli import tools
from plaso.cli import views
from plaso.lib import definitions
class StatusView(object):
"""Processing status view."""
MODE_LINEAR = 'linear'
MODE_WINDOW = 'window'
_SOURCE_TYPES = {
definitions.SOURCE_TYPE_ARCHIVE: 'archive',
dfvfs_definitions.SOURCE_TYPE_DIRECTORY: 'directory',
dfvfs_definitions.SOURCE_TYPE_FILE: 'single file',
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE: (
'storage media device'),
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE: (
'storage media image')}
  _UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
_WINAPI_STD_OUTPUT_HANDLE = -11
_WINAPI_ENABLE_PROCESSED_INPUT = 1
_WINAPI_ENABLE_LINE_INPUT = 2
_WINAPI_ENABLE_ECHO_INPUT = 4
_WINAPI_ANSI_CONSOLE_MODE = (
_WINAPI_ENABLE_PROCESSED_INPUT | _WINAPI_ENABLE_LINE_INPUT |
_WINAPI_ENABLE_ECHO_INPUT)
def __init__(self, output_writer, tool_name):
"""Initializes a status view.
Args:
output_writer (OutputWriter): output writer.
      tool_name (str): name of the tool.
"""
super(StatusView, self).__init__()
self._artifact_filters = None
self._filter_file = None
self._have_ansi_support = not win32console
self._mode = self.MODE_WINDOW
self._output_writer = output_writer
self._source_path = None
self._source_type = None
self._stdout_output_writer = isinstance(
output_writer, tools.StdoutOutputWriter)
self._storage_file_path = None
self._tool_name = tool_name
if win32console:
kernel32 = ctypes.windll.kernel32
stdout_handle = kernel32.GetStdHandle(self._WINAPI_STD_OUTPUT_HANDLE)
result = kernel32.SetConsoleMode(
stdout_handle, self._WINAPI_ANSI_CONSOLE_MODE)
self._have_ansi_support = result != 0
def _AddsAnalysisProcessStatusTableRow(self, process_status, table_view):
"""Adds an analysis process status table row.
Args:
process_status (ProcessStatus): processing status.
table_view (CLITabularTableView): table view.
"""
used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)
events = ''
if (process_status.number_of_consumed_events is not None and
process_status.number_of_consumed_events_delta is not None):
events = '{0:d} ({1:d})'.format(
process_status.number_of_consumed_events,
process_status.number_of_consumed_events_delta)
event_tags = ''
if (process_status.number_of_produced_event_tags is not None and
process_status.number_of_produced_event_tags_delta is not None):
event_tags = '{0:d} ({1:d})'.format(
process_status.number_of_produced_event_tags,
process_status.number_of_produced_event_tags_delta)
reports = ''
if (process_status.number_of_produced_reports is not None and
process_status.number_of_produced_reports_delta is not None):
reports = '{0:d} ({1:d})'.format(
process_status.number_of_produced_reports,
process_status.number_of_produced_reports_delta)
table_view.AddRow([
process_status.identifier, process_status.pid, process_status.status,
used_memory, events, event_tags, reports])
def _AddExtractionProcessStatusTableRow(self, process_status, table_view):
"""Adds an extraction process status table row.
Args:
process_status (ProcessStatus): processing status.
table_view (CLITabularTableView): table view.
"""
used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)
sources = ''
if (process_status.number_of_produced_sources is not None and
process_status.number_of_produced_sources_delta is not None):
sources = '{0:d} ({1:d})'.format(
process_status.number_of_produced_sources,
process_status.number_of_produced_sources_delta)
events = ''
if (process_status.number_of_produced_events is not None and
process_status.number_of_produced_events_delta is not None):
events = '{0:d} ({1:d})'.format(
process_status.number_of_produced_events,
process_status.number_of_produced_events_delta)
# TODO: shorten display name to fit in 80 chars and show the filename.
table_view.AddRow([
process_status.identifier, process_status.pid, process_status.status,
used_memory, sources, events, process_status.display_name])
def _ClearScreen(self):
"""Clears the terminal/console screen."""
if self._have_ansi_support:
# ANSI escape sequence to clear screen.
self._output_writer.Write('\033[2J')
# ANSI escape sequence to move cursor to top left.
self._output_writer.Write('\033[H')
elif win32console:
# This version of Windows cmd.exe does not support ANSI escape codes, thus
# instead we fill the console screen buffer with spaces. The downside of
# this approach is an annoying flicker.
top_left_coordinate = win32console.PyCOORDType(0, 0)
screen_buffer = win32console.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
screen_buffer_information = screen_buffer.GetConsoleScreenBufferInfo()
screen_buffer_attributes = screen_buffer_information['Attributes']
screen_buffer_size = screen_buffer_information['Size']
console_size = screen_buffer_size.X * screen_buffer_size.Y
screen_buffer.FillConsoleOutputCharacter(
' ', console_size, top_left_coordinate)
screen_buffer.FillConsoleOutputAttribute(
screen_buffer_attributes, console_size, top_left_coordinate)
screen_buffer.SetConsoleCursorPosition(top_left_coordinate)
# TODO: remove update flicker. For win32console we could set the cursor
# top left, write the table, clean the remainder of the screen buffer
# and set the cursor at the end of the table.
def _FormatSizeInUnitsOf1024(self, size):
"""Represents a number of bytes in units of 1024.
Args:
size (int): size in bytes.
Returns:
str: human readable string of the size.
"""
magnitude_1024 = 0
used_memory_1024 = float(size)
while used_memory_1024 >= 1024:
used_memory_1024 /= 1024
magnitude_1024 += 1
    if 0 < magnitude_1024 <= 8:
return '{0:.1f} {1:s}'.format(
used_memory_1024, self._UNITS_1024[magnitude_1024])
return '{0:d} B'.format(size)
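  # Worked example, not part of the original file: a size of 1536 bytes is
  # rendered as '1.5 KiB' and 3 * 1024**3 bytes as '3.0 GiB'; sizes whose
  # magnitude falls outside the supported unit range are reported in plain bytes.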
def _FormatProcessingTime(self, processing_status):
"""Formats the processing time.
Args:
processing_status (ProcessingStatus): processing status.
Returns:
str: processing time formatted as: "5 days, 12:34:56".
"""
processing_time = 0
if processing_status:
processing_time = time.time() - processing_status.start_time
processing_time, seconds = divmod(int(processing_time), 60)
processing_time, minutes = divmod(processing_time, 60)
days, hours = divmod(processing_time, 24)
if days == 0:
days_string = ''
elif days == 1:
days_string = '1 day, '
else:
days_string = '{0:d} days, '.format(days)
return '{0:s}{1:02d}:{2:02d}:{3:02d}'.format(
days_string, hours, minutes, seconds)
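  # Worked example, not part of the original file: a start time 183845 seconds in
  # the past (2 days, 3 hours, 4 minutes, 5 seconds) is formatted as
  # '2 days, 03:04:05'; durations under a day omit the day prefix.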
def _PrintAnalysisStatusHeader(self, processing_status):
"""Prints the analysis status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Storage file\t\t: {0:s}\n'.format(self._storage_file_path))
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
if processing_status and processing_status.events_status:
self._PrintEventsStatus(processing_status.events_status)
self._output_writer.Write('\n')
def _PrintAnalysisStatusUpdateLinear(self, processing_status):
"""Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_consumed_events)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_consumed_events)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
def _PrintAnalysisStatusUpdateWindow(self, processing_status):
"""Prints an analysis status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self._PrintAnalysisStatusHeader(processing_status)
table_view = views.CLITabularTableView(column_names=[
'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags',
'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0])
self._AddsAnalysisProcessStatusTableRow(
processing_status.foreman_status, table_view)
for worker_status in processing_status.workers_status:
self._AddsAnalysisProcessStatusTableRow(worker_status, table_view)
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
if processing_status.aborted:
self._output_writer.Write(
'Processing aborted - waiting for clean up.\n\n')
if self._stdout_output_writer:
# We need to explicitly flush stdout to prevent partial status updates.
sys.stdout.flush()
def _PrintExtractionStatusUpdateLinear(self, processing_status):
"""Prints an extraction status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_produced_events,
processing_status.foreman_status.display_name)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_produced_events,
worker_status.display_name)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
def _PrintExtractionStatusUpdateWindow(self, processing_status):
"""Prints an extraction status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self.PrintExtractionStatusHeader(processing_status)
table_view = views.CLITabularTableView(column_names=[
'Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events',
'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0])
self._AddExtractionProcessStatusTableRow(
processing_status.foreman_status, table_view)
for worker_status in processing_status.workers_status:
self._AddExtractionProcessStatusTableRow(worker_status, table_view)
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
if processing_status.aborted:
self._output_writer.Write(
'Processing aborted - waiting for clean up.\n\n')
# TODO: remove update flicker. For win32console we could set the cursor
# top left, write the table, clean the remainder of the screen buffer
# and set the cursor at the end of the table.
if self._stdout_output_writer:
# We need to explicitly flush stdout to prevent partial status updates.
sys.stdout.flush()
def _PrintEventsStatus(self, events_status):
"""Prints the status of the events.
Args:
events_status (EventsStatus): events status.
"""
if events_status:
table_view = views.CLITabularTableView(
column_names=['Events:', 'Filtered', 'In time slice', 'Duplicates',
'MACB grouped', 'Total'],
column_sizes=[15, 15, 15, 15, 15, 0])
table_view.AddRow([
'', events_status.number_of_filtered_events,
events_status.number_of_events_from_time_slice,
events_status.number_of_duplicate_events,
events_status.number_of_macb_grouped_events,
events_status.total_number_of_events])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def _PrintTasksStatus(self, processing_status):
"""Prints the status of the tasks.
Args:
processing_status (ProcessingStatus): processing status.
"""
if processing_status and processing_status.tasks_status:
tasks_status = processing_status.tasks_status
table_view = views.CLITabularTableView(
column_names=['Tasks:', 'Queued', 'Processing', 'Merging',
'Abandoned', 'Total'],
column_sizes=[15, 7, 15, 15, 15, 0])
table_view.AddRow([
'', tasks_status.number_of_queued_tasks,
tasks_status.number_of_tasks_processing,
tasks_status.number_of_tasks_pending_merge,
tasks_status.number_of_abandoned_tasks,
tasks_status.total_number_of_tasks])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def GetAnalysisStatusUpdateCallback(self):
"""Retrieves the analysis status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintAnalysisStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintAnalysisStatusUpdateWindow
return None
def GetExtractionStatusUpdateCallback(self):
"""Retrieves the extraction status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintExtractionStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintExtractionStatusUpdateWindow
return None
# TODO: refactor to protected method.
def PrintExtractionStatusHeader(self, processing_status):
"""Prints the extraction status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Source path\t\t: {0:s}\n'.format(self._source_path))
self._output_writer.Write(
'Source type\t\t: {0:s}\n'.format(self._source_type))
if self._artifact_filters:
artifacts_string = ', '.join(self._artifact_filters)
self._output_writer.Write('Artifact filters\t: {0:s}\n'.format(
artifacts_string))
if self._filter_file:
self._output_writer.Write('Filter file\t\t: {0:s}\n'.format(
self._filter_file))
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
self._PrintTasksStatus(processing_status)
self._output_writer.Write('\n')
def PrintExtractionSummary(self, processing_status):
"""Prints a summary of the extraction.
Args:
processing_status (ProcessingStatus): processing status.
"""
if not processing_status:
self._output_writer.Write(
'WARNING: missing processing status information.\n')
elif not processing_status.aborted:
if processing_status.error_path_specs:
self._output_writer.Write('Processing completed with errors.\n')
else:
self._output_writer.Write('Processing completed.\n')
number_of_warnings = (
processing_status.foreman_status.number_of_produced_warnings)
if number_of_warnings:
output_text = '\n'.join([
'',
('Number of warnings generated while extracting events: '
'{0:d}.').format(number_of_warnings),
'',
'Use pinfo to inspect warnings in more detail.',
''])
self._output_writer.Write(output_text)
if processing_status.error_path_specs:
output_text = '\n'.join([
'',
'Path specifications that could not be processed:',
''])
self._output_writer.Write(output_text)
for path_spec in processing_status.error_path_specs:
self._output_writer.Write(path_spec.comparable)
self._output_writer.Write('\n')
self._output_writer.Write('\n')
def SetMode(self, mode):
"""Sets the mode.
Args:
mode (str): status view mode.
"""
self._mode = mode
def SetSourceInformation(
self, source_path, source_type, artifact_filters=None, filter_file=None):
"""Sets the source information.
Args:
source_path (str): path of the source.
source_type (str): source type.
artifact_filters (Optional[list[str]]): names of artifact definitions to
use as filters.
filter_file (Optional[str]): filter file.
"""
self._artifact_filters = artifact_filters
self._filter_file = filter_file
self._source_path = source_path
self._source_type = self._SOURCE_TYPES.get(source_type, 'UNKNOWN')
def SetStorageFileInformation(self, storage_file_path):
"""Sets the storage file information.
Args:
storage_file_path (str): path to the storage file.
"""
self._storage_file_path = storage_file_path
|
apache-2.0
| -6,684,999,783,999,508,000
| 33.935185
| 80
| 0.660906
| false
| 3.798067
| false
| false
| false
|
kleinfeld/medpy
|
setup.py
|
1
|
4152
|
#!/usr/bin/env python
# version: 0.1.2
import os
# setuptools >= 0.7 supports 'python setup.py develop'
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# The maxflow graphcut wrapper using boost.python
maxflow = Extension('medpy.graphcut.maxflow',
define_macros = [('MAJOR_VERSION', '0'),
('MINOR_VERSION', '1')],
sources = ['lib/maxflow/src/maxflow.cpp', 'lib/maxflow/src/wrapper.cpp', 'lib/maxflow/src/graph.cpp'],
libraries = ['boost_python'],
extra_compile_args = ['-O0'])
setup(name='MedPy',
version='0.1.0', # major.minor.micro
description='Medical image processing in Python',
author='Oskar Maier',
author_email='oskar.maier@googlemail.com',
url='https://github.com/loli/medpy',
license='LICENSE.txt',
keywords='medical image processing dicom itk insight tool kit MRI CT US graph cut max-flow min-cut',
long_description=read('README.txt'),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Other Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
#'Operating System :: MacOS :: MacOS X',
#'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: C++',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Topic :: Scientific/Engineering :: Image Recognition'
],
install_requires=[
"scipy >= 0.9.0",
"numpy >= 1.6.1",
],
extras_require = {
'Nifti/Analyze': ["nibabel >= 1.3.0", "RXP"],
'Dicom': ["pydicom >= 0.9.7"],
'Additional image formats' : ["itk >= 3.16.0"]
},
packages = [
'medpy',
'medpy.core',
'medpy.features',
'medpy.filter',
'medpy.graphcut',
'medpy.io',
'medpy.itkvtk',
'medpy.itkvtk.filter',
'medpy.itkvtk.utilities',
'medpy.metric',
'medpy.occlusion',
'medpy.utilities'
],
scripts=[
'bin/medpy_anisotropic_diffusion.py',
'bin/medpy_apparent_diffusion_coefficient.py',
'bin/medpy_check_marker_intersection.py',
'bin/medpy_convert.py',
'bin/medpy_count_labels.py',
'bin/medpy_create_empty_volume_by_example.py',
'bin/medpy_dicom_slices_to_volume.py',
'bin/medpy_dicom_to_4D.py',
'bin/medpy_diff.py',
'bin/medpy_evaluate_miccai2007.py',
'bin/medpy_extract_min_max.py',
'bin/medpy_extract_sub_volume_auto.py',
'bin/medpy_extract_sub_volume_by_example.py',
'bin/medpy_extract_sub_volume.py',
'bin/medpy_gradient.py',
'bin/medpy_graphcut_label.py',
'bin/medpy_graphcut_label_bgreduced.py',
'bin/medpy_graphcut_label_w_regional.py',
'bin/medpy_graphcut_label_wsplit.py',
'bin/medpy_graphcut_voxel.py',
'bin/medpy_grid.py',
'bin/medpy_info.py',
'bin/medpy_intensity_range_standardization.py',
'bin/medpy_itk_gradient.py',
'bin/medpy_itk_smoothing.py',
'bin/medpy_itk_watershed.py',
'bin/medpy_join_xd_to_xplus1d.py',
'bin/medpy_merge.py',
'bin/medpy_morphology.py',
'bin/medpy_occlusion.py',
'bin/medpy_reduce.py',
'bin/medpy_resample.py',
'bin/medpy_reslice_3d_to_4d.py',
'bin/medpy_set_pixel_spacing.py',
'bin/medpy_shrink_image.py',
'bin/medpy_split_xd_to_xminus1d.py',
'bin/medpy_stack_sub_volumes.py',
'bin/medpy_superimposition.py',
'bin/medpy_swap_dimensions.py',
'bin/medpy_zoom_image.py'
],
ext_modules = [maxflow],
)
|
gpl-3.0
| -4,711,353,523,098,394,000
| 33.31405
| 114
| 0.589355
| false
| 3.181609
| false
| false
| false
|
CARPEM/GalaxyDocker
|
data-manager-hegp/analysisManager/analysismanager/STARTUP_Add_Workflows_Information.py
|
1
|
8085
|
import os
import sys
import json
from datamanagerpkg import ProtonCommunication_data_manager
from datamanagerpkg import GalaxyCommunication_data_manager
from sequencer.models import Experiments, GalaxyUsers
from sequencer.models import GalaxyJobs, ExperimentRawData
from sequencer.models import UserCommonJobs,Supportedfiles
from sequencer.models import Workflows,WorkflowsTools
##########################
#URL SEQUENCER
##########################
from GlobalVariables import sequencer_base_url
from GlobalVariables import sequencer_user
from GlobalVariables import sequencer_password
from GlobalVariables import sequencer_severName
from GlobalVariables import sequencer_ExperimentLimit
from GlobalVariables import toolsInformation
##########################
#URL GALAXY
##########################
from GlobalVariables import galaxy_base_url
from GlobalVariables import apiKey
##########################
#NAs DIr folder
##########################
from GlobalVariables import nasInput
from GlobalVariables import CNVfolderName
from GlobalVariables import plasmaFolderName
from GlobalVariables import nasResults
from GlobalVariables import workflowPath
##########################
#SMTP folder
##########################
from GlobalVariables import smtpServerAphp
from GlobalVariables import smtpPortServer
from GlobalVariables import fromAddrOfficial
from sequencer.views import getDataPath
from pprint import pprint
def uploadAWorkflowToDatabase(pathToWorkflow):
with open(pathToWorkflow) as data_file:
data = json.load(data_file)
pprint(data)
#now I have the key in order
stepkey=data['steps'].keys()
stepkey = [int(x) for x in stepkey]
stepkey.sort()
#create a workflow object
#~ u'annotation': u'plasma workflow to generates all the data',u'name': u'Plasma_mutation',
tryexp = None
try:
tryexp = Workflows.objects.get(name=str(data['name']))
except Workflows.DoesNotExist:
tryexp = None
if (tryexp == None):
workflow_local=Workflows(name=str(data['name']),description=str(data['name']))
workflow_local.save()
workflow_local = Workflows.objects.get(name=str(data['name']))
for step in stepkey:
if data['steps'][str(step)]['tool_id']!=None:
#create a tool
print("find a Tool to add, try to add this new tool to the database")
print(str(data['steps'][str(step)]['tool_id']))
try:
tryexp = WorkflowsTools.objects.get(primary_name=str(data['steps'][str(step)]['tool_id']+"_"+data['steps'][str(step)]['tool_version']+".json"))
except WorkflowsTools.DoesNotExist:
tryexp = None
#~ if tryexp == None:
print("tool found was not added to the DB. We Add now this new tool")
newtool=WorkflowsTools(primary_name=str(data['steps'][str(step)]['tool_id']+"_"+data['steps'][str(step)]['tool_version']+".json"),
name=str(data['steps'][str(step)]['tool_id']),
version=str(data['steps'][str(step)]['tool_version']))
newtool.save()
print("Add the tool definition to the Workflow and link it to the current workflow.")
workflow_local.tools_list.add(newtool)
workflow_local.save()
print("Name of the json file where the tool is define:" +data['steps'][str(step)]['tool_id']+"_"+data['steps'][str(step)]['tool_version']+".json")
#create a tool
with open(toolsInformation+data['steps'][str(step)]['tool_id']+"_"+data['steps'][str(step)]['tool_version']+".json") as data_file_tool:
tool = json.load(data_file_tool)
#~ print(tool['function'][0])
print("#######################input")
#~ print(tool['function'][0]['input'])
for dataInput in tool['function'][0]['input'] :
try:
tryexp = Supportedfiles.objects.get(dataDescription=str(dataInput['dataDescription']))
except Supportedfiles.DoesNotExist:
tryexp = None
newfile=Supportedfiles(dataHandle=str(dataInput['dataHandle']),dataDescription=str(dataInput['dataDescription']),dataFormatEdamOntology=str(dataInput['format'][0]['uri']))
newfile.save()
newtool.inputlist.add(newfile)
newtool.save()
#~ print("#######################dataInpty")
print("#######################output")
                # iterate over the tool outputs; assumes the tool JSON exposes an 'output' list alongside 'input'
                for dataInput in tool['function'][0]['output'] :
try:
tryexp = Supportedfiles.objects.get(dataDescription=str(dataInput['dataDescription']))
except Supportedfiles.DoesNotExist:
tryexp = None
#~ if tryexp == None:
newfile=Supportedfiles(dataHandle=str(dataInput['dataHandle']),dataDescription=str(dataInput['dataDescription']),dataFormatEdamOntology=str(dataInput['format'][0]['uri']) )
newfile.save()
newtool.outputlist.add(newfile)
newtool.save()
def AddaWorkflowTool(this_tool):
try:
tryexp = WorkflowsTools.objects.get(primary_name=str(this_tool[0]['id']+"_"+this_tool[0]['version']+".json"))
except WorkflowsTools.DoesNotExist:
tryexp = None
print("tool found was not added to the DB. We Add now this new tool")
newtool=WorkflowsTools(primary_name=str(this_tool[0]['id']+"_"+this_tool[0]['version']+".json"),
name=str(this_tool[0]['id']),
version=str(this_tool[0]['version']))
newtool.save()
print("Add the tool definition to the Workflow and link it to the current workflow.")
print("Name of the json file where the tool is define:" +str(this_tool[0]['id']+"_"+this_tool[0]['version']+".json"))
#create a tool
with open(toolsInformation+str(this_tool[0]['id']+"_"+this_tool[0]['version']+".json")) as data_file_tool:
tool = json.load(data_file_tool)
print("#######################input")
for dataInput in tool['function'][0]['input'] :
try:
tryexp = Supportedfiles.objects.get(dataDescription=str(dataInput['dataDescription']))
except Supportedfiles.DoesNotExist:
tryexp = None
newfile=Supportedfiles(dataHandle=str(dataInput['dataHandle']),dataDescription=str(dataInput['dataDescription']),dataFormatEdamOntology=str(dataInput['format'][0]['uri']))
newfile.save()
newtool.inputlist.add(newfile)
newtool.save()
#~ print("#######################dataInpty")
print("#######################output")
        # iterate over the tool outputs; assumes the tool JSON exposes an 'output' list alongside 'input'
        for dataInput in tool['function'][0]['output'] :
try:
tryexp = Supportedfiles.objects.get(dataDescription=str(dataInput['dataDescription']))
except Supportedfiles.DoesNotExist:
tryexp = None
newfile=Supportedfiles(dataHandle=str(dataInput['dataHandle']),dataDescription=str(dataInput['dataDescription']),dataFormatEdamOntology=str(dataInput['format'][0]['uri']) )
newfile.save()
newtool.outputlist.add(newfile)
newtool.save()
if __name__ == "__main__":
print("#######################")
print("#######################")
pathTosamtools='/nas_Dir/workflow/Galaxy-Workflow-demo_samtools.ga'
print("Upload a specific workflow to the database : demo_samtools")
uploadAWorkflowToDatabase(pathTosamtools)
print("#######################")
print("#######################")
pathToWorkflow='/nas_Dir/workflow/Galaxy-Workflow-Plasma_mutation.ga'
print("Upload a specific workflow to the database : Plasma_mutation")
uploadAWorkflowToDatabase(pathToWorkflow)
print("JOB DONE")
|
mit
| 5,939,821,248,430,112,000
| 48.601227
| 196
| 0.590847
| false
| 4.239643
| false
| false
| false
|
MMaus/mutils
|
models/slip_doPri-old.py
|
1
|
22110
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 14:46:14 2011
@author: moritz
"""
# This file implements the SLIP model
from scipy.integrate.vode import dvode, zvode
from scipy.integrate import odeint, ode
from pylab import (zeros, sin, cos, sqrt, array, linspace,
arange, ones_like, hstack, vstack, argmin,
find, interp,
sign)
from copy import deepcopy
def vHop(t,y,params):
"""
test function
'y': x horizontal position
y vertical position
vx horizontal velocity
vy vertical velocity
'params': damping horizontal
ground elasticity / mass
"""
res = zeros(4)
dx = params[0]
ky = params[1]
res[0] = y[2]
res[1] = y[3]
if y[1] < 0:
res[2] = -dx*y[2]
res[3] = -ky*y[1] - 9.81
else:
res[3] = -9.81
return res
def dk_dL(L0,k,L,dE):
"""
computes the required stiffness change and rest length change
to inject energy without changing the spring force
"""
dL = 2.*dE/(k*(L0 - L))
dk = k*((L-L0)/(L-(L0+dL)) - 1.)
return dk,dL
class SimFailError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def SLIP_step(IC,SLIP_params,sim_params = []):
"""
simulates the SLIP
IC: initial state vector, containing y0, vx0
(x0 is assumed to be 0; vy0 = 0 (apex);
also ground level = 0)
SLIP_params:
k
L0
m
alpha
dE: energy change in "midstance" by changing k and L0
g: gravity (negative! should be ~ -9.81 for SI)
"""
alpha = SLIP_params['alpha']
k = SLIP_params['k']
L0 = SLIP_params['L0']
dE = SLIP_params['dE']
g = SLIP_params['g']
m = SLIP_params['m']
y0 = IC[0]
vx0 = IC[1]
if g >= 0:
raise ValueError, "gravity points into wrong direction!"
# concatenate state vector of four elements:
# (1) time to touchdown
# (2) time to vy = 0
# (3) time to takeoff
# (4) time to apex
# (1) and (4) are analytically described
y_land = L0*sin(alpha)
if y0 < y_land:
raise ValueError, "invalid starting condition"
# before starting, define the model:
def SLIP_ode(y,t,params):
"""
defines the ODE of the SLIP, under stance condition
state:
[x
y
vx
vy]
params:
{'L0' : leg rest length
'x0' : leg touchdown position
'k' : spring stiffness
'm' : mass}
"""
dy0 = y[2]
dy1 = y[3]
L = sqrt((y[0]-params['xF'])**2 + y[1]**2)
F = params['k']*(params['L0']-L)
Fx = F*(y[0]-params['xF'])/L
Fy = F*y[1]/L
dy2 = Fx/m
dy3 = Fy/m + params['g']
return hstack([dy0,dy1,dy2,dy3])
def sim_until(IC, params, stop_fcn, tmax = 2.):
"""
        simulates the SLIP_ode until stop_fcn has a zero-crossing
includes a refinement of the time at this instant
stop_fcn must be a function of the system state, e.g.
stop_fcn(IC) must exist
this function is especially adapted to the SLIP state,
so it uses dot(x1) = x3, dot(x2) = x4
tmax: maximal simulation time [s]
"""
init_sign = sign(stop_fcn(IC))
#1st: evaluate a certain fraction
tvec_0 = .0001*arange(50)
sim_results = []
sim_tvecs = []
newIC = IC
sim_results.append (odeint(SLIP_ode,newIC,tvec_0,
args=(params,),rtol=1e-12))
sim_tvecs.append(tvec_0)
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
t_tot = 0.
while min(check_vec) > 0:
newIC = sim_results[-1][-1,:]
sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
args=(params,),rtol=1e-12))
sim_tvecs.append(tvec_0)
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
t_tot += tvec_0[-1]
# time exceeded or ground hit
            if t_tot > tmax or min(sim_results[-1][:,1]) < 0:
raise SimFailError, "simulation failed"
# now: zero-crossing detected
# -> refine!
minidx = find(array(check_vec) < 0)[0]
if minidx == 0:
# this should not happen because the first value in
# check_vec should be BEFORE the zero_crossing by
# construction
raise ValueError, "ERROR: this should not happen!"
# refine simulation by factor 50, but only for two
# adjacent original time frames
newIC = sim_results[-1][minidx-1,:]
sim_results[-1] = sim_results[-1][:minidx,:]
sim_tvecs[-1] = sim_tvecs[-1][:minidx]
# avoid that last position can be the zero-crossing
n_refine = 10000
tvec_0 = linspace(tvec_0[0], tvec_0[1] + 2./n_refine, n_refine+2)
sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
args=(params,),rtol=1e-12))
sim_tvecs.append(tvec_0)
# linearly interpolate to zero
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
minidx = find(array(check_vec) < 0)[0]
if minidx == 0:
# this should not happen because the first value in
# check_vec should be BEFORE the zero_crossing by
# construction
raise ValueError, "ERROR: this should not happen! (2)"
# compute location of zero-crossing
y0 = sim_results[-1][minidx-1,:]
y1 = sim_results[-1][minidx,:]
fcn0 = stop_fcn(y0)
fcn1 = stop_fcn(y1)
t0 = tvec_0[minidx-1]
t1 = tvec_0[minidx]
t_zero = t0 - (t1-t0)*fcn0/(fcn1 - fcn0)
# cut last simulation result and replace last values
# by interpolated values
sim_results[-1] = sim_results[-1][:minidx+1,:]
sim_tvecs[-1] = sim_tvecs[-1][:minidx+1]
for coord in arange(sim_results[-1].shape[1]):
sim_results[-1][-1,coord] = interp(
t_zero, [t0,t1],
[sim_results[-1][-2,coord], sim_results[-1][-1,coord]] )
sim_tvecs[-1][-1] = t_zero
#newIC = sim_results[-1][minidx-1,:]
#sim_results[-1] = sim_results[-1][:minidx,:]
#sim_tvecs[-1] = sim_tvecs[-1][:minidx]
#tvec_0 = linspace(tvec_0[0],tvec_0[1],100)
#sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
# args=(params,),rtol=1e-9))
#sim_tvecs.append(tvec_0)
# concatenate lists
sim_data = vstack( [x[:-1,:] for x in sim_results[:-1] if x.shape[0] > 1]
+ [sim_results[-1],])
sim_time = [sim_tvecs[0],]
for idx in arange(1,len(sim_tvecs)):
sim_time.append(sim_tvecs[idx] + sim_time[-1][-1])
sim_time = hstack([x[:-1] for x in sim_time[:-1]] + [sim_time[-1],])
return sim_data, sim_time
# Section 1: time to touchdown
# TODO: make sampling frequency regular
t_flight1 = sqrt(-2.*(y0 - y_land)/g)
#t_flight = sqrt()
tvec_flight1 = .01*arange(t_flight1*100.)
vy_flight1 = tvec_flight1*g
y_flight1 = y0 + .5*g*(tvec_flight1**2)
x_flight1 = vx0*tvec_flight1
vx_flight1 = vx0*ones_like(tvec_flight1)
# Section 2: time to vy = 0
# approach: calculate forward -> estimate interval of
# zero position of vy -> refine simulation in that interval
# until a point with vy sufficiently close to zero is in the
# resulting vector
params = {'L0' : L0,
'xF' : t_flight1*vx0 + L0*cos(alpha),
'k' : k,
'm' : m,
'g' : g}
IC = array([t_flight1*vx0, y_land, vx0, t_flight1*g])
# initial guess: L0*cos(alpha)/vx0
#t_sim1 = L0*cos(alpha)/vx0
# TODO: implement sim_fail check!
sim_fail = False
try:
sim_phase2, t_phase2 = sim_until(IC,params,lambda x: x[3])
t_phase2 += t_flight1
except SimFailError:
print 'simulation aborted (phase 2)\n'
sim_fail = True
# Phase 3:
if not sim_fail:
L = sqrt(sim_phase2[-1,1]**2 + (sim_phase2[-1,0]-params['xF'])**2 )
dk, dL = dk_dL(L0,k,L,dE)
params2 = deepcopy(params)
params2['k'] += dk
params2['L0'] += dL
IC = sim_phase2[-1,:]
compression = (lambda x: sqrt(
(x[0]-params2['xF'])**2 + x[1]**2)
- params2['L0'] )
#print ('L:', L, 'dk', dk, 'dL', dL, 'dE', dE, '\ncompression:', compression(IC),
# 'IC', IC)
try:
sim_phase3, t_phase3 = sim_until(IC, params2,compression)
sim_phase3 = sim_phase3[1:,:]
t_phase3 = t_phase3[1:] + t_phase2[-1]
except SimFailError:
print 'simulation aborted (phase 3)\n'
sim_fail = True
# Phase 4:
if not sim_fail:
# time to apex
# TODO: make sampling frequency regular
vy_liftoff = sim_phase3[-1,3]
t_flight2 = -1.*vy_liftoff/g
#t_flight = sqrt()
tvec_flight2 = arange(t_flight2,0,-.001)[::-1]
vy_flight2 = tvec_flight2*g + vy_liftoff
y_flight2 = (sim_phase3[-1,1] + vy_liftoff*tvec_flight2
+ .5*g*(tvec_flight2**2) )
x_flight2 = sim_phase3[-1,0] + sim_phase3[-1,2]*tvec_flight2
vx_flight2 = sim_phase3[-1,2]*ones_like(tvec_flight2)
tvec_flight2 += t_phase3[-1]
# todo: return data until error
if sim_fail:
return { 't': None,
'x': None,
'y': None,
'vx': None,
'vy': None,
'sim_fail': sim_fail,
'dk': None,
'dL': None
}
# finally: concatenate phases
x_final = hstack([x_flight1, sim_phase2[:,0], sim_phase3[:,0], x_flight2 ])
y_final = hstack([y_flight1, sim_phase2[:,1], sim_phase3[:,1], y_flight2 ])
vx_final= hstack([vx_flight1, sim_phase2[:,2], sim_phase3[:,2], vx_flight2])
vy_final= hstack([vy_flight1, sim_phase2[:,3], sim_phase3[:,3], vy_flight2])
tvec_final = hstack([tvec_flight1, t_phase2, t_phase3, tvec_flight2 ])
return {'t': tvec_final,
'x': x_final,
'y': y_final,
'vx': vx_final,
'vy': vy_final,
'sim_fail': sim_fail,
'dk': dk,
'dL': dL,
#'sim_res':sim_res,
#'sim_phase2': sim_phase2_cut,
#'t_phase2': t_phase2_cut
}
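# Minimal usage sketch for SLIP_step (illustrative only; the parameter values
# below are assumptions, not taken from any experiment or publication):
#   pars = {'k': 20000., 'L0': 1., 'alpha': 68.*3.14159/180.,
#           'm': 80., 'dE': 0., 'g': -9.81}
#   res = SLIP_step([1.0, 5.], pars)
#   if not res['sim_fail']:
#       print res['y'][-1]    # apex height after one step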
def SLIP_step3D(IC,SLIP_params,sim_params = []):
"""
simulates the SLIP in 3D
IC: initial state vector, containing y0, vx0, vz0
(x0 is assumed to be 0;
z0 is assumed to be 0;
vy0 = 0 (apex);
also ground level = 0)
SLIP_params:
k
L0
m
alpha : "original" angle of attack
beta : lateral leg turn
foot position relative to CoM in flight:
xF = vx0*t_flight + L0*cos(alpha)*cos(beta)
yF = -L0*sin(alpha)
zF = vz0*t_flight - L0*cos(alpha)*sin(beta)
dE: energy change in "midstance" by changing k and L0
g: gravity (negative! should be ~ -9.81 for SI)
"""
alpha = SLIP_params['alpha']
beta = SLIP_params['beta']
k = SLIP_params['k']
L0 = SLIP_params['L0']
dE = SLIP_params['dE']
g = SLIP_params['g']
m = SLIP_params['m']
y0 = IC[0]
vx0 = IC[1]
vz0 = IC[2]
if g >= 0:
raise ValueError, "gravity points into wrong direction!"
# concatenate state vector of four elements:
# (1) time to touchdown
# (2) time to vy = 0
# (3) time to takeoff
# (4) time to apex
# (1) and (4) are analytically described
y_land = L0*sin(alpha)
if y0 < y_land:
raise ValueError, "invalid starting condition"
# before starting, define the model:
def SLIP_ode(y,t,params):
"""
defines the ODE of the SLIP, under stance condition
state:
[x
y
z
vx
vy
vz]
params:
{'L0' : leg rest length
'x0' : leg touchdown position
'k' : spring stiffness
'm' : mass
'xF' : anterior foot position
'zF' : lateral foot position }
"""
dy0 = y[3]
dy1 = y[4]
dy2 = y[5]
L = sqrt((y[0]-params['xF'])**2 + y[1]**2 + (y[2]-params['zF'])**2)
F = params['k']*(params['L0']-L)
Fx = F*(y[0]-params['xF'])/L
Fy = F*y[1]/L
Fz = F*(y[2]-params['zF'])/L
dy3 = Fx/m
dy4 = Fy/m + params['g']
dy5 = Fz/m
return hstack([dy0,dy1,dy2,dy3,dy4,dy5])
def sim_until(IC, params, stop_fcn, tmax = 2.):
"""
        simulates the SLIP_ode until stop_fcn has a zero-crossing
includes a refinement of the time at this instant
stop_fcn must be a function of the system state, e.g.
stop_fcn(IC) must exist
this function is especially adapted to the SLIP state,
so it uses dot(x1) = x3, dot(x2) = x4
tmax: maximal simulation time [s]
"""
init_sign = sign(stop_fcn(IC))
#1st: evaluate a certain fraction
tvec_0 = .001*arange(50)
sim_results = []
sim_tvecs = []
newIC = IC
sim_results.append (odeint(SLIP_ode,newIC,tvec_0,
args=(params,),rtol=1e-9))
sim_tvecs.append(tvec_0)
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
t_tot = 0.
while min(check_vec) > 0:
newIC = sim_results[-1][-1,:]
sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
args=(params,),rtol=1e-9))
sim_tvecs.append(tvec_0)
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
t_tot += tvec_0[-1]
# time exceeded or ground hit
            if t_tot > tmax or min(sim_results[-1][:,1]) < 0:
raise SimFailError, "simulation failed"
# now: zero-crossing detected
# -> refine!
minidx = find(array(check_vec) < 0)[0]
if minidx == 0:
# this should not happen because the first value in
# check_vec should be BEFORE the zero_crossing by
# construction
raise ValueError, "ERROR: this should not happen!"
# refine simulation by factor 50, but only for two
# adjacent original time frames
newIC = sim_results[-1][minidx-1,:]
sim_results[-1] = sim_results[-1][:minidx,:]
sim_tvecs[-1] = sim_tvecs[-1][:minidx]
# avoid that last position can be the zero-crossing
n_refine = 100
tvec_0 = linspace(tvec_0[0], tvec_0[1] + 2./n_refine, n_refine+2)
sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
args=(params,),rtol=1e-9))
sim_tvecs.append(tvec_0)
# linearly interpolate to zero
check_vec = [init_sign*stop_fcn(x) for x in sim_results[-1]]
minidx = find(array(check_vec) < 0)[0]
if minidx == 0:
# this should not happen because the first value in
# check_vec should be BEFORE the zero_crossing by
# construction
raise ValueError, "ERROR: this should not happen! (2)"
# compute location of zero-crossing
y0 = sim_results[-1][minidx-1,:]
y1 = sim_results[-1][minidx,:]
fcn0 = stop_fcn(y0)
fcn1 = stop_fcn(y1)
t0 = tvec_0[minidx-1]
t1 = tvec_0[minidx]
t_zero = t0 - (t1-t0)*fcn0/(fcn1 - fcn0)
# cut last simulation result and replace last values
# by interpolated values
sim_results[-1] = sim_results[-1][:minidx+1,:]
sim_tvecs[-1] = sim_tvecs[-1][:minidx+1]
for coord in arange(sim_results[-1].shape[1]):
sim_results[-1][-1,coord] = interp(
t_zero, [t0,t1],
[sim_results[-1][-2,coord], sim_results[-1][-1,coord]] )
sim_tvecs[-1][-1] = t_zero
#newIC = sim_results[-1][minidx-1,:]
#sim_results[-1] = sim_results[-1][:minidx,:]
#sim_tvecs[-1] = sim_tvecs[-1][:minidx]
#tvec_0 = linspace(tvec_0[0],tvec_0[1],100)
#sim_results.append ( odeint(SLIP_ode, newIC, tvec_0,
# args=(params,),rtol=1e-9))
#sim_tvecs.append(tvec_0)
# concatenate lists
sim_data = vstack( [x[:-1,:] for x in sim_results[:-1] if x.shape[0] > 1]
+ [sim_results[-1],])
sim_time = [sim_tvecs[0],]
for idx in arange(1,len(sim_tvecs)):
sim_time.append(sim_tvecs[idx] + sim_time[-1][-1])
sim_time = hstack([x[:-1] for x in sim_time[:-1]] + [sim_time[-1],])
return sim_data, sim_time
# Section 1: time to touchdown
# TODO: make sampling frequency regular
t_flight1 = sqrt(-2.*(y0 - y_land)/g)
#t_flight = sqrt()
tvec_flight1 = .01*arange(t_flight1*100.)
vy_flight1 = tvec_flight1*g
y_flight1 = y0 + .5*g*(tvec_flight1**2)
x_flight1 = vx0*tvec_flight1
vx_flight1 = vx0*ones_like(tvec_flight1)
z_flight1 = vz0*tvec_flight1
vz_flight1 = vz0*ones_like(tvec_flight1)
x_TD = vx0*t_flight1
z_TD = vz0*t_flight1
# Section 2: time to vy = 0
# approach: calculate forward -> estimate interval of
# zero position of vy -> refine simulation in that interval
# until a point with vy sufficiently close to zero is in the
# resulting vector
params = {'L0' : L0,
'xF' : t_flight1*vx0 + L0*cos(alpha)*cos(beta),
'zF' : t_flight1*vz0 - L0*cos(alpha)*sin(beta),
'k' : k,
'm' : m,
'g' : g}
IC = array([x_TD, y_land, z_TD, vx0, t_flight1*g, vz0])
# initial guess: L0*cos(alpha)/vx0
#t_sim1 = L0*cos(alpha)/vx0
# TODO: implement sim_fail check!
sim_fail = False
try:
sim_phase2, t_phase2 = sim_until(IC,params,lambda x: x[4])
t_phase2 += t_flight1
except SimFailError:
print 'simulation aborted (phase 2)\n'
sim_fail = True
# Phase 3:
if not sim_fail:
L = sqrt(sim_phase2[-1,1]**2
+ (sim_phase2[-1,0]-params['xF'])**2
+ (sim_phase2[-1,2]-params['zF'])**2 )
dk, dL = dk_dL(L0,k,L,dE)
params2 = deepcopy(params)
params2['k'] += dk
params2['L0'] += dL
IC = sim_phase2[-1,:]
compression = (lambda x: sqrt(
(x[0]-params2['xF'])**2 + x[1]**2
+(x[2]-params['zF'])**2)
- params2['L0'] )
#print ('L:', L, 'dk', dk, 'dL', dL, 'dE', dE, '\ncompression:', compression(IC),
# 'IC', IC)
try:
sim_phase3, t_phase3 = sim_until(IC, params2,compression)
sim_phase3 = sim_phase3[1:,:]
t_phase3 = t_phase3[1:] + t_phase2[-1]
except SimFailError:
print 'simulation aborted (phase 3)\n'
sim_fail = True
# Phase 4:
if not sim_fail:
# time to apex
# TODO: make sampling frequency regular
vy_liftoff = sim_phase3[-1,4]
#vz_liftoff = sim_phase3[-1,5]
t_flight2 = -1.*vy_liftoff/g
#t_flight = sqrt()
tvec_flight2 = arange(t_flight2,0,-.001)[::-1]
vy_flight2 = tvec_flight2*g + vy_liftoff
y_flight2 = (sim_phase3[-1,1] + vy_liftoff*tvec_flight2
+ .5*g*(tvec_flight2**2) )
x_flight2 = sim_phase3[-1,0] + sim_phase3[-1,3]*tvec_flight2
vx_flight2 = sim_phase3[-1,3]*ones_like(tvec_flight2)
z_flight2 = sim_phase3[-1,2] + sim_phase3[-1,5]*tvec_flight2
vz_flight2 = sim_phase3[-1,5]*ones_like(tvec_flight2)
#print tvec_flight2
tvec_flight2 += t_phase3[-1]
# todo: return data until error
if sim_fail:
return { 't': None,
'x': None,
'y': None,
'z': None,
'vx': None,
'vy': None,
'vz': None,
'sim_fail': sim_fail,
'dk': None,
'dL': None
}
# finally: concatenate phases
x_final = hstack([x_flight1, sim_phase2[:,0], sim_phase3[:,0], x_flight2 ])
y_final = hstack([y_flight1, sim_phase2[:,1], sim_phase3[:,1], y_flight2 ])
z_final = hstack([z_flight1, sim_phase2[:,2], sim_phase3[:,2], z_flight2 ])
vx_final= hstack([vx_flight1, sim_phase2[:,3], sim_phase3[:,3], vx_flight2])
vy_final= hstack([vy_flight1, sim_phase2[:,4], sim_phase3[:,4], vy_flight2])
vz_final= hstack([vz_flight1, sim_phase2[:,5], sim_phase3[:,5], vz_flight2])
tvec_final = hstack([tvec_flight1, t_phase2, t_phase3, tvec_flight2 ])
return {'t': tvec_final,
'x': x_final,
'y': y_final,
'z': z_final,
'vx': vx_final,
'vy': vy_final,
'vz': vz_final,
'sim_fail': sim_fail,
'dk': dk,
'dL': dL,
#'sim_res':sim_res,
#'sim_phase2': sim_phase2_cut,
#'t_phase2': t_phase2_cut
}
|
gpl-2.0
| -5,720,251,461,055,179,000
| 33.332298
| 89
| 0.501131
| false
| 3.167168
| false
| false
| false
|
rasbt/protein-science
|
tutorials/substructure_alignment/Scripts/multimol2_rmsd_align.py
|
1
|
1043
|
# Sebastian Raschka 2014
#
# Aligns multiple mol2 files to a reference mol2 file and
# writes the aligned targets to the hard drive.
#
# USAGE from command shell command line:
# %> python3 multimol2_rmsd_align.py input_dir/ output_dir/ ref.mol2 smiles_string
import subprocess
import os
import sys
RMSD_TOOL = "/soft/linux64/.../oechem-utilities/rmsd" # put the correct path to the RMSD bin here
try:
assert len(sys.argv) == 5
INPUT_DIR = sys.argv[1]
TARGET_DIR = sys.argv[2]
REFERENCE_MOL = sys.argv[3]
SMILES = sys.argv[4]
if not os.path.exists(TARGET_DIR):
os.mkdir(TARGET_DIR)
for i in [m for m in os.listdir(INPUT_DIR) if m.endswith('.mol2')]:
in_mol = INPUT_DIR + '/' + i
out_mol = TARGET_DIR + '/' + i
subprocess.call("{} -in {} -ref {} -overlay -out {} -smarts '{}'".format(
RMSD_TOOL, in_mol, REFERENCE_MOL, out_mol, SMILES), shell=True)
except:
print("ERROR\nUSAGE: python3 multimol2_rmsd_align.py input_dir/ output_dir/ ref.mol2 smiles_string")
|
gpl-3.0
| 2,310,414,354,662,612,000
| 30.606061
| 104
| 0.651007
| false
| 2.929775
| false
| false
| false
|
qxsch/QXSConsolas
|
examples/CopyThat/copyThat/CTSplunk/Test.py
|
1
|
2084
|
#!/usr/bin/python
import logging, os
from QXSConsolas.Cli import CliApp
from QXSConsolas.Command import SSH, call
@CliApp(
Name = "Tests something",
Description = "A very nice description cannot live without the text",
Opts = [
{ "argument": "--name:", "default": None, "multiple": True, "description": "den namen eingeben", "valuename": "NAME" },
{ "argument": "--verbose::", "default": 0, "description": "schwatzen?", "valuename": "VERBOSITY" },
{ "argument": "-v::", "default": 0, "references": "--verbose::", "valuename": "VERBOSITY" },
{ "argument": "--name=", "default": None, "description": "", "valuename": "NAME"}
]
)
def Test(app):
print("Hello " + os.getlogin() + " - (Real user even after sudo / su)")
print("Options:")
print(app.options)
print("Arguments:")
print(app.arguments)
print("System Configuration:")
print(app.configuration)
if not app.data is None:
print("Data:")
print(app.data.dump())
# iterate the configuration keys
s = ""
for key in app.data:
s = s + " " + str(app.data[key])
print(s.strip())
print("")
# injected logger
app.logger.warning("hello from the injected loggger")
    # Explicitly using the root logger always logs to the console
logging.debug("This is an info of the root logger")
# Logging from myapp.lib
myapp_cli_logger = logging.getLogger('myapp.cli')
myapp_cli_logger.info("This is an info from myapp.cli") # Not recorded
myapp_cli_logger.warning("This is a warning from myapp.cli") # -> sample.log
myapp_cli_logger.error("This is an error from myapp.cli") # -> sample.log
myapp_cli_logger.critical("This is a critical from myapp.cli") # -> sample.log
print(call(["echo", ["hi", "$x", "a"]], shell = True))
print(call(["./test.sh", "QXS"], shell = True))
print(call(["./test.sh", "QXS"], shell = False))
print(1/0)
|
gpl-3.0
| -3,096,518,374,377,826,000
| 41.530612
| 136
| 0.573417
| false
| 3.526227
| false
| false
| false
|
bmazin/ARCONS-pipeline
|
flatcal/illuminationCal.py
|
1
|
16204
|
#!/bin/python
"""
Author: Matt Strader    Date: August 19, 2012
Opens a twilight flat h5 file and makes the spectrum of each pixel,
then takes the median of each energy bin over all pixels.
A factor is then calculated for each energy bin in each pixel as the ratio
of its twilight count rate to the median count rate.
The factors are written out to an h5 file.
"""
import sys,os
import tables
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from functools import partial
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
from util.popup import PopUp,plotArray,pop
from util.ObsFile import ObsFile
from util.readDict import readDict
from util.FileName import FileName
from util.utils import nearestNRobustMeanFilter
import hotpix.hotPixels as hp
from astropy.stats.funcs import sigma_clip
def onscroll_cbar(fig, event):
if event.inaxes is fig.cbar.ax:
increment=0.05
currentClim = fig.cbar.mappable.get_clim()
if event.button == 'up':
newClim = (currentClim[0],(1.+increment)*currentClim[1])
if event.button == 'down':
newClim = (currentClim[0],(1.-increment)*currentClim[1])
fig.cbar.mappable.set_clim(newClim)
fig.canvas.draw()
def onclick_cbar(fig,event):
if event.inaxes is fig.cbar.ax:
if event.button == 1:
fig.oldClim = fig.cbar.mappable.get_clim()
fig.cbar.mappable.set_clim(fig.oldClim[0],event.ydata*fig.oldClim[1])
fig.canvas.draw()
if event.button == 3:
fig.oldClim = fig.cbar.mappable.get_clim()
fig.cbar.mappable.set_clim(fig.oldClim[0],1/event.ydata*fig.oldClim[1])
fig.canvas.draw()
class FlatCal:
def __init__(self,paramFile):
"""
opens flat file,sets wavelength binnning parameters, and calculates flat factors for the file
"""
self.params = readDict()
self.params.read_from_file(paramFile)
run = self.params['run']
sunsetDate = self.params['sunsetDate']
flatTstamp = self.params['flatTstamp']
wvlSunsetDate = self.params['wvlSunsetDate']
wvlTimestamp = self.params['wvlTimestamp']
obsSequence = self.params['obsSequence']
needTimeAdjust = self.params['needTimeAdjust']
self.deadtime = self.params['deadtime'] #from firmware pulse detection
self.intTime = self.params['intTime']
self.timeSpacingCut = self.params['timeSpacingCut']
self.nSigmaClip = self.params['nSigmaClip']
self.nNearest = self.params['nNearest']
obsFNs = [FileName(run=run,date=sunsetDate,tstamp=obsTstamp) for obsTstamp in obsSequence]
self.obsFileNames = [fn.obs() for fn in obsFNs]
self.obsList = [ObsFile(obsFileName) for obsFileName in self.obsFileNames]
timeMaskFileNames = [fn.timeMask() for fn in obsFNs]
timeAdjustFileName = FileName(run=run).timeAdjustments()
print len(self.obsFileNames), 'flat files to co-add'
self.flatCalFileName = FileName(run=run,date=sunsetDate,tstamp=flatTstamp).illumSoln()
if wvlSunsetDate != '':
wvlCalFileName = FileName(run=run,date=wvlSunsetDate,tstamp=wvlTimestamp).calSoln()
for iObs,obs in enumerate(self.obsList):
if wvlSunsetDate != '':
obs.loadWvlCalFile(wvlCalFileName)
else:
obs.loadBestWvlCalFile()
if needTimeAdjust:
obs.loadTimeAdjustmentFile(timeAdjustFileName)
timeMaskFileName = timeMaskFileNames[iObs]
print timeMaskFileName
#Temporary step, remove old hotpix file
#if os.path.exists(timeMaskFileName):
# os.remove(timeMaskFileName)
if not os.path.exists(timeMaskFileName):
print 'Running hotpix for ',obs
hp.findHotPixels(self.obsFileNames[iObs],timeMaskFileName,fwhm=np.inf,useLocalStdDev=True)
print "Flux file pixel mask saved to %s"%(timeMaskFileName)
obs.loadHotPixCalFile(timeMaskFileName)
self.wvlFlags = self.obsList[0].wvlFlagTable
self.nRow = self.obsList[0].nRow
self.nCol = self.obsList[0].nCol
print 'files opened'
#self.wvlBinWidth = params['wvlBinWidth'] #angstroms
self.energyBinWidth = self.params['energyBinWidth'] #eV
self.wvlStart = self.params['wvlStart'] #angstroms
self.wvlStop = self.params['wvlStop'] #angstroms
self.wvlBinEdges = ObsFile.makeWvlBins(self.energyBinWidth,self.wvlStart,self.wvlStop)
self.intTime = self.params['intTime']
self.countRateCutoff = self.params['countRateCutoff']
self.fractionOfChunksToTrim = self.params['fractionOfChunksToTrim']
#wvlBinEdges includes both lower and upper limits, so number of bins is 1 less than number of edges
self.nWvlBins = len(self.wvlBinEdges)-1
#print 'wrote to',self.flatCalFileName
def __del__(self):
pass
def loadFlatSpectra(self):
self.spectralCubes = []#each element will be the spectral cube for a time chunk
self.cubeEffIntTimes = []
self.frames = []
for iObs,obs in enumerate(self.obsList):
print 'obs',iObs
for firstSec in range(0,obs.getFromHeader('exptime'),self.intTime):
print 'sec',firstSec
cubeDict = obs.getSpectralCube(firstSec=firstSec,integrationTime=self.intTime,weighted=False,wvlBinEdges = self.wvlBinEdges,timeSpacingCut = self.timeSpacingCut)
cube = np.array(cubeDict['cube'],dtype=np.double)
effIntTime = cubeDict['effIntTime']
#add third dimension for broadcasting
effIntTime3d = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
cube /= effIntTime3d
cube[np.isnan(cube)]=0
frame = np.sum(cube,axis=2) #in counts per sec
#correct nonlinearity due to deadtime in firmware
nonlinearFactors = 1. / (1. - frame*self.deadtime)
nonlinearFactors[np.isnan(nonlinearFactors)]=0.
frame = frame * nonlinearFactors
nonlinearFactors = np.reshape(nonlinearFactors,np.shape(nonlinearFactors)+(1,))
cube = cube * nonlinearFactors
self.frames.append(frame)
self.spectralCubes.append(cube)
self.cubeEffIntTimes.append(effIntTime3d)
obs.file.close()
self.spectralCubes = np.array(self.spectralCubes)
self.cubeEffIntTimes = np.array(self.cubeEffIntTimes)
self.countCubes = self.cubeEffIntTimes * self.spectralCubes
self.spectralCubes = self.intTime * self.spectralCubes # in counts
def checkCountRates(self):
medianCountRates = np.array([np.median(frame[frame!=0]) for frame in self.frames])
boolIncludeFrames = medianCountRates <= self.countRateCutoff
#boolIncludeFrames = np.logical_and(boolIncludeFrames,medianCountRates >= 200)
#mask out frames, or cubes from integration time chunks with count rates too high
self.spectralCubes = np.array([cube for cube,boolIncludeFrame in zip(self.spectralCubes,boolIncludeFrames) if boolIncludeFrame==True])
self.frames = [frame for frame,boolIncludeFrame in zip(self.frames,boolIncludeFrames) if boolIncludeFrame==True]
print 'few enough counts in the chunk',zip(medianCountRates,boolIncludeFrames)
def calculateWeights(self):
"""
        finds illumination cal factors by making an image per wavelength bin, smoothing it, and dividing the pixel-wise mean of the smoothed image by the smoothed image
"""
cubeWeightsList = []
self.averageSpectra = []
deltaWeightsList = []
self.totalCube = np.sum(self.spectralCubes,axis=0) #sum all cubes
self.totalFrame = np.sum(self.totalCube,axis=-1)#sum over wvl
weights = []
for iWvl in xrange(self.nWvlBins):
wvlSlice = self.totalCube[:,:,iWvl]
wvlSlice[wvlSlice == 0] = np.nan
nanMask = np.isnan(wvlSlice)
#do a sigma-clipping on the wvlSlice and insert it back in the cube
maskedWvlSlice = np.ma.array(wvlSlice,mask=nanMask)
clippedWvlSlice = sigma_clip(wvlSlice,sig=self.nSigmaClip,iters=None,cenfunc=np.ma.median)
wvlSlice[clippedWvlSlice.mask] = np.nan
self.totalCube[:,:,iWvl] = wvlSlice
#do a smoothing over the slice
smoothedWvlSlice = nearestNRobustMeanFilter(wvlSlice,n=self.nNearest,nSigmaClip=self.nSigmaClip)
wvlIllumWeights = np.mean(smoothedWvlSlice)/smoothedWvlSlice
weights.append(wvlIllumWeights)
self.weights = np.array(weights)
#move the wvl dimension to the end
self.weights = np.swapaxes(self.weights,0,1)
self.weights = np.swapaxes(self.weights,1,2)
self.deltaWeights = np.zeros_like(self.weights)
self.flags = np.zeros_like(self.weights)
def plotWeightsWvlSlices(self,verbose=True):
flatCalPath,flatCalBasename = os.path.split(self.flatCalFileName)
pdfBasename = os.path.splitext(flatCalBasename)[0]+'_wvlSlices.pdf'
pdfFullPath = os.path.join(flatCalPath,pdfBasename)
pp = PdfPages(pdfFullPath)
nPlotsPerRow = 2
nPlotsPerCol = 4
nPlotsPerPage = nPlotsPerRow*nPlotsPerCol
iPlot = 0
if verbose:
print 'plotting weights in wavelength sliced images'
matplotlib.rcParams['font.size'] = 4
wvls = self.wvlBinEdges[0:-1]
cmap = matplotlib.cm.hot
cmap.set_bad('0.15')
for iWvl,wvl in enumerate(wvls):
if verbose:
print 'wvl ',iWvl
if iPlot % nPlotsPerPage == 0:
fig = plt.figure(figsize=(10,10),dpi=100)
ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
ax.set_title(r'Weights %.0f $\AA$'%wvl)
image = self.weights[:,:,iWvl]
handleMatshow = ax.matshow(image,cmap=cmap,origin='lower',vmax=1.5,vmin=.5)
cbar = fig.colorbar(handleMatshow)
if iPlot%nPlotsPerPage == nPlotsPerPage-1:
pp.savefig(fig)
iPlot += 1
if iPlot % nPlotsPerPage == 0:
fig = plt.figure(figsize=(10,10),dpi=100)
ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
ax.set_title(r'Twilight Image %.0f $\AA$'%wvl)
image = self.totalCube[:,:,iWvl]
nSdev = 3.
goodImage = image[np.isfinite(image)]
vmax = np.mean(goodImage)+nSdev*np.std(goodImage)
handleMatshow = ax.matshow(image,cmap=cmap,origin='lower',vmax=vmax)
cbar = fig.colorbar(handleMatshow)
if iPlot%nPlotsPerPage == nPlotsPerPage-1:
pp.savefig(fig)
iPlot += 1
pp.savefig(fig)
pp.close()
def plotWeightsByPixel(self,verbose=True):
flatCalPath,flatCalBasename = os.path.split(self.flatCalFileName)
pdfBasename = os.path.splitext(flatCalBasename)[0]+'.pdf'
pdfFullPath = os.path.join(flatCalPath,pdfBasename)
pp = PdfPages(pdfFullPath)
nPlotsPerRow = 2
nPlotsPerCol = 4
nPlotsPerPage = nPlotsPerRow*nPlotsPerCol
iPlot = 0
if verbose:
print 'plotting weights by pixel at ',pdfFullPath
matplotlib.rcParams['font.size'] = 4
wvls = self.wvlBinEdges[0:-1]
nCubes = len(self.spectralCubes)
for iRow in xrange(self.nRow):
if verbose:
print 'row',iRow
for iCol in xrange(self.nCol):
weights = self.weights[iRow,iCol,:]
deltaWeights = self.deltaWeights[iRow,iCol,:]
if iPlot % nPlotsPerPage == 0:
fig = plt.figure(figsize=(10,10),dpi=100)
ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
ax.set_ylim(.5,2.)
weights = self.weights[iRow,iCol]
ax.errorbar(wvls,weights,yerr=deltaWeights,label='weights',color='k')
ax.set_title('p %d,%d'%(iRow,iCol))
ax.set_ylabel('weight')
ax.set_xlabel(r'$\lambda$ ($\AA$)')
#ax.plot(wvls,flatSpectrum,label='pixel',alpha=.5)
#ax.legend(loc='lower left')
#ax2.legend(loc='lower right')
if iPlot%nPlotsPerPage == nPlotsPerPage-1 or (iRow == self.nRow-1 and iCol == self.nCol-1):
pp.savefig(fig)
iPlot += 1
#Put a plot of twilight spectrums for this pixel
if iPlot % nPlotsPerPage == 0:
fig = plt.figure(figsize=(10,10),dpi=100)
ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
ax.plot(wvls,self.totalCube[iRow,iCol,:],label='spectrum')
ax.set_title('p %d,%d'%(iRow,iCol))
ax.set_xlabel(r'$\lambda$ ($\AA$)')
ax.set_ylabel('twilight cps')
#ax.plot(wvls,flatSpectrum,label='pixel',alpha=.5)
#ax.legend(loc='lower left')
#ax2.legend(loc='lower right')
if iPlot%nPlotsPerPage == nPlotsPerPage-1 or (iRow == self.nRow-1 and iCol == self.nCol-1):
pp.savefig(fig)
#plt.show()
iPlot += 1
pp.close()
def writeWeights(self):
"""
Writes an h5 file to put calculated flat cal factors in
"""
if os.path.isabs(self.flatCalFileName) == True:
fullFlatCalFileName = self.flatCalFileName
else:
scratchDir = os.getenv('MKID_PROC_PATH')
flatDir = os.path.join(scratchDir,'flatCalSolnFiles')
fullFlatCalFileName = os.path.join(flatDir,self.flatCalFileName)
try:
flatCalFile = tables.openFile(fullFlatCalFileName,mode='w')
except:
print 'Error: Couldn\'t create flat cal file, ',fullFlatCalFileName
return
print 'wrote to',self.flatCalFileName
calgroup = flatCalFile.createGroup(flatCalFile.root,'flatcal','Table of flat calibration weights by pixel and wavelength')
caltable = tables.Array(calgroup,'weights',object=self.weights,title='Illumination calibration Weights indexed by pixelRow,pixelCol,wavelengthBin')
errtable = tables.Array(calgroup,'errors',object=self.deltaWeights,title='Errors in Weights indexed by pixelRow,pixelCol,wavelengthBin')
flagtable = tables.Array(calgroup,'flags',object=self.flags,title='Illumination cal flags indexed by pixelRow,pixelCol,wavelengthBin. 0 is Good. By default, all are good.')
bintable = tables.Array(calgroup,'wavelengthBins',object=self.wvlBinEdges,title='Wavelength bin edges corresponding to third dimension of weights array')
flatCalFile.flush()
flatCalFile.close()
npzFileName = os.path.splitext(fullFlatCalFileName)[0]+'.npz'
#calculate total spectra and medians for programs that expect old format flat cal
spectra = np.array(np.sum(self.spectralCubes,axis=0))
wvlAverages = np.zeros(self.nWvlBins)
spectra2d = np.reshape(spectra,[self.nRow*self.nCol,self.nWvlBins ])
np.savez(npzFileName,binEdges=self.wvlBinEdges,spectra=spectra,weights=self.weights,deltaWeights=self.deltaWeights,totalFrame=self.totalFrame,totalCube=self.totalCube,spectralCubes=self.spectralCubes,countCubes=self.countCubes,cubeEffIntTimes=self.cubeEffIntTimes )
if __name__ == '__main__':
paramFile = sys.argv[1]
flatcal = FlatCal(paramFile)
flatcal.loadFlatSpectra()
flatcal.checkCountRates()
flatcal.calculateWeights()
flatcal.writeWeights()
flatcal.plotWeightsWvlSlices()
flatcal.plotWeightsByPixel()
|
gpl-2.0
| 2,251,571,697,630,836,500
| 43.152589
| 273
| 0.630462
| false
| 3.548839
| false
| false
| false
|
MateuszG/django-user-example
|
app/settings.py
|
1
|
2129
|
"""
Django settings for app project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j7%se66*%_c%^+$q27nukm93$yo@-_km4tt3&61u52b9hbbp9!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'user'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'user.User'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
|
mit
| 5,518,857,587,328,920,000
| 23.755814
| 71
| 0.720996
| false
| 3.235562
| false
| false
| false
|
MockyJoke/numbers
|
ex1/code/monthly_totals.py
|
1
|
3064
|
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
def get_precip_data():
return pd.read_csv('precipitation.csv', parse_dates=[2])
def date_to_month(d):
return '%04i-%02i' % (d.year, d.month)
def pivot_months_pandas(data):
"""
Create monthly precipitation totals for each station in the data set.
This should use Pandas methods to manipulate the data.
"""
# ...
#d = d.set_index('date').groupby('name').resample('M').sum()
month_col = data['date'].apply(date_to_month)
data = data.assign(month = month_col)
monthly = data.drop(['station', 'latitude','longitude','elevation','date'], axis=1)
monthly = monthly.groupby(['name','month']).sum().reset_index()
monthly = monthly.pivot(index='name',columns='month',values= "precipitation")
counts = data.drop(['station', 'latitude','longitude','elevation',"precipitation"], axis=1)
counts = counts.groupby(['name','month']).count().reset_index()
counts = counts.pivot(index='name',columns='month',values= "date")
return monthly, counts
def pivot_months_loops(data):
"""
Create monthly precipitation totals for each station in the data set.
This does it the hard way: using Pandas as a dumb data store, and iterating in Python.
"""
# Find all stations and months in the data set.
stations = set()
months = set()
for i,r in data.iterrows():
stations.add(r['name'])
m = date_to_month(r['date'])
months.add(m)
# Aggregate into dictionaries so we can look up later.
stations = sorted(list(stations))
row_to_station = dict(enumerate(stations))
station_to_row = {s: i for i,s in row_to_station.items()}
months = sorted(list(months))
col_to_month = dict(enumerate(months))
month_to_col = {m: i for i,m in col_to_month.items()}
# Create arrays for the data, and fill them.
precip_total = np.zeros((len(row_to_station), 12), dtype=np.uint)
obs_count = np.zeros((len(row_to_station), 12), dtype=np.uint)
for _, row in data.iterrows():
m = date_to_month(row['date'])
r = station_to_row[row['name']]
c = month_to_col[m]
precip_total[r, c] += row['precipitation']
obs_count[r, c] += 1
# Build the DataFrames we needed all along (tidying up the index names while we're at it).
totals = pd.DataFrame(
data=precip_total,
index=stations,
columns=months,
)
totals.index.name = 'name'
totals.columns.name = 'month'
counts = pd.DataFrame(
data=obs_count,
index=stations,
columns=months,
)
counts.index.name = 'name'
counts.columns.name = 'month'
return totals, counts
def main():
data = get_precip_data()
#totals, counts = pivot_months_loops(data)
totals, counts = pivot_months_pandas(data)
totals.to_csv('totals.csv')
counts.to_csv('counts.csv')
np.savez('monthdata.npz', totals=totals.values, counts=counts.values)
if __name__ == '__main__':
main()
|
mit
| 3,098,951,167,550,225,000
| 27.635514
| 95
| 0.620431
| false
| 3.385635
| false
| false
| false
|
AsymmetricVentures/asym-logging
|
asymmetricbase/logging/audit.py
|
1
|
4522
|
# -*- coding: utf-8 -*-
# Asymmetric Base Framework - A collection of utilities for django frameworks
# Copyright (C) 2013 Asymmetric Ventures Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
class AuditLoggingHandler(logging.Handler):
"""
Performs our Audit logging. If there is a model included in the record,
we will also include Object Retention info
"""
def __init__(self, *args, **kwargs):
super(AuditLoggingHandler, self).__init__(*args, **kwargs)
self.django_request = None
def _get_current_user_info(self):
pass
def emit(self, record):
log_generator = AuditLogGenerator(self.django_request, record)
log_generator.generate()
class AuditLogGenerator(object):
def __init__(self, request, record):
self.request = request
self.record = record
def generate(self):
from django.conf import settings
if getattr(settings, 'IS_IN_TEST', False):
return
		if not hasattr(self, 'request') or self.request is None:
return
self._get_access_type()
self._get_log_type()
self._get_success()
if self._do_ignore_log():
return
self._get_current_user_info()
self._get_ip()
self._get_model()
self._get_view_name()
		self._save_object_content()
self._save_log_entry()
def _save_log_entry(self):
from django.db import transaction
from .models import AuditEntry
with transaction.commit_on_success():
l = AuditEntry(
log_type = self.log_type,
access_type = self.access_type,
user_id = self.user.id if self.user is not None else None,
ip = self.ip,
message = self.record.msg,
model_name = self.model_str,
view_name = self.view_name,
success = self.success,
object_content = self.object_content,
)
l.save()
	def _save_object_content(self):
from .models import ObjectContent
from django.core import serializers
if not self._is_save_object_content_required():
self.object_content = None
return
# serializer only accepts iterables!
content_in_json = serializers.serialize('json', [self.model], ensure_ascii = False)
oc = ObjectContent(content_in_json = content_in_json)
oc.save()
self.object_content = oc
def _is_save_object_content_required(self):
from .models import LogEntryType, AccessType
if self.log_type != LogEntryType.MODEL:
return False
if self.access_type not in (AccessType.ADD, AccessType.WRITE):
return False
if not self.success:
return False
return True
def _get_current_user_info(self):
try:
self.user = self.request.user
except AttributeError:
self.user = None
pass
def _get_ip(self):
self.ip = self.request.META['REMOTE_ADDR']
def _get_access_type(self):
try:
self.access_type = self.record.access_type
except AttributeError:
from .models import AccessType
self.access_type = AccessType.OTHER
def _get_log_type(self):
try:
self.log_type = self.record.log_type
except AttributeError:
from .models import LogEntryType
self.log_type = LogEntryType.OTHER
def _get_model(self):
try:
self.model = self.record.model
self.model_str = u"{model.__class__.__name__}.{model.id}".format(model = self.model)
except AttributeError:
self.model = None
self.model_str = None
def _get_view_name(self):
try:
self.view_name = self.record.view_name
except AttributeError:
self.view_name = None
def _get_success(self):
try:
self.success = self.record.success
except AttributeError:
self.success = None
def _do_ignore_log(self):
from django.conf import settings
from .models import LogEntryType, AccessType
if (not settings.LOG_MODEL_ACCESS_READ) and \
self.log_type == LogEntryType.MODEL and \
self.access_type == AccessType.READ and \
self.success == True:
return True
return False
|
gpl-2.0
| 5,634,017,856,155,444,000
| 27.086957
| 87
| 0.700796
| false
| 3.291121
| false
| false
| false
|
nathanielvarona/airflow
|
airflow/sensors/bash.py
|
1
|
3395
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from subprocess import PIPE, STDOUT, Popen
from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir
from airflow.sensors.base import BaseSensorOperator
class BashSensor(BaseSensorOperator):
"""
Executes a bash command/script and returns True if and only if the
return code is 0.
:param bash_command: The command, set of commands or reference to a
bash script (must be '.sh') to be executed.
:type bash_command: str
:param env: If env is not None, it must be a mapping that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:type env: dict
:param output_encoding: output encoding of bash command.
:type output_encoding: str
"""
template_fields = ('bash_command', 'env')
def __init__(self, *, bash_command, env=None, output_encoding='utf-8', **kwargs):
super().__init__(**kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
def poke(self, context):
"""
Execute the bash command in a temporary directory
which will be cleaned afterwards
"""
bash_command = self.bash_command
self.log.info("Tmp dir root location: \n %s", gettempdir())
with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:
f.write(bytes(bash_command, 'utf_8'))
f.flush()
fname = f.name
script_location = tmp_dir + "/" + fname
self.log.info("Temporary script location: %s", script_location)
self.log.info("Running command: %s", bash_command)
# pylint: disable=subprocess-popen-preexec-fn
with Popen(
['bash', fname],
stdout=PIPE,
stderr=STDOUT,
close_fds=True,
cwd=tmp_dir,
env=self.env,
preexec_fn=os.setsid,
) as resp:
self.log.info("Output:")
for line in iter(resp.stdout.readline, b''):
line = line.decode(self.output_encoding).strip()
self.log.info(line)
resp.wait()
self.log.info("Command exited with return code %s", resp.returncode)
return not resp.returncode
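# Example usage in a DAG (illustrative sketch; the dag object, task_id and the
# file path being polled are assumptions, not part of this module):
#   wait_for_flag = BashSensor(
#       task_id='wait_for_flag',
#       bash_command='test -f /tmp/ready.flag',
#       poke_interval=30,
#       dag=dag,
#   )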
|
apache-2.0
| -6,741,692,652,340,570,000
| 39.416667
| 88
| 0.619735
| false
| 4.449541
| false
| false
| false
|
Leopardob/dice-dev
|
core_apps/Home/core_app.py
|
1
|
1888
|
import os
from PyQt5.QtCore import pyqtSignal, pyqtProperty, qDebug, pyqtSlot
from dice.dice_extras.core_app import CoreApp
from dice.dice_extras.tools.json_sync import JsonList
class Home(CoreApp):
def __init__(self, parent=None):
super(Home, self).__init__(parent)
settings_folder = os.path.join(os.path.expanduser("~"), ".config", "DICE")
if not os.path.exists(settings_folder):
os.makedirs(settings_folder)
self.__recent_projects = JsonList(os.path.join(settings_folder, "recent_projects.json"))
self.__max_recent_projects = 10 # TODO: get this value from settings
recent_projects_changed = pyqtSignal(name="recentProjectsChanged")
@property
def recent_projects(self):
return self.__recent_projects.to_simple_list()
recentProjects = pyqtProperty("QVariantList", fget=recent_projects.fget, notify=recent_projects_changed)
def add_recent_project(self, project_name, location):
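        # Most-recently-used behaviour: a new or re-opened project moves to the
        # front of the list, and the list is capped at __max_recent_projects entries.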
recent_locations = [recent_project['location'] for recent_project in self.__recent_projects]
recent_project = {'projectName': project_name, 'location': location}
if location not in recent_locations:
self.__recent_projects.insert(0, recent_project)
while len(self.__recent_projects) > self.__max_recent_projects:
self.__recent_projects.pop()
self.recent_projects_changed.emit()
else:
# add the project on top of the list
index = self.__recent_projects.index(recent_project)
if index != 0:
self.__recent_projects.pop(index)
self.__recent_projects.insert(0, recent_project)
self.recent_projects_changed.emit()
@pyqtSlot(name="closeProject")
def close_project(self):
self.dice.project.close()
self.dice.desk.clear_workspace()
|
gpl-3.0
| -5,843,003,365,226,793,000
| 39.191489
| 108
| 0.652542
| false
| 3.949791
| false
| false
| false
|
PressLabs/lithium
|
lithium/views/base.py
|
1
|
1507
|
from functools import wraps
import json
from flask import request, Response
from flask.ext.classy import FlaskView
def get_request_type():
types = {
'application/json': 'json',
'application/xml': 'xml'
}
if 'Content-Type' in request.headers:
if request.headers['Content-Type'] in types:
return types[request.headers['Content-Type']]
return 'html'
def serialize_response(request_type, response):
serializers = {
'json': lambda response: json.dumps(response),
'xml': lambda response: json.dumps(response),
}
if isinstance(response, basestring) or isinstance(response, Response):
return response
if request_type in serializers:
return serializers[request_type](response)
return json.dumps(response)
def serialize(f):
@wraps(f)
def decorator(*args, **kwargs):
response = f(*args, **kwargs)
request_type = get_request_type()
return serialize_response(request_type, response)
return decorator
class class_property(property):
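    # A property that also resolves when accessed on the class itself (not only on
    # instances), so BaseView.decorators works without instantiating the view.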
def __get__(self, instance, type):
if instance is None:
return super(class_property, self).__get__(type, type)
return super(class_property, self).__get__(instance, type)
class BaseView(FlaskView):
__decorators = [serialize]
def __init__(self, *args, **kwargs):
super(BaseView, self).__init__(*args, **kwargs)
@class_property
def decorators(cls):
return cls.__decorators
@decorators.setter
def decorators(cls, decorator):
cls.__decorators.insert(0, decorator)
|
apache-2.0
| 7,260,151,536,983,540,000
| 23.306452
| 72
| 0.689449
| false
| 3.844388
| false
| false
| false
|
bloem-project/bloem-server
|
files/tasks.py
|
1
|
1132
|
import hashlib
import os
import logging
from celery import shared_task
from .models import File, Directory
logger = logging.getLogger()
@shared_task
def scan_directory(root, type):
"""Walk through a directory and add files matching a certain pattern to the database."""
for path, dirs, files in os.walk(root):
for file in files:
logger.debug("Found a file with filename {0}.".format(file))
try:
                # get() raises File.DoesNotExist when the file is not in the database yet
                File.objects.get(file_name=file)
                logger.debug("File is already in the database. Skipping.")
                continue
except File.DoesNotExist:
hasher = hashlib.sha256()
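                # hash the file in 64 KiB chunks so large files never have to be read into memory at once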
with open(os.path.join(path, file), 'rb') as _file:
for chunk in iter(lambda: _file.read(65536), b""):
hasher.update(chunk)
_hash = hasher.hexdigest()
_directory = Directory.objects.get(path=root)
entry = File(hash=_hash, file_name=file, directory=_directory, path=path)
entry.save()
|
gpl-3.0
| -8,059,939,730,043,644,000
| 38.034483
| 92
| 0.582155
| false
| 4.337165
| false
| false
| false
|
cgwire/zou
|
zou/app/api.py
|
1
|
4415
|
import os
import sys
from zou.app.utils import events, api as api_utils
from flask import Blueprint
from .blueprints.assets import blueprint as assets_blueprint
from .blueprints.auth import blueprint as auth_blueprint
from .blueprints.breakdown import blueprint as breakdown_blueprint
from .blueprints.comments import blueprint as comments_blueprint
from .blueprints.crud import blueprint as crud_blueprint
from .blueprints.events import blueprint as events_blueprint
from .blueprints.export import blueprint as export_blueprint
from .blueprints.files import blueprint as files_blueprint
from .blueprints.index import blueprint as index_blueprint
from .blueprints.news import blueprint as news_blueprint
from .blueprints.persons import blueprint as persons_blueprint
from .blueprints.playlists import blueprint as playlists_blueprint
from .blueprints.projects import blueprint as projects_blueprint
from .blueprints.previews import blueprint as previews_blueprint
from .blueprints.source import blueprint as import_blueprint
from .blueprints.shots import blueprint as shots_blueprint
from .blueprints.tasks import blueprint as tasks_blueprint
from .blueprints.user import blueprint as user_blueprint
def configure(app):
"""
    Turn the Flask app into a REST API. It configures the routes, the
    authentication and the event system.
"""
app.url_map.strict_slashes = False
configure_api_routes(app)
register_event_handlers(app)
load_plugins(app)
return app
def configure_api_routes(app):
"""
    Register blueprints (modules). Each blueprint describes routes and
associated resources (controllers).
"""
app.register_blueprint(auth_blueprint)
app.register_blueprint(assets_blueprint)
app.register_blueprint(breakdown_blueprint)
app.register_blueprint(comments_blueprint)
app.register_blueprint(crud_blueprint)
app.register_blueprint(export_blueprint)
app.register_blueprint(events_blueprint)
app.register_blueprint(files_blueprint)
app.register_blueprint(import_blueprint)
app.register_blueprint(index_blueprint)
app.register_blueprint(news_blueprint)
app.register_blueprint(persons_blueprint)
app.register_blueprint(playlists_blueprint)
app.register_blueprint(projects_blueprint)
app.register_blueprint(shots_blueprint)
app.register_blueprint(tasks_blueprint)
app.register_blueprint(previews_blueprint)
app.register_blueprint(user_blueprint)
return app
def register_event_handlers(app):
"""
    Load code from the event handlers folder, then register with the event
    manager each event handler listed in its __init__.py.
"""
sys.path.insert(0, app.config["EVENT_HANDLERS_FOLDER"])
try:
import event_handlers
events.register_all(event_handlers.event_map, app)
except ImportError:
# Event handlers folder is not properly configured.
# Handlers are optional, that's why this error is ignored.
app.logger.info("No event handlers folder is configured.")
return app
def load_plugins(app):
"""
    Load plugins (bundles of resources dedicated to a specific usage).
"""
if os.path.exists(app.config["PLUGIN_FOLDER"]):
plugins = load_plugin_modules(app.config["PLUGIN_FOLDER"])
for plugin in plugins:
load_plugin(app, plugin)
def load_plugin_modules(plugin_folder):
"""
    Run a Python import on every plugin listed in the plugin folder and return
    the imported modules.
"""
sys.path.insert(0, plugin_folder)
return [
__import__(file_name)
for file_name in os.listdir(plugin_folder)
if os.path.isdir(os.path.join(plugin_folder, file_name))
and file_name != "__pycache__"
]
def load_plugin(app, plugin):
"""
Load a given plugin as an API plugin: add configured routes to the API. It
    assumes that the plugin is already loaded in memory and has a blueprint
structure.
"""
routes = [
("/plugins%s" % route_path, resource)
for (route_path, resource) in plugin.routes
if len(route_path) > 0 and route_path[0] == "/"
]
plugin.routes = routes
plugin.blueprint = Blueprint(plugin.name, plugin.name)
plugin.api = api_utils.configure_api_from_blueprint(
plugin.blueprint, plugin.routes
)
app.register_blueprint(plugin.blueprint)
app.logger.info("Plugin %s loaded." % plugin.name)
return plugin
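# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): load_plugin() above only needs
# the imported plugin module to expose `name` and `routes`. A package dropped
# into PLUGIN_FOLDER could therefore look roughly like the sketch below; the
# resource class is assumed to be whatever configure_api_from_blueprint()
# accepts (a Flask-RESTful style resource), and all names are illustrative.
#
#   # <PLUGIN_FOLDER>/hello_plugin/__init__.py
#   from flask_restful import Resource
#   class HelloResource(Resource):
#       def get(self):
#           return {"message": "hello"}
#   name = "hello_plugin"
#   routes = [("/hello", HelloResource)]   # served under /plugins/hello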
|
agpl-3.0
| -2,686,815,057,494,757,400
| 34.32
| 80
| 0.726614
| false
| 4.084181
| true
| false
| false
|
velenux/photofix
|
photofix.py
|
1
|
7000
|
# encoding: utf-8
import os
import sys
# for file hash calculation
import hashlib
# datetime manipulation
from datetime import datetime
# exif tags
from gi.repository import GObject, GExiv2
# for moving files and dirs
import shutil
import errno
# configuration
VALID_IMAGES = set(['.cr2', '.cr3', '.crw', '.dng', '.png', '.jpg', '.jpeg', '.tif', '.tiff', '.gpr'])
VALID_VIDEO = set(['.mp4', '.mkv'])
PATH = {
'image': 'storage/images',
'video': 'storage/video',
'non-image': 'storage/non-images',
'duplicate': 'storage/duplicates',
'failed': 'storage/failed'
}
DUP_COUNTER = 0
TS = datetime.strftime(datetime.now(), "%Y-%m-%d")
EXISTING_FILES = set([])
#
# useful function from
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
#
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
#
# get_file_datetime(filename)
# retrieves the EXIF date, falls back to filesystem date
#
def get_file_datetime(filename):
fs_date = datetime.fromtimestamp(os.path.getmtime(filename))
#print "%s fs_date: %s" % (filename, fs_date.strftime("%s")) # debug
try:
exif_date = GExiv2.Metadata(filename).get_date_time()
#print "%s exif_date: %s" % (filename, exif_date.strftime("%s")) # debug
# avoid using the epoch if possible
if (int(fs_date.strftime("%s")) == 0 or fs_date > exif_date):
return exif_date
else:
return fs_date
except:
return fs_date
#
# get_file_hash(filename)
# returns the sha256 sum for the file as a string
#
def get_file_hash(filename):
sha = hashlib.sha256()
with open(filename, 'rb') as fp:
buf = fp.read(262144)
while len(buf) > 0:
sha.update(buf)
buf = fp.read(262144)
return sha.hexdigest()
#
# move_file(filename, destination)
# moves the file and outputs the source and destination for logging
#
def move_file(filename, destination):
global PATH
global DUP_COUNTER
(original_directory, original_filename) = os.path.split(filename)
(destination_directory, destination_filename) = os.path.split(destination)
(original_base_filename, original_extension) = os.path.splitext(original_filename)
    # image destinations are named "YYYYmmdd-HHMMSS_<sha256><ext>", so slicing
    # off the 16-character timestamp prefix keeps the hash used for duplicate
    # tracking in EXISTING_FILES
    destination_hash = destination_filename[16:]
# if the destination is a directory, rebuild the destination with
# directory and original filename so it becomes a full path
if os.path.isdir(destination):
destination = os.path.join(destination, original_filename)
# handle destination links
if os.path.islink(destination):
print('WARNING: destination', destination, 'is a link, redirecting', filename, 'to failed')
newdest = os.path.join(PATH['failed'], original_filename)
return move_file(filename, newdest)
# handle duplicates
if os.path.isfile(destination) or destination_hash in EXISTING_FILES:
print('WARNING:', filename, 'seems like a duplicate, redirecting...')
DUP_COUNTER += 1
if (original_filename != destination_filename):
# if the filenames are different, save the original one for reference
newdest = os.path.join(PATH['duplicate'], original_base_filename + '_' + str(DUP_COUNTER) + '-' + destination_filename)
else:
            # original_extension already includes the leading dot from os.path.splitext()
            newdest = os.path.join(PATH['duplicate'], original_base_filename + '_' + str(DUP_COUNTER) + original_extension)
return move_file(filename, newdest)
mkdir_p(destination_directory)
print('mv to', destination)
try:
shutil.move(filename, destination)
if destination_directory.startswith(PATH['image']):
EXISTING_FILES.add(destination_hash)
except:
        print('WARNING: failed to move', filename, 'to', destination, 'leaving the file in place')
#
# explore_path(path)
# recursively iterates on path, moving images around
#
def explore_path(path):
for root, dirs, files in os.walk(path):
for f in files:
fullfn = os.path.join(root, f)
# skip symlinks and files that have already been moved (eg. xmp files)
if not os.path.isfile(fullfn): continue
# save the file name and extension
# in the base of sidecar files, bn will be the original image
# /path/to/image.ext.xmp -> /path/to/image.ext + .xmp
bn, ext = os.path.splitext(fullfn)
ext = ext.lower()
# print the file we're working on
print(fullfn)
# handle different types of files
if ext in VALID_IMAGES:
handle_image(fullfn)
continue
elif ext in VALID_VIDEO:
handle_video(fullfn)
continue
elif ext == '.xmp' and os.path.isfile(bn):
# skip sidecar files with matching images: they will be handled
# during the original image handling pass
continue
else:
move_file(fullfn, PATH['non-image'])
continue
for d in dirs:
fulldn = os.path.join(root, d)
# skip symlinks
if os.path.islink(fulldn): continue
# recursively calls itself to check the other directories
explore_path(fulldn)
#
# handle_image(filename)
# renames and moves the single image
#
def handle_image(fullfn):
# get filename and extension
dir, fn = os.path.split(fullfn) # dir and filename
bn, ext = os.path.splitext(fn) # basename and extension
ext = ext.lower() # lowercase extension
# recover metadata from the image
file_date = get_file_datetime(fullfn)
file_hash = get_file_hash(fullfn)
# destination is: PATH['image']/TS/YYYY/mm/YYYYmmdd-HHMMSS_HASH.EXTENSION
destfn = os.path.join(PATH['image'], TS, file_date.strftime("%Y"), file_date.strftime("%m"), file_date.strftime("%Y%m%d-%H%M%S") + '_' + file_hash + ext)
# move the file
move_file(fullfn, destfn)
# if there is an XMP sidecar file, move that as well
for f in os.listdir(dir):
f_low = f.lower()
if f.startswith(fn) and f_low.endswith('.xmp'):
move_file(os.path.join(dir, f), destfn + '.xmp')
#
# handle_video(filename)
# recursively iterates on path, moving videos around
#
def handle_video(fullfn):
# get filename and extension
fn = os.path.split(fullfn)[1] # filename
bn, ext = os.path.splitext(fn) # basename and extension
ext = ext.lower() # lowercase extension
# recover metadata from the video
file_date = get_file_datetime(fullfn)
# destination is: PATH['video']/TS/YYYY/mm/YYYYmmdd-HHMMSS_HASH.EXTENSION
destfn = os.path.join(PATH['video'], TS, file_date.strftime("%Y"), file_date.strftime("%m"), file_date.strftime("%Y%m%d-%H%M%S") + '_' + bn + ext)
move_file(fullfn, destfn)
# run on the first command-line argument
explore_path(sys.argv[1])
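# ---------------------------------------------------------------------------
# Editor's note (not part of the original script): given the PATH settings
# above, a run such as `python photofix.py ~/Pictures/card_dump` files an image
# shot on 2015-08-09 roughly like this (hash shortened, run date TS assumed to
# be 2024-05-01):
#   ~/Pictures/card_dump/IMG_0001.CR2
#     -> storage/images/2024-05-01/2015/08/20150809-143055_<sha256>.cr2
# A matching IMG_0001.CR2.xmp sidecar is moved next to it with '.xmp' appended,
# duplicates land in storage/duplicates, videos in storage/video and any other
# file type in storage/non-images.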
|
gpl-2.0
| 2,211,774,835,877,225,000
| 31.55814
| 157
| 0.632857
| false
| 3.589744
| false
| false
| false
|
ANGELHACK-JARVIS/safe-locality
|
app.py
|
1
|
9649
|
from flask import Flask, render_template, json, request, redirect, session
from flaskext.mysql import MySQL
from werkzeug.security import generate_password_hash, check_password_hash
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
import pygal
from pygal.style import BlueStyle, NeonStyle,DarkSolarizedStyle, LightSolarizedStyle, LightColorizedStyle, DarkColorizedStyle, TurquoiseStyle
app = Flask(__name__)
GoogleMaps(app)
app.secret_key = 'ssh...Big secret!'
#MySQL configurations
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'tekken5'
app.config['MYSQL_DATABASE_DB'] = 'safelocality'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
# route to index.html
@app.route("/")
def main():
if(session.get('user')):
return render_template('home.html',session = session)
else:
return render_template('home.html')
# route to signup.html
@app.route('/showSignUp')
def showSignUp():
return render_template('signup.html')
# interact with MySQL for sign up
@app.route('/signUp',methods=['POST'])
def signUp():
try:
_name = request.form['inputName']
_email = request.form['inputEmail']
_password = request.form['inputPassword']
_firstname = request.form['inputFirstName']
_lastname = request.form['inputLastName']
# validate the received values
if _name and _email and _password:
# All Good, let's call MySQL
conn = mysql.connect()
cursor = conn.cursor()
_hashed_password = generate_password_hash(_password)
cursor.callproc('sp_createUser',(_name,_firstname,_lastname,_email,_hashed_password))
data = cursor.fetchall()
        if len(data) == 0:
conn.commit()
return redirect('/showSignin')
else:
return json.dumps({'error':str(data[0])})
else:
return json.dumps({'html':'<span>Enter the required fields</span>'})
except Exception as e:
return json.dumps({'error':str(e)})
finally:
cursor.close()
conn.close()
@app.route('/showSignin')
def showSignin():
return render_template('signin.html')
@app.route('/validateLogin',methods=['POST'])
def validateLogin():
try:
_username = request.form['inputEmail']
_password = request.form['inputPassword']
# connect to mysql
con = mysql.connect()
cursor = con.cursor()
cursor.callproc('sp_validateLogin',(_username,))
data = cursor.fetchall()
if len(data) > 0:
if check_password_hash(str(data[0][5]),_password):
session['user'] = data[0][0]
print "here"
return render_template('home.html')
else:
return render_template('error.html',error = 'Wrong Email address or Password.')
else:
return render_template('error.html',error = 'Wrong Email address or Password.')
except Exception as e:
return render_template('error.html',error = str(e))
finally:
cursor.close()
con.close()
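# ---------------------------------------------------------------------------
# Editor's note (not part of the original app): the signup/login views above
# rely on werkzeug's salted password hashes; this small self-contained helper
# (no database involved) illustrates that round trip.
def _password_roundtrip_demo():
    hashed = generate_password_hash("correct horse battery staple")
    assert check_password_hash(hashed, "correct horse battery staple")
    assert not check_password_hash(hashed, "wrong-password")
    return hashed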
@app.route('/dashboard')
def demo():
newDict = {}
with open('Places.txt','r') as lines:
for i in lines:
k=i.split(',')
v=k[2].strip("\n").strip("\r")
cord=[k[1],v]
newDict[k[0]] = cord
conn = mysql.connect()
cursor = conn.cursor()
#No need to repeatedly create and delete tuples from the Coordinates table
#place=[]
#lat=[]
#lon=[]
#k=0
#print newDict
#for i in newDict:
# place.append(i)
# lat.append(float(newDict[i][0]))
# lon.append(float(newDict[i][1]))
#cursor.callproc('sp_addLoc',('dfsd',12.12,12.1234,))
#for i in range(0,len(place)):
# cursor.callproc('sp_addLoc',(place[i],lat[i],lon[i]))
#cursor.execute("DELETE FROM Coordinates WHERE Loc_id<6 and Loc_id>8")
cursor.execute("SELECT Loc_name FROM Coordinates ORDER BY Loc_name DESC")
data = cursor.fetchall()
print data
conn.commit()
cursor.close()
conn.close()
if(session.get('user')):
return render_template('dashboard.html', data = data,session=session)
else:
return render_template('dashboard.html',data = data)
######################################################################################
#This is the review form implementation
@app.route('/addStats')
def displayForm():
return render_template('addStats.html')
@app.route('/addStats', methods=['POST'])
def takeData():
locale=str(request.form['inputLocale'])
water=int(request.form['inputWater'])
electricity=int(request.form['inputElectricity'])
network=int(request.form['inputNetworkAvailability'])
cleanliness=int(request.form['inputCleanliness'])
green=int(request.form['inputGreenSpace'])
life=int(request.form['inputNightlife'])
rmen=int(request.form['inputRepairmenAvailability'])
edu=int(request.form['inputeducation'])
nhood=int(request.form['inputNeighbourhood'])
lent=int(request.form['inputLocalEntertainment'])
rev=str(request.form['inputReview'])
uid=int(session.get('user'))
conn=mysql.connect()
cur=conn.cursor()
cur.execute("Select Loc_id from Coordinates where Loc_name=%s",(locale))
lid=int(cur.fetchone()[0])
cur.execute("Insert into Review (UserId,Loc_id,review_text) values(%s,%s,%s)",(uid,lid,rev))
conn.commit()
cur.callproc('sp_addStats',(uid,lid,water,electricity,network,cleanliness, green, lent, life, rmen, edu, nhood))
conn.commit()
cur.close()
conn.close()
return render_template('home.html')
######################################################################################
@app.route('/places/<place_name>/')
def places(place_name):
if session.get('user'):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM Coordinates WHERE Loc_name = %s", (place_name))
data = cursor.fetchall()[0]
name=data[1]
conn.commit()
cursor.close()
conn.close()
mymap = Map(
identifier="view-side",
lat=data[2],
lng=data[3],
markers=[(37.4419, -122.1419)]
)
lat = data[2]
lon = data[3]
#The graph is made and passed on from here onwards
###################################################
title="Crime Rates"
crime_graph=pygal.Bar(width=600, height=600, explicit_size=True, title=title, style=BlueStyle, disable_xml_declaration=True, range=(0,10))
crime_labels=['Theft','Violence', 'Harassment']
conn = mysql.connect()
cursor = conn.cursor()
#procedure not used for simpler execution
cursor.execute("select avg(Theft), avg(Violence), avg(Harassment) from Security, Coordinates where Coordinates.Loc_id=Security.Loc_id and Coordinates.Loc_name=%s",(place_name))
data1=cursor.fetchone()
crime_values=[data1[0],data1[1],data1[2]]
crime_graph.x_labels=crime_labels
crime_graph.add('Rating', crime_values)
lifestyle_graph=pygal.Bar(width=1200, height=600, explicit_size=True, title="Living Standards", style=BlueStyle, disable_xml_declaration=True, range=(0,10))
cursor.close()
conn.close()
conn = mysql.connect()
cursor = conn.cursor()
#procedure not used for simpler execution
cursor.execute("SELECT avg(Water), avg(Electricity), avg(Network_Availability), avg(Cleanliness), avg(Green_space), avg(Local_Entertainment), avg(NightLife), avg(Repairmen_avail), avg(Education), avg(Neighbourhood) from LifeStyle, Coordinates where Coordinates.Loc_id=LifeStyle.Loc_id and Coordinates.Loc_name=%s",(place_name))
data1=cursor.fetchone()
lifestyle_values=[data1[0], data1[1], data1[2], data1[3], data1[4], data1[5], data1[6], data1[7], data1[8], data1[9]]
lifestyle_labels=["Water", "Electricity", "Network Availability", "Cleanliness", "Green Space", "Local Entertainment", "Night Life", "Services", "Education", "Neighbourhood"]
lifestyle_graph.x_labels=lifestyle_labels
lifestyle_graph.add('Rating', lifestyle_values)
graphs=[crime_graph, lifestyle_graph]
cursor.close()
conn.close()
########################################################
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * FROM Review where Loc_id = (SELECT Loc_id from Coordinates where Loc_name=%s) ",(place_name))
dat = cursor.fetchall()
use_fec=[]
for review in dat:
cursor.execute("SELECT UserName from User where UserId = %s", review[0])
use_fec.append([cursor.fetchone()[0],review[2]])
print use_fec
return render_template('demo.html', use_fec=use_fec, rev_data=dat,name=name, mymap=mymap, data=data,lat = data[2], lon=data[3], graphs=graphs,dat=dat)
else:
return render_template('error.html',error = 'Unauthorized Access')
@app.route('/demo')
def userHome():
if session.get('user'):
mymap = Map(
identifier="view-side",
lat=37.4419,
lng=-122.1419,
markers=[(37.4419, -122.1419)]
)
return render_template('demo.html', mymap=mymap)
else:
return render_template('error.html',error = 'Unauthorized Access')
@app.route('/logout')
def logout():
session.pop('user',None)
return render_template('home.html')
if __name__ == "__main__":
app.debug = True
app.run()
|
mit
| -2,559,263,458,218,067,500
| 36.691406
| 335
| 0.611462
| false
| 3.612505
| false
| false
| false
|
darren-wang/gl
|
glance/api/v1/upload_utils.py
|
1
|
12541
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store as store_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import webob.exc
from glance.common import exception
from glance.common import store_utils
from glance.common import utils
import glance.db
from glance import i18n
import glance.registry.client.v1.api as registry
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
def initiate_deletion(req, location_data, id):
"""
Deletes image data from the location of backend store.
:param req: The WSGI/Webob Request object
:param location_data: Location to the image data in a data store
:param id: Opaque image identifier
"""
store_utils.delete_image_location_from_backend(req.context,
id, location_data)
def _kill(req, image_id, from_state):
"""
Marks the image status to `killed`.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
# TODO(dosaboy): http://docs.openstack.org/developer/glance/statuses.html
# needs updating to reflect the fact that queued->killed and saving->killed
# are both allowed.
registry.update_image_metadata(req.context, image_id,
{'status': 'killed'},
from_state=from_state)
def safe_kill(req, image_id, from_state):
"""
Mark image killed without raising exceptions if it fails.
Since _kill is meant to be called from exceptions handlers, it should
not raise itself, rather it should just log its error.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
try:
_kill(req, image_id, from_state)
except Exception:
LOG.exception(_LE("Unable to kill image %(id)s: ") % {'id': image_id})
def upload_data_to_store(req, image_meta, image_data, store, notifier):
"""
Upload image data to specified store.
Upload image data to the store and cleans up on error.
"""
image_id = image_meta['id']
db_api = glance.db.get_api()
image_size = image_meta.get('size')
try:
# By default image_data will be passed as CooperativeReader object.
# But if 'user_storage_quota' is enabled and 'remaining' is not None
# then it will be passed as object of LimitingReader to
# 'store_add_to_backend' method.
image_data = utils.CooperativeReader(image_data)
remaining = glance.api.common.check_quota(
req.context, image_size, db_api, image_id=image_id)
if remaining is not None:
image_data = utils.LimitingReader(image_data, remaining)
(uri,
size,
checksum,
location_metadata) = store_api.store_add_to_backend(
image_meta['id'],
image_data,
image_meta['size'],
store,
context=req.context)
location_data = {'url': uri,
'metadata': location_metadata,
'status': 'active'}
try:
# recheck the quota in case there were simultaneous uploads that
# did not provide the size
glance.api.common.check_quota(
req.context, size, db_api, image_id=image_id)
except exception.StorageQuotaFull:
with excutils.save_and_reraise_exception():
LOG.info(_LI('Cleaning up %s after exceeding '
'the quota') % image_id)
store_utils.safe_delete_from_backend(
req.context, image_meta['id'], location_data)
def _kill_mismatched(image_meta, attr, actual):
supplied = image_meta.get(attr)
if supplied and supplied != actual:
msg = (_("Supplied %(attr)s (%(supplied)s) and "
"%(attr)s generated from uploaded image "
"(%(actual)s) did not match. Setting image "
"status to 'killed'.") % {'attr': attr,
'supplied': supplied,
'actual': actual})
LOG.error(msg)
safe_kill(req, image_id, 'saving')
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
# Verify any supplied size/checksum value matches size/checksum
# returned from store when adding image
_kill_mismatched(image_meta, 'size', size)
_kill_mismatched(image_meta, 'checksum', checksum)
# Update the database with the checksum returned
# from the backend store
LOG.debug("Updating image %(image_id)s data. "
"Checksum set to %(checksum)s, size set "
"to %(size)d", {'image_id': image_id,
'checksum': checksum,
'size': size})
update_data = {'checksum': checksum,
'size': size}
try:
try:
state = 'saving'
image_meta = registry.update_image_metadata(req.context,
image_id,
update_data,
from_state=state)
except exception.Duplicate:
image = registry.get_image_metadata(req.context, image_id)
if image['status'] == 'deleted':
raise exception.NotFound()
else:
raise
except exception.NotAuthenticated as e:
# Delete image data due to possible token expiration.
LOG.debug("Authentication error - the token may have "
"expired during file upload. Deleting image data for "
" %s " % image_id)
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPUnauthorized(explanation=e.msg, request=req)
except exception.NotFound:
msg = _LI("Image %s could not be found after upload. The image may"
" have been deleted during the upload.") % image_id
LOG.info(msg)
# NOTE(jculp): we need to clean up the datastore if an image
# resource is deleted while the image data is being uploaded
#
# We get "location_data" from above call to store.add(), any
# exceptions that occur there handle this same issue internally,
# Since this is store-agnostic, should apply to all stores.
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPPreconditionFailed(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StoreAddDisabled:
msg = _("Error in store configuration. Adding images to store "
"is disabled.")
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPGone(explanation=msg, request=req,
content_type='text/plain')
except exception.Duplicate as e:
msg = (_("Attempt to upload duplicate image: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
# NOTE(dosaboy): do not delete the image since it is likely that this
# conflict is a result of another concurrent upload that will be
# successful.
notifier.error('image.upload', msg)
raise webob.exc.HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden upload attempt: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except store_api.StorageFull as e:
msg = (_("Image storage media is full: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StorageWriteDenied as e:
msg = (_("Insufficient permissions on image storage media: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPServiceUnavailable(explanation=msg,
request=req,
content_type='text/plain')
except exception.ImageSizeLimitExceeded as e:
msg = (_("Denying attempt to upload image larger than %d bytes.")
% CONF.image_size_cap)
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except exception.StorageQuotaFull as e:
msg = (_("Denying attempt to upload image because it exceeds the "
"quota: %s") % utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except webob.exc.HTTPError:
# NOTE(bcwaldon): Ideally, we would just call 'raise' here,
# but something in the above function calls is affecting the
# exception context and we must explicitly re-raise the
# caught exception.
msg = _LE("Received HTTP error while uploading image %s") % image_id
notifier.error('image.upload', msg)
with excutils.save_and_reraise_exception():
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
except (ValueError, IOError) as e:
msg = _("Client disconnected before sending all data to backend")
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
except Exception as e:
msg = _("Failed to upload image %s") % image_id
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPInternalServerError(explanation=msg,
request=req,
content_type='text/plain')
return image_meta, location_data
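# ---------------------------------------------------------------------------
# Editor's note (not part of the original module): upload_data_to_store() leans
# on oslo's save_and_reraise_exception() so that cleanup work cannot swallow
# the original error. The standalone sketch below illustrates just that
# pattern; the function and exception are illustrative only.
def _cleanup_and_reraise_example(cleanup):
    try:
        raise RuntimeError("store write failed")
    except RuntimeError:
        # cleanup() runs, then the original RuntimeError is re-raised when the
        # context manager exits
        with excutils.save_and_reraise_exception():
            cleanup()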
|
apache-2.0
| -4,218,632,327,757,525,500
| 41.225589
| 79
| 0.553225
| false
| 4.560364
| false
| false
| false
|
Hellowlol/plexpy
|
plexpy/activity_handler.py
|
1
|
11652
|
# This file is part of PlexPy.
#
# PlexPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PlexPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PlexPy. If not, see <http://www.gnu.org/licenses/>.
import time
import plexpy
from plexpy import logger, pmsconnect, activity_processor, threading, notification_handler, helpers
class ActivityHandler(object):
def __init__(self, timeline):
self.timeline = timeline
# print timeline
def is_valid_session(self):
if 'sessionKey' in self.timeline:
if str(self.timeline['sessionKey']).isdigit():
return True
return False
def get_session_key(self):
if self.is_valid_session():
return int(self.timeline['sessionKey'])
return None
def get_live_session(self):
pms_connect = pmsconnect.PmsConnect()
session_list = pms_connect.get_current_activity()
for session in session_list['sessions']:
if int(session['session_key']) == self.get_session_key():
return session
return None
def update_db_session(self):
# Update our session temp table values
monitor_proc = activity_processor.ActivityProcessor()
monitor_proc.write_session(session=self.get_live_session(), notify=False)
def on_start(self):
if self.is_valid_session() and self.get_live_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s has started." % str(self.get_session_key()))
# Fire off notifications
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=self.get_live_session(), notify_action='play')).start()
# Write the new session to our temp session table
self.update_db_session()
def on_stop(self, force_stop=False):
if self.is_valid_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s has stopped." % str(self.get_session_key()))
# Set the session last_paused timestamp
ap = activity_processor.ActivityProcessor()
ap.set_session_last_paused(session_key=self.get_session_key(), timestamp=None)
# Update the session state and viewOffset
# Set force_stop to true to disable the state set
if not force_stop:
ap.set_session_state(session_key=self.get_session_key(),
state=self.timeline['state'],
view_offset=self.timeline['viewOffset'])
# Retrieve the session data from our temp table
db_session = ap.get_session_by_key(session_key=self.get_session_key())
# Fire off notifications
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_session, notify_action='stop')).start()
# Write it to the history table
monitor_proc = activity_processor.ActivityProcessor()
monitor_proc.write_session_history(session=db_session)
# Remove the session from our temp session table
ap.delete_session(session_key=self.get_session_key())
def on_pause(self):
if self.is_valid_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s has been paused." % str(self.get_session_key()))
# Set the session last_paused timestamp
ap = activity_processor.ActivityProcessor()
ap.set_session_last_paused(session_key=self.get_session_key(), timestamp=int(time.time()))
# Update the session state and viewOffset
ap.set_session_state(session_key=self.get_session_key(),
state=self.timeline['state'],
view_offset=self.timeline['viewOffset'])
# Retrieve the session data from our temp table
db_session = ap.get_session_by_key(session_key=self.get_session_key())
# Fire off notifications
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_session, notify_action='pause')).start()
def on_resume(self):
if self.is_valid_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s has been resumed." % str(self.get_session_key()))
# Set the session last_paused timestamp
ap = activity_processor.ActivityProcessor()
ap.set_session_last_paused(session_key=self.get_session_key(), timestamp=None)
# Update the session state and viewOffset
ap.set_session_state(session_key=self.get_session_key(),
state=self.timeline['state'],
view_offset=self.timeline['viewOffset'])
# Retrieve the session data from our temp table
db_session = ap.get_session_by_key(session_key=self.get_session_key())
# Fire off notifications
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_session, notify_action='resume')).start()
def on_buffer(self):
if self.is_valid_session():
logger.debug(u"PlexPy ActivityHandler :: Session %s is buffering." % self.get_session_key())
ap = activity_processor.ActivityProcessor()
db_stream = ap.get_session_by_key(session_key=self.get_session_key())
# Increment our buffer count
ap.increment_session_buffer_count(session_key=self.get_session_key())
# Get our current buffer count
current_buffer_count = ap.get_session_buffer_count(self.get_session_key())
logger.debug(u"PlexPy ActivityHandler :: Session %s buffer count is %s." %
(self.get_session_key(), current_buffer_count))
# Get our last triggered time
buffer_last_triggered = ap.get_session_buffer_trigger_time(self.get_session_key())
time_since_last_trigger = 0
if buffer_last_triggered:
logger.debug(u"PlexPy ActivityHandler :: Session %s buffer last triggered at %s." %
(self.get_session_key(), buffer_last_triggered))
time_since_last_trigger = int(time.time()) - int(buffer_last_triggered)
if plexpy.CONFIG.BUFFER_THRESHOLD > 0 and (current_buffer_count >= plexpy.CONFIG.BUFFER_THRESHOLD and \
time_since_last_trigger == 0 or time_since_last_trigger >= plexpy.CONFIG.BUFFER_WAIT):
ap.set_session_buffer_trigger_time(session_key=self.get_session_key())
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_stream, notify_action='buffer')).start()
# This function receives events from our websocket connection
def process(self):
if self.is_valid_session():
ap = activity_processor.ActivityProcessor()
db_session = ap.get_session_by_key(session_key=self.get_session_key())
this_state = self.timeline['state']
this_key = str(self.timeline['ratingKey'])
# If we already have this session in the temp table, check for state changes
if db_session:
last_state = db_session['state']
last_key = str(db_session['rating_key'])
# Make sure the same item is being played
if this_key == last_key:
# Update the session state and viewOffset
if this_state == 'playing':
ap.set_session_state(session_key=self.get_session_key(),
state=this_state,
view_offset=self.timeline['viewOffset'])
# Start our state checks
if this_state != last_state:
if this_state == 'paused':
self.on_pause()
elif last_state == 'paused' and this_state == 'playing':
self.on_resume()
elif this_state == 'stopped':
self.on_stop()
elif this_state == 'buffering':
self.on_buffer()
# If a client doesn't register stop events (I'm looking at you PHT!) check if the ratingKey has changed
else:
# Manually stop and start
# Set force_stop so that we don't overwrite our last viewOffset
self.on_stop(force_stop=True)
self.on_start()
# Monitor if the stream has reached the watch percentage for notifications
# The only purpose of this is for notifications
progress_percent = helpers.get_percent(self.timeline['viewOffset'], db_session['duration'])
if progress_percent >= plexpy.CONFIG.NOTIFY_WATCHED_PERCENT and this_state != 'buffering':
threading.Thread(target=notification_handler.notify,
kwargs=dict(stream_data=db_session, notify_action='watched')).start()
else:
# We don't have this session in our table yet, start a new one.
if this_state != 'buffering':
self.on_start()
class TimelineHandler(object):
def __init__(self, timeline):
self.timeline = timeline
#logger.debug(timeline)
def is_item(self):
if 'itemID' in self.timeline:
return True
return False
def get_rating_key(self):
if self.is_item():
return int(self.timeline['itemID'])
return None
def get_metadata(self):
pms_connect = pmsconnect.PmsConnect()
metadata_list = pms_connect.get_metadata_details(self.get_rating_key())
if metadata_list:
return metadata_list['metadata']
return None
def on_created(self):
if self.is_item():
logger.debug(u"PlexPy TimelineHandler :: Library item %s has been added to Plex." % str(self.get_rating_key()))
# Fire off notifications
threading.Thread(target=notification_handler.notify_timeline,
kwargs=dict(timeline_data=self.get_metadata(), notify_action='created')).start()
# This function receives events from our websocket connection
def process(self):
if self.is_item():
this_state = self.timeline['state']
this_type = self.timeline['type']
this_metadataState = self.timeline.get('metadataState', None)
this_mediaState = self.timeline.get('mediaState', None)
# state: 5: done processing metadata
# type: 1: movie, 2: tv show, 4: episode, 8: artist, 10: track
types = [1, 2, 4, 8, 10]
if this_state == 5 and this_type in types and this_metadataState == None and this_mediaState == None:
self.on_created()
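# ---------------------------------------------------------------------------
# Editor's note (not part of the original module): both handlers are fed raw
# timeline dicts coming off the Plex websocket. A minimal dispatch sketch, with
# illustrative payload values, looks like this (it assumes a reachable Plex
# server and an initialised PlexPy config):
def _dispatch_example():
    playing = {'sessionKey': '12', 'state': 'playing', 'viewOffset': 60000, 'ratingKey': 4242}
    ActivityHandler(timeline=playing).process()
    new_item = {'itemID': 4242, 'state': 5, 'type': 4, 'metadataState': None, 'mediaState': None}
    TimelineHandler(timeline=new_item).process()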
|
gpl-3.0
| 3,072,818,931,053,455,000
| 43.30038
| 123
| 0.588841
| false
| 4.348638
| false
| false
| false
|
olliemath/Python-TinyEvolver
|
examples/Example3.py
|
1
|
1280
|
from tinyevolver import Population
import random
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
random.seed(1234)
"""
For this example we're going to try fitting a power of t to some data:
so e.g. d(t) = a*t^n + b should reasonably approximate some data
depending on t.
"""
# We want n to be integer, and a,b to be floats, so
prototype = [1.0, 1, 1.0]
# And we restrict the possible genes to these intervals:
bounds = [(0.0, 1.0), (0, 3), (0, 5.0)]
# How fit an individual is will depend on how well it approximates the
# data. So let's cook up some data:
times = range(20)
data = [0.5 * time ** 2 + 1.0 + random.uniform(0, 10) for time in times]
def fitness(ind):
curve = [ind[0] * time ** ind[1] + ind[2] for time in times]
square_error = [(f - d) ** 2 for f, d in zip(curve, data)]
# More error = less fit
try:
return 20.0 / sum(square_error)
except ZeroDivisionError:
return float('inf')
# Now to populate and evolve:
p = Population(prototype, bounds, fitness)
p.populate()
p.evolve()
# Let's see how we did:
if plt:
best_ind = p.best
best_fit = [best_ind[0] * time ** best_ind[1] + best_ind[2] for time in times]
plt.plot(times, data)
plt.plot(times, best_fit)
plt.show()
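# Editor's note (not part of the original example): the fitted coefficients can
# also be read straight off the best individual, with or without matplotlib:
a_fit, n_fit, b_fit = p.best[0], p.best[1], p.best[2]
print("d(t) ~= {:.3f} * t^{} + {:.3f}".format(a_fit, n_fit, b_fit))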
|
gpl-2.0
| -5,832,435,131,763,731,000
| 25.666667
| 82
| 0.639063
| false
| 2.997658
| false
| false
| false
|
rgerkin/python-neo
|
neo/io/asciispiketrainio.py
|
1
|
3629
|
# -*- coding: utf-8 -*-
"""
Class for reading/writing SpikeTrains in a text file.
It is the simple case where different spiketrains are written line by line.
Supported : Read/Write
Author: sgarcia
"""
import os
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Segment, SpikeTrain
class AsciiSpikeTrainIO(BaseIO):
"""
Class for reading/writing SpikeTrains in a text file.
Each Spiketrain is a line.
Usage:
>>> from neo import io
>>> r = io.AsciiSpikeTrainIO( filename = 'File_ascii_spiketrain_1.txt')
>>> seg = r.read_segment()
>>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[<SpikeTrain(array([ 3.89981604, 4.73258781, 0.608428 , 4.60246277, 1.23805797,
...
"""
is_readable = True
is_writable = True
supported_objects = [Segment, SpikeTrain]
readable_objects = [Segment]
writeable_objects = [Segment]
has_header = False
is_streameable = False
read_params = {
Segment: [
('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
('t_start', {'value': 0., }),
]
}
write_params = {
Segment: [
('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
]
}
name = None
extensions = ['txt']
mode = 'file'
def __init__(self, filename=None):
"""
        This class reads/writes SpikeTrains in a text file.
Each row is a spiketrain.
**Arguments**
filename : the filename to read/write
"""
BaseIO.__init__(self)
self.filename = filename
def read_segment(self,
lazy=False,
delimiter='\t',
t_start=0. * pq.s,
unit=pq.s,
):
"""
Arguments:
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
            t_start : start time of all spiketrains, 0 by default
            unit : unit of the spike times, either a str or a Quantity directly
"""
assert not lazy, 'Do not support lazy'
unit = pq.Quantity(1, unit)
seg = Segment(file_origin=os.path.basename(self.filename))
f = open(self.filename, 'Ur')
for i, line in enumerate(f):
alldata = line[:-1].split(delimiter)
if alldata[-1] == '':
alldata = alldata[:-1]
if alldata[0] == '':
alldata = alldata[1:]
spike_times = np.array(alldata).astype('f')
t_stop = spike_times.max() * unit
sptr = SpikeTrain(spike_times * unit, t_start=t_start, t_stop=t_stop)
sptr.annotate(channel_index=i)
seg.spiketrains.append(sptr)
f.close()
seg.create_many_to_one_relationship()
return seg
def write_segment(self, segment,
delimiter='\t',
):
"""
        Write the SpikeTrains of a Segment to a txt file.
Each row is a spiketrain.
Arguments:
            segment : the segment to write. Only spiketrains will be written.
            delimiter : column delimiter in the file: '\t', one space, two spaces, ',' or ';'
information of t_start is lost
"""
f = open(self.filename, 'w')
for s, sptr in enumerate(segment.spiketrains):
for ts in sptr:
f.write('{:f}{}'.format(ts, delimiter))
f.write('\n')
f.close()
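# -----------------------------------------------------------------------------
# Editor's note (not part of the original module): a minimal write/read round
# trip with this IO class; the temporary filename is illustrative only.
def _roundtrip_example(filename='spiketrains_demo.txt'):
    seg = Segment()
    seg.spiketrains.append(SpikeTrain([0.5, 1.2, 3.3] * pq.s, t_start=0.0 * pq.s, t_stop=4.0 * pq.s))
    AsciiSpikeTrainIO(filename=filename).write_segment(seg)
    return AsciiSpikeTrainIO(filename=filename).read_segment()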
|
bsd-3-clause
| -4,014,761,197,161,626,000
| 25.683824
| 97
| 0.527142
| false
| 3.714432
| false
| false
| false
|
op3/hdtv
|
hdtv/backgroundmodels/exponential.py
|
1
|
2206
|
# -*- coding: utf-8 -*-
# HDTV - A ROOT-based spectrum analysis software
# Copyright (C) 2006-2009 The HDTV development team (see file AUTHORS)
#
# This file is part of HDTV.
#
# HDTV is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# HDTV is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with HDTV; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import ROOT
from .background import BackgroundModel
class BackgroundModelExponential(BackgroundModel):
"""
Exponential background model
"""
def __init__(self):
super(BackgroundModelExponential, self).__init__()
self.fParStatus = {"nparams": 2}
self.fValidParStatus = {"nparams": [int, "free"]}
self.ResetParamStatus()
self.name = "exponential"
self.requiredBgRegions = 1
def ResetParamStatus(self):
"""
Reset parameter status to defaults
"""
self.fParStatus["nparams"] = 2
def GetFitter(self, integrate, likelihood, nparams=None, nbg=None):
"""
Creates a C++ Fitter object, which can then do the real work
"""
if nparams is not None:
self.fFitter = ROOT.HDTV.Fit.ExpBg(nparams, integrate, likelihood)
self.fParStatus["nparams"] = nparams
elif isinstance(self.fParStatus["nparams"], int):
self.fFitter = ROOT.HDTV.Fit.ExpBg(
self.fParStatus["nparams"], integrate, likelihood
)
else:
msg = (
"Status specifier %s of background fitter is invalid."
                % self.fParStatus["nparams"]
)
raise ValueError(msg)
self.ResetGlobalParams()
return self.fFitter
|
gpl-2.0
| 4,411,505,221,114,811,400
| 32.424242
| 78
| 0.647325
| false
| 4.040293
| false
| false
| false
|
manfer/LFP.bundle
|
Contents/Code/__init__.py
|
1
|
2659
|
# -*- coding: utf-8 -*-
TITLE = u'LFP'
PREFIX = '/video/lfp'
LFP_BASE_URL = 'http://www.laliga.es'
LFP_MULTIMEDIA = '%s/multimedia' % LFP_BASE_URL
LFP_ICON = 'lfp.png'
ICON = 'default-icon.png'
LFP_HL_ICON = 'highlights.png'
LFP_VIDEO_ICON = 'video.png'
LFP_PHOTO_ICON = 'photo.png'
LFP_LALIGATV_ICON = 'laligatv.png'
SEARCH_ICON = 'search-icon.png'
SETTINGS_ICON = 'settings-icon.png'
ART = 'futbol.jpg'
HTTP_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Connection': 'keep-alive',
'Origin': LFP_BASE_URL,
'Referer': LFP_MULTIMEDIA
}
from lfputil import L
from lfpvideo import *
from lfpfoto import *
from laligatv import *
from lfpsearch import *
################################################################################
def Start():
Plugin.AddViewGroup('List', viewMode='List', mediaType='items')
Plugin.AddViewGroup('InfoList', viewMode='InfoList', mediaType='items')
Plugin.AddViewGroup('PanelStream', viewMode='PanelStream', mediaType='items')
ObjectContainer.title1 = TITLE
#ObjectContainer.view_group = 'List'
ObjectContainer.art = R(ART)
DirectoryObject.thumb = R(ICON)
DirectoryObject.art = R(ART)
PhotoAlbumObject.thumb = R(ICON)
HTTP.CacheTime = CACHE_1HOUR
################################################################################
@handler(PREFIX, TITLE, art=ART, thumb=LFP_ICON)
def lfp_main_menu():
oc = ObjectContainer()
oc.add(DirectoryObject(
key = Callback(lfp_resumenes),
title = L("Highlights"),
summary = L("enjoy lfp highlight videos"),
thumb = R(LFP_HL_ICON)
))
oc.add(DirectoryObject(
key = Callback(lfp_videos),
title = L("Other Videos"),
summary = L("enjoy other videos on lfp website"),
thumb = R(LFP_VIDEO_ICON)
))
oc.add(DirectoryObject(
key = Callback(lfp_fotos),
title = L("Photos"),
summary = L("enjoy the photos on lfp website"),
thumb = R(LFP_PHOTO_ICON)
))
oc.add(DirectoryObject(
key = Callback(lfp_laligatv),
title = L("La Liga TV"),
summary = L("enjoy live Adelante League matches"),
thumb = R(LFP_LALIGATV_ICON)
))
if Client.Product != 'PlexConnect':
oc.add(InputDirectoryObject(
key = Callback(lfp_search),
title = L('Search LFP Videos'),
prompt = L('Search for LFP Videos'),
summary = L('Search for LFP Videos'),
thumb = R(SEARCH_ICON)
))
return oc
|
gpl-3.0
| 4,681,088,658,285,820,000
| 27.287234
| 91
| 0.608499
| false
| 2.954444
| false
| false
| false
|
gooddata/openstack-nova
|
nova/context.py
|
1
|
22746
|
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
from contextlib import contextmanager
import copy
import warnings
import eventlet.queue
import eventlet.timeout
from keystoneauth1.access import service_catalog as ksa_service_catalog
from keystoneauth1 import plugin
from oslo_context import context
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from nova import exception
from nova.i18n import _
from nova import objects
from nova import policy
from nova import utils
LOG = logging.getLogger(__name__)
# TODO(melwitt): This cache should be cleared whenever WSGIService receives a
# SIGHUP and periodically based on an expiration time. Currently, none of the
# cell caches are purged, so neither is this one, for now.
CELL_CACHE = {}
# NOTE(melwitt): Used for the scatter-gather utility to indicate we timed out
# waiting for a result from a cell.
did_not_respond_sentinel = object()
# NOTE(melwitt): Used for the scatter-gather utility to indicate an exception
# was raised gathering a result from a cell.
raised_exception_sentinel = object()
# FIXME(danms): Keep a global cache of the cells we find the
# first time we look. This needs to be refreshed on a timer or
# trigger.
CELLS = []
# Timeout value for waiting for cells to respond
CELL_TIMEOUT = 60
class _ContextAuthPlugin(plugin.BaseAuthPlugin):
"""A keystoneauth auth plugin that uses the values from the Context.
Ideally we would use the plugin provided by auth_token middleware however
this plugin isn't serialized yet so we construct one from the serialized
auth data.
"""
def __init__(self, auth_token, sc):
super(_ContextAuthPlugin, self).__init__()
self.auth_token = auth_token
self.service_catalog = ksa_service_catalog.ServiceCatalogV2(sc)
def get_token(self, *args, **kwargs):
return self.auth_token
def get_endpoint(self, session, service_type=None, interface=None,
region_name=None, service_name=None, **kwargs):
return self.service_catalog.url_for(service_type=service_type,
service_name=service_name,
interface=interface,
region_name=region_name)
@enginefacade.transaction_context_provider
class RequestContext(context.RequestContext):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id=None, project_id=None, is_admin=None,
read_deleted="no", remote_address=None, timestamp=None,
quota_class=None, service_catalog=None,
user_auth_plugin=None, **kwargs):
""":param read_deleted: 'no' indicates deleted records are hidden,
'yes' indicates deleted records are visible,
'only' indicates that *only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param instance_lock_checked: This is not used and will be removed
in a future release.
:param user_auth_plugin: The auth plugin for the current request's
authentication data.
"""
if user_id:
kwargs['user_id'] = user_id
if project_id:
kwargs['project_id'] = project_id
if kwargs.pop('instance_lock_checked', None) is not None:
# TODO(mriedem): Let this be a hard failure in 19.0.0 (S).
warnings.warn("The 'instance_lock_checked' kwarg to "
"nova.context.RequestContext is no longer used and "
"will be removed in a future version.")
super(RequestContext, self).__init__(is_admin=is_admin, **kwargs)
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
if s.get('type') in ('image', 'block-storage', 'volumev3',
'key-manager', 'placement', 'network')]
else:
# if list is empty or none
self.service_catalog = []
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
# NOTE(dheeraj): The following attributes are used by cellsv2 to store
# connection information for connecting to the target cell.
# It is only manipulated using the target_cell contextmanager
# provided by this module
self.db_connection = None
self.mq_connection = None
self.cell_uuid = None
self.user_auth_plugin = user_auth_plugin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
def get_auth_plugin(self):
if self.user_auth_plugin:
return self.user_auth_plugin
else:
return _ContextAuthPlugin(self.auth_token, self.service_catalog)
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def to_dict(self):
values = super(RequestContext, self).to_dict()
# FIXME(dims): defensive hasattr() checks need to be
# removed once we figure out why we are seeing stack
# traces
values.update({
'user_id': getattr(self, 'user_id', None),
'project_id': getattr(self, 'project_id', None),
'is_admin': getattr(self, 'is_admin', None),
'read_deleted': getattr(self, 'read_deleted', 'no'),
'remote_address': getattr(self, 'remote_address', None),
'timestamp': utils.strtime(self.timestamp) if hasattr(
self, 'timestamp') else None,
'request_id': getattr(self, 'request_id', None),
'quota_class': getattr(self, 'quota_class', None),
'user_name': getattr(self, 'user_name', None),
'service_catalog': getattr(self, 'service_catalog', None),
'project_name': getattr(self, 'project_name', None),
})
        # NOTE(tonyb): This can be removed once we're certain that a
        # RequestContext contains 'is_admin_project'. We can only get away with
# this because we "know" the default value of 'is_admin_project' which
# is very fragile.
values.update({
'is_admin_project': getattr(self, 'is_admin_project', True),
})
return values
@classmethod
def from_dict(cls, values):
return super(RequestContext, cls).from_dict(
values,
user_id=values.get('user_id'),
project_id=values.get('project_id'),
# TODO(sdague): oslo.context has show_deleted, if
# possible, we should migrate to that in the future so we
# don't need to be different here.
read_deleted=values.get('read_deleted', 'no'),
remote_address=values.get('remote_address'),
timestamp=values.get('timestamp'),
quota_class=values.get('quota_class'),
service_catalog=values.get('service_catalog'),
)
def elevated(self, read_deleted=None):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
# context.roles must be deepcopied to leave original roles
# without changes
context.roles = copy.deepcopy(self.roles)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
def can(self, action, target=None, fatal=True):
"""Verifies that the given action is valid on the target in this context.
:param action: string representing the action to be checked.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``.
If None, then this default target will be considered:
{'project_id': self.project_id, 'user_id': self.user_id}
:param fatal: if False, will return False when an exception.Forbidden
occurs.
:raises nova.exception.Forbidden: if verification fails and fatal is
True.
:return: returns a non-False value (not necessarily "True") if
authorized and False if not authorized and fatal is False.
"""
if target is None:
target = {'project_id': self.project_id,
'user_id': self.user_id}
try:
return policy.authorize(self, action, target)
except exception.Forbidden:
if fatal:
raise
return False
def to_policy_values(self):
policy = super(RequestContext, self).to_policy_values()
policy['is_admin'] = self.is_admin
return policy
def __str__(self):
return "<Context %s>" % self.to_dict()
def get_context():
"""A helper method to get a blank context.
Note that overwrite is False here so this context will not update the
greenthread-local stored context that is used when logging.
"""
return RequestContext(user_id=None,
project_id=None,
is_admin=False,
overwrite=False)
def get_admin_context(read_deleted="no"):
# NOTE(alaski): This method should only be used when an admin context is
# necessary for the entirety of the context lifetime. If that's not the
# case please use get_context(), or create the RequestContext manually, and
# use context.elevated() where necessary. Some periodic tasks may use
# get_admin_context so that their database calls are not filtered on
# project_id.
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def require_context(ctxt):
"""Raise exception.Forbidden() if context is not a user or an
admin context.
"""
if not ctxt.is_admin and not is_user_context(ctxt):
raise exception.Forbidden()
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.Forbidden()
elif context.project_id != project_id:
raise exception.Forbidden()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.Forbidden()
elif context.user_id != user_id:
raise exception.Forbidden()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.Forbidden()
elif context.quota_class != class_name:
raise exception.Forbidden()
def set_target_cell(context, cell_mapping):
"""Adds database connection information to the context
for communicating with the given target_cell.
This is used for permanently targeting a cell in a context.
Use this when you want all subsequent code to target a cell.
Passing None for cell_mapping will untarget the context.
:param context: The RequestContext to add connection information
:param cell_mapping: An objects.CellMapping object or None
"""
global CELL_CACHE
if cell_mapping is not None:
# avoid circular import
from nova.db import api as db
from nova import rpc
# Synchronize access to the cache by multiple API workers.
@utils.synchronized(cell_mapping.uuid)
def get_or_set_cached_cell_and_set_connections():
try:
cell_tuple = CELL_CACHE[cell_mapping.uuid]
except KeyError:
db_connection_string = cell_mapping.database_connection
context.db_connection = db.create_context_manager(
db_connection_string)
if not cell_mapping.transport_url.startswith('none'):
context.mq_connection = rpc.create_transport(
cell_mapping.transport_url)
context.cell_uuid = cell_mapping.uuid
CELL_CACHE[cell_mapping.uuid] = (context.db_connection,
context.mq_connection)
else:
context.db_connection = cell_tuple[0]
context.mq_connection = cell_tuple[1]
context.cell_uuid = cell_mapping.uuid
get_or_set_cached_cell_and_set_connections()
else:
context.db_connection = None
context.mq_connection = None
context.cell_uuid = None
@contextmanager
def target_cell(context, cell_mapping):
"""Yields a new context with connection information for a specific cell.
This function yields a copy of the provided context, which is targeted to
the referenced cell for MQ and DB connections.
    Passing None for cell_mapping will yield an untargeted copy of the context.
:param context: The RequestContext to add connection information
:param cell_mapping: An objects.CellMapping object or None
"""
# Create a sanitized copy of context by serializing and deserializing it
    # (like we would do over RPC). This helps ensure that we have a clean
# copy of the context with all the tracked attributes, but without any
# of the hidden/private things we cache on a context. We do this to avoid
# unintentional sharing of cached thread-local data across threads.
# Specifically, this won't include any oslo_db-set transaction context, or
# any existing cell targeting.
cctxt = RequestContext.from_dict(context.to_dict())
set_target_cell(cctxt, cell_mapping)
yield cctxt
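# Illustrative sketch of target_cell() usage, assuming `cell_mapping` is an
# objects.CellMapping and `fetch_instances` stands in for any function that
# accepts a cell-targeted RequestContext.
#
#     with target_cell(ctxt, cell_mapping) as cctxt:
#         instances = fetch_instances(cctxt)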
def scatter_gather_cells(context, cell_mappings, timeout, fn, *args, **kwargs):
"""Target cells in parallel and return their results.
The first parameter in the signature of the function to call for each cell
should be of type RequestContext.
:param context: The RequestContext for querying cells
:param cell_mappings: The CellMappings to target in parallel
:param timeout: The total time in seconds to wait for all the results to be
gathered
:param fn: The function to call for each cell
:param args: The args for the function to call for each cell, not including
the RequestContext
:param kwargs: The kwargs for the function to call for each cell
:returns: A dict {cell_uuid: result} containing the joined results. The
did_not_respond_sentinel will be returned if a cell did not
respond within the timeout. The raised_exception_sentinel will
be returned if the call to a cell raised an exception. The
exception will be logged.
"""
greenthreads = []
queue = eventlet.queue.LightQueue()
results = {}
def gather_result(cell_mapping, fn, context, *args, **kwargs):
cell_uuid = cell_mapping.uuid
try:
with target_cell(context, cell_mapping) as cctxt:
result = fn(cctxt, *args, **kwargs)
except Exception:
LOG.exception('Error gathering result from cell %s', cell_uuid)
result = raised_exception_sentinel
# The queue is already synchronized.
queue.put((cell_uuid, result))
for cell_mapping in cell_mappings:
greenthreads.append((cell_mapping.uuid,
utils.spawn(gather_result, cell_mapping,
fn, context, *args, **kwargs)))
with eventlet.timeout.Timeout(timeout, exception.CellTimeout):
try:
while len(results) != len(greenthreads):
cell_uuid, result = queue.get()
results[cell_uuid] = result
except exception.CellTimeout:
# NOTE(melwitt): We'll fill in did_not_respond_sentinels at the
# same time we kill/wait for the green threads.
pass
# Kill the green threads still pending and wait on those we know are done.
for cell_uuid, greenthread in greenthreads:
if cell_uuid not in results:
greenthread.kill()
results[cell_uuid] = did_not_respond_sentinel
LOG.warning('Timed out waiting for response from cell %s',
cell_uuid)
else:
greenthread.wait()
return results
def load_cells():
global CELLS
if not CELLS:
CELLS = objects.CellMappingList.get_all(get_admin_context())
LOG.debug('Found %(count)i cells: %(cells)s',
dict(count=len(CELLS),
cells=','.join([c.identity for c in CELLS])))
if not CELLS:
LOG.error('No cells are configured, unable to continue')
def scatter_gather_skip_cell0(context, fn, *args, **kwargs):
"""Target all cells except cell0 in parallel and return their results.
The first parameter in the signature of the function to call for
each cell should be of type RequestContext. There is a timeout for
waiting on all results to be gathered.
:param context: The RequestContext for querying cells
:param fn: The function to call for each cell
:param args: The args for the function to call for each cell, not including
the RequestContext
:param kwargs: The kwargs for the function to call for each cell
:returns: A dict {cell_uuid: result} containing the joined results. The
did_not_respond_sentinel will be returned if a cell did not
respond within the timeout. The raised_exception_sentinel will
be returned if the call to a cell raised an exception. The
exception will be logged.
"""
load_cells()
cell_mappings = [cell for cell in CELLS if not cell.is_cell0()]
return scatter_gather_cells(context, cell_mappings, CELL_TIMEOUT,
fn, *args, **kwargs)
def scatter_gather_single_cell(context, cell_mapping, fn, *args, **kwargs):
"""Target the provided cell and return its results or sentinels in case of
failure.
The first parameter in the signature of the function to call for each cell
should be of type RequestContext.
:param context: The RequestContext for querying cells
:param cell_mapping: The CellMapping to target
:param fn: The function to call for each cell
:param args: The args for the function to call for each cell, not including
the RequestContext
:param kwargs: The kwargs for the function to call for this cell
:returns: A dict {cell_uuid: result} containing the joined results. The
did_not_respond_sentinel will be returned if the cell did not
respond within the timeout. The raised_exception_sentinel will
be returned if the call to the cell raised an exception. The
exception will be logged.
"""
return scatter_gather_cells(context, [cell_mapping], CELL_TIMEOUT, fn,
*args, **kwargs)
def scatter_gather_all_cells(context, fn, *args, **kwargs):
"""Target all cells in parallel and return their results.
The first parameter in the signature of the function to call for
each cell should be of type RequestContext. There is a timeout for
waiting on all results to be gathered.
:param context: The RequestContext for querying cells
:param fn: The function to call for each cell
:param args: The args for the function to call for each cell, not including
the RequestContext
:param kwargs: The kwargs for the function to call for each cell
:returns: A dict {cell_uuid: result} containing the joined results. The
did_not_respond_sentinel will be returned if a cell did not
respond within the timeout. The raised_exception_sentinel will
be returned if the call to a cell raised an exception. The
exception will be logged.
"""
load_cells()
return scatter_gather_cells(context, CELLS, CELL_TIMEOUT,
fn, *args, **kwargs)
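# Illustrative sketch of consuming scatter_gather_all_cells() results; the
# per-cell callable `list_instances_in_cell` is assumed, and the sentinel
# checks follow the contract described in the docstrings above.
#
#     results = scatter_gather_all_cells(ctxt, list_instances_in_cell)
#     for cell_uuid, result in results.items():
#         if result is did_not_respond_sentinel:
#             pass  # the cell timed out
#         elif result is raised_exception_sentinel:
#             pass  # the call raised; the exception was already logged
#         else:
#             pass  # use the cell's result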
|
apache-2.0
| -4,422,274,522,955,957,000
| 39.763441
| 81
| 0.636859
| false
| 4.402168
| false
| false
| false
|
buzztroll/staccato
|
staccato/scheduler/simple_thread.py
|
1
|
1673
|
import time
import staccato.openstack.common.service as os_service
import staccato.xfer.events as s_events
import staccato.xfer.executor as s_executor
import staccato.xfer.constants as s_constants
from staccato.xfer.constants import Events
import staccato.db as s_db
class SimpleCountSchedler(os_service.Service):
def __init__(self, conf):
super(SimpleCountSchedler, self).__init__()
self.max_at_once = 1
self.db_obj = s_db.StaccatoDB(conf)
self.executor = s_executor.SimpleThreadExecutor(conf)
self.state_machine = s_events.XferStateMachine(self.executor)
self.running = 0
self.done = False
self._started_ids = []
def _poll_db(self):
while not self.done:
time.sleep(1)
self._check_for_transfers()
def _new_transfer(self, request):
self.running += 1
self._started_ids.append(request.id)
self.state_machine.event_occurred(Events.EVENT_START,
xfer_request=request,
db=self.db_obj)
def _transfer_complete(self):
self.running -= 1
def _check_for_transfers(self):
requests = self.db_obj.get_xfer_requests(self._started_ids)
for r in requests:
if s_constants.is_state_done_running(r.state):
self._started_ids.remove(r.id)
avail = self.max_at_once - len(self._started_ids)
xfer_request_ready = self.db_obj.get_all_ready(limit=avail)
for request in xfer_request_ready:
self._new_transfer(request)
def start(self):
self.tg.add_thread(self._poll_db)
|
apache-2.0
| 3,352,977,460,077,363,700
| 33.142857
| 69
| 0.616856
| false
| 3.507338
| false
| false
| false
|
smlng/bgp-stats
|
src/python/bgp-rib-stats.py
|
1
|
13070
|
#!/usr/bin/python
from __future__ import print_function
import argparse
import gzip
import os
import radix
import re
import sys
from bz2 import BZ2File
from datetime import datetime, timedelta
from multiprocessing import Process, Queue, cpu_count, current_process
from netaddr import IPSet
# own imports
import mrtx
verbose = False
warning = False
logging = False
re_file_rv = re.compile('rib.(\d+).(\d\d\d\d).bz2')
re_file_rr = re.compile('bview.(\d+).(\d\d\d\d).gz')
re_path_rv = re.compile('.*/([a-z0-9\.-]+)/bgpdata/\d\d\d\d.\d\d/RIBS.*')
re_path_rr = re.compile('.*/(rrc\d\d)/\d\d\d\d.\d\d.*')
reserved_ipv4 = IPSet (['0.0.0.0/8', # host on this network (RFC1122)
'10.0.0.0/8','172.16.0.0/12','192.168.0.0/16', # private address space (RFC1918)
'100.64.0.0/10', # shared address space (RFC6598)
'127.0.0.0/8', # loopback (RFC1122)
'169.254.0.0/16', # linklocal (RFC3927)
'192.0.0.0/24', # special purpose (RFC6890)
'192.0.0.0/29', # DS-lite (RFC6333)
'192.0.2.0/24','198.51.100.0/24','203.0.113.0/24', # test net 1-3 (RFC5737)
'224.0.0.0/4', # multicast address space
'240.0.0.0/4', # future use (RFC1122)
'255.255.255.255/32' # limited broadcast
])
existing_data = list()
'''
OUTPUT FORMAT:
timestamp|date ; input type (RIB|UPDATE) ; source (route-views.xyz| rrcXY) ; \
#ipv4-prefixes/pfxlength (1..32) ; #ipv4 moas ; #ipv4 bogus \
[; #ipv6-prefix/pfxlength ; #ipv6 moas ; #ipv6 bogus ]
NOTE:
- #ips covered can be derived from #pfx/pfx_len
'''
def print_log(*objs):
if logging or verbose:
print("[LOGS] .", *objs, file=sys.stdout)
def print_info(*objs):
if verbose:
print("[INFO] ..", *objs, file=sys.stdout)
def print_warn(*objs):
if warning or verbose:
print("[WARN] ", *objs, file=sys.stderr)
def print_error(*objs):
print("[ERROR] ", *objs, file=sys.stderr)
def loadPtree(fin):
print_log("call loadPtree (%s)" % (fin))
    f = gzip.open(fin, 'rb') if fin.lower().endswith('.gz') else BZ2File(fin, 'rb')
data = mrtx.parse_mrt_file(f, print_progress=verbose)
f.close()
ptree = radix.Radix()
for prefix, origins in data.items():
pnode = ptree.add(prefix)
pnode.data['asn'] = list()
for o in list(origins):
if o not in pnode.data['asn']:
pnode.data['asn'].append(str(o))
pnode.data['moas'] = len(pnode.data['asn'])
return ptree
# add num_pfx to stats
def getStats (ptree):
print_log("call getStats")
pfxlen = dict()
asn = dict()
num_pfx_moas = 0
# eval prefix tree
for p in ptree:
pl = int(p.prefixlen)
for a in p.data['asn']:
if a not in asn:
asn[a] = list()
asn[a].append(p.prefix)
if p.data['moas'] > 1:
num_pfx_moas += 1
if pl not in pfxlen:
pfxlen[pl] = list()
pfxlen[pl].append(p.prefix)
# asn results
num_asn = len(asn.keys())
num_asn_pfx = list()
num_asn_ips = list()
for a in asn:
num_asn_pfx.append(len(asn[a]))
num_asn_ips.append(len(IPSet(asn[a])))
# min, max, avg/mean, median
if len(num_asn_pfx) < 1:
num_asn_pfx.append(0)
if len(num_asn_ips) < 1:
num_asn_ips.append(0)
min_asn_pfx = min(num_asn_pfx)
max_asn_pfx = max(num_asn_pfx)
avg_asn_pfx = sum(num_asn_pfx)/len(num_asn_pfx)
med_asn_pfx = sorted(num_asn_pfx)[int(round(len(num_asn_pfx)/2))]
min_asn_ips = min(num_asn_ips)
max_asn_ips = max(num_asn_ips)
avg_asn_ips = sum(num_asn_ips)/len(num_asn_ips)
med_asn_ips = sorted(num_asn_ips)[int(round(len(num_asn_ips)/2))]
# prefix and ip results
pl_dict = dict()
for i in range(32): # init with all 0
pl_dict[i+1] = 0
for pl in pfxlen:
pl_dict[pl] = len(pfxlen[pl])
pkeys = sorted(pfxlen.keys(),reverse=False)
prefixIPs = IPSet()
for pk in pkeys:
print_info ("prefix length: "+str(pk)+", #prefixes: "+ str(len(pfxlen[pk])))
prefixIPs = prefixIPs | IPSet(pfxlen[pk])
num_bogus_ips = len(prefixIPs & reserved_ipv4)
num_pfx_ips = len(prefixIPs)
num_pfx = len(ptree.prefixes())
ret = list()
for i in range(32):
ret.append(pl_dict[i+1])
ret.extend([num_pfx,num_pfx_ips,num_bogus_ips,num_pfx_moas,num_asn])
ret.extend([min_asn_pfx,max_asn_pfx,avg_asn_pfx,med_asn_pfx])
ret.extend([min_asn_ips,max_asn_ips,avg_asn_ips,med_asn_ips])
return ret
stats_header = ["pl01","pl02","pl03","pl04","pl05","pl06","pl07","pl08",
"pl09","pl10","pl11","pl12","pl13","pl14","pl15","pl16",
"pl17","pl18","pl19","pl20","pl21","pl22","pl23","pl24",
"pl25","pl26","pl27","pl28","pl29","pl30","pl31","pl32",
"num_pfx","num_pfx_ips","num_bog_ips","num_pfx_moa","num_asn",
"min_asn_pfx","max_asn_pfx","avg_asn_pfx","med_asn_pfx",
"min_asn_ips","max_asn_ips","avg_asn_ips","med_asn_ips"]
def parseFilename(fin):
print_log("call parseFilename (%s)" % (fin))
maptype = 'none'
subtype = 'none'
pn, fn = os.path.split(fin)
if re_path_rr.match(pn):
m = re_path_rr.match(pn)
maptype = 'riperis'
subtype = m.group(1)
elif re_path_rv.match(pn):
m = re_path_rv.match(pn)
maptype = 'routeviews'
subtype = m.group(1)
else:
print_warn("Unknown BGP data source (pathname).")
date = '19700101'
time = '0000'
if re_file_rr.match(fn):
maptype = 'riperis'
m = re_file_rr.match(fn)
date = m.group(1)
time = m.group(2)
elif re_file_rv.match(fn):
maptype = 'routeviews'
m = re_file_rv.match(fn)
date = m.group(1)
time = m.group(2)
else:
print_warn("Unknown BGP data source (filename).")
dt = "%s-%s-%s %s:%s" % (str(date[0:4]),str(date[4:6]),str(date[6:8]),str(time[0:2]),str(time[2:4]))
ts = int((datetime.strptime(dt, "%Y-%m-%d %H:%M") - datetime(1970, 1, 1)).total_seconds())
return ts, maptype, subtype
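# Illustrative sketch of what parseFilename() returns; the path below is
# hypothetical but matches re_path_rv and re_file_rv defined above.
#
#     ts, maptype, subtype = parseFilename(
#         '/data/route-views.linx/bgpdata/2015.06/RIBS/rib.20150601.0000.bz2')
#     # maptype == 'routeviews', subtype == 'route-views.linx',
#     # ts == seconds since 1970-01-01 for 2015-06-01 00:00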
def singleWorker(wd, fin):
print_log("call singleWorker(fin: %s)" % (fin))
ts0, mt0, st0 = parseFilename(fin)
if ts0 not in existing_data:
pt0 = loadPtree(fin)
stats = getStats(pt0)
dout = [ts0,mt0,st0]
dout.extend(stats)
outputStats(wd,dout)
else:
print_info("data set exists, skipping ...")
def statsThread(inq, outq):
print_log("start statsThread")
for fin in iter(inq.get, 'DONE'):
try:
ts0, mt0, st0 = parseFilename(fin)
if ts0 not in existing_data:
pt0 = loadPtree(fin)
stats = getStats(pt0)
dout = [ts0,mt0,st0]
dout.extend(stats)
outq.put(dout)
else:
print_info("data set exists, skipping ...")
        except Exception as e:
            print_error("%s failed on %s with: %s" % (current_process().name, fin, str(e)))
return True
def outputThread(outq, outf):
while True:
odata = outq.get()
if (odata == 'DONE'):
break
try:
outputStats(outf,odata)
        except Exception as e:
            print_error("%s failed writing %s with: %s" % (current_process().name, odata, str(e)))
return True
output_header = ["# timestamp","maptype","subtype"]
output_header.extend(stats_header)
def outputStats (fout, dout):
output = ';'.join(str(x) for x in dout)
if fout:
with open(fout, "a+") as f:
f.write(output+'\n')
else:
print(output)
sys.stdout.flush()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--logging', help='Ouptut logging.', action='store_true')
parser.add_argument('-w', '--warning', help='Output warnings.', action='store_true')
parser.add_argument('-v', '--verbose', help='Verbose output with debug info, logging, and warnings.', action='store_true')
parser.add_argument('-t', '--threads', help='Use threads for parallel and faster processing.', action='store_true', default=False)
parser.add_argument('-n', '--numthreads', help='Set number of threads.', type=int, default=None)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-s', '--single', help='Process a single file, results are printed to STDOUT.')
group.add_argument('-b', '--bulk', help='Process a bunch of files in given directory (optional recursive).')
parser.add_argument('-r', '--recursive', help='Search directories recursivly if in bulk mode.', action='store_true')
parser.add_argument('-f', '--file', help='Write results to file.', default=None)
args = vars(parser.parse_args())
global verbose
verbose = args['verbose']
global warning
warning = args['warning']
global logging
logging = args['logging']
writedata = args['file']
if writedata and os.path.isfile(writedata): # read already written data
with open(writedata, "r") as f:
global existing_data
for line in f:
if line.startswith('#'):
continue
ts = line.split(';')[0].strip()
try:
existing_data.append(int(ts))
except:
print_error("Failure converting timestamp to integer!")
print_info(existing_data[0])
print_log("read %d data sets." % (len(existing_data)))
recursive = args['recursive']
threads = args['threads']
workers = args['numthreads']
if not workers:
        workers = max(1, cpu_count() // 2)
bulk = args['bulk']
single = args['single']
start_time = datetime.now()
print_log("START: " + start_time.strftime('%Y-%m-%d %H:%M:%S'))
if bulk:
print_log('mode: bulk')
if not (os.path.isdir(bulk)):
print_error("Invalid path for bulk processing!")
exit(1)
all_files = []
if recursive:
for dirpath, dirnames, filenames in os.walk(bulk):
for filename in [f for f in filenames if (re_file_rv.match(f) or re_file_rr.match(f))]:
all_files.append(os.path.join(dirpath, filename))
else:
for filename in [f for f in os.listdir(bulk) if (re_file_rv.match(f) or re_file_rr.match(f))]:
all_files.append(os.path.join(bulk, filename))
all_files.sort()
print_log("matching files: %d" % (len(all_files)))
if threads:
input_queue = Queue()
output_queue = Queue()
if len(existing_data) == 0: # write header if no existing data
output_queue.put(output_header)
processes = []
# fill input queue
for f in all_files:
input_queue.put(f)
# start workers to calc stats
for w in xrange(workers):
p = Process(target=statsThread, args=(input_queue,output_queue))
p.start()
processes.append(p)
input_queue.put('DONE')
# start output process to
output_p = Process(target=outputThread, args=(output_queue,writedata))
output_p.start()
for p in processes:
p.join()
output_queue.put('DONE')
output_p.join()
else:
for w in all_files:
singleWorker(writedata, w)
elif single:
print_log("mode: single")
if os.path.isfile(single):
ts0, mt0, st0 = parseFilename(os.path.abspath(single))
if ts0 not in existing_data:
pt0 = loadPtree(single)
stats = getStats(pt0)
dout = [ts0,mt0,st0]
dout.extend(stats)
outputStats(writedata, output_header)
outputStats(writedata, dout)
else:
print_info("data set exists, skipping ...")
else:
print_error("File not found (%s)!" % (single))
else:
print_error("Missing parameter: choose bulk or single mode!")
exit(1)
end_time = datetime.now()
print_log("FINISH: " + end_time.strftime('%Y-%m-%d %H:%M:%S'))
done_time = end_time - start_time
print_log(" processing time [s]: " + str(done_time.total_seconds()))
if __name__ == "__main__":
main()
|
mit
| 6,659,661,468,522,400,000
| 34.137097
| 139
| 0.53619
| false
| 3.293851
| false
| false
| false
|
jtb0/myPiProject
|
editConffile.py
|
1
|
3739
|
#!/usr/bin/env python
#coding: utf8
###############################################################################
#                                                                             #
# python editConffile.py </location/to/conffile.conf> <set|get|append|delete> <section> <variable> [<value>] #
# (<value> is required for set, append and delete; it is ignored for get)     #
#                                                                             #
###############################################################################
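# Illustrative invocations (the conffile path, section and variable names are
# hypothetical):
#   python editConffile.py /etc/myapp.conf set general loglevel debug
#   python editConffile.py /etc/myapp.conf get general loglevel
#   python editConffile.py /etc/myapp.conf append general plugins foo
#   python editConffile.py /etc/myapp.conf delete general plugins foo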
import sys
import ConfigParser
DEBUG="false"
true="true"
conffile=sys.argv[1]
if (DEBUG == true) : print "conffile:"
if (DEBUG == true) : print conffile
option=sys.argv[2]
if (DEBUG == true) : print "option:"
if (DEBUG == true) : print option
section=sys.argv[3]
if (DEBUG == true) : print "section"
if (DEBUG == true) : print section
variable=sys.argv[4]
if (DEBUG == true) : print "variable"
if (DEBUG == true) : print variable
value = sys.argv[5] if len(sys.argv) > 5 else None
if (DEBUG == true) : print "value"
if (DEBUG == true) : print value
cp = ConfigParser.ConfigParser()
cp.read(conffile)
def optionSet(conffile, section, variable, value):
if (DEBUG == true) : print "set-Block:"
if (cp.has_section(section)):
cp.set(str(section), str(variable), str(value))
with open(str(conffile), 'w') as configfile:
cp.write(configfile)
else :
cp.add_section(section)
cp.set(str(section), str(variable), str(value))
with open(str(conffile), 'w') as configfile:
cp.write(configfile)
if (option == "set"): optionSet(conffile, section, variable, value)
def optionGet(conffile, section, variable):
if (DEBUG == true) : print "get-Block:"
print cp.get(str(section), str(variable))
return cp.get(str(section), str(variable))
if (DEBUG == true) : print "end"
if (option == "get"): optionGet(conffile, section, variable)
def optionAppend(conffile, section, variable, value):
if (DEBUG == true) : print "append-Block:"
try:
if (DEBUG == true) : print "try NoOptionError"
#try if there is already an entry at the configfile
cp.has_option(section, variable)
#if there is an entry read the list into the entity list1
list1 = list(eval(cp.get(section, variable), {}, {}))
if (DEBUG == true) : print "Hier kommt die Liste:"
if (DEBUG == true) : print list1
#append the value to the existing list
list1 = list(list1) + list([value])
if (DEBUG == true) : print list1
#persist the new list in the configfile
cp.set(str(section), str(variable), str(list1))
with open(str(conffile), 'w') as configfile:
cp.write(configfile)
except ConfigParser.NoOptionError:
if (DEBUG == true) : print "NoOptionError raised"
#if there is no entry for the variable at the conffile the entry will be done by the optionSet method with the value given as list object
optionSet(conffile, section, variable, list([value]))
if (DEBUG == true) : print "NoOptionError raised"
#optionAppend(conffile, section, variable, value)
#else:
if (option == "append"): optionAppend(conffile, section, variable, value)
if (option == "delete") :
if (DEBUG == true) : print "delete-Block:"
deleteList = [value]
if (cp.has_option(section, variable)):
list1 = eval(cp.get(section, variable), {}, {})
if (DEBUG == true) : print "Hier kommt die Liste:"
if (DEBUG == true) : print list1
for index, item in enumerate(list1):
if item in deleteList :
list1.pop(index)
if (DEBUG == true) : print list1
cp.set(str(section), str(variable), str(list1))
with open(str(conffile), 'w') as configfile:
cp.write(configfile)
|
gpl-3.0
| -3,766,158,335,119,960,000
| 35.656863
| 138
| 0.577695
| false
| 3.694664
| true
| false
| false
|
shadowgamefly/news-Digest
|
web/web/settings.py
|
1
|
2732
|
"""
Django settings for web project.
Generated by 'django-admin startproject' using Django 1.8.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '07n8$i4x*gdwpwux6ehv*^598i6d=&4w@&di!gk$y^s+pe#x0='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'web',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'static/templates/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
|
mit
| 1,399,339,844,919,125,500
| 25.269231
| 71
| 0.689239
| false
| 3.415
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_authentication.py
|
1
|
4537
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_authentication.py
DESCRIPTION:
This sample demonstrates how to authenticate to the Form Recognizer service.
There are two supported methods of authentication:
1) Use a Form Recognizer API key with AzureKeyCredential from azure.core.credentials
2) Use a token credential from azure-identity to authenticate with Azure Active Directory
See more details about authentication here:
https://docs.microsoft.com/azure/cognitive-services/authentication
USAGE:
python sample_authentication.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Form Recognizer resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) AZURE_CLIENT_ID - the client ID of your active directory application.
4) AZURE_TENANT_ID - the tenant ID of your active directory application.
5) AZURE_CLIENT_SECRET - the secret of your active directory application.
"""
import os
class AuthenticationSample(object):
url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/main/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/forms/Form_1.jpg"
def authentication_with_api_key_credential_form_recognizer_client(self):
# [START create_fr_client_with_key]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(key))
# [END create_fr_client_with_key]
poller = form_recognizer_client.begin_recognize_content_from_url(self.url)
result = poller.result()
def authentication_with_azure_active_directory_form_recognizer_client(self):
# [START create_fr_client_with_aad]
"""DefaultAzureCredential will use the values from these environment
variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET
"""
from azure.ai.formrecognizer import FormRecognizerClient
from azure.identity import DefaultAzureCredential
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
form_recognizer_client = FormRecognizerClient(endpoint, credential)
# [END create_fr_client_with_aad]
poller = form_recognizer_client.begin_recognize_content_from_url(self.url)
result = poller.result()
def authentication_with_api_key_credential_form_training_client(self):
# [START create_ft_client_with_key]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormTrainingClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_training_client = FormTrainingClient(endpoint, AzureKeyCredential(key))
# [END create_ft_client_with_key]
properties = form_training_client.get_account_properties()
def authentication_with_azure_active_directory_form_training_client(self):
# [START create_ft_client_with_aad]
"""DefaultAzureCredential will use the values from these environment
variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET
"""
from azure.ai.formrecognizer import FormTrainingClient
from azure.identity import DefaultAzureCredential
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
credential = DefaultAzureCredential()
form_training_client = FormTrainingClient(endpoint, credential)
# [END create_ft_client_with_aad]
properties = form_training_client.get_account_properties()
if __name__ == '__main__':
sample = AuthenticationSample()
sample.authentication_with_api_key_credential_form_recognizer_client()
sample.authentication_with_azure_active_directory_form_recognizer_client()
sample.authentication_with_api_key_credential_form_training_client()
sample.authentication_with_azure_active_directory_form_training_client()
|
mit
| 5,170,395,117,075,362,000
| 44.37
| 156
| 0.707075
| false
| 4.018601
| false
| false
| false
|
christiancg/indoor
|
authenticationdecorator.py
|
1
|
1123
|
from functools import wraps
from flask import request, Response
from modelos import Usuario
from logger import Logger
log = Logger(__name__)
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
try:
usr = Usuario.query.filter(Usuario.nombre == username).first()
if usr is None:
return False
elif usr.password != password:
return False
else:
return True
except Exception, ex:
log.exception(ex)
return False
def authenticate():
"""Sends a 401 response that enables basic auth"""
	return Response(response='{"status": "Could not verify your access level for that URL. '
		'You have to login with proper credentials"}', status=401,
		headers={'WWW-Authenticate': 'Basic realm="Login Required"'},
		mimetype='application/json')
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
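# Illustrative sketch of protecting a Flask view with the decorator above;
# `app` is an assumed Flask application object defined elsewhere.
#
#     @app.route('/api/secure')
#     @requires_auth
#     def secure_endpoint():
#         return Response(response='{"ok": true}', mimetype='application/json')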
|
apache-2.0
| 454,912,826,066,127,740
| 27.794872
| 68
| 0.680321
| false
| 4.02509
| false
| false
| false
|
hyperized/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_lock_info.py
|
1
|
8055
|
#!/usr/bin/python
#
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_lock_info
version_added: "2.9"
short_description: Get Azure lock facts
description:
    - Get facts of Azure locks.
options:
name:
description:
- Name of the lock.
type: str
required: true
managed_resource_id:
description:
- ID of the resource where need to manage the lock.
- Get this via facts module.
            - Mutually exclusive with I(resource_group).
            - Queries the subscription if neither I(managed_resource_id) nor I(resource_group) is defined.
- "'/subscriptions/{subscriptionId}' for subscriptions."
- "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups."
- "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources."
- Can get all locks with 'child scope' for this resource, use I(managed_resource_id) in response for further management.
type: str
resource_group:
description:
- Resource group name where need to manage the lock.
- The lock is in the resource group level.
            - Mutually exclusive with I(managed_resource_id).
            - Queries the subscription if neither I(managed_resource_id) nor I(resource_group) is defined.
- Can get all locks with 'child scope' in this resource group, use the I(managed_resource_id) in response for further management.
type: str
extends_documentation_fragment:
- azure
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Get myLock details of myVM
azure_rm_lock_info:
name: myLock
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
- name: List locks of myVM
azure_rm_lock_info:
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
- name: List locks of myResourceGroup
azure_rm_lock_info:
resource_group: myResourceGroup
- name: List locks of myResourceGroup
azure_rm_lock_info:
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup
- name: List locks of mySubscription
azure_rm_lock_info:
- name: List locks of mySubscription
azure_rm_lock_info:
managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
'''
RETURN = '''
locks:
description:
- List of locks dicts.
returned: always
type: complex
contains:
id:
description:
- ID of the Lock.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Authorization/locks/myLock"
name:
description:
- Name of the lock.
returned: always
type: str
sample: myLock
level:
description:
- Type level of the lock.
returned: always
type: str
sample: can_not_delete
notes:
description:
- Notes of the lock added by creator.
returned: always
type: str
sample: "This is a lock"
''' # NOQA
import json
import re
from ansible.module_utils.common.dict_transformations import _camel_to_snake
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMLockInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
managed_resource_id=dict(type='str')
)
self.results = dict(
changed=False,
locks=[]
)
mutually_exclusive = [['resource_group', 'managed_resource_id']]
self.name = None
self.resource_group = None
self.managed_resource_id = None
self._mgmt_client = None
self._query_parameters = {'api-version': '2016-09-01'}
self._header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
super(AzureRMLockInfo, self).__init__(self.module_arg_spec, facts_module=True, mutually_exclusive=mutually_exclusive, supports_tags=False)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_lock_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_lock_facts' module has been renamed to 'azure_rm_lock_info'", version='2.13')
for key in self.module_arg_spec.keys():
setattr(self, key, kwargs[key])
self._mgmt_client = self.get_mgmt_svc_client(GenericRestClient, base_url=self._cloud_environment.endpoints.resource_manager)
changed = False
# construct scope id
scope = self.get_scope()
url = '/{0}/providers/Microsoft.Authorization/locks'.format(scope)
if self.name:
url = '{0}/{1}'.format(url, self.name)
locks = self.list_locks(url)
resp = locks.get('value') if 'value' in locks else [locks]
self.results['locks'] = [self.to_dict(x) for x in resp]
return self.results
def to_dict(self, lock):
resp = dict(
id=lock['id'],
name=lock['name'],
level=_camel_to_snake(lock['properties']['level']),
managed_resource_id=re.sub('/providers/Microsoft.Authorization/locks/.+', '', lock['id'])
)
if lock['properties'].get('notes'):
resp['notes'] = lock['properties']['notes']
if lock['properties'].get('owners'):
resp['owners'] = [x['application_id'] for x in lock['properties']['owners']]
return resp
def list_locks(self, url):
try:
resp = self._mgmt_client.query(url=url,
method='GET',
query_parameters=self._query_parameters,
header_parameters=self._header_parameters,
body=None,
expected_status_codes=[200],
polling_timeout=None,
polling_interval=None)
return json.loads(resp.text)
except CloudError as exc:
self.fail('Error when finding locks {0}: {1}'.format(url, exc.message))
def get_scope(self):
'''
Get the resource scope of the lock management.
'/subscriptions/{subscriptionId}' for subscriptions,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources.
'''
if self.managed_resource_id:
return self.managed_resource_id
elif self.resource_group:
return '/subscriptions/{0}/resourcegroups/{1}'.format(self.subscription_id, self.resource_group)
else:
return '/subscriptions/{0}'.format(self.subscription_id)
def main():
AzureRMLockInfo()
if __name__ == '__main__':
main()
|
gpl-3.0
| 6,295,349,006,659,803,000
| 35.121076
| 156
| 0.611546
| false
| 4.199687
| false
| false
| false
|
CanaimaGNULinux/canaimagnulinux.userdata
|
canaimagnulinux/userdata/userdataschema.py
|
1
|
6237
|
# -*- coding: utf-8 -*-
from canaimagnulinux.userdata import _
from plone.app.users.userdataschema import IUserDataSchema
from plone.app.users.userdataschema import IUserDataSchemaProvider
from zope import schema
from zope.interface import implements
from zope.schema import ValidationError
class TermsNotAccepted(ValidationError):
__doc__ = _(u'You must accept the terms and conditions for registering an account!')
def validateAccept(value):
""" Validate if accepted the terms of use for this site. """
# if value is not True:
# return False
if not value:
raise TermsNotAccepted(value)
return True
def getCommonTerms():
""" Get the common terms of use for this site. """
commonterms_url = 'terminos-y-convenios/condiciones-generales-miembros'
return commonterms_url
class UserDataSchemaProvider(object):
implements(IUserDataSchemaProvider)
def getSchema(self):
""" Get custom User Data Squema """
return IEnhancedUserDataSchema
class IEnhancedUserDataSchema(IUserDataSchema):
""" Use all the fields from the default user data schema,
and add various extra fields.
"""
firstname = schema.TextLine(
title=_(u'label_firstname', default=u'First name'),
description=_(u'help_firstname',
default=u'Fill in your given name.'),
required=True,)
lastname = schema.TextLine(
title=_(u'label_lastname', default=u'Last name'),
description=_(u'help_lastname',
default=u'Fill in your surname or your family name.'),
required=True,)
gender = schema.Choice(
title=_(u'label_gender', default=u'Gender'),
description=_(u'help_gender',
default=u'Male / Female?'),
values=['Male', 'Female'],
required=True,)
birthdate = schema.Date(
title=_(u'label_birthdate', default=u'Birthdate'),
description=_(u'help_birthdate',
default=u'Your date of birth, in the format dd-mm-yyyy'),
required=False,)
mobile = schema.TextLine(
title=_(u'label_mobile', default=u'Mobile'),
description=_(u'help_mobile',
default=u'Fill in your mobile number.'),
required=False,)
officephone = schema.TextLine(
title=_(u'label_officephone', default=u'Office number'),
description=_(u'help_officephone',
default=u'Fill in your office number.'),
required=False,)
irc = schema.TextLine(
title=_(u'label_irc', default=u'IRC nickname'),
description=_(u'help_irc',
default=u'Fill in your IRC nickname.'),
required=False,)
telegram = schema.TextLine(
title=_(u'label_telegram', default=u'Telegram account'),
description=_(u'help_telegram',
default=u'Fill in your Telegram account, in the format @telegram-nickname'),
required=False,)
skype = schema.TextLine(
title=_(u'label_skype', default=u'Skype account'),
description=_(u'help_skype',
default=u'Fill in your skype account.'),
required=False,)
twitter = schema.TextLine(
title=_(u'label_twitter', default=u'Twitter account'),
description=_(u'help_twitter',
default=u'Fill in your Twitter account.'),
required=False,)
instagram = schema.TextLine(
title=_(u'label_instagram', default=u'Instagram account'),
description=_(u'help_instagram',
default=u'Fill in your Instagram account.'),
required=False,)
facebook = schema.TextLine(
title=_(u'label_facebook', default=u'Facebook account'),
description=_(u'help_facebook',
default=u'Fill in your Facebook account.'),
required=False,)
country = schema.TextLine(
title=_(u'label_country', default=u'Country'),
description=_(u'help_country',
default=u'Fill in the country you live in.'),
required=False,)
city = schema.TextLine(
title=_(u'label_city', default=u'City'),
description=_(u'help_city',
default=u'Fill in the city you live in.'),
required=False,)
institution = schema.TextLine(
title=_(u'label_institution', default=u'Institution / Organization'),
description=_(u'help_institution',
default=u'Fill in the institution where you work.'),
required=False,)
instadd = schema.TextLine(
title=_(u'label_instadd', default=u'Institution address'),
description=_(u'help_instadd',
default=u'Fill in the address of the institution where you work.'),
required=False,)
position = schema.TextLine(
title=_(u'label_position', default=u'Current position'),
description=_(u'help_instadd',
default=u'Fill in the current position.'),
required=False,)
profession = schema.TextLine(
title=_(u'label_profession', default=u'Profession'),
description=_(u'help_profession',
default=u'Fill in your profession.'),
required=False,)
# newsletter = schema.Bool(
# title=_(u'label_newsletter', default=u'Subscribe to newsletter'),
# description=_(u'help_newsletter',
# default=u'If you tick this box, we'll subscribe you to "
# "our newsletter.'),
# required=False,)
accept = schema.Bool(
title=_(u'label_accept', default=u'Accept terms of use'),
description=_(u'help_accept',
default=u'Tick this box to indicate that you have found, read and accepted the '
'<a id=\'commonterms\' target=\'_blank\' href=\'' + getCommonTerms() + '\' title=\'Terms of use for this site.\'>terms of use</a> for this site.'),
# description=_(u'help_accept',
# default=u'Tick this box to indicate that you have found,'
# ' read and accepted the terms of use for this site. '),
required=False,
constraint=validateAccept,)
|
gpl-2.0
| -6,964,262,227,185,304,000
| 39.764706
| 169
| 0.601571
| false
| 4.171906
| false
| false
| false
|
jessepeterson/commandment
|
commandment/dep/__init__.py
|
1
|
3899
|
from typing import Set, Dict
from enum import Enum
class SetupAssistantStep(Enum):
"""This enumeration contains all possible steps of Setup Assistant that can be skipped.
See Also:
- `DEP Web Services: Define Profile <https://developer.apple.com/library/content/documentation/Miscellaneous/Reference/MobileDeviceManagementProtocolRef/4-Profile_Management/ProfileManagement.html#//apple_ref/doc/uid/TP40017387-CH7-SW30>`_.
"""
"""Skips Apple ID setup."""
AppleID = 'AppleID'
"""Skips Touch ID setup."""
Biometric = 'Biometric'
"""Disables automatically sending diagnostic information."""
Diagnostics = 'Diagnostics'
"""Skips DisplayTone setup."""
DisplayTone = 'DisplayTone'
"""Disables Location Services."""
Location = 'Location'
"""Hides and disables the passcode pane."""
Passcode = 'Passcode'
"""Skips Apple Pay setup."""
Payment = 'Payment'
"""Skips privacy pane."""
Privacy = 'Privacy'
"""Disables restoring from backup."""
Restore = 'Restore'
SIMSetup = 'SIMSetup'
"""Disables Siri."""
Siri = 'Siri'
"""Skips Terms and Conditions."""
TOS = 'TOS'
"""Skips zoom setup."""
Zoom = 'Zoom'
"""If the Restore pane is not skipped, removes Move from Android option from it."""
Android = 'Android'
"""Skips the Home Button screen in iOS."""
HomeButtonSensitivity = 'HomeButtonSensitivity'
"""Skips on-boarding informational screens for user education (“Cover Sheet, Multitasking & Control Center”,
for example) in iOS."""
iMessageAndFaceTime = 'iMessageAndFaceTime'
"""Skips the iMessage and FaceTime screen in iOS."""
OnBoarding = 'OnBoarding'
"""Skips the screen for Screen Time in iOS."""
ScreenTime = 'ScreenTime'
"""Skips the mandatory software update screen in iOS."""
SoftwareUpdate = 'SoftwareUpdate'
"""Skips the screen for watch migration in iOS."""
WatchMigration = 'WatchMigration'
"""Skips the Choose Your Look screen in macOS."""
Appearance = 'Appearance'
"""Disables FileVault Setup Assistant screen in macOS."""
FileVault = 'FileVault'
"""Skips iCloud Analytics screen in macOS."""
iCloudDiagnostics = 'iCloudDiagnostics'
"""Skips iCloud Documents and Desktop screen in macOS."""
iCloudStorage = 'iCloudStorage'
"""Disables registration screen in macOS"""
Registration = 'Registration'
# ATV
"""Skips the tvOS screen about using aerial screensavers in ATV."""
ScreenSaver = 'ScreenSaver'
"""Skips the Tap To Set Up option in ATV about using an iOS device to set up your ATV (instead of entering all
your account information and setting choices separately)."""
TapToSetup = 'TapToSetup'
"""Skips TV home screen layout sync screen in tvOS."""
TVHomeScreenSync = 'TVHomeScreenSync'
"""Skips the TV provider sign in screen in tvOS."""
TVProviderSignIn = 'TVProviderSignIn'
"""Skips the “Where is this Apple TV?” screen in tvOS."""
TVRoom = 'TVRoom'
SkipSetupSteps = Set[SetupAssistantStep]
class DEPProfileRemovalStatus(Enum):
SUCCESS = "SUCCESS"
NOT_ACCESSIBLE = "NOT_ACCESSIBLE"
FAILED = "FAILED"
SerialNumber = str
DEPProfileRemovals = Dict[SerialNumber, DEPProfileRemovalStatus]
class DEPOrgType(Enum):
"""This enum specifies allowable values for the ``org_type`` field of the dep /account endpoint."""
Education = 'edu'
Organization = 'org'
class DEPOrgVersion(Enum):
"""This enum specifies allowable values for the ``org_version`` field of the dep /account endpoint."""
v1 = 'v1' # Apple Deployment Programmes
v2 = 'v2' # Apple School Manager
class DEPOperationType(Enum):
"""This enum describes the types of operations returned in a DEP Sync Devices result."""
Added = 'added'
Modified = 'modified'
Deleted = 'deleted'
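# Illustrative sketch of building a set of Setup Assistant panes to skip with
# the SetupAssistantStep enum above; the variable names are assumptions.
#
#     skip_panes: SkipSetupSteps = {
#         SetupAssistantStep.AppleID,
#         SetupAssistantStep.Siri,
#         SetupAssistantStep.Diagnostics,
#     }
#     skip_setup_items = sorted(step.value for step in skip_panes)
#     # -> ['AppleID', 'Diagnostics', 'Siri']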
|
mit
| -1,331,331,849,121,758,200
| 36.057143
| 250
| 0.682087
| false
| 3.844862
| false
| false
| false
|
ad510/find_pennapps_hackers
|
find_pennapps_hackers.py
|
1
|
3884
|
#!/usr/bin/env python3
import http.client
import re
import sys
import time
def main():
# print info about me :)
print("Andrew Downing")
print("website: http://andrewd.50webs.com")
print("github: ad510")
print()
# find twitter usernames
twitterUsers = set(findbetween(gethttp("twitter.com", "/search?q=%23PennApps", True), "data-screen-name=\"", "\""))
for twitterUser in twitterUsers:
name = ""
domains = set()
githubUsers = set()
html = gethttp("twitter.com", "/" + twitterUser, True)
# find real name
nameFields = findbetween(html, "<span class=\"profile-field\">", "</span>")
if len(nameFields) > 0:
name = nameFields[0]
print(name)
print("twitter: " + twitterUser)
# find website domains
for url in findurls(html):
url2 = url[:len(url) - 1] if url.endswith("/") else url
if url2.find("twitter.com") == -1 and url2.find("twimg.com") == -1 and (url2.endswith(".com") or url2.endswith(".org") or url2.endswith(".net")):
domains.add(url2)
elif url.find("github.com") != -1:
githubUsers.add(url)
if len(domains) > 0:
print("website: " + str(domains))
# find github accounts
if "--duckduckgo" in sys.argv:
# duckduckgo
html = ""
try:
html = gethttp("duckduckgo.com", "/html/?q=site:github.com " + name, True)
except:
print("error searching 'site:github.com " + name + "'")
for url in findlinks(html):
if url.find("https://github.com/") != -1 and url.count("/") == 3:
githubUsers.add(url.split("github.com/")[1].split("/")[0])
time.sleep(2)
else:
# google
for url in findlinks(gethttp("www.google.com", "/search?q=site:github.com+" + name.replace(" ", "+"), True)):
if url.startswith("/url?q=https://github.com/") and url.count("/") == 4:
githubUsers.add(findbetween(url, "/url?q=https://github.com/", "&")[0].split("%")[0])
# find in website
for domain in domains:
for url in findlinks(gethttpsmart(domain)):
if (url.find("github.com/") != -1):
githubUsers.add(url.split("github.com/")[1].split("/")[0])
if len(githubUsers) > 0:
print("github: " + str(githubUsers))
print()
def gethttpsmart(url):
minusProtocol = url[url.find("//") + 2 : ]
if minusProtocol.find("/") == -1:
minusProtocol += "/"
return gethttp(minusProtocol.split("/")[0], "/" + minusProtocol.split("/")[1], url.startswith("https"))
def gethttp(domain, url, https):
#print(domain, url, https)
conn = http.client.HTTPSConnection(domain) if https else http.client.HTTPConnection(domain)
conn.request("GET", url)
r1 = conn.getresponse()
if (r1.status == 301 or r1.status == 302) and url != "/sorry":
return gethttpsmart(r1.getheader("Location")) # got a "moved permanently" error
elif r1.status != 200:
print("non-normal status connecting to", domain, url, r1.status, r1.reason)
r1str = str(r1.read())
conn.close()
return r1str
def findbetween(string, before, after):
ret = []
for match in re.finditer(re.escape(before), string):
ret.append(string[match.start() + len(before) : string.find(after, match.start() + len(before))])
return ret
def findurls(string): # thanks to https://stackoverflow.com/questions/6883049/regex-to-find-urls-in-string-in-python
return re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
def findlinks(string):
return re.findall('href="?\'?([^"\'>]*)', string)
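# Illustrative examples of what the two helpers return (the HTML inputs are
# made up for the example):
#
#     findbetween('<a href="x">a</a><a href="y">b</a>', 'href="', '"')
#     # -> ['x', 'y']
#     findlinks('<a href="https://github.com/ad510">gh</a>')
#     # -> ['https://github.com/ad510']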
if __name__ == "__main__":
main()
|
mit
| -4,278,235,120,225,218,000
| 40.319149
| 157
| 0.558445
| false
| 3.449378
| false
| false
| false
|
dhowland/EasyAVR
|
keymapper/easykeymap/kleparse.py
|
1
|
9205
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# Easy AVR USB Keyboard Firmware Keymapper
# Copyright (C) 2013-2017 David Howland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Parse JSON files from http://www.keyboard-layout-editor.com and convert
to an EasyAVR layout data structure. The conversion is not complete, because
layouts from KLE don't contain enough information to completely define a board.
This whole thing is a total hack-job. It is not meant to be a perfect solution,
it is only meant to be a quick way to start adding support for a new board.
"""
import json
from .build import NULL_SYMBOL
# All default legends from the ANSI104 and ISO105 predefined layouts.
conversion_table = {
"A": "HID_KEYBOARD_SC_A",
"B": "HID_KEYBOARD_SC_B",
"C": "HID_KEYBOARD_SC_C",
"D": "HID_KEYBOARD_SC_D",
"E": "HID_KEYBOARD_SC_E",
"F": "HID_KEYBOARD_SC_F",
"G": "HID_KEYBOARD_SC_G",
"H": "HID_KEYBOARD_SC_H",
"I": "HID_KEYBOARD_SC_I",
"J": "HID_KEYBOARD_SC_J",
"K": "HID_KEYBOARD_SC_K",
"L": "HID_KEYBOARD_SC_L",
"M": "HID_KEYBOARD_SC_M",
"N": "HID_KEYBOARD_SC_N",
"O": "HID_KEYBOARD_SC_O",
"P": "HID_KEYBOARD_SC_P",
"Q": "HID_KEYBOARD_SC_Q",
"R": "HID_KEYBOARD_SC_R",
"S": "HID_KEYBOARD_SC_S",
"T": "HID_KEYBOARD_SC_T",
"U": "HID_KEYBOARD_SC_U",
"V": "HID_KEYBOARD_SC_V",
"W": "HID_KEYBOARD_SC_W",
"X": "HID_KEYBOARD_SC_X",
"Y": "HID_KEYBOARD_SC_Y",
"Z": "HID_KEYBOARD_SC_Z",
"!\n1": "HID_KEYBOARD_SC_1_AND_EXCLAMATION",
"@\n2": "HID_KEYBOARD_SC_2_AND_AT",
"\"\n2": "HID_KEYBOARD_SC_2_AND_AT",
"#\n3": "HID_KEYBOARD_SC_3_AND_HASHMARK",
"£\n3": "HID_KEYBOARD_SC_3_AND_HASHMARK",
"$\n4": "HID_KEYBOARD_SC_4_AND_DOLLAR",
"%\n5": "HID_KEYBOARD_SC_5_AND_PERCENTAGE",
"^\n6": "HID_KEYBOARD_SC_6_AND_CARET",
"&\n7": "HID_KEYBOARD_SC_7_AND_AND_AMPERSAND",
"*\n8": "HID_KEYBOARD_SC_8_AND_ASTERISK",
"(\n9": "HID_KEYBOARD_SC_9_AND_OPENING_PARENTHESIS",
")\n0": "HID_KEYBOARD_SC_0_AND_CLOSING_PARENTHESIS",
"Enter": "HID_KEYBOARD_SC_ENTER",
"Esc": "HID_KEYBOARD_SC_ESCAPE",
"Backspace": "HID_KEYBOARD_SC_BACKSPACE",
"Tab": "HID_KEYBOARD_SC_TAB",
" ": "HID_KEYBOARD_SC_SPACE",
"_\n-": "HID_KEYBOARD_SC_MINUS_AND_UNDERSCORE",
"+\n=": "HID_KEYBOARD_SC_EQUAL_AND_PLUS",
"{\n[": "HID_KEYBOARD_SC_OPENING_BRACKET_AND_OPENING_BRACE",
"}\n]": "HID_KEYBOARD_SC_CLOSING_BRACKET_AND_CLOSING_BRACE",
"|\n\\": "HID_KEYBOARD_SC_BACKSLASH_AND_PIPE",
"~\n#": "HID_KEYBOARD_SC_NON_US_HASHMARK_AND_TILDE",
":\n;": "HID_KEYBOARD_SC_SEMICOLON_AND_COLON",
"\"\n'": "HID_KEYBOARD_SC_APOSTROPHE_AND_QUOTE",
"@\n'": "HID_KEYBOARD_SC_APOSTROPHE_AND_QUOTE",
"~\n`": "HID_KEYBOARD_SC_GRAVE_ACCENT_AND_TILDE",
"¬\n`": "HID_KEYBOARD_SC_GRAVE_ACCENT_AND_TILDE",
"<\n,": "HID_KEYBOARD_SC_COMMA_AND_LESS_THAN_SIGN",
">\n.": "HID_KEYBOARD_SC_DOT_AND_GREATER_THAN_SIGN",
"?\n/": "HID_KEYBOARD_SC_SLASH_AND_QUESTION_MARK",
"Caps Lock": "HID_KEYBOARD_SC_CAPS_LOCK",
"F1": "HID_KEYBOARD_SC_F1",
"F2": "HID_KEYBOARD_SC_F2",
"F3": "HID_KEYBOARD_SC_F3",
"F4": "HID_KEYBOARD_SC_F4",
"F5": "HID_KEYBOARD_SC_F5",
"F6": "HID_KEYBOARD_SC_F6",
"F7": "HID_KEYBOARD_SC_F7",
"F8": "HID_KEYBOARD_SC_F8",
"F9": "HID_KEYBOARD_SC_F9",
"F10": "HID_KEYBOARD_SC_F10",
"F11": "HID_KEYBOARD_SC_F11",
"F12": "HID_KEYBOARD_SC_F12",
"PrtSc": "HID_KEYBOARD_SC_PRINT_SCREEN",
"Scroll Lock": "HID_KEYBOARD_SC_SCROLL_LOCK",
"Pause\nBreak": "HID_KEYBOARD_SC_PAUSE",
"Insert": "HID_KEYBOARD_SC_INSERT",
"Home": "HID_KEYBOARD_SC_HOME",
"PgUp": "HID_KEYBOARD_SC_PAGE_UP",
"Delete": "HID_KEYBOARD_SC_DELETE",
"End": "HID_KEYBOARD_SC_END",
"PgDn": "HID_KEYBOARD_SC_PAGE_DOWN",
"→": "HID_KEYBOARD_SC_RIGHT_ARROW",
"←": "HID_KEYBOARD_SC_LEFT_ARROW",
"↓": "HID_KEYBOARD_SC_DOWN_ARROW",
"↑": "HID_KEYBOARD_SC_UP_ARROW",
"Num Lock": "HID_KEYBOARD_SC_NUM_LOCK",
"/": "HID_KEYBOARD_SC_KEYPAD_SLASH",
"*": "HID_KEYBOARD_SC_KEYPAD_ASTERISK",
"-": "HID_KEYBOARD_SC_KEYPAD_MINUS",
"+": "HID_KEYBOARD_SC_KEYPAD_PLUS",
"kpEnter": "HID_KEYBOARD_SC_KEYPAD_ENTER",
"1\nEnd": "HID_KEYBOARD_SC_KEYPAD_1_AND_END",
"2\n↓": "HID_KEYBOARD_SC_KEYPAD_2_AND_DOWN_ARROW",
"3\nPgDn": "HID_KEYBOARD_SC_KEYPAD_3_AND_PAGE_DOWN",
"4\n←": "HID_KEYBOARD_SC_KEYPAD_4_AND_LEFT_ARROW",
"5": "HID_KEYBOARD_SC_KEYPAD_5",
"6\n→": "HID_KEYBOARD_SC_KEYPAD_6_AND_RIGHT_ARROW",
"7\nHome": "HID_KEYBOARD_SC_KEYPAD_7_AND_HOME",
"8\n↑": "HID_KEYBOARD_SC_KEYPAD_8_AND_UP_ARROW",
"9\nPgUp": "HID_KEYBOARD_SC_KEYPAD_9_AND_PAGE_UP",
"0\nIns": "HID_KEYBOARD_SC_KEYPAD_0_AND_INSERT",
".\nDel": "HID_KEYBOARD_SC_KEYPAD_DOT_AND_DELETE",
# "|\n\\": "HID_KEYBOARD_SC_NON_US_BACKSLASH_AND_PIPE",
"Menu": "HID_KEYBOARD_SC_APPLICATION",
"=": "HID_KEYBOARD_SC_KEYPAD_EQUAL_SIGN",
"Ctrl": "HID_KEYBOARD_SC_LEFT_CONTROL",
"Shift": "HID_KEYBOARD_SC_LEFT_SHIFT",
"Alt": "HID_KEYBOARD_SC_LEFT_ALT",
"Win": "HID_KEYBOARD_SC_LEFT_GUI",
"rCtrl": "HID_KEYBOARD_SC_RIGHT_CONTROL",
"rShift": "HID_KEYBOARD_SC_RIGHT_SHIFT",
"AltGr": "HID_KEYBOARD_SC_RIGHT_ALT",
"rWin": "HID_KEYBOARD_SC_RIGHT_GUI",
}
def convert(s, legend, width=4, height=4):
"""Utility function to make legends less ambiguous."""
if legend == 'Enter' and width == 4 and height == 8:
legend = 'kpEnter'
elif legend == '' and width > 8:
legend = ' '
elif legend == 'Ctrl':
if s['ctrl']:
legend = 'rCtrl'
else:
s['ctrl'] = True
elif legend == 'Shift':
if s['shift']:
legend = 'rShift'
else:
s['shift'] = True
elif legend == 'Alt':
if s['alt']:
legend = 'AltGr'
else:
s['alt'] = True
elif legend == 'Win':
if s['win']:
legend = 'rWin'
else:
s['win'] = True
try:
return conversion_table[legend]
except KeyError:
return NULL_SYMBOL
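# Illustrative behaviour of `convert()` (the state dict shape mirrors the one
# built in `parse()` below): a repeated modifier legend resolves to its
# right-hand variant on the second occurrence.
#
#   s = {'ctrl': False, 'shift': False, 'alt': False, 'win': False}
#   convert(s, 'Shift')   # -> 'HID_KEYBOARD_SC_LEFT_SHIFT'
#   convert(s, 'Shift')   # -> 'HID_KEYBOARD_SC_RIGHT_SHIFT'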
def parse(path):
"""Open the JSON file at `path` and return a structure of the layout for
use in EasyAVR board config files.
"""
with open(path, encoding="utf8") as fp:
jslayout = json.load(fp)
state = {
'ctrl': False,
'shift': False,
'alt': False,
'win': False,
}
width = 4
height = 4
maxwidth = 0
totalwidth = 0
totalheight = 0
rownum = 0
colnum = 0
maxcols = 0
overhang = False
lastoverhang = False
layout = []
for row in jslayout:
newrow = []
if totalwidth > maxwidth:
maxwidth = totalwidth
totalwidth = 0
if colnum > maxcols:
maxcols = colnum
colnum = 0
overhang = False
for item in row:
if isinstance(item, str):
scancode = convert(state, item, width, height)
newrow.append(((width, height), (rownum, colnum), scancode))
totalwidth += width
width = 4
height = 4
colnum += 1
elif isinstance(item, dict):
for param, val in item.items():
if param == 'w':
width = int(val * 4)
elif param == 'h':
height = int(val * 4)
if height != 8:
raise Exception("Only heights of 1u or 2u are supported.")
overhang = True
elif param == 'x':
if lastoverhang:
# total hack to prevent overlaps in ISO enter
newrow.append((int(val * -4), None, NULL_SYMBOL))
else:
newrow.append((int(val * 4), None, NULL_SYMBOL))
totalwidth += int(val * 4)
elif param == 'y':
layout.append(int(val * 4))
totalheight += int(val * 4)
else:
continue
else:
raise TypeError("Unrecognized object in row array.")
layout.append(newrow)
totalheight += 4
rownum += 1
lastoverhang = overhang
return {
'display_height': totalheight,
'display_width': maxwidth,
'num_rows': rownum,
'num_cols': maxcols,
'layout': layout,
}
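if __name__ == '__main__':
    # Minimal manual check (not part of the original tool's CLI; the argument
    # handling here is an illustrative assumption): pass the path of a
    # keyboard-layout-editor JSON export and print the resulting board size.
    import sys as _sys
    if len(_sys.argv) > 1:
        board = parse(_sys.argv[1])
        print('rows={num_rows} cols={num_cols}'.format(**board))
        print('display {display_width}x{display_height}'.format(**board))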
|
gpl-2.0
| 988,168,901,522,469,400
| 34.747082
| 86
| 0.560684
| false
| 2.957824
| false
| false
| false
|
markvoelker/refstack
|
refstack/db/utils.py
|
1
|
1872
|
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for database."""
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class PluggableBackend(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
"""Init."""
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
"""Get backend."""
if not self.__backend:
backend_name = CONF[self.__pivot]
if backend_name not in self.__backends: # pragma: no cover
raise Exception('Invalid backend: %s' % backend_name)
backend = self.__backends[backend_name]
if isinstance(backend, tuple): # pragma: no cover
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
LOG.debug('backend %s', self.__backend)
return self.__backend
def __getattr__(self, key):
"""Proxy interface to backend."""
backend = self.__get_backend()
return getattr(backend, key)
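# Illustrative wiring of the pluggable backend (the option name, backend key,
# module path, and method name below are assumptions, not part of this module):
#
#   IMPL = PluggableBackend('db_backend',
#                           sqlalchemy='refstack.db.sqlalchemy.api')
#   IMPL.some_db_call(...)   # resolved lazily via CONF['db_backend'] on first use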
|
apache-2.0
| -6,631,642,058,597,606,000
| 32.428571
| 78
| 0.608974
| false
| 4.303448
| false
| false
| false
|
Murkantilism/LoL_API_Research
|
Summoner_Data_Retrieval/DEPRECATED/Generate_Summoners_Hourly.py
|
1
|
1818
|
__author__ = 'Deniz'
import time, subprocess, argparse, getopt
from sys import argv
import sys, os
DEFAULT_NUM_SUMMONERS = 250
DEFAULT_LOCATION = os.curdir + "\_out\Random_Summoners_run_"+str(time.time())
def main():
parser = argparse.ArgumentParser(description='Attempt to generate X number'
' of random summoners hourly.')
parser.add_argument('-out', metavar='o', type=str, default=DEFAULT_LOCATION, help='the output location ' + str(DEFAULT_LOCATION))
parser.add_argument('-num', metavar='n', type=int, default=DEFAULT_NUM_SUMMONERS,
help='number of summoners (default: ' +
str(DEFAULT_NUM_SUMMONERS) + ')',)
args = parser.parse_args()
#print vars(args).values()
# Assign the number of summoners
numSummoners = vars(args).values()[0]
# Assign the output path
outputLocation = vars(args).values()[1]
subprocess.check_call('python Generate_Summoners.py' + ' -out ' +
str(outputLocation) + ' -num ' +
str(numSummoners), shell=True)
subprocess.check_call('python Get_Most_Used_Champion.py' + ' -out ' +
str(outputLocation), shell=True)
subprocess.check_call('python Check_Duplicate_Summoners.py' + ' -out ' +
str(outputLocation), shell=True)
subprocess.check_call('python Scrub_Useless_Summoners.py' + ' -out ' +
str(outputLocation), shell=True)
time.sleep(3600-time.time()%3600)
main()
# The usage information returned when -h parameter is given
def usage():
    print "\nThis is the CLI for the hourly random summoner generator\n"
    print 'Usage: ' + argv[0] + ' -out <output path> -num <number of summoners>'
if __name__ == "__main__":
main()
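# Illustrative invocation (the output directory is an assumption):
#
#   python Generate_Summoners_Hourly.py -num 500 -out ./_out/run_1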
|
mit
| 5,609,995,969,163,271,000
| 38.543478
| 133
| 0.59516
| false
| 3.710204
| false
| false
| false
|
Intelimetrica/coati
|
coati/generator.py
|
1
|
3847
|
from coati.powerpoint import open_pptx, runpowerpoint
import errno
import os
import sys
import logging
from shutil import copyfile
from colorlog import ColoredFormatter
LOG_LEVEL = logging.DEBUG
LOGFORMAT = "%(asctime)s - %(log_color)s%(message)s"
logging.root.setLevel(LOG_LEVEL)
formatter = ColoredFormatter(LOGFORMAT)
stream = logging.StreamHandler()
stream.setLevel(LOG_LEVEL)
stream.setFormatter(formatter)
log = logging.getLogger('pythonConfig')
log.setLevel(LOG_LEVEL)
log.addHandler(stream)
this_dir = os.path.dirname(__file__)
template_path = os.path.join(this_dir, 'templates/slide_template.txt')
config_template_path = os.path.join(this_dir, 'templates/config_template.txt')
init_template_path = os.path.join(this_dir, 'templates/init_template.txt')
def _get_slides_shapes(ppt_path):
pptapp = runpowerpoint()
pptFile = open_pptx(pptapp, ppt_path)
log.debug('Open Template successfully...')
all_slide_shapes = []
for slide in pptFile.Slides:
shapes_in_slide = _get_shapes_in_slide(slide)
all_slide_shapes.append(shapes_in_slide)
pptFile.close()
pptapp.Quit()
log.debug('Finish reading template...')
return all_slide_shapes
def _get_shapes_in_slide(slide):
shapes_in_slide = {each_shape.name: () for each_shape in slide.shapes}
return shapes_in_slide
def _generate_path(p):
if not os.path.exists(os.path.dirname(p)):
try:
os.makedirs(os.path.dirname(p))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def _cp(src, dst, fn):
    # Read src, transform its contents with `fn`, and write the result to dst.
    with open(src, 'r') as source:
        result = fn(source.read())
    with open(dst, 'w') as destination:
        destination.write(result)
def _insert_code(complete_text, text_to_insert, text_to_replace):
ans = complete_text.replace(text_to_replace, text_to_insert)
return ans
def _file_exists(ppt_path):
if not (ppt_path.endswith('.pptx') or ppt_path.endswith('.ppt')):
sys.exit('The file provided is not a PPT file')
elif not os.path.isfile(ppt_path):
        sys.exit('The PPT file provided doesn\'t exist or is damaged')
pass
def generate(project_name, ppt_path):
_file_exists(ppt_path)
path = os.path.abspath(project_name)
spaces = " " * 12
slide_tuples = '['
    #Generate builders/ folder prior to slide creation
path_builders = os.path.join(path, 'builders/')
_generate_path(path_builders)
log.info('create folder %s', "./builders/")
for i, slide in enumerate(_get_slides_shapes(ppt_path)):
slide_name = 'slide' + str(i+1)
filename = path_builders + slide_name + '.py';
#Create slide#.py with the template info
_cp(template_path, filename, lambda source: _insert_code(
source,
str(slide).replace(", ",",\n" + spaces),
'"_-{}-_"'))
log.info('create %s', filename)
        #This line is in the for loop because it gathers info for config.py
slide_tuples += ('\n' + spaces if i != 0 else '') + '(' + str(i+1) + ', ' + slide_name + '.build()),'
#Generate config.py with already gathered info in slide_tuples
config_filename = path + '/config.py'
_cp(config_template_path, config_filename, lambda source: _insert_code(
source,
(slide_tuples[:-1] + ']'),
'"_-{}-_"'))
log.info('create %s', config_filename)
#Create __init__ in builders
init_file = path + '/builders/__init__.py'
copyfile(init_template_path, init_file)
log.info('create %s', init_file)
#Copy original template file
copy_ppt = path + '/' + str(os.path.split(ppt_path)[-1])
_cp(ppt_path, copy_ppt , lambda source: source)
log.info('copy %s', copy_ppt)
#Add images folder
_generate_path(os.path.join(path, 'images/'))
log.info('create folder %s', "./images/")
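# Illustrative call (project name and template path are assumptions):
#
#   generate('quarterly_report', 'template.pptx')
#
# which scaffolds ./quarterly_report/ with builders/slideN.py stubs, a
# config.py wiring the slides together, an images/ folder and a copy of the
# original template file.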
|
mit
| -2,483,990,554,525,858,300
| 32.452174
| 109
| 0.643878
| false
| 3.299314
| true
| false
| false
|
samuelpych/Final-Project
|
finalproject.py
|
1
|
5690
|
"""
Finalproject.py
Author: Sam Pych
Credit: Thomas Kyle Postans, Hagin, My Space Game, David Wilson
Assignment: Create a pong game with two movable blocks and the ball either bounces off the wall
or appears on the other side.
optional: keep score
bounde=self.collidingWithSprites(Pongblock1)
"""
from ggame import App, Sprite, ImageAsset, Frame
from ggame import SoundAsset, Sound, TextAsset, Color
import math
from time import time
from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
class Pongblock(Sprite):
black = Color(0x000000, 1.0)
thinline= LineStyle(1, black)
rectangle_asset=RectangleAsset(50, 100, thinline, black)
#rectangle1 = Sprite(rectangle_asset, (1100,250))
def __init__(self, position):
        super().__init__(Pongblock.rectangle_asset, position)
        self.vy = 1
        self.vx = 0  # the left/right key handlers below adjust this
ponggame.listenKeyEvent("keydown", "up arrow", self.up)
ponggame.listenKeyEvent("keydown","left arrow",self.left)
ponggame.listenKeyEvent("keydown","down arrow",self.down)
ponggame.listenKeyEvent("keydown","right arrow",self.right)
self.fxcenter = self.fycenter = 0.5
def step(self):
self.y += self.vy
#self.y += self.vy
if self.y >480:
self.y=479
if self.y <-1:
self.y=0
def up(self, event):
self.vy -=2
def down(self, event):
self.vy +=2
def left(self, event):
self.vx -=2
def right(self, event):
self.vx +=2
class Pongblock1(Sprite):
black = Color(0x000000, 1.0)
thinline= LineStyle(1, black)
rectangle_asset=RectangleAsset(50, 100, thinline, black)
#rectangle1 = Sprite(rectangle_asset, (1100,250))
def __init__(self, position):
super().__init__(Pongblock1.rectangle_asset, position)
        self.vy = 1
        self.vx = 0  # the left/right key handlers below adjust this
ponggame.listenKeyEvent("keydown", "w", self.up)
ponggame.listenKeyEvent("keydown","a",self.left)
ponggame.listenKeyEvent("keydown","s",self.down)
ponggame.listenKeyEvent("keydown","d",self.right)
self.fxcenter = self.fycenter = 0.5
def step(self):
self.y += self.vy
#self.y += self.vy
if self.y >480:
self.y=479
if self.y <-1:
self.y=0
def up(self, event):
self.vy -=2
def down(self, event):
self.vy +=2
def left(self, event):
self.vx -=2
def right(self, event):
self.vx +=2
class pongball(Sprite):
red = Color(0xff0000, 1.0)
thinline= LineStyle(1, red)
circle_asset=CircleAsset(25, thinline, red)
#circle1 = Sprite(circle_asset, (600,300))
circle=CircleAsset(1500, thinline, red)
def __init__(self, position):
super().__init__(pongball.circle_asset, position)
self.vx = 2
self.vy = 10
previousY = self.vy
self.fxcenter = self.fycenter = 0.5
def step(self):
self.x += self.vx
self.y += self.vy
if self.y >500:
self.vy=-7
if self.y <-1:
self.vy=7
if self.visible:
collides = self.collidingWithSprites(Scoreline)
if len(collides):
if collides[0].visible:
print("arrow keys win")
self.x += self.vx
self.x += self.vx
return True
if self.visible:
collides2 = self.collidingWithSprites(Scoreline2)
if len(collides2):
if collides2[0].visible:
print("wasd wins")
self.x += self.vx
self.x += self.vx
if self.visible:
collides3 = self.collidingWithSprites(Pongblock1)
if len(collides3):
if collides3[0].visible:
self.vx = 6
self.vy = 6
self.x += self.vx
self.y += self.vy
if self.visible:
collides4 = self.collidingWithSprites(Pongblock)
if len(collides4):
if collides4[0].visible:
self.vx = -6
self.vy = -4
self.x += self.vx
self.y += self.vy
class Scoreline(Sprite):
blue = Color(0x0000ff, 1.0)
thinline= LineStyle(1, blue)
rectangle_asset=RectangleAsset(10, 2000, thinline, blue)
#rectangle = Sprite(rectangle_asset, (00,-100))
def __init__(self, position):
super().__init__(Scoreline.rectangle_asset, position)
class Scoreline2(Sprite):
blue = Color(0x0000ff, 1.0)
thinline= LineStyle(1, blue)
rectangle_asset=RectangleAsset(10, 2000, thinline, blue)
#rectangle = Sprite(rectangle_asset, (1200,-100))
def __init__(self, position):
super().__init__(Scoreline2.rectangle_asset, position)
#class Scoreboard:
# Not enough time to do it
class ponggame(App):
def __init__(self, width, height):
super().__init__(width, height)
Pongblock1((100,10))
Scoreline((00,-100))
Pongblock((1100,250))
Scoreline2((1200,-100))
pongball((1000,100))
print(self.getSpritesbyClass(pongball))
def step(self):
for x in self.getSpritesbyClass(Pongblock1):
x.step()
for x in self.getSpritesbyClass(Pongblock):
x.step()
for x in self.getSpritesbyClass(pongball):
x.step()
def restart(self):
ponggame.listenKeyEvent("keydown","spacebar",self.restart)
app = ponggame(0,0)
app.run()
|
mit
| -5,657,514,026,309,861,000
| 33.70122
| 104
| 0.577153
| false
| 3.490798
| false
| false
| false
|
zasdfgbnm/tensorflow
|
tensorflow/contrib/quantize/python/common.py
|
1
|
4098
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities used across this package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
# Skip all operations that are backprop related or export summaries.
SKIPPED_PREFIXES = (
'gradients/', 'RMSProp/', 'Adagrad/', 'Const_', 'HistogramSummary',
'ScalarSummary')
# Valid activation ops for quantization end points.
_ACTIVATION_OP_SUFFIXES = ['/Relu6', '/Relu', '/Identity']
# Regular expression for recognizing nodes that are part of batch norm group.
_BATCHNORM_RE = re.compile(r'^(.*)/BatchNorm/batchnorm')
def BatchNormGroups(graph):
"""Finds batch norm layers, returns their prefixes as a list of strings.
Args:
graph: Graph to inspect.
Returns:
List of strings, prefixes of batch norm group names found.
"""
bns = []
for op in graph.get_operations():
match = _BATCHNORM_RE.search(op.name)
if match:
bn = match.group(1)
if not bn.startswith(SKIPPED_PREFIXES):
bns.append(bn)
# Filter out duplicates.
return list(collections.OrderedDict.fromkeys(bns))
def GetEndpointActivationOp(graph, prefix):
"""Returns an Operation with the given prefix and a valid end point suffix.
Args:
graph: Graph where to look for the operation.
prefix: String, prefix of Operation to return.
Returns:
The Operation with the given prefix and a valid end point suffix or None if
there are no matching operations in the graph for any valid suffix
"""
for suffix in _ACTIVATION_OP_SUFFIXES:
activation = _GetOperationByNameDontThrow(graph, prefix + suffix)
if activation:
return activation
return None
def _GetOperationByNameDontThrow(graph, name):
"""Returns an Operation with the given name.
Args:
graph: Graph where to look for the operation.
name: String, name of Operation to return.
Returns:
The Operation with the given name. None if the name does not correspond to
any operation in the graph
"""
try:
return graph.get_operation_by_name(name)
except KeyError:
return None
def CreateOrGetQuantizationStep():
"""Returns a Tensor of the number of steps the quantized graph has run.
Returns:
Quantization step Tensor.
"""
quantization_step_name = 'fake_quantization_step'
quantization_step_tensor_name = quantization_step_name + '/AssignAdd:0'
g = ops.get_default_graph()
try:
return g.get_tensor_by_name(quantization_step_tensor_name)
except KeyError:
# Create in proper graph and base name_scope.
with g.name_scope(None):
quantization_step_tensor = variable_scope.get_variable(
quantization_step_name,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
with g.name_scope(quantization_step_tensor.op.name + '/'):
# We return the incremented variable tensor. Since this is used in conds
# for quant_delay and freeze_bn_delay, it will run once per graph
# execution.
return state_ops.assign_add(quantization_step_tensor, 1)
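# Illustrative usage sketch (the graph contents and layer names are
# assumptions):
#
#   g = ops.get_default_graph()
#   for bn_prefix in BatchNormGroups(g):
#       act_op = GetEndpointActivationOp(g, bn_prefix)
#       # act_op is the layer's Relu/Relu6/Identity op, or None if absent.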
|
apache-2.0
| -8,926,153,720,807,249,000
| 32.590164
| 80
| 0.704978
| false
| 4.069513
| false
| false
| false
|
corradio/electricitymap
|
test_parser.py
|
1
|
4107
|
#!/usr/bin/env python3
"""
Usage: poetry run test_parser FR production
"""
import time
import sys
import pprint
import datetime
import logging
import arrow
import click
from electricitymap.contrib.parsers.lib.parsers import PARSER_KEY_TO_DICT
from parsers.lib.quality import (
validate_consumption,
validate_production,
validate_exchange,
ValidationError,
)
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
@click.command()
@click.argument("zone")
@click.argument("data-type", default="production")
@click.option("--target_datetime", default=None, show_default=True)
def test_parser(zone, data_type, target_datetime):
"""
Parameters
----------
zone: a two letter zone from the map
    data_type: in ['production', 'exchangeForecast', 'exchange',
'price', 'consumption', 'generationForecast', 'consumptionForecast']
target_datetime: string parseable by arrow, such as 2018-05-30 15:00
Examples
-------
>>> python test_parser.py NO-NO3-\>SE exchange
parser result:
{'netFlow': -51.6563, 'datetime': datetime.datetime(2018, 7, 3, 14, 38, tzinfo=tzutc()), 'source': 'driftsdata.stattnet.no', 'sortedZoneKeys': 'NO-NO3->SE'}
---------------------
took 0.09s
min returned datetime: 2018-07-03 14:38:00+00:00
max returned datetime: 2018-07-03T14:38:00+00:00 UTC -- OK, <2h from now :) (now=2018-07-03T14:39:16.274194+00:00 UTC)
>>> python test_parser.py FR production
parser result:
[... long stuff ...]
---------------------
took 5.38s
min returned datetime: 2018-07-02 00:00:00+02:00
max returned datetime: 2018-07-03T14:30:00+00:00 UTC -- OK, <2h from now :) (now=2018-07-03T14:43:35.501375+00:00 UTC)
"""
if target_datetime:
target_datetime = arrow.get(target_datetime).datetime
start = time.time()
parser = PARSER_KEY_TO_DICT[data_type][zone]
if data_type in ["exchange", "exchangeForecast"]:
args = zone.split("->")
else:
args = [zone]
res = parser(
*args, target_datetime=target_datetime, logger=logging.getLogger(__name__)
)
if not res:
raise ValueError('Error: parser returned nothing ({})'.format(res))
elapsed_time = time.time() - start
if isinstance(res, (list, tuple)):
res_list = list(res)
else:
res_list = [res]
try:
dts = [e["datetime"] for e in res_list]
    except (KeyError, TypeError):
        raise ValueError('Parser output lacks `datetime` key for at least some of the '
                         'output. Full output: \n\n{}\n'.format(res))
assert all([type(e['datetime']) is datetime.datetime for e in res_list]), \
'Datetimes must be returned as native datetime.datetime objects'
last_dt = arrow.get(max(dts)).to('UTC')
first_dt = arrow.get(min(dts)).to('UTC')
max_dt_warning = ''
if not target_datetime:
max_dt_warning = (
" :( >2h from now !!!"
if (arrow.utcnow() - last_dt).total_seconds() > 2 * 3600
else " -- OK, <2h from now :) (now={} UTC)".format(arrow.utcnow())
)
print("Parser result:")
pp = pprint.PrettyPrinter(width=120)
pp.pprint(res)
print(
"\n".join(
[
"---------------------",
"took {:.2f}s".format(elapsed_time),
"min returned datetime: {} UTC".format(first_dt),
"max returned datetime: {} UTC {}".format(last_dt, max_dt_warning),
]
)
)
if type(res) == dict:
res = [res]
for event in res:
try:
if data_type == "production":
validate_production(event, zone)
elif data_type == "consumption":
validate_consumption(event, zone)
elif data_type == "exchange":
validate_exchange(event, zone)
except ValidationError as e:
logger.warning('Validation failed @ {}: {}'.format(event['datetime'], e))
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
print(test_parser())
|
gpl-3.0
| -8,112,435,090,802,228,000
| 30.837209
| 160
| 0.590699
| false
| 3.571304
| false
| false
| false
|
brendanv/nasa-api
|
nasa/maas.py
|
1
|
1543
|
from nasa import api
from nasa.base import NasaApiObject
''' Retrieves the most recent MAAS Report '''
def latest():
response = api.external_api_get(
'http://marsweather.ingenology.com/v1/latest/',
{},
)
return MAASReport.from_response(response['report'])
''' Retrieves the set of MAAS Reports that match the filters
provided via keyword args. Most report fields can be used as
filters.
'''
def archived(**kwargs):
return _maas_paginate(
'http://marsweather.ingenology.com/v1/archive/',
**kwargs
)
def _maas_paginate(url, **kwargs):
response = api.external_api_get(url, kwargs)
response['results'] = [
MAASReport.from_response(r) for r in response['results']
]
next_url = response['next']
if next_url is not None:
response['next'] = lambda: _maas_paginate(next_url)
prev_url = response['previous']
if prev_url is not None:
response['previous'] = lambda: _maas_paginate(prev_url)
return response
class MAASReport(NasaApiObject):
"""Mars Atmospheric Aggregation System Report"""
class Meta(object):
properties = ['terrestrial_date', 'sol', 'ls', 'min_temp',
'min_temp_fahrenheit', 'max_temp', 'max_temp_fahrenheit',
'pressure', 'pressure_string', 'abs_humidity',
'wind_speed', 'wind_direction', 'atmo_opacity', 'season',
'sunrise', 'sunset']
def __init__(self, **kwargs):
super(MAASReport, self).__init__(**kwargs)
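# Illustrative usage (the filter keyword below is an assumption; per
# `archived()`, most report fields can be used as filters):
#
#   report = latest()
#   print(report.sol, report.min_temp, report.max_temp)
#
#   page = archived(sol=155)
#   for r in page['results']:
#       print(r.terrestrial_date, r.atmo_opacity)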
|
gpl-3.0
| 5,404,063,207,014,197,000
| 32.543478
| 79
| 0.615684
| false
| 3.596737
| false
| false
| false
|
wkerzendorf/chiantipy
|
chiantipy/chianti/__init__.py
|
1
|
2095
|
'''the ChiantiPy - CHIANTI Python package
calculates various aspects of emission line and continua from the
CHIANTI atomic database for astrophysical spectroscopy'''
import os
import constants
import filters
import mputil
#
#try:
# chInteractive = int(os.environ['CHIANTIPY_INTERACTIVE'])
#except:
# chInteractive = 1
#if chInteractive:
# import pylab as pl
#else:
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as pl
###
#xuvtop = os.environ['XUVTOP']
##chInteractive=1
#Defaults = util.defaultsRead(verbose = chInteractive)
#Ip = util.ipRead()
#MasterList = util.masterListRead()
#AbundanceAll = util.abundanceRead(abundancename = Defaults['abundfile'])
#IoneqAll = util.ioneqRead(ioneqname = Defaults['ioneqfile'])
#import version
#__version__ = version.__version__
#__version_info__ = version.__version_info__
#import core
import pylab as pl
if pl.rcParams['backend'].lower() == 'qt4agg':
import gui_qt.gui as gui
elif pl.rcParams['backend'].lower() == 'wxagg':
import gui_wx.gui as gui
elif pl.rcParams['backend'].lower() == 'gtkagg':
import gui_cl.gui as gui
elif pl.rcParams['backend'].lower() == 'agg':
    import gui_cl.gui as gui
elif pl.rcParams['backend'].lower() == 'macosx':
import gui_cl.gui as gui
else:
print ' - Warning - '
    print ' - in order to use the various gui dialogs, the matplotlib/pylab backend needs'
    print ' - to be either Qt4Agg or WXAgg - '
    print ' - in order to use the command line dialogs, the matplotlib/pylab backend needs'
print ' - to be GTKAgg or MacOSX - '
print ' - current backend is ',pl.rcParams['backend']
print ' - the full functionality of the chianti.core.ion class may not be available'
print ' - it would probably be better to set your matplotlib backend to either'
print ' - Qt4Agg, WXAgg, GTKAgg, or MacOSX'
print ' - using the command line dialogs for now but there could be problems -'
import gui_cl.gui as gui
#
# placed here because util needs gui
import util
|
gpl-3.0
| 1,139,370,885,466,107,600
| 33.916667
| 91
| 0.702148
| false
| 3.341308
| false
| false
| false
|
librasungirl/openthread
|
tests/toranj/wpan.py
|
1
|
61707
|
#!/usr/bin/env python3
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
import time
import re
import random
import weakref
import subprocess
import socket
import asyncore
import inspect
# ----------------------------------------------------------------------------------------------------------------------
# wpantund properties
WPAN_STATE = 'NCP:State'
WPAN_NAME = 'Network:Name'
WPAN_PANID = 'Network:PANID'
WPAN_XPANID = 'Network:XPANID'
WPAN_KEY = 'Network:Key'
WPAN_KEY_INDEX = 'Network:KeyIndex'
WPAN_CHANNEL = 'NCP:Channel'
WPAN_HW_ADDRESS = 'NCP:HardwareAddress'
WPAN_EXT_ADDRESS = 'NCP:ExtendedAddress'
WPAN_POLL_INTERVAL = 'NCP:SleepyPollInterval'
WPAN_NODE_TYPE = 'Network:NodeType'
WPAN_ROLE = 'Network:Role'
WPAN_PARTITION_ID = 'Network:PartitionId'
WPAN_NCP_VERSION = 'NCP:Version'
WPAN_NCP_MCU_POWER_STATE = "NCP:MCUPowerState"
WPAN_NETWORK_ALLOW_JOIN = 'com.nestlabs.internal:Network:AllowingJoin'
WPAN_NETWORK_PASSTHRU_PORT = 'com.nestlabs.internal:Network:PassthruPort'
WPAN_RCP_VERSION = "POSIXApp:RCPVersion"
WPAN_IP6_LINK_LOCAL_ADDRESS = "IPv6:LinkLocalAddress"
WPAN_IP6_MESH_LOCAL_ADDRESS = "IPv6:MeshLocalAddress"
WPAN_IP6_MESH_LOCAL_PREFIX = "IPv6:MeshLocalPrefix"
WPAN_IP6_ALL_ADDRESSES = "IPv6:AllAddresses"
WPAN_IP6_MULTICAST_ADDRESSES = "IPv6:MulticastAddresses"
WPAN_IP6_INTERFACE_ROUTES = "IPv6:Routes"
WPAN_DAEMON_OFF_MESH_ROUTE_AUTO_ADD_ON_INTERFACE = "Daemon:OffMeshRoute:AutoAddOnInterface"
WPAN_DAEMON_OFF_MESH_ROUTE_FILTER_SELF_AUTO_ADDED = "Daemon:OffMeshRoute:FilterSelfAutoAdded"
WPAN_DAEMON_ON_MESH_PREFIX_AUTO_ADD_AS_INTERFACE_ROUTE = "Daemon:OnMeshPrefix:AutoAddAsInterfaceRoute"
WPAN_THREAD_RLOC16 = "Thread:RLOC16"
WPAN_THREAD_ROUTER_ID = "Thread:RouterID"
WPAN_THREAD_LEADER_ADDRESS = "Thread:Leader:Address"
WPAN_THREAD_LEADER_ROUTER_ID = "Thread:Leader:RouterID"
WPAN_THREAD_LEADER_WEIGHT = "Thread:Leader:Weight"
WPAN_THREAD_LEADER_LOCAL_WEIGHT = "Thread:Leader:LocalWeight"
WPAN_THREAD_LEADER_NETWORK_DATA = "Thread:Leader:NetworkData"
WPAN_THREAD_STABLE_LEADER_NETWORK_DATA = "Thread:Leader:StableNetworkData"
WPAN_THREAD_NETWORK_DATA = "Thread:NetworkData"
WPAN_THREAD_CHILD_TABLE = "Thread:ChildTable"
WPAN_THREAD_CHILD_TABLE_ASVALMAP = "Thread:ChildTable:AsValMap"
WPAN_THREAD_CHILD_TABLE_ADDRESSES = "Thread:ChildTable:Addresses"
WPAN_THREAD_NEIGHBOR_TABLE = "Thread:NeighborTable"
WPAN_THREAD_NEIGHBOR_TABLE_ASVALMAP = "Thread:NeighborTable:AsValMap"
WPAN_THREAD_NEIGHBOR_TABLE_ERR_RATES = "Thread:NeighborTable:ErrorRates"
WPAN_THREAD_NEIGHBOR_TABLE_ERR_RATES_AVVALMAP = "Thread:NeighborTable:ErrorRates:AsValMap"
WPAN_THREAD_ROUTER_TABLE = "Thread:RouterTable"
WPAN_THREAD_ROUTER_TABLE_ASVALMAP = "Thread:RouterTable:AsValMap"
WPAN_THREAD_CHILD_TIMEOUT = "Thread:ChildTimeout"
WPAN_THREAD_PARENT = "Thread:Parent"
WPAN_THREAD_PARENT_ASVALMAP = "Thread:Parent:AsValMap"
WPAN_THREAD_NETWORK_DATA_VERSION = "Thread:NetworkDataVersion"
WPAN_THREAD_STABLE_NETWORK_DATA = "Thread:StableNetworkData"
WPAN_THREAD_STABLE_NETWORK_DATA_VERSION = "Thread:StableNetworkDataVersion"
WPAN_THREAD_PREFERRED_ROUTER_ID = "Thread:PreferredRouterID"
WPAN_THREAD_COMMISSIONER_ENABLED = "Thread:Commissioner:Enabled"
WPAN_THREAD_DEVICE_MODE = "Thread:DeviceMode"
WPAN_THREAD_OFF_MESH_ROUTES = "Thread:OffMeshRoutes"
WPAN_THREAD_ON_MESH_PREFIXES = "Thread:OnMeshPrefixes"
WPAN_THREAD_ROUTER_ROLE_ENABLED = "Thread:RouterRole:Enabled"
WPAN_THREAD_CONFIG_FILTER_RLOC_ADDRESSES = "Thread:Config:FilterRLOCAddresses"
WPAN_THREAD_ROUTER_UPGRADE_THRESHOLD = "Thread:RouterUpgradeThreshold"
WPAN_THREAD_ROUTER_DOWNGRADE_THRESHOLD = "Thread:RouterDowngradeThreshold"
WPAN_THREAD_ACTIVE_DATASET = "Thread:ActiveDataset"
WPAN_THREAD_ACTIVE_DATASET_ASVALMAP = "Thread:ActiveDataset:AsValMap"
WPAN_THREAD_PENDING_DATASET = "Thread:PendingDataset"
WPAN_THREAD_PENDING_DATASET_ASVALMAP = "Thread:PendingDataset:AsValMap"
WPAN_THREAD_ADDRESS_CACHE_TABLE = "Thread:AddressCacheTable"
WPAN_THREAD_ADDRESS_CACHE_TABLE_ASVALMAP = "Thread:AddressCacheTable:AsValMap"
WPAN_OT_LOG_LEVEL = "OpenThread:LogLevel"
WPAN_OT_SLAAC_ENABLED = "OpenThread:SLAAC:Enabled"
WPAN_OT_STEERING_DATA_ADDRESS = "OpenThread:SteeringData:Address"
WPAN_OT_STEERING_DATA_SET_WHEN_JOINABLE = "OpenThread:SteeringData:SetWhenJoinable"
WPAN_OT_MSG_BUFFER_COUNTERS = "OpenThread:MsgBufferCounters"
WPAN_OT_MSG_BUFFER_COUNTERS_AS_STRING = "OpenThread:MsgBufferCounters:AsString"
WPAN_OT_DEBUG_TEST_ASSERT = "OpenThread:Debug:TestAssert"
WPAN_OT_DEBUG_TEST_WATCHDOG = "OpenThread:Debug:TestWatchdog"
WPAN_MAC_WHITELIST_ENABLED = "MAC:Whitelist:Enabled"
WPAN_MAC_WHITELIST_ENTRIES = "MAC:Whitelist:Entries"
WPAN_MAC_WHITELIST_ENTRIES_ASVALMAP = "MAC:Whitelist:Entries:AsValMap"
WPAN_MAC_BLACKLIST_ENABLED = "MAC:Blacklist:Enabled"
WPAN_MAC_BLACKLIST_ENTRIES = "MAC:Blacklist:Entries"
WPAN_MAC_BLACKLIST_ENTRIES_ASVALMAP = "MAC:Blacklist:Entries:AsValMap"
WPAN_MAC_FILTER_FIXED_RSSI = "MAC:Filter:FixedRssi"
WPAN_MAC_FILTER_ENTRIES = "MAC:Filter:Entries"
WPAN_MAC_FILTER_ENTRIES_ASVALMAP = "MAC:Filter:Entries:AsValMap"
WPAN_CHILD_SUPERVISION_INTERVAL = "ChildSupervision:Interval"
WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT = "ChildSupervision:CheckTimeout"
WPAN_JAM_DETECTION_STATUS = "JamDetection:Status"
WPAN_JAM_DETECTION_ENABLE = "JamDetection:Enable"
WPAN_JAM_DETECTION_RSSI_THRESHOLD = "JamDetection:RssiThreshold"
WPAN_JAM_DETECTION_WINDOW = "JamDetection:Window"
WPAN_JAM_DETECTION_BUSY_PERIOD = "JamDetection:BusyPeriod"
WPAN_JAM_DETECTION_DEBUG_HISTORY_BITMAP = "JamDetection:Debug:HistoryBitmap"
WPAN_CHANNEL_MONITOR_SAMPLE_INTERVAL = "ChannelMonitor:SampleInterval"
WPAN_CHANNEL_MONITOR_RSSI_THRESHOLD = "ChannelMonitor:RssiThreshold"
WPAN_CHANNEL_MONITOR_SAMPLE_WINDOW = "ChannelMonitor:SampleWindow"
WPAN_CHANNEL_MONITOR_SAMPLE_COUNT = "ChannelMonitor:SampleCount"
WPAN_CHANNEL_MONITOR_CHANNEL_QUALITY = "ChannelMonitor:ChannelQuality"
WPAN_CHANNEL_MONITOR_CHANNEL_QUALITY_ASVALMAP = "ChannelMonitor:ChannelQuality:AsValMap"
WPAN_CHANNEL_MANAGER_NEW_CHANNEL = "ChannelManager:NewChannel"
WPAN_CHANNEL_MANAGER_DELAY = "ChannelManager:Delay"
WPAN_CHANNEL_MANAGER_CHANNEL_SELECT = "ChannelManager:ChannelSelect"
WPAN_CHANNEL_MANAGER_AUTO_SELECT_ENABLED = "ChannelManager:AutoSelect:Enabled"
WPAN_CHANNEL_MANAGER_AUTO_SELECT_INTERVAL = "ChannelManager:AutoSelect:Interval"
WPAN_CHANNEL_MANAGER_SUPPORTED_CHANNEL_MASK = "ChannelManager:SupportedChannelMask"
WPAN_CHANNEL_MANAGER_FAVORED_CHANNEL_MASK = "ChannelManager:FavoredChannelMask"
WPAN_NCP_COUNTER_ALL_MAC = "NCP:Counter:AllMac"
WPAN_NCP_COUNTER_ALL_MAC_ASVALMAP = "NCP:Counter:AllMac:AsValMap"
WPAN_NCP_COUNTER_TX_PKT_TOTAL = "NCP:Counter:TX_PKT_TOTAL"
WPAN_NCP_COUNTER_TX_PKT_UNICAST = "NCP:Counter:TX_PKT_UNICAST"
WPAN_NCP_COUNTER_TX_PKT_BROADCAST = "NCP:Counter:TX_PKT_BROADCAST"
WPAN_NCP_COUNTER_TX_PKT_ACK_REQ = "NCP:Counter:TX_PKT_ACK_REQ"
WPAN_NCP_COUNTER_TX_PKT_ACKED = "NCP:Counter:TX_PKT_ACKED"
WPAN_NCP_COUNTER_TX_PKT_NO_ACK_REQ = "NCP:Counter:TX_PKT_NO_ACK_REQ"
WPAN_NCP_COUNTER_TX_PKT_DATA = "NCP:Counter:TX_PKT_DATA"
WPAN_NCP_COUNTER_TX_PKT_DATA_POLL = "NCP:Counter:TX_PKT_DATA_POLL"
WPAN_NCP_COUNTER_TX_PKT_BEACON = "NCP:Counter:TX_PKT_BEACON"
WPAN_NCP_COUNTER_TX_PKT_BEACON_REQ = "NCP:Counter:TX_PKT_BEACON_REQ"
WPAN_NCP_COUNTER_TX_PKT_OTHER = "NCP:Counter:TX_PKT_OTHER"
WPAN_NCP_COUNTER_TX_PKT_RETRY = "NCP:Counter:TX_PKT_RETRY"
WPAN_NCP_COUNTER_TX_ERR_CCA = "NCP:Counter:TX_ERR_CCA"
WPAN_NCP_COUNTER_TX_ERR_ABORT = "NCP:Counter:TX_ERR_ABORT"
WPAN_NCP_COUNTER_RX_PKT_TOTAL = "NCP:Counter:RX_PKT_TOTAL"
WPAN_NCP_COUNTER_RX_PKT_UNICAST = "NCP:Counter:RX_PKT_UNICAST"
WPAN_NCP_COUNTER_RX_PKT_BROADCAST = "NCP:Counter:RX_PKT_BROADCAST"
WPAN_NCP_COUNTER_RX_PKT_DATA = "NCP:Counter:RX_PKT_DATA"
WPAN_NCP_COUNTER_RX_PKT_DATA_POLL = "NCP:Counter:RX_PKT_DATA_POLL"
WPAN_NCP_COUNTER_RX_PKT_BEACON = "NCP:Counter:RX_PKT_BEACON"
WPAN_NCP_COUNTER_RX_PKT_BEACON_REQ = "NCP:Counter:RX_PKT_BEACON_REQ"
WPAN_NCP_COUNTER_RX_PKT_OTHER = "NCP:Counter:RX_PKT_OTHER"
WPAN_NCP_COUNTER_RX_PKT_FILT_WL = "NCP:Counter:RX_PKT_FILT_WL"
WPAN_NCP_COUNTER_RX_PKT_FILT_DA = "NCP:Counter:RX_PKT_FILT_DA"
WPAN_NCP_COUNTER_RX_ERR_EMPTY = "NCP:Counter:RX_ERR_EMPTY"
WPAN_NCP_COUNTER_RX_ERR_UKWN_NBR = "NCP:Counter:RX_ERR_UKWN_NBR"
WPAN_NCP_COUNTER_RX_ERR_NVLD_SADDR = "NCP:Counter:RX_ERR_NVLD_SADDR"
WPAN_NCP_COUNTER_RX_ERR_SECURITY = "NCP:Counter:RX_ERR_SECURITY"
WPAN_NCP_COUNTER_RX_ERR_BAD_FCS = "NCP:Counter:RX_ERR_BAD_FCS"
WPAN_NCP_COUNTER_RX_ERR_OTHER = "NCP:Counter:RX_ERR_OTHER"
WPAN_NCP_COUNTER_TX_IP_SEC_TOTAL = "NCP:Counter:TX_IP_SEC_TOTAL"
WPAN_NCP_COUNTER_TX_IP_INSEC_TOTAL = "NCP:Counter:TX_IP_INSEC_TOTAL"
WPAN_NCP_COUNTER_TX_IP_DROPPED = "NCP:Counter:TX_IP_DROPPED"
WPAN_NCP_COUNTER_RX_IP_SEC_TOTAL = "NCP:Counter:RX_IP_SEC_TOTAL"
WPAN_NCP_COUNTER_RX_IP_INSEC_TOTAL = "NCP:Counter:RX_IP_INSEC_TOTAL"
WPAN_NCP_COUNTER_RX_IP_DROPPED = "NCP:Counter:RX_IP_DROPPED"
WPAN_NCP_COUNTER_TX_SPINEL_TOTAL = "NCP:Counter:TX_SPINEL_TOTAL"
WPAN_NCP_COUNTER_RX_SPINEL_TOTAL = "NCP:Counter:RX_SPINEL_TOTAL"
WPAN_NCP_COUNTER_RX_SPINEL_ERR = "NCP:Counter:RX_SPINEL_ERR"
WPAN_NCP_COUNTER_IP_TX_SUCCESS = "NCP:Counter:IP_TX_SUCCESS"
WPAN_NCP_COUNTER_IP_RX_SUCCESS = "NCP:Counter:IP_RX_SUCCESS"
WPAN_NCP_COUNTER_IP_TX_FAILURE = "NCP:Counter:IP_TX_FAILURE"
WPAN_NCP_COUNTER_IP_RX_FAILURE = "NCP:Counter:IP_RX_FAILURE"
# ----------------------------------------------------------------------------------------------------------------------
# Valid state values
STATE_UNINITIALIZED = '"uninitialized"'
STATE_FAULT = '"uninitialized:fault"'
STATE_UPGRADING = '"uninitialized:upgrading"'
STATE_DEEP_SLEEP = '"offline:deep-sleep"'
STATE_OFFLINE = '"offline"'
STATE_COMMISSIONED = '"offline:commissioned"'
STATE_ASSOCIATING = '"associating"'
STATE_CREDENTIALS_NEEDED = '"associating:credentials-needed"'
STATE_ASSOCIATED = '"associated"'
STATE_ISOLATED = '"associated:no-parent"'
STATE_NETWAKE_ASLEEP = '"associated:netwake-asleep"'
STATE_NETWAKE_WAKING = '"associated:netwake-waking"'
# -----------------------------------------------------------------------------------------------------------------------
# MCU Power state from `WPAN_NCP_MCU_POWER_STATE`
MCU_POWER_STATE_ON = '"on"'
MCU_POWER_STATE_LOW_POWER = '"low-power"'
MCU_POWER_STATE_OFF = '"off"'
# -----------------------------------------------------------------------------------------------------------------------
# Node types (from `WPAN_NODE_TYPE` property)
NODE_TYPE_UNKNOWN = '"unknown"'
NODE_TYPE_LEADER = '"leader"'
NODE_TYPE_ROUTER = '"router"'
NODE_TYPE_END_DEVICE = '"end-device"'
NODE_TYPE_SLEEPY_END_DEVICE = '"sleepy-end-device"'
NODE_TYPE_COMMISSIONER = '"commissioner"'
NODE_TYPE_NEST_LURKER = '"nl-lurker"'
# -----------------------------------------------------------------------------------------------------------------------
# Node types used by `Node.join()`
JOIN_TYPE_ROUTER = 'r'
JOIN_TYPE_END_DEVICE = 'e'
JOIN_TYPE_SLEEPY_END_DEVICE = 's'
# -----------------------------------------------------------------------------------------------------------------------
# Address Cache Table Entry States
ADDRESS_CACHE_ENTRY_STATE_CACHED = "cached"
ADDRESS_CACHE_ENTRY_STATE_SNOOPED = "snooped"
ADDRESS_CACHE_ENTRY_STATE_QUERY = "query"
ADDRESS_CACHE_ENTRY_STATE_RETRY_QUERY = "retry-query"
# -----------------------------------------------------------------------------------------------------------------------
# Bit Flags for Thread Device Mode `WPAN_THREAD_DEVICE_MODE`
THREAD_MODE_FLAG_FULL_NETWORK_DATA = (1 << 0)
THREAD_MODE_FLAG_FULL_THREAD_DEV = (1 << 1)
THREAD_MODE_FLAG_SECURE_DATA_REQUEST = (1 << 2)
THREAD_MODE_FLAG_RX_ON_WHEN_IDLE = (1 << 3)
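# Illustrative decoding of a `WPAN_THREAD_DEVICE_MODE` value using the flags
# above (the "0x0f"-style string returned by wpanctl is an assumption):
#
#   mode = int(node.get(WPAN_THREAD_DEVICE_MODE), 0)
#   rx_on_when_idle = bool(mode & THREAD_MODE_FLAG_RX_ON_WHEN_IDLE)
#   full_thread_dev = bool(mode & THREAD_MODE_FLAG_FULL_THREAD_DEV)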
_OT_BUILDDIR = os.getenv('top_builddir', '../..')
_WPANTUND_PREFIX = os.getenv('WPANTUND_PREFIX', '/usr/local')
# -----------------------------------------------------------------------------------------------------------------------
def _log(text, new_line=True, flush=True):
sys.stdout.write(text)
if new_line:
sys.stdout.write('\n')
if flush:
sys.stdout.flush()
# -----------------------------------------------------------------------------------------------------------------------
# Node class
class Node(object):
""" A wpantund OT NCP instance """
# defines the default verbosity setting (can be changed per `Node`)
_VERBOSE = os.getenv('TORANJ_VERBOSE',
'no').lower() in ['true', '1', 't', 'y', 'yes', 'on']
_SPEED_UP_FACTOR = 1 # defines the default time speed up factor
# path to `wpantund`, `wpanctl`, `ot-ncp-ftd`,`ot-ncp` and `ot-rcp`
_WPANTUND = '%s/sbin/wpantund' % _WPANTUND_PREFIX
_WPANCTL = '%s/bin/wpanctl' % _WPANTUND_PREFIX
_OT_NCP_FTD = '%s/examples/apps/ncp/ot-ncp-ftd' % _OT_BUILDDIR
_OT_NCP_FTD_POSIX = '%s/src/posix/ot-ncp' % _OT_BUILDDIR
_OT_RCP = '%s/examples/apps/ncp/ot-rcp' % _OT_BUILDDIR
    # Environment variable used to determine how to run OpenThread.
    # If set to 1, the posix NCP (`ot-ncp`) is used along with a posix RCP (`ot-rcp`).
    # Otherwise, the example NCP `ot-ncp-ftd` is used.
_POSIX_ENV_VAR = 'TORANJ_POSIX_RCP_MODEL'
# determines if the wpantund logs are saved in file or sent to stdout
_TUND_LOG_TO_FILE = True
    # name of wpantund log file (used when _TUND_LOG_TO_FILE is True)
    _TUND_LOG_FNAME = 'wpantund-logs'
# interface name
_INTFC_NAME_PREFIX = 'utun' if sys.platform == 'darwin' else 'wpan'
_START_INDEX = 4 if sys.platform == 'darwin' else 1
_cur_index = _START_INDEX
_all_nodes = weakref.WeakSet()
def __init__(self, verbose=_VERBOSE):
"""Creates a new `Node` instance"""
index = Node._cur_index
Node._cur_index += 1
self._index = index
self._interface_name = self._INTFC_NAME_PREFIX + str(index)
self._verbose = verbose
# Check if env variable `TORANJ_POSIX_RCP_MODEL` is defined
        # and use it to determine whether to operate in "posix-ncp-app" mode.
if self._POSIX_ENV_VAR in os.environ:
self._use_posix_with_rcp = (os.environ[self._POSIX_ENV_VAR] in [
'1', 'yes'
])
else:
self._use_posix_with_rcp = False
if self._use_posix_with_rcp:
ncp_socket_path = 'system:{} -s {} spinel+hdlc+uart://{}?forkpty-arg={}'.format(
self._OT_NCP_FTD_POSIX, self._SPEED_UP_FACTOR, self._OT_RCP,
index)
else:
ncp_socket_path = 'system:{} {} {}'.format(self._OT_NCP_FTD, index,
self._SPEED_UP_FACTOR)
cmd = self._WPANTUND + \
' -o Config:NCP:SocketPath \"{}\"'.format(ncp_socket_path) + \
' -o Config:TUN:InterfaceName {}'.format(self._interface_name) + \
' -o Config:NCP:DriverName spinel' + \
' -o Daemon:SyslogMask \"all -debug\"'
if Node._TUND_LOG_TO_FILE:
self._tund_log_file = open(
self._TUND_LOG_FNAME + str(index) + '.log', 'wb')
else:
self._tund_log_file = None
if self._verbose:
_log('$ Node{}.__init__() cmd: {}'.format(index, cmd))
self._wpantund_process = subprocess.Popen(cmd,
shell=True,
stderr=self._tund_log_file)
self._wpanctl_cmd = self._WPANCTL + ' -I ' + self._interface_name + ' '
# map from local_port to `AsyncReceiver` object
self._recvers = weakref.WeakValueDictionary()
Node._all_nodes.add(self)
def __del__(self):
self._wpantund_process.poll()
if self._wpantund_process.returncode is None:
self._wpantund_process.terminate()
self._wpantund_process.wait()
def __repr__(self):
return 'Node (index={}, interface_name={})'.format(
self._index, self._interface_name)
@property
def index(self):
return self._index
@property
def interface_name(self):
return self._interface_name
@property
def tund_log_file(self):
return self._tund_log_file
@property
def using_posix_with_rcp(self):
return self._use_posix_with_rcp
# ------------------------------------------------------------------------------------------------------------------
# Executing a `wpanctl` command
def wpanctl(self, cmd):
""" Runs a wpanctl command on the given wpantund/OT-NCP instance and returns the output """
if self._verbose:
_log('$ Node{}.wpanctl(\'{}\')'.format(self._index, cmd),
new_line=False)
result = subprocess.check_output(self._wpanctl_cmd + cmd,
shell=True,
stderr=subprocess.STDOUT)
        if len(result) >= 1 and result[-1] == '\n':
            # remove the last char if it is '\n'
            result = result[:-1]
if self._verbose:
if '\n' in result:
_log(':')
for line in result.splitlines():
_log(' ' + line)
else:
_log(' -> \'{}\''.format(result))
return result
# ------------------------------------------------------------------------------------------------------------------
# APIs matching `wpanctl` commands.
def get(self, prop_name, value_only=True):
return self.wpanctl('get ' + ('-v ' if value_only else '') + prop_name)
def set(self, prop_name, value, binary_data=False):
return self._update_prop('set', prop_name, value, binary_data)
def add(self, prop_name, value, binary_data=False):
return self._update_prop('add', prop_name, value, binary_data)
def remove(self, prop_name, value, binary_data=False):
return self._update_prop('remove', prop_name, value, binary_data)
def _update_prop(self, action, prop_name, value, binary_data):
return self.wpanctl(action + ' ' + prop_name + ' ' +
('-d ' if binary_data else '') + '-v ' +
value) # use -v to handle values starting with `-`.
def reset(self):
return self.wpanctl('reset')
def status(self):
return self.wpanctl('status')
def leave(self):
return self.wpanctl('leave')
def form(self,
name,
channel=None,
channel_mask=None,
panid=None,
xpanid=None,
key=None,
key_index=None,
node_type=None,
mesh_local_prefix=None,
legacy_prefix=None):
return self.wpanctl(
'form \"' + name + '\"' +
(' -c {}'.format(channel) if channel is not None else '') +
(' -m {}'.format(channel_mask) if channel_mask is not None else ''
) + (' -p {}'.format(panid) if panid is not None else '') +
(' -x {}'.format(xpanid) if xpanid is not None else '') +
(' -k {}'.format(key) if key is not None else '') +
(' -i {}'.format(key_index) if key_index is not None else '') +
(' -T {}'.format(node_type) if node_type is not None else '') +
(' -M {}'.format(mesh_local_prefix
) if mesh_local_prefix is not None else '') +
(' -L {}'.format(legacy_prefix) if legacy_prefix is not None else ''
))
def join(self,
name,
channel=None,
node_type=None,
panid=None,
xpanid=None,
key=None):
return self.wpanctl(
'join \"' + name + '\"' +
(' -c {}'.format(channel) if channel is not None else '') +
(' -T {}'.format(node_type) if node_type is not None else '') +
(' -p {}'.format(panid) if panid is not None else '') +
(' -x {}'.format(xpanid) if xpanid is not None else '') +
(' -k {}'.format(key) if key is not None else '') + (' -n'))
def active_scan(self, channel=None):
return self.wpanctl(
'scan' + (' -c {}'.format(channel) if channel is not None else ''))
def energy_scan(self, channel=None):
return self.wpanctl('scan -e' + (
' -c {}'.format(channel) if channel is not None else ''))
def discover_scan(self,
channel=None,
joiner_only=False,
enable_filtering=False,
panid_filter=None):
return self.wpanctl(
'scan -d' +
(' -c {}'.format(channel) if channel is not None else '') +
(' -j' if joiner_only else '') +
(' -f' if enable_filtering else '') +
(' -p {}'.format(panid_filter) if panid_filter is not None else ''))
def permit_join(self, duration_sec=None, port=None, udp=True, tcp=True):
if not udp and not tcp: # incorrect use!
return ''
traffic_type = ''
if udp and not tcp:
traffic_type = ' --udp'
if tcp and not udp:
traffic_type = ' --tcp'
if port is not None and duration_sec is None:
duration_sec = '240'
return self.wpanctl(
'permit-join' +
(' {}'.format(duration_sec) if duration_sec is not None else '') +
(' {}'.format(port) if port is not None else '') + traffic_type)
def config_gateway(self, prefix, default_route=False, priority=None):
return self.wpanctl(
'config-gateway ' + prefix + (' -d' if default_route else '') +
(' -P {}'.format(priority) if priority is not None else ''))
def add_prefix(self,
prefix,
prefix_len=None,
priority=None,
stable=True,
on_mesh=False,
slaac=False,
dhcp=False,
configure=False,
default_route=False,
preferred=False):
return self.wpanctl(
'add-prefix ' + prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else '') +
(' -P {}'.format(priority) if priority is not None else '') +
(' -s' if stable else '') + (' -f' if preferred else '') +
(' -a' if slaac else '') + (' -d' if dhcp else '') +
(' -c' if configure else '') + (' -r' if default_route else '') +
(' -o' if on_mesh else ''))
def remove_prefix(self, prefix, prefix_len=None):
return self.wpanctl('remove-prefix ' + prefix + (
' -l {}'.format(prefix_len) if prefix_len is not None else ''))
def add_route(self,
route_prefix,
prefix_len=None,
priority=None,
stable=True):
"""route priority [(>0 for high, 0 for medium, <0 for low)]"""
return self.wpanctl(
'add-route ' + route_prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else '') +
(' -p {}'.format(priority) if priority is not None else '') +
('' if stable else ' -n'))
def remove_route(self,
route_prefix,
prefix_len=None,
priority=None,
stable=True):
"""route priority [(>0 for high, 0 for medium, <0 for low)]"""
return self.wpanctl(
'remove-route ' + route_prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else '') +
(' -p {}'.format(priority) if priority is not None else ''))
def commissioner_start(self):
return self.wpanctl('commissioner start')
def commissioner_add_joiner(self, eui64, pskd, timeout='100'):
return self.wpanctl('commissioner joiner-add {} {} {}'.format(
eui64, timeout, pskd))
def joiner_join(self, pskd):
return self.wpanctl('joiner --join {}'.format(pskd))
def joiner_attach(self):
return self.wpanctl('joiner --attach')
# ------------------------------------------------------------------------------------------------------------------
# Helper methods
def is_associated(self):
return self.get(WPAN_STATE) == STATE_ASSOCIATED
def join_node(self, node, node_type=JOIN_TYPE_ROUTER, should_set_key=True):
"""Join a network specified by another node, `node` should be a Node"""
if not node.is_associated():
return "{} is not associated".format(node)
return self.join(
node.get(WPAN_NAME)[1:-1],
channel=node.get(WPAN_CHANNEL),
node_type=node_type,
panid=node.get(WPAN_PANID),
xpanid=node.get(WPAN_XPANID),
key=node.get(WPAN_KEY)[1:-1] if should_set_key else None)
def whitelist_node(self, node):
"""Adds a given node (of type `Node`) to the whitelist of `self` and enables whitelisting on `self`"""
self.add(WPAN_MAC_WHITELIST_ENTRIES, node.get(WPAN_EXT_ADDRESS)[1:-1])
self.set(WPAN_MAC_WHITELIST_ENABLED, '1')
    def un_whitelist_node(self, node):
        """Removes a given node (of type `Node`) from the whitelist"""
self.remove(WPAN_MAC_WHITELIST_ENTRIES,
node.get(WPAN_EXT_ADDRESS)[1:-1])
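    # Illustrative sketch of forming and joining a network with the helpers
    # above (network name, channel, and node roles are assumptions, not values
    # used by the toranj tests themselves):
    #
    #   leader, joiner = Node(), Node()
    #   leader.form('test-net', channel=11)
    #   leader.whitelist_node(joiner)
    #   joiner.whitelist_node(leader)
    #   joiner.join_node(leader, node_type=JOIN_TYPE_END_DEVICE)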
def is_in_scan_result(self, scan_result):
"""Checks if node is in the scan results
        `scan_result` must be an array of `ScanResult` objects (see `parse_scan_result`).
"""
joinable = (self.get(WPAN_NETWORK_ALLOW_JOIN) == 'true')
panid = self.get(WPAN_PANID)
xpanid = self.get(WPAN_XPANID)[2:]
name = self.get(WPAN_NAME)[1:-1]
channel = self.get(WPAN_CHANNEL)
ext_address = self.get(WPAN_EXT_ADDRESS)[1:-1]
for item in scan_result:
if all([
item.network_name == name, item.panid == panid,
item.xpanid == xpanid, item.channel == channel,
item.ext_address == ext_address,
(item.type == ScanResult.TYPE_DISCOVERY_SCAN) or
(item.joinable == joinable)
]):
return True
return False
def find_ip6_address_with_prefix(self, prefix):
"""Find an IPv6 address on node matching a given prefix.
        `prefix` should be a string containing the prefix.
Returns a string containing the IPv6 address matching the prefix or empty string if no address found.
"""
if len(prefix) > 2 and prefix[-1] == ':' and prefix[-2] == ':':
prefix = prefix[:-1]
all_addrs = parse_list(self.get(WPAN_IP6_ALL_ADDRESSES))
matched_addr = [addr for addr in all_addrs if addr.startswith(prefix)]
return matched_addr[0] if len(matched_addr) >= 1 else ''
    def add_ip6_address_on_interface(self, address, prefix_len=64):
        """Adds an IPv6 address on the network interface.
`address` should be string containing the IPv6 address.
`prefix_len` is an `int` specifying the prefix length.
NOTE: this method uses linux `ip` command.
"""
cmd = 'ip -6 addr add ' + address + \
'/{} dev '.format(prefix_len) + self.interface_name
if self._verbose:
_log('$ Node{} \'{}\')'.format(self._index, cmd))
result = subprocess.check_output(cmd,
shell=True,
stderr=subprocess.STDOUT)
return result
    def remove_ip6_address_on_interface(self, address, prefix_len=64):
        """Removes an IPv6 address from the network interface.
`address` should be string containing the IPv6 address.
`prefix_len` is an `int` specifying the prefix length.
NOTE: this method uses linux `ip` command.
"""
cmd = 'ip -6 addr del ' + address + \
'/{} dev '.format(prefix_len) + self.interface_name
if self._verbose:
_log('$ Node{} \'{}\')'.format(self._index, cmd))
result = subprocess.check_output(cmd,
shell=True,
stderr=subprocess.STDOUT)
return result
# ------------------------------------------------------------------------------------------------------------------
# class methods
@classmethod
def init_all_nodes(cls, disable_logs=not _VERBOSE, wait_time=15):
"""Issues a `wpanctl.leave` on all `Node` objects and waits for them to be ready"""
random.seed(123456)
time.sleep(0.5)
for node in Node._all_nodes:
start_time = time.time()
while True:
try:
node._wpantund_process.poll()
if node._wpantund_process.returncode is not None:
print(
'Node {} wpantund instance has terminated unexpectedly'
.format(node))
if disable_logs:
node.set(WPAN_OT_LOG_LEVEL, '0')
node.leave()
except subprocess.CalledProcessError as e:
if (node._verbose):
_log(' -> \'{}\' exit code: {}'.format(
e.output, e.returncode))
interval = time.time() - start_time
if interval > wait_time:
print(
'Took too long to init node {} ({}>{} sec)'.format(
node, interval, wait_time))
raise
except BaseException:
raise
else:
break
time.sleep(0.4)
@classmethod
def finalize_all_nodes(cls):
"""Finalizes all previously created `Node` instances (stops the wpantund process)"""
for node in Node._all_nodes:
node._wpantund_process.terminate()
node._wpantund_process.wait()
@classmethod
def set_time_speedup_factor(cls, factor):
"""Sets up the time speed up factor - should be set before creating any `Node` objects"""
if len(Node._all_nodes) != 0:
raise Node._NodeError(
'set_time_speedup_factor() cannot be called after creating a `Node`'
)
Node._SPEED_UP_FACTOR = factor
# ------------------------------------------------------------------------------------------------------------------
# IPv6 message Sender and Receiver class
class _NodeError(Exception):
pass
def prepare_tx(self, src, dst, data=40, count=1, mcast_hops=None):
"""Prepares an IPv6 msg transmission.
- `src` and `dst` can be either a string containing IPv6 address, or a tuple (ipv6 address as string, port),
if no port is given, a random port number is used.
- `data` can be either a string containing the message to be sent, or an int indicating size of the message (a
random message with the given length will be used).
- `count` gives number of times the message will be sent (default is 1).
- `mcast_hops` specifies multicast hop limit (only applicable for multicast tx).
Returns an `AsyncSender` object.
"""
if isinstance(src, tuple):
src_addr = src[0]
src_port = src[1]
else:
src_addr = src
src_port = random.randint(49152, 65535)
if isinstance(dst, tuple):
dst_addr = dst[0]
dst_port = dst[1]
else:
dst_addr = dst
dst_port = random.randint(49152, 65535)
if isinstance(data, int):
# create a random message with the given length.
all_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,><?;:[]=-+)(*&^%$#@'
msg = ''.join(random.choice(all_chars) for _ in range(data))
else:
msg = data
return AsyncSender(self, src_addr, src_port, dst_addr, dst_port, msg,
count, mcast_hops)
def _get_receiver(self, local_port):
# Gets or creates a receiver (an `AsyncReceiver`) tied to given port
# number
if local_port in self._recvers:
receiver = self._recvers[local_port]
else:
receiver = AsyncReceiver(self, local_port)
self._recvers[local_port] = receiver
return receiver
def _remove_recver(self, recvr):
# Removes a receiver from weak dictionary - called when the receiver is
# done and its socket is closed
local_port = recvr.local_port
if local_port in self._recvers:
del self._recvers[local_port]
def prepare_rx(self, sender):
"""Prepare to receive messages from a sender (an `AsyncSender`)"""
receiver = self._get_receiver(sender.dst_port)
receiver._add_sender(sender.src_addr, sender.src_port, sender.msg,
sender.count)
return receiver
def prepare_listener(self, local_port, timeout=1):
"""Prepares a listener (an `AsyncReceiver`) listening on the given `local_port` for given `timeout` (sec)"""
receiver = self._get_receiver(local_port)
receiver._set_listen_timeout(timeout)
return receiver
@staticmethod
def perform_async_tx_rx(timeout=20):
"""Called to perform all previously prepared async rx/listen and tx operations"""
try:
start_time = time.time()
while asyncore.socket_map:
elapsed_time = time.time() - start_time
if elapsed_time > timeout:
                    print('Performing async tx/rx took too long ({}>{} sec)'.
format(elapsed_time, timeout))
raise Node._NodeError(
'perform_tx_rx timed out ({}>{} sec)'.format(
elapsed_time, timeout))
# perform a single asyncore loop
asyncore.loop(timeout=0.5, count=1)
except BaseException:
print('Failed to perform async rx/tx')
raise
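    # Illustrative async tx/rx sketch using the helpers above (addresses,
    # prefix, and port are assumptions):
    #
    #   src = node1.find_ip6_address_with_prefix('fd00:1234::')
    #   dst = node2.find_ip6_address_with_prefix('fd00:1234::')
    #   sender = node1.prepare_tx((src, 12557), (dst, 12557), data=40, count=3)
    #   recver = node2.prepare_rx(sender)
    #   Node.perform_async_tx_rx()
    #   # sender.was_successful then indicates whether all messages were sent.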
# -----------------------------------------------------------------------------------------------------------------------
# `AsyncSender` and `AsyncReceiver` classes
_SO_BINDTODEVICE = 25
def _is_ipv6_addr_link_local(ip_addr):
"""Indicates if a given IPv6 address is link-local"""
return ip_addr.lower().startswith('fe80::')
def _create_socket_address(ip_address, port):
"""Convert a given IPv6 address (string) and port number into a socket address"""
# `socket.getaddrinfo()` returns a list of `(family, socktype, proto, canonname, sockaddr)` where `sockaddr`
# (at index 4) can be used as input in socket methods (like `sendto()`, `bind()`, etc.).
return socket.getaddrinfo(ip_address, port)[0][4]
class AsyncSender(asyncore.dispatcher):
""" An IPv6 async message sender - use `Node.prepare_tx()` to create one"""
def __init__(self,
node,
src_addr,
src_port,
dst_addr,
dst_port,
msg,
count,
mcast_hops=None):
self._node = node
self._src_addr = src_addr
self._src_port = src_port
self._dst_addr = dst_addr
self._dst_port = dst_port
self._msg = msg
self._count = count
self._dst_sock_addr = _create_socket_address(dst_addr, dst_port)
self._tx_buffer = self._msg
self._tx_counter = 0
# Create a socket, bind it to the node's interface
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, _SO_BINDTODEVICE,
node.interface_name + '\0')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Set the IPV6_MULTICAST_HOPS
if mcast_hops is not None:
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS,
mcast_hops)
# Bind the socket to the given src address
if _is_ipv6_addr_link_local(src_addr):
# If src is a link local address it requires the interface name to
# be specified.
src_sock_addr = _create_socket_address(
src_addr + '%' + node.interface_name, src_port)
else:
src_sock_addr = _create_socket_address(src_addr, src_port)
sock.bind(src_sock_addr)
asyncore.dispatcher.__init__(self, sock)
# Property getters
@property
def node(self):
return self._node
@property
def src_addr(self):
return self._src_addr
@property
def src_port(self):
return self._src_port
@property
def dst_addr(self):
return self._dst_addr
@property
def dst_port(self):
return self._dst_port
@property
def msg(self):
return self._msg
@property
def count(self):
return self._count
@property
def was_successful(self):
"""Indicates if the transmission of IPv6 messages finished successfully"""
return self._tx_counter == self._count
# asyncore.dispatcher callbacks
def readable(self):
return False
def writable(self):
return True
def handle_write(self):
sent_len = self.sendto(self._tx_buffer, self._dst_sock_addr)
if self._node._verbose:
if sent_len < 30:
info_text = '{} bytes ("{}")'.format(sent_len,
self._tx_buffer[:sent_len])
else:
info_text = '{} bytes'.format(sent_len)
_log('- Node{} sent {} to [{}]:{} from [{}]:{}'.format(
self._node._index, info_text, self._dst_addr, self._dst_port,
self._src_addr, self._src_port))
self._tx_buffer = self._tx_buffer[sent_len:]
if len(self._tx_buffer) == 0:
self._tx_counter += 1
if self._tx_counter < self._count:
self._tx_buffer = self._msg
else:
self.handle_close()
def handle_close(self):
self.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class AsyncReceiver(asyncore.dispatcher):
""" An IPv6 async message receiver - use `prepare_rx()` to create one"""
_MAX_RECV_SIZE = 2048
class _SenderInfo(object):
def __init__(self, sender_addr, sender_port, msg, count):
self._sender_addr = sender_addr
self._sender_port = sender_port
self._msg = msg
self._count = count
self._rx_counter = 0
def _check_received(self, msg, sender_addr, sender_port):
if self._msg == msg and self._sender_addr == sender_addr and self._sender_port == sender_port:
self._rx_counter += 1
return self._did_recv_all()
def _did_recv_all(self):
return self._rx_counter >= self._count
def __init__(self, node, local_port):
self._node = node
self._local_port = local_port
self._senders = [] # list of `_SenderInfo` objects
# contains all received messages as a list of (pkt, (src_addr,
# src_port))
self._all_rx = []
self._timeout = 0 # listen timeout (zero means forever)
self._started = False
self._start_time = 0
# Create a socket, bind it to the node's interface
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, _SO_BINDTODEVICE,
node.interface_name + '\0')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Bind the socket to any IPv6 address with the given local port
local_sock_addr = _create_socket_address('::', local_port)
sock.bind(local_sock_addr)
asyncore.dispatcher.__init__(self, sock)
def _add_sender(self, sender_addr, sender_port, msg, count):
self._senders.append(
AsyncReceiver._SenderInfo(sender_addr, sender_port, msg, count))
def _set_listen_timeout(self, timeout):
self._timeout = timeout
# Property getters
@property
def node(self):
return self._node
@property
def local_port(self):
return self._local_port
@property
def all_rx_msg(self):
"""returns all received messages as a list of (msg, (src_addr, src_port))"""
return self._all_rx
@property
def was_successful(self):
"""Indicates if all expected IPv6 messages were received successfully"""
return len(self._senders) == 0 or all(
[sender._did_recv_all() for sender in self._senders])
# asyncore.dispatcher callbacks
def readable(self):
if not self._started:
self._start_time = time.time()
self._started = True
        if (self._timeout != 0
                and time.time() - self._start_time >= self._timeout):
self.handle_close()
if self._node._verbose:
_log(
'- Node{} finished listening on port {} for {} sec, received {} msg(s)'
.format(self._node._index, self._local_port, self._timeout,
len(self._all_rx)))
return False
return True
def writable(self):
return False
def handle_read(self):
(msg, src_sock_addr) = self.recvfrom(AsyncReceiver._MAX_RECV_SIZE)
src_addr = src_sock_addr[0]
src_port = src_sock_addr[1]
if (_is_ipv6_addr_link_local(src_addr)):
if '%' in src_addr:
# remove the interface name from address
src_addr = src_addr.split('%')[0]
if self._node._verbose:
if len(msg) < 30:
info_text = '{} bytes ("{}")'.format(len(msg), msg)
else:
info_text = '{} bytes'.format(len(msg))
_log('- Node{} received {} on port {} from [{}]:{}'.format(
self._node._index, info_text, self._local_port, src_addr,
src_port))
self._all_rx.append((msg, (src_addr, src_port)))
if all([
sender._check_received(msg, src_addr, src_port)
for sender in self._senders
]):
self.handle_close()
def handle_close(self):
self.close()
# remove the receiver from the node once the socket is closed
self._node._remove_recver(self)
# -----------------------------------------------------------------------------------------------------------------------
class VerifyError(Exception):
pass
_is_in_verify_within = False
def verify(condition):
"""Verifies that a `condition` is true, otherwise raises a VerifyError"""
global _is_in_verify_within
if not condition:
calling_frame = inspect.currentframe().f_back
error_message = 'verify() failed at line {} in "{}"'.format(
calling_frame.f_lineno, calling_frame.f_code.co_filename)
if not _is_in_verify_within:
print(error_message)
raise VerifyError(error_message)
def verify_within(condition_checker_func, wait_time, delay_time=0.1):
"""Verifies that a given function `condition_checker_func` passes successfully within a given wait timeout.
`wait_time` is maximum time waiting for condition_checker to pass (in seconds).
`delay_time` specifies a delay interval added between failed attempts (in seconds).
"""
global _is_in_verify_within
start_time = time.time()
old_is_in_verify_within = _is_in_verify_within
_is_in_verify_within = True
while True:
try:
condition_checker_func()
except VerifyError as e:
if time.time() - start_time > wait_time:
print('Took too long to pass the condition ({}>{} sec)'.format(
time.time() - start_time, wait_time))
                print(str(e))
raise e
except BaseException:
raise
else:
break
if delay_time != 0:
time.sleep(delay_time)
_is_in_verify_within = old_is_in_verify_within
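# Illustrative usage sketch for `verify_within()` (the checker below is
# hypothetical; any callable that uses `verify()` internally works):
#
#   def counter_reached_ten():
#       verify(get_counter() >= 10)
#
#   verify_within(counter_reached_ten, wait_time=5, delay_time=0.5)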
# -----------------------------------------------------------------------------------------------------------------------
# Parsing `wpanctl` output
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ScanResult(object):
""" This object encapsulates a scan result (active/discover/energy scan)"""
TYPE_ACTIVE_SCAN = 'active-scan'
TYPE_DISCOVERY_SCAN = 'discover-scan'
TYPE_ENERGY_SCAN = 'energy-scan'
def __init__(self, result_text):
items = [item.strip() for item in result_text.split('|')]
if len(items) == 8:
self._type = ScanResult.TYPE_ACTIVE_SCAN
self._index = items[0]
self._joinable = (items[1] == 'YES')
self._network_name = items[2][1:-1]
self._panid = items[3]
self._channel = items[4]
self._xpanid = items[5]
self._ext_address = items[6]
self._rssi = items[7]
elif len(items) == 7:
self._type = ScanResult.TYPE_DISCOVERY_SCAN
self._index = items[0]
self._network_name = items[1][1:-1]
self._panid = items[2]
self._channel = items[3]
self._xpanid = items[4]
self._ext_address = items[5]
self._rssi = items[6]
elif len(items) == 2:
self._type = ScanResult.TYPE_ENERGY_SCAN
self._channel = items[0]
self._rssi = items[1]
else:
            raise ValueError(
                '"{}" does not seem to be a valid scan result string'.format(
                    result_text))
@property
def type(self):
return self._type
@property
def joinable(self):
return self._joinable
@property
def network_name(self):
return self._network_name
@property
def panid(self):
return self._panid
@property
def channel(self):
return self._channel
@property
def xpanid(self):
return self._xpanid
@property
def ext_address(self):
return self._ext_address
@property
def rssi(self):
return self._rssi
def __repr__(self):
return 'ScanResult({})'.format(self.__dict__)
def parse_scan_result(scan_result):
""" Parses scan result string and returns an array of `ScanResult` objects"""
return [ScanResult(item) for item in scan_result.split('\n')[2:]
] # skip first two lines which are table headers
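# Illustrative example (the two header lines and the table row below are
# hypothetical wpanctl scan output, shown only to demonstrate the parsing):
#
#   >>> text = ('header line 1\n'
#   ...         'header line 2\n'
#   ...         '1 | NO | "my-network" | 0xB57C | 11 | '
#   ...         '0xDEAD00BEEF00CAFE | D6D00F9D3F7D4E6F | -60')
#   >>> result = parse_scan_result(text)[0]
#   >>> (result.type, result.network_name, result.channel)
#   ('active-scan', 'my-network', '11')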
def parse_list(list_string):
"""
Parses IPv6/prefix/route list string (output of wpanctl get for properties WPAN_IP6_ALL_ADDRESSES,
IP6_MULTICAST_ADDRESSES, WPAN_THREAD_ON_MESH_PREFIXES, ...)
Returns an array of strings each containing an IPv6/prefix/route entry.
"""
# List string example (get(WPAN_IP6_ALL_ADDRESSES) output):
#
# '[\n
# \t"fdf4:5632:4940:0:8798:8701:85d4:e2be prefix_len:64 origin:ncp valid:forever preferred:forever"\n
# \t"fe80::2092:9358:97ea:71c6 prefix_len:64 origin:ncp valid:forever preferred:forever"\n
# ]'
#
# We split the lines ('\n' as separator) and skip the first and last lines which are '[' and ']'.
# For each line, skip the first two characters (which are '\t"') and last character ('"'), then split the string
# using whitespace as separator. The first entry is the IPv6 address.
#
return [line[2:-1].split()[0] for line in list_string.split('\n')[1:-1]]
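# Illustrative example based on the list format described above:
#
#   >>> text = ('[\n'
#   ...         '\t"fdf4:5632:4940:0:8798:8701:85d4:e2be prefix_len:64 '
#   ...         'origin:ncp valid:forever preferred:forever"\n'
#   ...         '\t"fe80::2092:9358:97ea:71c6 prefix_len:64 '
#   ...         'origin:ncp valid:forever preferred:forever"\n'
#   ...         ']')
#   >>> parse_list(text)
#   ['fdf4:5632:4940:0:8798:8701:85d4:e2be', 'fe80::2092:9358:97ea:71c6']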
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class OnMeshPrefix(object):
""" This object encapsulates an on-mesh prefix"""
def __init__(self, text):
# Example of expected text:
#
# '\t"fd00:abba:cafe:: prefix_len:64 origin:user stable:yes flags:0x31'
# ' [on-mesh:1 def-route:0 config:0 dhcp:0 slaac:1 pref:1 prio:med] rloc:0x0000"'
m = re.match(
r'\t"([0-9a-fA-F:]+)\s*prefix_len:(\d+)\s+origin:(\w*)\s+stable:(\w*).* \['
+
r'on-mesh:(\d)\s+def-route:(\d)\s+config:(\d)\s+dhcp:(\d)\s+slaac:(\d)\s+pref:(\d)\s+prio:(\w*)\]'
+ r'\s+rloc:(0x[0-9a-fA-F]+)', text)
verify(m is not None)
data = m.groups()
self._prefix = data[0]
self._prefix_len = data[1]
self._origin = data[2]
self._stable = (data[3] == 'yes')
self._on_mesh = (data[4] == '1')
self._def_route = (data[5] == '1')
self._config = (data[6] == '1')
self._dhcp = (data[7] == '1')
self._slaac = (data[8] == '1')
self._preferred = (data[9] == '1')
self._priority = (data[10])
self._rloc16 = (data[11])
@property
def prefix(self):
return self._prefix
@property
def prefix_len(self):
return self._prefix_len
@property
def origin(self):
return self._origin
@property
def priority(self):
return self._priority
def is_stable(self):
return self._stable
def is_on_mesh(self):
return self._on_mesh
def is_def_route(self):
return self._def_route
def is_config(self):
return self._config
def is_dhcp(self):
return self._dhcp
def is_slaac(self):
return self._slaac
def is_preferred(self):
return self._preferred
def rloc16(self):
return self._rloc16
def __repr__(self):
return 'OnMeshPrefix({})'.format(self.__dict__)
def parse_on_mesh_prefix_result(on_mesh_prefix_list):
""" Parses on-mesh prefix list string and returns an array of `OnMeshPrefix` objects"""
return [
OnMeshPrefix(item) for item in on_mesh_prefix_list.split('\n')[1:-1]
]
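# Illustrative example using the on-mesh prefix text format shown in
# `OnMeshPrefix.__init__` above:
#
#   >>> p = OnMeshPrefix(
#   ...     '\t"fd00:abba:cafe:: prefix_len:64 origin:user stable:yes flags:0x31'
#   ...     ' [on-mesh:1 def-route:0 config:0 dhcp:0 slaac:1 pref:1 prio:med]'
#   ...     ' rloc:0x0000"')
#   >>> (p.prefix, p.prefix_len, p.is_slaac(), p.rloc16())
#   ('fd00:abba:cafe::', '64', True, '0x0000')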
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ChildEntry(object):
""" This object encapsulates a child entry"""
def __init__(self, text):
# Example of expected text:
#
# `\t"E24C5F67F4B8CBB9, RLOC16:d402, NetDataVer:175, LQIn:3, AveRssi:-20, LastRssi:-20, Timeout:120, Age:0, `
# `RxOnIdle:no, FTD:no, SecDataReq:yes, FullNetData:yes"`
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._ext_address = items[0]
# Convert the rest into a dictionary by splitting using ':' as
# separator
dict = {item.split(':')[0]: item.split(':')[1] for item in items[1:]}
self._rloc16 = dict['RLOC16']
self._timeout = dict['Timeout']
self._rx_on_idle = (dict['RxOnIdle'] == 'yes')
self._ftd = (dict['FTD'] == 'yes')
self._sec_data_req = (dict['SecDataReq'] == 'yes')
self._full_net_data = (dict['FullNetData'] == 'yes')
@property
def ext_address(self):
return self._ext_address
@property
def rloc16(self):
return self._rloc16
@property
def timeout(self):
return self._timeout
def is_rx_on_when_idle(self):
return self._rx_on_idle
def is_ftd(self):
return self._ftd
def is_sec_data_req(self):
return self._sec_data_req
def is_full_net_data(self):
return self._full_net_data
def __repr__(self):
return 'ChildEntry({})'.format(self.__dict__)
def parse_child_table_result(child_table_list):
""" Parses child table list string and returns an array of `ChildEntry` objects"""
return [ChildEntry(item) for item in child_table_list.split('\n')[1:-1]]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class NeighborEntry(object):
""" This object encapsulates a neighbor entry"""
def __init__(self, text):
# Example of expected text:
#
# `\t"5AC95ED4646D6565, RLOC16:9403, LQIn:3, AveRssi:-20, LastRssi:-20, Age:0, LinkFC:8, MleFC:0, IsChild:yes,'
# 'RxOnIdle:no, FTD:no, SecDataReq:yes, FullNetData:yes"'
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._ext_address = items[0]
# Convert the rest into a dictionary by splitting the text using ':' as
# separator
dict = {item.split(':')[0]: item.split(':')[1] for item in items[1:]}
self._rloc16 = dict['RLOC16']
self._is_child = (dict['IsChild'] == 'yes')
self._rx_on_idle = (dict['RxOnIdle'] == 'yes')
self._ftd = (dict['FTD'] == 'yes')
@property
def ext_address(self):
return self._ext_address
@property
def rloc16(self):
return self._rloc16
def is_rx_on_when_idle(self):
return self._rx_on_idle
def is_ftd(self):
return self._ftd
def is_child(self):
return self._is_child
def __repr__(self):
return 'NeighborEntry({})'.format(self.__dict__)
def parse_neighbor_table_result(neighbor_table_list):
""" Parses neighbor table list string and returns an array of `NeighborEntry` objects"""
return [
NeighborEntry(item) for item in neighbor_table_list.split('\n')[1:-1]
]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class RouterTableEntry(object):
""" This object encapsulates a router table entry"""
def __init__(self, text):
# Example of expected text:
#
# `\t"8A970B3251810826, RLOC16:4000, RouterId:16, NextHop:43, PathCost:1, LQIn:3, LQOut:3, Age:3, LinkEst:yes"`
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._ext_address = items[0]
# Convert the rest into a dictionary by splitting the text using ':' as
# separator
dict = {item.split(':')[0]: item.split(':')[1] for item in items[1:]}
self._rloc16 = int(dict['RLOC16'], 16)
self._router_id = int(dict['RouterId'], 0)
self._next_hop = int(dict['NextHop'], 0)
self._path_cost = int(dict['PathCost'], 0)
self._age = int(dict['Age'], 0)
self._le = (dict['LinkEst'] == 'yes')
@property
def ext_address(self):
return self._ext_address
@property
def rloc16(self):
return self._rloc16
@property
def router_id(self):
return self._router_id
@property
def next_hop(self):
return self._next_hop
@property
def path_cost(self):
return self._path_cost
def is_link_established(self):
return self._le
def __repr__(self):
return 'RouterTableEntry({})'.format(self.__dict__)
def parse_router_table_result(router_table_list):
""" Parses router table list string and returns an array of `RouterTableEntry` objects"""
return [
RouterTableEntry(item) for item in router_table_list.split('\n')[1:-1]
]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class AddressCacheEntry(object):
""" This object encapsulates an address cache entry"""
def __init__(self, text):
# Example of expected text:
#
# '\t"fd00:1234::100:8 -> 0xfffe, Age:1, State:query, CanEvict:no, Timeout:3, RetryDelay:15"`
# '\t"fd00:1234::3:2 -> 0x2000, Age:0, State:cached, LastTrans:0, ML-EID:fd40:ea58:a88c:0:b7ab:4919:aa7b:11a3"`
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._address = items[0]
self._rloc16 = int(items[2], 16)
# Convert the rest into a dictionary by splitting the text using ':' as
# separator
dict = {item.split(':')[0]: item.split(':')[1] for item in items[3:]}
self._age = int(dict['Age'], 0)
self._state = dict['State']
if self._state == ADDRESS_CACHE_ENTRY_STATE_CACHED:
self._last_trans = int(dict.get("LastTrans", "-1"), 0)
else:
self._can_evict = (dict['CanEvict'] == 'yes')
self._timeout = int(dict['Timeout'])
self._retry_delay = int(dict['RetryDelay'])
@property
def address(self):
return self._address
@property
def rloc16(self):
return self._rloc16
@property
def age(self):
return self._age
@property
def state(self):
return self._state
def can_evict(self):
return self._can_evict
@property
def timeout(self):
return self._timeout
@property
def retry_delay(self):
return self._retry_delay
@property
def last_trans(self):
return self._last_trans
def __repr__(self):
return 'AddressCacheEntry({})'.format(self.__dict__)
def parse_address_cache_table_result(addr_cache_table_list):
""" Parses address cache table list string and returns an array of `AddressCacheEntry` objects"""
return [
AddressCacheEntry(item)
for item in addr_cache_table_list.split('\n')[1:-1]
]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class InterfaceRoute(object):
""" This object encapsulates an interface route entry"""
def __init__(self, text):
# Example of expected text:
#
# '\t"fd00:abba::/64 metric:256 "'
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
items = [
item[:-1] if item[-1] == ',' else item
for item in text[2:-1].split()
]
# First item in the extended address
self._route_prefix = items[0].split('/')[0]
self._prefix_len = int(items[0].split('/')[1], 0)
self._metric = int(items[1].split(':')[1], 0)
@property
def route_prefix(self):
return self._route_prefix
@property
def prefix_len(self):
return self._prefix_len
@property
def metric(self):
return self._metric
def __repr__(self):
return 'InterfaceRoute({})'.format(self.__dict__)
def parse_interface_routes_result(interface_routes_list):
""" Parses interface routes list string and returns an array of `InterfaceRoute` objects"""
return [
InterfaceRoute(item) for item in interface_routes_list.split('\n')[1:-1]
]
|
bsd-3-clause
| -7,788,658,360,895,456,000
| 36.195298
| 121
| 0.568218
| false
| 3.540072
| false
| false
| false
|
realspencerdupre/PoS_Sourcecoin
|
contrib/linearize/linearize-hashes.py
|
1
|
2763
|
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = CM_RPC
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
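# Example (illustrative) CONFIG-FILE contents; host, min_height and max_height
# fall back to the defaults above if omitted, rpcuser and rpcpassword are
# required, and the values shown are placeholders only:
#
#   host=127.0.0.1
#   port=8332
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=319000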
|
mit
| 8,287,842,049,013,357,000
| 25.066038
| 78
| 0.6616
| false
| 2.951923
| false
| false
| false
|
xsteadfastx/bib-api
|
app/mod_api/views.py
|
1
|
7896
|
import arrow
from flask import current_app, jsonify, request, Response, g
from itsdangerous import URLSafeSerializer
from app.mod_api import mod_api, schemes
from app.mod_api.decorators import valid_facility, valid_token
from app.mod_api.errors import InvalidUsage
from app.mod_api.ical import build_ical
@mod_api.route('/facilities', methods=['GET'])
def facility_list():
"""List all available facilities.
Request::
http GET "localhost:5000/api/facilities"
Response::
{
"facilities": {
"wolfsburg": {
"city": "Wolfsburg",
"name": "Stadtbibliothek Wolfsburg",
"url": "http://webopac.stadt.wolfsburg.de"
}
}
}
"""
facilities = {}
for i, j in current_app.facilities.items():
facilities[i] = j['metadata']
return jsonify(facilities=facilities)
@mod_api.route('/<facility>/search', methods=['POST'])
@valid_facility
def search(facility):
"""Search library for items.
    It takes a JSON object in a POST request. You can also add a request
    argument named "page" to the URL for pagination.
    Most of the time the library search form returns more items than fit on a
    single page, so some kind of pagination is needed. If the page argument
is given, it will search for the page number and browse to that page
before parsing the result. If not given, it will use the page number "1".
Here is a example:
Request::
http POST "localhost:5000/api/wolfsburg/search?page=4" term="batman"
Response::
{
"next_page": 5,
"results": [
{
"annotation": "Der Schurke Two-Face wurde durch...",
"author": "geschrieben von Matthew K. Manning.",
"copies": [
{
"available": false,
"branch": "01:Kinderbibl. Zentr",
"due_date": "2016-02-05",
"id": "M1400963",
"position": "4.1 Mann",
"type": "Kinder- und Jugendliteratur"
}
],
"cover": "http://foo.bar/images/P/3555829.03.MZZZZZZZ.jpg",
"isbn": "978-3-596-85582-7",
"title": "Batman - Ein finsterer Plan",
"year": "2013-01-01"
}
]
}
:param facility: The facility to search in.
:type facility: str
"""
# get term and validate it
json_data, errors = schemes.SearchRequest().load(request.get_json())
if errors:
raise InvalidUsage(errors)
# parse request args for page
if request.args.get('page'):
if not request.args.get('page').isdigit():
raise InvalidUsage('page type not integer')
page = int(request.args.get('page'))
else:
page = 1
# perform search and marshmallow it
results = current_app.facilities[facility]['search'](json_data['term'],
page)
data = schemes.SearchResponse().dump(results)
return jsonify(data.data)
@mod_api.route('/<facility>/token', methods=['POST'])
@valid_facility
def get_token(facility):
"""Creates a authentication token.
This endpoint returns a authentication token for a specific facility.
Request::
http POST localhost:5000/api/wolfsburg/token username=foo password=bar
Response::
{
"token": "eyJwYXNzd29yZCI6IjoiZm9vIn0.DmRMyew4ukCAZHsnIrs4PaY8"
}
:param facility: The facility to get a token for.
:type facility: str
"""
post_data = request.get_json()
# if there is no data raise an error
if not post_data:
raise InvalidUsage('no data')
# get authentication data and validate it
json_data, errors = schemes.TokenRequest().load(post_data)
if errors:
raise InvalidUsage(errors)
# create serializer
s = URLSafeSerializer(current_app.config['SECRET_KEY'], salt=facility)
# create token
token = s.dumps(json_data)
# scheme it
data = schemes.TokenResponse().dump({'token': token})
return jsonify(data.data)
@mod_api.route('/<facility>/lent', methods=['GET'])
@valid_facility
@valid_token
def lent_list(facility):
"""Returns a list of lent items and the saldo of the account.
    This view returns all lent items in a list with the title and author,
    plus the date by which each item needs to be returned. It also tries to
    get the saldo (balance) of the account.
Request::
http GET localhost:5000/api/wolfsburg/lent?token=pIUBfh1BSvoROF8wgHse
Response::
{
'saldo': '-36,00',
'items': [
{
'due_date': '2016-04-15', 'author': 'Dürer, Albrecht',
'title': 'Albrecht Dürer'
}, {
'due_date': '2016-04-15', 'author': 'Hopkins, John',
'title': 'Modezeichnen'
}, {
'due_date': '2016-04-15', 'author': 'Hopper, Edward',
'title': 'Edward Hopper'
}
]
}
:param facility: The facility to get a lent list from.
:type facility: str
"""
s = URLSafeSerializer(current_app.config['SECRET_KEY'], salt=facility)
token = request.args['token']
userdata = s.loads(token)
lent_list = current_app.facilities[facility]['lent_list'](
userdata['username'], userdata['password'])
data = schemes.LentListResponse().dump(lent_list)
return jsonify(data.data)
@mod_api.route('/<facility>/ical/lent.ics', methods=['GET'])
@valid_facility
@valid_token
def lent_ical(facility):
"""Returns a calendar for all lent items in the ical format.
    The calendar file includes the return dates of all lent items. It can be
    imported into calendar software such as Google Calendar or
    Thunderbird Lightning.
Request::
http GET localhost:5000/api/wolfsburg/ical/lent.ics?token=pIUBfh1se
Response::
BEGIN:VCALENDAR
PRODID:ics.py - http://git.io/lLljaA
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20160609T101434Z
DTSTART:20160415T000000Z
SUMMARY:Bibliotheksrueckgaben: 2
DESCRIPTION:Dürer\, Albrecht: Albrecht Dürer\\nHopper\, Edward: Edward
UID:7a3fcb35-2cb4-48d3-ab56-2cf62af04337@7a3f.org
END:VEVENT
BEGIN:VEVENT
DTSTAMP:20160609T101434Z
DTSTART:20160420T000000Z
SUMMARY:Bibliotheksrueckgaben: 1
DESCRIPTION:Hopkins\, John: Modezeichnen
UID:86474116-dfd6-408f-9c2b-2e2cb552ab9b@8647.org
END:VEVENT
END:VCALENDAR
:param facility: The facility to get a lent list from.
:type facility: str
"""
s = URLSafeSerializer(current_app.config['SECRET_KEY'], salt=facility)
token = request.args['token']
# check if token already in redis
redis_entry = g.redis.hgetall(token)
if redis_entry:
two_hours_ago = arrow.utcnow().replace(hours=-2)
updated = arrow.get(redis_entry[b'updated'].decode('utf-8'))
if updated > two_hours_ago:
ical = redis_entry[b'ical'].decode('utf-8')
return Response(ical, mimetype='text/calendar')
userdata = s.loads(token)
lent_list = current_app.facilities[facility]['lent_list'](
userdata['username'], userdata['password'])
data = schemes.LentListResponse().dump(lent_list)
ical = build_ical(data.data)
# store new ical in redis
g.redis.hmset(token, dict(ical=ical, updated=arrow.utcnow()))
return Response(ical, mimetype='text/calendar')
|
mit
| -5,869,231,872,556,535,000
| 28.014706
| 79
| 0.589965
| false
| 3.71039
| false
| false
| false
|
navtejsingh/pychimera
|
chimera/centroid.py
|
1
|
2287
|
from __future__ import division
import numpy as np
from photutils.morphology import centroid_com, centroid_1dg, centroid_2dg
def recenter(image, pos, window_size = 15, method = "2dg"):
"""
Recenter each star in each frame of the image cube before performing
aperture photometry to take care of slight misalignments between frames
because of atmospheric turbulence and tracking/pointing errors.
Parameters
----------
image : numpy array
2D image
pos : list
List of (x,y) tuples for star positions
window_size : int
Window size in which to fit the gaussian to the star to calculate
new center
method : string
Method used to find center of the star. Options are 1d Gaussian fit,
2d gaussian fit or com (center of mass)
Returns
-------
xcen, ycen : float
Source x and y centers
"""
pos = np.asarray(pos)
ny, nx = image.shape
window_size = int(window_size)
nstars = pos.shape[0]
star_pos = np.zeros([nstars,2], dtype = np.float32)
for i in range(nstars):
x, y = pos[i][0], pos[i][1]
xmin, xmax = int(x) - int(window_size/2), int(x) + int(window_size/2) + 1
ymin, ymax = int(y) - int(window_size/2), int(y) + int(window_size/2) + 1
if xmin < 0:
xmin = 0
if ymin < 0:
ymin = 0
if xmax > nx:
xmax = nx
if ymax > ny:
ymax = ny
if method == "1dg":
xcen, ycen = centroid_1dg(image[ymin:ymax,xmin:xmax])
elif method == "2dg":
xcen, ycen = centroid_2dg(image[ymin:ymax,xmin:xmax])
elif method == "com":
xcen, ycen = centroid_com(image[ymin:ymax,xmin:xmax])
if (np.abs(xmin + xcen - x)) > 3. or (np.abs(ymin + ycen - y)) > 3.:
star_pos[i,0] = x
star_pos[i,1] = y
else:
star_pos[i,0] = xmin + xcen
star_pos[i,1] = ymin + ycen
return star_pos
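# Illustrative usage sketch (the image and rough star positions below are made
# up for demonstration only):
#
#   import numpy as np
#   image = np.random.rand(480, 640)
#   rough_positions = [(100.0, 200.0), (320.5, 240.2)]
#   refined = recenter(image, rough_positions, window_size=15, method="2dg")
#   # `refined` is an (nstars, 2) float32 array of [x, y] centers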
|
bsd-3-clause
| 6,130,274,085,771,288,000
| 29.918919
| 94
| 0.493223
| false
| 3.850168
| false
| false
| false
|
lewisodriscoll/sasview
|
src/sas/sasgui/guiframe/local_perspectives/plotting/graphAppearance.py
|
3
|
10122
|
#!/usr/bin/python
"""
Dialog for general graph appearance
This software was developed by Institut Laue-Langevin as part of
Distributed Data Analysis of Neutron Scattering Experiments (DANSE).
Copyright 2012 Institut Laue-Langevin
"""
import wx
from sas.sasgui.plottools.SimpleFont import SimpleFont
COLOR = ['black', 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow']
class graphAppearance(wx.Frame):
def __init__(self, parent, title, legend=True):
super(graphAppearance, self).__init__(parent, title=title, size=(520, 435))
self.legend = legend
self.InitUI()
self.Centre()
self.Show()
self.xfont = None
self.yfont = None
self.is_xtick = False
self.is_ytick = False
def InitUI(self):
panel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
xhbox1 = wx.BoxSizer(wx.HORIZONTAL)
xhbox2 = wx.BoxSizer(wx.HORIZONTAL)
yhbox1 = wx.BoxSizer(wx.HORIZONTAL)
yhbox2 = wx.BoxSizer(wx.HORIZONTAL)
if self.legend:
legendLocText = wx.StaticText(panel, label='Legend location: ')
self.legend_loc_combo = wx.ComboBox(panel, style=wx.CB_READONLY, size=(180, -1))
self.fillLegendLocs()
else:
self.legend_loc_combo = None
if self.legend:
self.toggle_legend = wx.CheckBox(panel, label='Toggle legend on/off')
else:
self.toggle_legend = None
self.toggle_grid = wx.CheckBox(panel, label='Toggle grid on/off')
xstatic_box = wx.StaticBox(panel, -1, 'x-axis label')
xstatic_box_sizer = wx.StaticBoxSizer(xstatic_box, wx.VERTICAL)
ystatic_box = wx.StaticBox(panel, -1, 'y-axis label')
ystatic_box_sizer = wx.StaticBoxSizer(ystatic_box, wx.VERTICAL)
xaxis_label = wx.StaticText(panel, label='X-axis: ')
yaxis_label = wx.StaticText(panel, label='Y-axis: ')
unitlabel_1 = wx.StaticText(panel, label='Units: ')
unitlabel_2 = wx.StaticText(panel, label='Units: ')
self.xaxis_text = wx.TextCtrl(panel, -1, "", size=(220, -1))
self.yaxis_text = wx.TextCtrl(panel, -1, "", size=(220, -1))
self.xaxis_unit_text = wx.TextCtrl(panel, -1, "", size=(100, -1))
self.yaxis_unit_text = wx.TextCtrl(panel, -1, "", size=(100, -1))
xcolorLabel = wx.StaticText(panel, label='Font color: ')
self.xfont_color = wx.ComboBox(panel, size=(100, -1), style=wx.CB_READONLY)
self.xfill_colors()
self.xfont_color.SetSelection(0)
xfont_button = wx.Button(panel, label='Font')
xfont_button.Bind(wx.EVT_BUTTON, self.on_x_font)
ycolorLabel = wx.StaticText(panel, label='Font color: ')
self.yfont_color = wx.ComboBox(panel, size=(100, -1), style=wx.CB_READONLY)
self.yfill_colors()
self.yfont_color.SetSelection(0)
yfont_button = wx.Button(panel, label='Font')
yfont_button.Bind(wx.EVT_BUTTON, self.on_y_font)
self.cancel_button = wx.Button(panel, label='Cancel')
self.ok_button = wx.Button(panel, label='OK')
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel)
self.ok_button.Bind(wx.EVT_BUTTON, self.on_ok)
xhbox1.Add(xaxis_label, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=10)
xhbox1.Add(self.xaxis_text, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=10)
xhbox1.Add(unitlabel_1, flag=wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, border=10)
xhbox1.Add(self.xaxis_unit_text, flag=wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, border=10)
yhbox1.Add(yaxis_label, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=10)
yhbox1.Add(self.yaxis_text, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=10)
yhbox1.Add(unitlabel_2, flag=wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, border=10)
yhbox1.Add(self.yaxis_unit_text, flag=wx.ALL | wx.EXPAND | wx.ALIGN_RIGHT, border=10)
xhbox2.Add(xcolorLabel, flag=wx.ALL | wx.ALIGN_RIGHT, border=10)
xhbox2.Add(self.xfont_color, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
xhbox2.Add(xfont_button, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
yhbox2.Add(ycolorLabel, flag=wx.ALL | wx.ALIGN_RIGHT, border=10)
yhbox2.Add(self.yfont_color, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
yhbox2.Add(yfont_button, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
if self.legend:
hbox1.Add(legendLocText, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=5)
hbox1.Add(self.legend_loc_combo, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=5)
if self.legend:
hbox1.Add((5, -1))
hbox1.Add(self.toggle_legend, flag=wx.ALL | wx.EXPAND | wx.ALIGN_LEFT, border=5)
hbox2.Add(self.ok_button, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
hbox2.Add(self.cancel_button, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
hbox2.Add((15, -1))
xstatic_box_sizer.Add(xhbox1, flag=wx.EXPAND, border=5)
xstatic_box_sizer.Add(xhbox2, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
ystatic_box_sizer.Add(yhbox1, flag=wx.EXPAND, border=5)
ystatic_box_sizer.Add(yhbox2, flag=wx.ALL | wx.ALIGN_RIGHT, border=5)
vbox.Add((-1, 20))
vbox.Add(hbox1, flag=wx.EXPAND | wx.ALL, border=5)
vbox.Add(xstatic_box_sizer, flag=wx.ALL | wx.EXPAND, border=10)
vbox.Add(ystatic_box_sizer, flag=wx.ALL | wx.EXPAND, border=10)
vbox.Add(self.toggle_grid, flag=wx.ALIGN_RIGHT | wx.RIGHT, border=20)
vbox.Add(hbox2, flag=wx.ALIGN_RIGHT | wx.ALL, border=5)
panel.SetSizer(vbox)
def xfill_colors(self):
c_list = COLOR
for idx in range(len(c_list)):
self.xfont_color.Append(c_list[idx], idx)
def yfill_colors(self):
c_list = COLOR
for idx in range(len(c_list)):
self.yfont_color.Append(c_list[idx], idx)
def on_x_font(self, e):
title = 'Modify x axis font'
fonty = SimpleFont(self, wx.NewId(), title)
fonty.set_default_font(self.xfont)
fonty.set_ticklabel_check(self.is_xtick)
if fonty.ShowModal() == wx.ID_OK:
self.xfont = fonty.get_font()
self.is_xtick = fonty.get_ticklabel_check()
def on_y_font(self, e):
title = 'Modify y axis font'
fonty = SimpleFont(self, wx.NewId(), title)
fonty.set_default_font(self.yfont)
fonty.set_ticklabel_check(self.is_ytick)
if fonty.ShowModal() == wx.ID_OK:
self.yfont = fonty.get_font()
self.is_ytick = fonty.get_ticklabel_check()
def on_ok(self, e):
self.Close()
def on_cancel(self, e):
self.Destroy()
def get_loc_label(self):
"""
Associates label to a specific legend location
"""
_labels = {}
i = 0
_labels['best'] = i
i += 1
_labels['upper right'] = i
i += 1
_labels['upper left'] = i
i += 1
_labels['lower left'] = i
i += 1
_labels['lower right'] = i
i += 1
_labels['right'] = i
i += 1
_labels['center left'] = i
i += 1
_labels['center right'] = i
i += 1
_labels['lower center'] = i
i += 1
_labels['upper center'] = i
i += 1
_labels['center'] = i
return _labels
def fillLegendLocs(self):
# labels = []
# for label in self.get_loc_label():
# labels.append(str(label))
# for label in reversed(labels):
# self.legend_loc_combo.Append(label)
for label in self.get_loc_label():
self.legend_loc_combo.Append(label)
def setDefaults(self, grid, legend, xlab, ylab, xunit, yunit,
xaxis_font, yaxis_font, legend_loc,
xcolor, ycolor, is_xtick, is_ytick):
self.toggle_grid.SetValue(grid)
if self.legend:
self.toggle_legend.SetValue(legend)
self.xaxis_text.SetValue(xlab)
self.yaxis_text.SetValue(ylab)
self.xaxis_unit_text.SetValue(xunit)
self.yaxis_unit_text.SetValue(yunit)
self.xfont = xaxis_font
self.yfont = yaxis_font
self.is_xtick = is_xtick
self.is_ytick = is_ytick
if not xcolor:
self.xfont_color.SetSelection(0)
else:
self.xfont_color.SetStringSelection(xcolor)
if not ycolor:
self.yfont_color.SetSelection(0)
else:
self.yfont_color.SetStringSelection(ycolor)
if self.legend:
self.legend_loc_combo.SetStringSelection(legend_loc)
# get whether grid is toggled on/off
def get_togglegrid(self):
return self.toggle_grid.GetValue()
# get whether legend is toggled on/off
def get_togglelegend(self):
return self.toggle_legend.GetValue()
# get x label
def get_xlab(self):
return self.xaxis_text.GetValue()
# get y label
def get_ylab(self):
return self.yaxis_text.GetValue()
# get x unit
def get_xunit(self):
return self.xaxis_unit_text.GetValue()
# get y unit
def get_yunit(self):
return self.yaxis_unit_text.GetValue()
# get legend location
def get_legend_loc(self):
return self.get_loc_label()[self.legend_loc_combo.GetStringSelection()]
# get x axis label color
def get_xcolor(self):
return self.xfont_color.GetValue()
# get y axis label color
def get_ycolor(self):
return self.yfont_color.GetValue()
# get x axis font (type is FontProperties)
def get_xfont(self):
return self.xfont
# get y axis font
def get_yfont(self):
return self.yfont
def get_xtick_check(self):
return self.is_xtick
def get_ytick_check(self):
return self.is_ytick
if __name__ == '__main__':
app = wx.App()
graphD = graphAppearance(None, title='Modify graph appearance')
app.MainLoop()
|
bsd-3-clause
| 4,084,120,535,552,454,700
| 31.757282
| 95
| 0.598202
| false
| 3.206208
| false
| false
| false
|
daviewales/pimotion
|
pimotion/backend.py
|
1
|
4082
|
#!/usr/bin/env python3
import picamera
import numpy
import io
import time
def get_png_image(resolution=(640, 480)):
width, height = resolution
image_stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.resolution = resolution
camera.start_preview()
camera.capture(image_stream, format='png')
image_stream.seek(0)
return image_stream.read()
def get_image(resolution=(640, 480)):
'''
    Yield grayscale (Y-plane) images of the specified resolution as
    2D numpy arrays.
'''
width, height = resolution
pixels = width * height
image_stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.resolution = resolution
camera.start_preview()
time.sleep(2) # Let the camera 'warm up'.
while True:
camera.capture(image_stream, format='yuv', use_video_port=True)
image_stream.seek(0)
image_bytes = image_stream.read(pixels)
image = numpy.fromstring(image_bytes, count=pixels,
dtype=numpy.int8)
image = image.reshape((height, width))[:height, :width]
yield image
image_stream.seek(0)
camera.stop_preview()
def difference_image(image1, image2, threshold):
height, width = image1.shape
return abs(image1 - image2).astype(numpy.uint8) > threshold
def motion_coordinates(difference_image, tile_width, tile_height, tile_motion):
"""
Get the coordinates of motion from a difference_image.
Split the image into tiles with dimensions
tile_width * tile_height.
Return the coordinates of the centre of each tile where the sum of
motion pixels within the tile is >= tile_motion * tile_area.
"""
height, width = difference_image.shape
tile_area = tile_height * tile_width
# tile_motion * tile_area gives the total number of
# changed pixels within a given tile required for
# motion to be registered.
changed_pixel_threshold = tile_motion * tile_area
centre_offset_x, centre_offset_y = tile_width//2, tile_height//2
coordinates = [
[x + centre_offset_x, y + centre_offset_y]
for x in range(0, width, tile_width)
for y in range(0, height, tile_height)
if difference_image[y:y+tile_height, x:x+tile_width].sum()
>= changed_pixel_threshold]
return coordinates
def get_motion_data(resolution=(640, 480), threshold=16,
tile_dimensions=(20, 20), tile_motion=0.8):
'''
    Yield lists of [x, y] coordinates of detected motion.
resolution is a tuple containing the dimensions of the image:
resolution = (width, height).
threshold is a number specifying the minimum change in pixel intensity
required for motion to be registered.
tile_dimensions is a tuple containing the dimensions of the tiles
which the image will be divided into to check for motion.
tile_dimensions = (width, height).
tile_motion is the fraction of a given tile which must contain motion
for motion to be registered. For instance, if we are using 20x20 tiles,
then the total number of pixels contained in a given tile is 400 pixels.
    If tile_motion == 1, then a tile will not be registered as containing
    motion if fewer than 400 pixels within the tile contain motion.
However, if tile_motion == 0.5, then only half the tile must contain motion
in order for the tile to be registered as motion.
'''
width, height = resolution
tile_width, tile_height = tile_dimensions
threshold = threshold * numpy.ones((height, width), dtype=numpy.uint8)
image_generator = get_image(resolution=resolution)
image1 = next(image_generator)
while True:
image2 = next(image_generator)
difference = difference_image(image1, image2, threshold)
motion = motion_coordinates(difference, tile_width,
tile_height, tile_motion)
yield motion
image1 = image2
if __name__ == '__main__':
print("You aren't supposed to run this directly!")
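# Illustrative usage sketch (requires a Raspberry Pi camera; the parameter
# values below are just examples):
#
#   for coords in get_motion_data(resolution=(640, 480), threshold=16,
#                                 tile_dimensions=(20, 20), tile_motion=0.8):
#       if coords:
#           print('motion at tile centres:', coords)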
|
bsd-2-clause
| -5,974,357,752,693,007,000
| 32.735537
| 79
| 0.656786
| false
| 4.098394
| false
| false
| false
|
yukisakurai/hhntup
|
higgstautau/datasets.py
|
1
|
53181
|
"""
This module generates a database of all MC and data datasets
"""
from rootpy.io import root_open, DoesNotExist
#from multiprocessing import Pool, cpu_count
import sys
from operator import itemgetter
import logging
import re
import glob
import os
import cPickle as pickle
import atexit
import fnmatch
from collections import namedtuple
import yaml
from . import log; log = log[__name__]
from .decorators import cached_property
from .yaml_utils import Serializable
from . import xsec
USE_PYAMI = True
try:
from pyAMI.client import AMIClient
from pyAMI.query import get_dataset_xsec_effic, \
get_dataset_info, \
get_datasets, \
get_provenance, \
get_periods, \
get_runs
from pyAMI import query
from pyAMI.auth import AMI_CONFIG, create_auth_config
except ImportError:
USE_PYAMI = False
log.warning("pyAMI is not installed. "
"Cross section retrieval will be disabled.")
# data types
DATA, MC, EMBED, MCEMBED = range(4)
TYPES = {
'DATA': DATA,
'MC': MC,
'EMBED': EMBED,
'MCEMBED': MCEMBED,
}
Namedset = namedtuple('Namedset',
'name tags meta properties')
Dataset = namedtuple('Dataset',
Namedset._fields + ('datatype',))
class Fileset(namedtuple('Fileset', Dataset._fields + ('files', 'treename'))):
def split(self, partitions):
files = self.files[:]
fileset_files = [[] for _ in xrange(partitions)]
while len(files) > 0:
for fileset in fileset_files:
if len(files) > 0:
fileset.append(files.pop(0))
else:
break
mydict = self._asdict()
filesets = []
for fileset in fileset_files:
mydict['files'] = fileset
filesets.append(Fileset(**mydict))
return filesets
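    # Illustrative example: splitting a `Fileset` holding files
    # [f1, f2, f3, f4, f5] into 2 partitions distributes them round-robin,
    # yielding two `Fileset`s with [f1, f3, f5] and [f2, f4] respectively.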
class Treeset(namedtuple('Treeset', Dataset._fields + ('trees',))):
def GetEntries(self, *args, **kwargs):
return sum([tree.GetEntries(*args, **kwargs) for tree in self.trees])
def Scale(self, value):
for tree in self.trees:
tree.Scale(value)
def __iter__(self):
for tree in self.trees:
yield tree
def Draw(self, *args, **kwargs):
for tree in self.trees:
tree.Draw(*args, **kwargs)
ATLASFileset = namedtuple('ATLASFileset', Fileset._fields + ('year', 'grl',))
DS_PATTERN = re.compile(
'^(?P<prefix>\S+\.)?'
'(?P<type>(data|mc))(?P<year>\d+)_(?P<energy>\d+)TeV'
'\.(?P<id>(\d+|period[A-Z]))'
'\.(?P<name>\w+)'
'(\.PhysCont)?'
'(\.(?P<ntup>merge\.NTUP_TAU(MEDIUM)?))?'
'\.(?P<tag>\w+)'
'(\.small)?'
'(\.v(?P<version>\d+))?(_s)?'
'\.(?P<suffix>\S+)$')
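# Illustrative (hypothetical) example of how DS_PATTERN decomposes a dataset
# name; the name below is made up purely for demonstration:
#
#   >>> m = re.match(DS_PATTERN,
#   ...              'mc12_8TeV.125205.PowhegPythia8_ggH125_tautauhh'
#   ...              '.merge.NTUP_TAU.e1217_s1469_s1470_r3542_p1344.myskim')
#   >>> (m.group('type'), m.group('year'), m.group('id'), m.group('tag'))
#   ('mc', '12', '125205', 'e1217_s1469_s1470_r3542_p1344')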
MC_TAG_PATTERN1 = re.compile(
'^e(?P<evnt>\d+)_'
's(?P<digi>\d+)_'
's(?P<digimerge>\d+)_'
'r(?P<reco>\d+)_'
'r(?P<recomerge>\d+)_'
'p(?P<ntup>\d+)$')
# not all valid samples have a recomerge tag:
MC_TAG_PATTERN2 = re.compile(
'^e(?P<evnt>\d+)_'
'[sa](?P<digi>\d+)_'
'[sa](?P<digimerge>\d+)_'
'r(?P<reco>\d+)_'
'p(?P<ntup>\d+)$')
# Embedded sample pattern
EMBED_PATTERN11 = re.compile(
'^(?P<prefix>\S+)?'
'period(?P<period>[A-Z])'
'\.DESD_SGLMU'
'\.pro(?P<prod>\d+)'
'\.embedding-(?P<embedtag>\S+)?'
'\.Ztautau_'
'(?P<channel>(lh)|(hh))_'
'(?P<isol>[a-z]+)_'
'(?P<mfs>[a-z]+)_'
'rereco_'
'p(?P<tag>\d+)_'
'EXT0'
'(\.(?P<suffix>\S+))?$')
EMBED_PATTERN12 = re.compile(
'^(?P<prefix>\S+)?'
'period(?P<period>[A-Z])'
'\.DESD_ZMUMU'
'\.pro(?P<prod>\d+)'
'\.embedding-(?P<embedtag>\S+)?'
'\.Ztautau_'
'(?P<channel>(lh)|(hh))_'
'(((high)|(low))pt_)?'
'(?P<mfs>[a-z]+)_'
'filter_'
'taureco_'
'p(?P<tag>\d+)_'
'EXT0'
'(\.(?P<suffix>\S+))?$')
EMBED_PATTERN12_NEW = re.compile(
'^(?P<prefix>\S+)?'
'data12_8TeV\.'
'period(?P<period>[A-Z])\.'
'physics_Muons\.PhysCont\.'
'NTUP_EMB(?P<channel>(LH)|(HH))'
'(?P<sys>(DN)|(IM)|(UP))\.'
'(?P<suffix>\S+)')
MC_EMBED_PATTERN = re.compile(
'^(?P<prefix>\S+)?'
'Pyth8.DESD_SGLMU.pro14.embedding-01-01-10.'
'Ztautau_MCEmbedding[\d]*_hh(?P<sys>(dn)|(up))?_p1344_EXT0'
'(\.(?P<suffix>\S+))?$')
## Common lephad ntuple pattern
CN_MC_PATTERN12 = re.compile(
'^(?P<prefix>\S+\.)?'
'(?P<id>\d+)'
'\.(?P<name>\w+)'
'\.(?P<tag>\w+)'
'_lhCN'
'(v(?P<version1>\d+))?'
'(-(?P<version2>\d+))?'
'(-(?P<version3>\d+))?'
'\.(?P<suffix>\S+)$')
CN_DATA_PATTERN12 = re.compile(
'^(?P<prefix>\S+\.)?'
'data12_8TeV\.'
'(?P<id>\S+)'
'\.(?P<name>\w+)'
'((\.TunaCont.2013-March-29.v03)?)'
'\.(?P<tag>\w+)'
'_lhCN'
'(v(?P<version1>\d+))?'
'(-(?P<version2>\d+))?'
'\.(?P<suffix>\S+)$')
CN_EMBED_PATTERN12 = re.compile(
'^(?P<prefix>\S+\.)?'
'data12_8TeV\.'
'(?P<id>\S+)'
'\.(?P<name>\w+)'
'\.PhysCont'
'((\.NTUP_EMB)?)'
'(?P<channel>(LH)|(HH))'
'(?P<mfs>(IM)|(UP)|(DN))'
'\.grp14_v02'
'\_(?P<tag>\w+)'
'_lhCN'
'(v(?P<version1>\d+))?'
'(-(?P<version2>\d+))?'
'(-(?P<version3>\d+))?'
'\.(?P<suffix>\S+)$')
# MC[11|12][a|b|c|...] categories are defined here
# Each MC dataset is automatically classified
# according to these categories by matching the reco
# and merge tags of the dataset name.
# Order by decreasing preference:
MC_CATEGORIES = {
'mc11a': {'reco': (2730, 2731),
'merge': (2780, 2700)},
'mc11b': {'reco': (2920, 2923),
'merge': (3063, 2993, 2900)},
'mc11c': {'reco': (3043, 3060, 3108),
'merge': (3109, 3063, 2993)},
'mc12a': {'reco': (3753, 3752, 3658, 3605, 3553, 3542, 3549),
'merge': (3549,)},
'mc12b': {'reco': (4485, 5470,),
'merge': (4540,)}}
HERE = os.path.dirname(os.path.abspath(__file__))
# Any datasets which don't have the provenance stored properly in AMI
# should be hardcoded here (it happens)
DS_NOPROV = {}
# Cross-sections are cached so that we don't need to keep asking AMI
# for them over and over
XSEC_CACHE_FILE = os.path.join(HERE, 'xsec', 'cache.pickle')
XSEC_CACHE_MODIFIED = False
XSEC_CACHE = {}
if USE_PYAMI:
amiclient = AMIClient()
if not os.path.exists(AMI_CONFIG):
create_auth_config()
amiclient.read_config(AMI_CONFIG)
class NoMatchingDatasetsFound(Exception):
pass
GLOBAL_BASE = '/global/'
def find_global(path):
if not path.startswith('/global/'):
raise ValueError("path must be absolute and rooted at /global")
path = re.sub('^/global/', '/cluster/data%02d/export/', path)
for node in range(1, 13):
if os.path.exists(path % node):
return path % node
raise IOError('path %s does not exist' % path)
class Database(dict):
@classmethod
def match_to_ds(cls, match):
"""
Construct the original NTUP dataset name from a skim match object
"""
if match.group('year') == '11':
ntup = 'merge.NTUP_TAUMEDIUM'
else:
ntup = 'merge.NTUP_TAU'
return '%s%s_%sTeV.%s.%s.%s.%s' % (
match.group('type'),
match.group('year'),
match.group('energy'),
match.group('id'),
match.group('name'),
ntup,
match.group('tag'))
def __init__(self, name='datasets', verbose=False, stream=None):
super(Database, self).__init__()
self.name = name
self.verbose = verbose
self.filepath = os.path.join(HERE, '%s.yml' % self.name)
if os.path.isfile(self.filepath):
with open(self.filepath) as db:
log.info("Loading database '%s' ..." % self.name)
d = yaml.load(db)
if d:
self.update(d)
self.modified = False
if stream is None:
self.stream = sys.stdout
else:
self.stream = stream
def write(self):
if self.modified:
with open(self.filepath, 'w') as db:
log.info("Saving database '%s' ..." % self.name)
yaml.dump(dict(self), db)
def reset(self):
return self.clear()
def clear(self):
# erase all datasets in database
log.info("Resetting database '%s' ..." % self.name)
super(Database, self).clear()
self.modified = True
def validate(self,
pattern=None,
datatype=None,
year=None):
ds = {}
for name, info in self.items():
if year is not None and info.year != year:
continue
if datatype is not None and info.datatype != datatype:
continue
if info.datatype == DATA and info.id < 0:
# only validate data run datasets
continue
if pattern is None or fnmatch.fnmatch(name, pattern):
ds[name] = info
incomplete = []
for name, info in sorted(ds.items(), key=lambda item: item[0]):
log.info("Validating %s ..." % name)
complete = validate_single((name, info), child=False)
log.info("Complete: %s" % complete)
log.info('-' * 50)
if not complete:
incomplete.append(info.ds)
#pool = Pool(processes=cpu_count())
#for result, complete in pool.map(
# validate_single, sorted(ds.items(), key=itemgetter(0))):
# print result
# print "Complete: %s" % complete
# print '-'*50
# if not complete:
# all_complete = False
if not incomplete:
log.info("ALL DATASETS ARE COMPLETE")
else:
log.warning("SOME DATASETS ARE NOT COMPLETE:")
for ds in incomplete:
print ds
def scan(self, year,
mc_path=None,
mc_prefix=None,
mc_pattern=None,
mc_treename=None,
mc_sampletype=None,
data_path=None,
data_prefix=None,
data_pattern=None,
data_treename=None,
data_sampletype=None,
data_grl=None,
data_period_containers=False,
embed_path=None,
embed_prefix=None,
embed_pattern=None,
embed_treename=None,
embed_sampletype=None,
versioned=False,
deep=False):
"""
Update the dataset database
"""
log.info("Updating database '%s' ..." % self.name)
self.modified = True
###############################
# MC
###############################
if mc_path is not None:
if deep:
mc_dirs = get_all_dirs_under(mc_path, prefix=mc_prefix)
else:
if mc_prefix:
mc_dirs = glob.glob(os.path.join(mc_path, mc_prefix) + '*')
else:
mc_dirs = glob.glob(os.path.join(mc_path, '*'))
for dir in mc_dirs:
dirname, basename = os.path.split(dir)
if mc_sampletype == 'standard':
match = re.match(DS_PATTERN, basename)
if match:
if int(match.group('year')) != (year % 1E3):
continue
if match.group('type') != 'mc':
continue
ds_name = Database.match_to_ds(match)
name = match.group('name')
tag = match.group('tag')
try:
version = int(match.group('version'))
                        except (IndexError, TypeError):
                            # the version group may be absent (None)
                            version = 0
except:
log.warning(basename)
raise
tag_match = re.match(MC_TAG_PATTERN1, tag)
tag_match2 = re.match(MC_TAG_PATTERN2, tag)
MC_TAG_PATTERN = MC_TAG_PATTERN1
if (tag_match2 and not tag_match) :
tag_match = tag_match2
MC_TAG_PATTERN = MC_TAG_PATTERN2
if not tag_match:
log.warning("not tag-matched: %s" % basename)
continue
cat = None
for cat_name, cat_params in MC_CATEGORIES.items():
if int(tag_match.group('reco')) in cat_params['reco']:
cat = cat_name
break
if cat is None:
log.warning(
"does not match a category: %s" % basename)
continue
name += '.' + cat
dataset = self.get(name, None)
if dataset is not None and version == dataset.version:
if tag != dataset.tag:
this_reco = int(tag_match.group('reco'))
other_reco = int(
re.match(dataset.tag_pattern,
dataset.tag).group('reco'))
use_mergetag = True
try:
this_merge = int(tag_match.group('recomerge'))
other_merge = int(
re.match(dataset.tag_pattern,
dataset.tag).group('recomerge'))
except IndexError:
use_mergetag = False
cat_params = MC_CATEGORIES[cat]
reco_tags = list(cat_params['reco'])
merge_tags = list(cat_params['merge'])
assert(this_reco in reco_tags and other_reco in reco_tags)
take_this = False
if reco_tags.index(this_reco) < reco_tags.index(other_reco):
take_this = True
elif (use_mergetag and this_reco == other_reco and
(merge_tags.index(this_merge) <
merge_tags.index(other_merge))):
take_this = True
if take_this:
log.warning("taking %s over %s" % (
basename, dataset.ds))
self[name] = Dataset(
name=name,
datatype=MC,
treename=mc_treename,
ds=ds_name,
id=int(match.group('id')),
category=cat,
version=version,
tag_pattern=MC_TAG_PATTERN.pattern,
tag=tag,
dirs=[dir],
file_pattern=mc_pattern,
year=year)
elif dir not in dataset.dirs:
dataset.dirs.append(dir)
elif dataset is None or (
dataset is not None and version > dataset.version):
self[name] = Dataset(
name=name,
datatype=MC,
treename=mc_treename,
ds=ds_name,
id=int(match.group('id')),
category=cat,
version=version,
tag_pattern=MC_TAG_PATTERN.pattern,
tag=tag,
dirs=[dir],
file_pattern=mc_pattern,
year=year)
elif self.verbose:
log.warning("not a valid mc dataset name: %s" % basename)
elif mc_sampletype == 'lhCN':
match = re.match(CN_MC_PATTERN12, basename)
if match:
name = match.group('name')
cat = 'mc12a'
tag = match.group('tag')
year = 2012
## Calculate a version int
version_1 = match.group('version1')
version_2 = match.group('version2')
version = int(version_1)*1000 + int(version_2)*10
dataset = self.get(name, None)
if dataset is not None and version == dataset.version:
if dir not in dataset.dirs:
dataset.dirs.append(dir)
else:
log.info('\'%s\',' % name)
self[name] = Dataset(
name=name,
datatype=MC,
treename=mc_treename,
ds=name,
id=int(match.group('id')),
category=cat,
version=version,
tag_pattern=None,
tag=tag,
dirs=[dir],
file_pattern=mc_pattern,
year=year)
#####################################
# EMBEDDING
#####################################
if embed_path is not None:
if deep:
embed_dirs = get_all_dirs_under(embed_path, prefix=embed_prefix)
else:
if embed_prefix:
embed_dirs = glob.glob(
os.path.join(embed_path, embed_prefix) + '*')
else:
embed_dirs = glob.glob(
os.path.join(embed_path, '*'))
if embed_sampletype == 'new':
EMBED_PATTERN = EMBED_PATTERN12_NEW
# determine what channels are available
channels = {}
for dir in embed_dirs:
if os.path.isdir(dir):
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
channel = match.group('channel')
if channel not in channels:
channels[channel] = []
channels[channel].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
elif self.verbose:
log.warning("skipping file: %s" % dir)
for channel, channel_dirs in channels.items():
syst = {}
for dir in channel_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
isol = match.group('sys')
if isol not in syst:
syst[isol] = []
syst[isol].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for syst_type, dirs in syst.items():
name = 'embed%d-%s-%s' % (
year % 1000, channel, syst_type)
self[name] = Dataset(
name,
datatype=EMBED,
treename=embed_treename,
ds=name,
id=1,
grl=data_grl,
dirs=dirs,
file_pattern=embed_pattern,
year=year)
elif embed_sampletype == 'standard':
if year == 2011:
EMBED_PATTERN = EMBED_PATTERN11
else:
EMBED_PATTERN = EMBED_PATTERN12
# determine what channels are available
channels = {}
for dir in embed_dirs:
if os.path.isdir(dir):
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
channel = match.group('channel')
if channel not in channels:
channels[channel] = []
channels[channel].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
elif self.verbose:
log.warning("skipping file: %s" % dir)
for channel, channel_dirs in channels.items():
if year == 2011:
# group dirs by isolation
isols = {}
for dir in channel_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
isol = match.group('isol')
if isol not in isols:
isols[isol] = []
isols[isol].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for isol, isol_dirs in isols.items():
# group dirs by mfs
mfss = {}
for dir in isol_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
mfs = match.group('mfs')
if mfs not in mfss:
mfss[mfs] = []
mfss[mfs].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for mfs, mfs_dirs in mfss.items():
name = 'embed%d-%s-%s-%s' % (
year % 1000, channel, isol, mfs)
self[name] = Dataset(
name,
datatype=EMBED,
treename=embed_treename,
ds=name,
id=1,
grl=data_grl,
dirs=mfs_dirs,
file_pattern=embed_pattern,
year=year)
periods = {}
for dir in mfs_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
period = match.group('period')
tag = match.group('tag')
if period not in periods:
periods[period] = {'tag': tag, 'dirs': [dir]}
else:
periods[period]['dirs'].append(dir)
if tag != periods[period]['tag']:
log.warning(
'multiple copies of run with '
'different tags: %s' %
periods[period]['dirs'])
elif self.verbose:
log.warning(
"not a valid embeding dataset name: %s"
% basename)
for period, info in periods.items():
period_name = '%s-%s' % (name, period)
self[period_name] = Dataset(
name=period_name,
datatype=EMBED,
treename=embed_treename,
ds=period_name,
id=1,
grl=data_grl,
dirs=info['dirs'],
file_pattern=embed_pattern,
year=year)
else:
# group dirs by mfs
mfss = {}
for dir in channel_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
mfs = match.group('mfs')
if mfs not in mfss:
mfss[mfs] = []
mfss[mfs].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for mfs, mfs_dirs in mfss.items():
name = 'embed%d-%s-%s' % (
year % 1000, channel, mfs)
self[name] = Dataset(
name,
datatype=EMBED,
treename=embed_treename,
ds=name,
id=1,
grl=data_grl,
dirs=mfs_dirs,
file_pattern=embed_pattern,
year=year)
periods = {}
for dir in mfs_dirs:
dirname, basename = os.path.split(dir)
match = re.match(EMBED_PATTERN, basename)
if match:
period = match.group('period')
tag = match.group('tag')
if period not in periods:
periods[period] = {'tag': tag, 'dirs': [dir]}
else:
periods[period]['dirs'].append(dir)
if tag != periods[period]['tag']:
log.warning(
'multiple copies of run with '
'different tags: %s' %
periods[period]['dirs'])
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for period, info in periods.items():
period_name = '%s-%s' % (name, period)
self[period_name] = Dataset(
name=period_name,
datatype=EMBED,
treename=embed_treename,
ds=period_name,
id=1,
grl=data_grl,
dirs=info['dirs'],
file_pattern=embed_pattern,
year=year)
elif embed_sampletype == 'lhCN':
year = 2012
channels = {}
for dir in embed_dirs:
if os.path.isdir(dir):
dirname, basename = os.path.split(dir)
match = re.match(CN_EMBED_PATTERN12, basename)
if match:
channel = match.group('channel')
if channel not in channels:
channels[channel] = []
channels[channel].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
elif self.verbose:
log.warning("skipping file: %s" % dir)
for channel, channel_dirs in channels.items():
# group dirs by mfs
mfss = {}
for dir in channel_dirs:
dirname, basename = os.path.split(dir)
match = re.match(CN_EMBED_PATTERN12, basename)
if match:
mfs = match.group('mfs')
if mfs not in mfss:
mfss[mfs] = []
mfss[mfs].append(dir)
elif self.verbose:
log.warning(
"not a valid embedding dataset name: %s"
% basename)
for mfs, mfs_dirs in mfss.items():
name = 'embed%d-%s-%s' % (
year % 1000, channel, mfs)
self[name] = Dataset(
name,
datatype=EMBED,
treename=embed_treename,
ds=name,
id=1,
grl=data_grl,
dirs=mfs_dirs,
file_pattern=embed_pattern,
year=year)
# MC EMBEDDING
variations = {}
for dir in embed_dirs:
dirname, basename = os.path.split(dir)
match = re.match(MC_EMBED_PATTERN, basename)
if not match:
continue
syst = match.group('sys') or ''
variations.setdefault(syst, []).append(dir)
for variation, dirs in variations.items():
name = 'mcembed12-hh%s' % variation
self[name] = Dataset(
name,
datatype=MCEMBED,
treename=embed_treename,
ds=name,
id=1,
dirs=dirs,
file_pattern=embed_pattern,
year=2012)
##############################
# DATA
##############################
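# Data ntuples are grouped by stream: one DATA Dataset per stream, and for the
# standard sampletype additional Datasets per run or per period (period
# containers, or pyAMI period lookups when available).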
if data_path is not None:
if deep:
data_dirs = get_all_dirs_under(data_path, prefix=data_prefix)
else:
if data_prefix:
data_dirs = glob.glob(
os.path.join(data_path, data_prefix) + '*')
else:
data_dirs = glob.glob(
os.path.join(data_path, '*'))
if data_sampletype == 'standard':
# classify dir by stream
streams = {}
for dir in data_dirs:
dirname, basename = os.path.split(dir)
match = re.match(DS_PATTERN, basename)
if match:
# pass embed
if re.match(EMBED_PATTERN12_NEW, basename) or \
re.match(EMBED_PATTERN12, basename) or \
re.match(EMBED_PATTERN11, basename):
continue
if int(match.group('year')) != (year % 1E3):
continue
if match.group('type') != 'data':
continue
stream = match.group('name').split('_')[-1]
if stream not in streams:
streams[stream] = []
streams[stream].append(dir)
elif self.verbose:
log.warning(
"not a valid data dataset name: %s" % basename)
for stream, dirs in streams.items():
name = 'data%d-%s' % (year % 1000, stream)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=-1,
# The GRL is the same for both lephad and hadhad analyses
grl=data_grl,
dirs=dirs,
stream=stream,
file_pattern=data_pattern,
year=year)
if data_period_containers:
# in each stream create a separate dataset for each run
periods = {}
for dir in dirs:
dirname, basename = os.path.split(dir)
match = re.match(DS_PATTERN, basename)
if match:
period = match.group('id')
if not period.startswith('period'):
continue
tag = match.group('tag')
if period not in periods:
periods[period] = {
'tag': tag,
'dirs': [dir],
'ds': Database.match_to_ds(match)}
else:
periods[period]['dirs'].append(dir)
if tag != periods[period]['tag']:
log.warning(
'multiple copies of period with different '
'tags: \n%s' %
('\n'.join(periods[period]['dirs'])))
elif self.verbose:
log.warning(
"not a valid data dataset name: %s" % basename)
# need to use the actual ds name for ds for validation
for period, info in periods.items():
name = 'data%d-%s-%s' % (year % 1000, stream, period[-1])
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=period,
grl=data_grl,
dirs=info['dirs'],
stream=stream,
file_pattern=data_pattern,
year=year)
else:
# in each stream create a separate dataset for each run
runs = {}
for dir in dirs:
dirname, basename = os.path.split(dir)
match = re.match(DS_PATTERN, basename)
if match:
run = int(match.group('id'))
tag = match.group('tag')
if run not in runs:
runs[run] = {
'tag': tag,
'dirs': [dir],
'ds': Database.match_to_ds(match)}
else:
runs[run]['dirs'].append(dir)
if tag != runs[run]['tag']:
log.warning(
'multiple copies of run with different '
'tags: %s' % runs[run]['dirs'])
elif self.verbose:
log.warning(
"not a valid data dataset name: %s" % basename)
# need to use the actual ds name for ds for validation
for run, info in runs.items():
name = 'data%d-%s-%d' % (year % 1000, stream, run)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=run,
grl=data_grl,
dirs=info['dirs'],
stream=stream,
file_pattern=data_pattern,
year=year)
if USE_PYAMI:
# in each stream create a separate dataset for each period
run_periods = get_periods(amiclient, year=year, level=2)
# ignore subset periods like Ba in 2012
run_periods = [
p.name for p in run_periods if len(p.name) == 1]
period_runs = {}
for period in run_periods:
if period == 'VdM':
continue
_runs = get_runs(amiclient, periods=period, year=year)
for run in _runs:
period_runs[run] = period
periods = {}
for run, info in runs.items():
if run in period_runs:
_period = period_runs[run]
else:
# ignore spurious runs
continue
if _period in periods:
periods[_period] += info['dirs']
else:
periods[_period] = info['dirs'][:]
for period, dirs in periods.items():
name = 'data%d-%s-%s' % (year % 1000, stream, period)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=-1,
grl=data_grl,
dirs=dirs,
stream=stream,
file_pattern=data_pattern,
year=year)
elif data_sampletype == 'lhCN':
year = 2012
streams = {}
for dir in data_dirs:
match = re.match(CN_DATA_PATTERN12, dir)
if match:
stream = match.group('name')
if stream not in streams:
streams[stream] = []
streams[stream].append(dir)
elif self.verbose:
log.warning("not a valid data dataset name: %s" % dir)
for stream, dirs in streams.items():
name = 'data%d-%s' % (year % 1000, stream)
log.info('\'%s\',' % name)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=name,
id=-1,
# The GRL is the same for both lephad and hadhad analyses
grl=data_grl,
dirs=dirs,
stream=stream,
file_pattern=data_pattern,
year=year)
# in each stream create a separate dataset for each period
periods = {}
for dir in dirs:
match = re.match(CN_DATA_PATTERN12, dir)
if match:
period = match.group('id')
tag = match.group('tag')
if period not in periods:
periods[period] = {
'tag': tag,
'dirs': [dir],
'ds': -1}
else:
periods[period]['dirs'].append(dir)
if tag != periods[period]['tag']:
log.warning(
'multiple copies of period with different '
'tags: %s' % periods[period]['dirs'])
elif self.verbose:
log.warning(
"not a valid data dataset name: %s" % dir)
# need to use the actual ds name for ds for validation
for period, info in periods.items():
name = 'data%d-%s-%s' % (year % 1000, stream, period)
log.info('\'%s\',' % name)
self[name] = Dataset(
name=name,
datatype=DATA,
treename=data_treename,
ds=info['ds'],
id=period,
grl=data_grl,
dirs=info['dirs'],
stream=stream,
file_pattern=data_pattern,
year=year)
def __setitem__(self, name, ds):
if self.verbose:
print >> self.stream, str(ds)
super(Database, self).__setitem__(name, ds)
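# search() accepts one pattern or a list of patterns; each is tried first as an
# fnmatch-style glob and then as an anchored regular expression against the
# dataset names.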
def search(self, pattern):
data = []
patterns = pattern
if not isinstance(pattern, (list, tuple)):
patterns = [pattern]
for name, ds in self.items():
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
data.append(ds)
continue
if not pattern.startswith('^'):
pattern = '^' + pattern
if not pattern.endswith('$'):
pattern = pattern + '$'
if re.match(pattern, name):
data.append(ds)
continue
return data
class Dataset(Serializable):
yaml_tag = u'!Dataset'
def __init__(self, name, datatype, treename, ds, dirs,
file_pattern='*.root*',
id=None,
category=None,
version=None,
tag_pattern=None,
tag=None,
grl=None,
year=None,
stream=None):
self.name = name
self.datatype = datatype
self.treename = treename
self.id = id
self.ds = ds
self.category = category
self.version = version
self.tag_pattern = tag_pattern
self.tag = tag
self.dirs = dirs
self.file_pattern = file_pattern
self.grl = grl
self.year = year
self.stream = stream
def __repr__(self):
return ("%s(name=%r, datatype=%r, treename=%r, "
"id=%r, ds=%r, category=%r, version=%r, "
"tag_pattern=%r, tag=%r, dirs=%r, "
"file_pattern=%r, grl=%r, year=%r, stream=%r)") % (
self.__class__.__name__,
self.name, self.datatype, self.treename,
self.id, self.ds, self.category, self.version,
self.tag_pattern, self.tag, self.dirs,
self.file_pattern, self.grl, self.year, self.stream)
@cached_property
def xsec_kfact_effic(self):
global XSEC_CACHE_MODIFIED
year = self.year % 1E3
if self.datatype == DATA:
return 1., 1., 1.
if year in XSEC_CACHE and self.name in XSEC_CACHE[year]:
log.warning("using cached cross section for dataset %s" % self.ds)
return XSEC_CACHE[year][self.name]
try:
return xsec.xsec_kfact_effic(self.year, self.id)
except KeyError:
log.warning("cross section of dataset %s not available locally."
"Looking it up in AMI instead. AMI cross sections can be very"
"wrong! You have been warned!"
% self.ds)
if USE_PYAMI:
if self.ds in DS_NOPROV:
xs, effic = get_dataset_xsec_effic(amiclient, DS_NOPROV[self.ds])
else:
xs, effic = get_dataset_xsec_effic(amiclient, self.ds)
if year not in XSEC_CACHE:
XSEC_CACHE[year] = {}
XSEC_CACHE[year][self.name] = (xs, 1., effic)
XSEC_CACHE_MODIFIED = True
return xs, 1., effic
raise Exception("cross section of dataset %s is not known!" % self.ds)
@cached_property
def files(self):
if not self.dirs:
log.warning(
"files requested from dataset %s "
"with an empty list of directories" % self.name)
_files = []
for dir in self.dirs:
if not os.path.exists(dir):
raise IOError("%s is not readable" % dir)
for path, dirs, files in os.walk(dir):
_files += [os.path.join(path, f) for f in
fnmatch.filter(files, self.file_pattern)]
return _files
def __str__(self):
return "%s (%d files):\n\t%s" % (
self.name,
len(self.files),
self.ds)
def dataset_constructor(loader, node):
kwargs = loader.construct_mapping(node)
try:
return Dataset(**kwargs)
except:
fields = '\n'.join('%s = %s' % item for item in kwargs.items())
log.error("unable to load dataset %s with these fields:\n\n%s\n" %
(kwargs['name'], fields))
raise
yaml.add_constructor(u'!Dataset', dataset_constructor)
if os.path.isfile(XSEC_CACHE_FILE):
with open(XSEC_CACHE_FILE) as cache:
log.info("Loading cross section cache in %s ..." % XSEC_CACHE_FILE)
XSEC_CACHE = pickle.load(cache)
@atexit.register
def write_cache():
if XSEC_CACHE_MODIFIED:
with open(XSEC_CACHE_FILE, 'w') as cache:
log.info("Saving cross-section cache to disk...")
pickle.dump(XSEC_CACHE, cache)
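# Validate a single dataset: count the events in the local ROOT files and
# compare against the NTUP and AOD event counts reported by AMI.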
def validate_single(args, child=True):
if child:
from cStringIO import StringIO
sys.stdout = out = StringIO()
sys.stderr = out
name = args[0]
info = args[1]
complete = True
try:
dirs = info.dirs
root_files = []
for dir in dirs:
root_files += glob.glob(os.path.join(dir, info.file_pattern))
events = 0
for fname in root_files:
try:
with root_open(fname) as rfile:
try: # skimmed dataset
events += int(rfile.cutflow_event[0])
except DoesNotExist: # unskimmed dataset
tree = rfile.tau
events += tree.GetEntries()
except IOError:
log.warning("Currupt file: %s" % fname)
pass
# determine events in original ntuples
# use first dir
ds_name = info.ds
log.info('NTUP: ' + ds_name)
ds_info = get_dataset_info(amiclient, ds_name)
ntuple_events = int(ds_info.info['totalEvents'])
try:
# determine events in AODs
prov = get_provenance(amiclient, ds_name, type='AOD')
AOD_ds = prov.values()[0][0].replace('recon', 'merge')
log.info('AOD: ' + AOD_ds)
AOD_events = int(get_datasets(amiclient, AOD_ds, fields='events',
flatten=True)[0][0])
except IndexError:
log.info('AOD: UNKNOWN')
AOD_events = ntuple_events
log.info(name)
log.info("\tevts\tNTUP\tAOD")
log.info("\t%i\t%i\t%i" % (events, ntuple_events, AOD_events))
if events != ntuple_events:
log.warning("NTUP MISMATCH")
if events != AOD_events:
log.warning("AOD MISMATCH")
if events != ntuple_events and (events != AOD_events or AOD_events == 0):
log.warning("MISSING EVENTS")
complete = False
if child:
return out.getvalue(), complete
return complete
except Exception, e:
import traceback
log.warning("dataset %s exception" % name)
traceback.print_exception(*sys.exc_info())
if child:
return out.getvalue(), False
return False
def get_all_dirs_under(path, prefix=None):
"""
Get list of all directories under path
"""
dirs = []
for dirpath, dirnames, filenames in os.walk(path):
_dirnames = []
for dirname in dirnames:
fullpath = os.path.join(dirpath, dirname)
# check if this dir contains other dirs
subdirs_exist = False
subdirs = os.listdir(fullpath)
for subdir in subdirs:
if os.path.isdir(os.path.join(fullpath, subdir)):
subdirs_exist = True
break
if subdirs_exist:
_dirnames.append(dirname)
else:
# this must be a dataset, don't walk into this dir
if prefix is not None:
if not dirname.startswith(prefix):
continue
dirs.append(fullpath)
# only recurse on directories containing subdirectories
dirnames = _dirnames
return dirs
|
gpl-3.0
| -2,071,067,147,111,899,000
| 39.565217
| 92
| 0.391756
| false
| 4.835955
| false
| false
| false
|
sahat/bokeh
|
bokeh/plot_object.py
|
1
|
14108
|
from __future__ import absolute_import, print_function
import os.path
from uuid import uuid4
from functools import wraps
import warnings
import logging
logger = logging.getLogger(__file__)
from six import add_metaclass, iteritems
from six.moves.urllib.parse import urlsplit
from .embed import autoload_static, autoload_server
from .properties import HasProps, MetaHasProps, Instance
from .protocol import serialize_json
from .utils import get_ref, convert_references, dump
class Viewable(MetaHasProps):
""" Any plot object (Data Model) which has its own View Model in the
persistence layer.
Adds handling of a __view_model__ attribute to the class (which is
provided by default) which tells the View layer what View class to
create.
One thing to keep in mind is that a Viewable should have a single
unique representation in the persistence layer, but it might have
multiple concurrent client-side Views looking at it. Those may
be from different machines altogether.
"""
# Stores a mapping from subclass __view_model__ names to classes
model_class_reverse_map = {}
# Mmmm.. metaclass inheritance. On the one hand, it seems a little
# overkill. On the other hand, this is exactly the sort of thing
# it's meant for.
def __new__(cls, class_name, bases, class_dict):
if "__view_model__" not in class_dict:
class_dict["__view_model__"] = class_name
class_dict["get_class"] = Viewable.get_class
# Create the new class
newcls = super(Viewable,cls).__new__(cls, class_name, bases, class_dict)
entry = class_dict["__view_model__"]
# Add it to the reverse map, but check for duplicates first
if entry in Viewable.model_class_reverse_map:
raise Warning("Duplicate __view_model__ declaration of '%s' for " \
"class %s. Previous definition: %s" % \
(entry, class_name,
Viewable.model_class_reverse_map[entry]))
Viewable.model_class_reverse_map[entry] = newcls
return newcls
@classmethod
def _preload_models(cls):
from . import objects, widgetobjects
@classmethod
def get_class(cls, view_model_name):
""" Given a __view_model__ name, returns the corresponding class
object
"""
cls._preload_models()
d = Viewable.model_class_reverse_map
if view_model_name in d:
return d[view_model_name]
else:
raise KeyError("View model name '%s' not found" % view_model_name)
def usesession(meth):
""" Checks for 'session' in kwargs and in **self**, and guarantees
that **kw** always has a valid 'session' parameter. Wrapped methods
should define 'session' as an optional argument, and in the body of
the method, should expect a 'session' keyword argument to be present.
"""
@wraps(meth)
def wrapper(self, *args, **kw):
session = kw.get("session", None)
if session is None:
session = getattr(self, "session")
if session is None:
raise RuntimeError("Call to %s needs a session" % meth.__name__)
kw["session"] = session
return meth(self, *args, **kw)
return wrapper
def is_ref(frag):
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
def json_apply(fragment, check_func, func):
"""recursively searches through a nested dict/lists
if check_func(fragment) is True, then we return
func(fragment)
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
return fragment
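# Illustrative example (not part of the original source): calling
# json_apply({'a': [1, {'type': 'X', 'id': '1'}]}, is_ref, resolve)
# replaces only the reference fragment and leaves every other value untouched.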
def resolve_json(fragment, models):
check_func = is_ref
def func(fragment):
if fragment['id'] in models:
return models[fragment['id']]
else:
logging.error("model not found for %s", fragment)
return None
return json_apply(fragment, check_func, func)
@add_metaclass(Viewable)
class PlotObject(HasProps):
""" Base class for all plot-related objects """
session = Instance(".session.Session")
def __init__(self, **kwargs):
# Eventually should use our own memo instead of storing
# an attribute on the class
if "id" in kwargs:
self._id = kwargs.pop("id")
else:
self._id = str(uuid4())
self._dirty = True
self._callbacks_dirty = False
self._callbacks = {}
self._callback_queue = []
self._block_callbacks = False
block_events = kwargs.pop('_block_events', False)
if not block_events:
super(PlotObject, self).__init__(**kwargs)
self.setup_events()
else:
self._block_callbacks = True
super(PlotObject, self).__init__(**kwargs)
def get_ref(self):
return {
'type': self.__view_model__,
'id': self._id,
}
def setup_events(self):
pass
@classmethod
def load_json(cls, attrs, instance=None):
"""Loads all json into a instance of cls, EXCEPT any references
which are handled in finalize
"""
if 'id' not in attrs:
raise RuntimeError("Unable to find 'id' attribute in JSON: %r" % attrs)
_id = attrs.pop('id')
if not instance:
instance = cls(id=_id, _block_events=True)
_doc = attrs.pop("doc", None)
ref_props = {}
for p in instance.properties_with_refs():
if p in attrs:
ref_props[p] = attrs.pop(p)
special_props = {}
for p in dict(attrs):
if p not in instance.properties():
special_props[p] = attrs.pop(p)
instance._ref_props = ref_props
instance._special_props = special_props
instance.update(**attrs)
return instance
def finalize(self, models):
"""Convert any references into instances
models is a dict of id->model mappings
"""
if hasattr(self, "_ref_props"):
return resolve_json(self._ref_props, models)
else:
return {}
@classmethod
def collect_plot_objects(cls, *input_objs):
""" Iterate over ``input_objs`` and descend through their structure
collecting all nested ``PlotObjects`` on the go. The resulting list
is duplicate-free based on objects' identifiers.
"""
ids = set([])
objs = []
def descend_props(obj):
for attr in obj.properties_with_refs():
descend(getattr(obj, attr))
def descend(obj):
if isinstance(obj, PlotObject):
if obj._id not in ids:
ids.add(obj._id)
descend_props(obj)
objs.append(obj)
elif isinstance(obj, HasProps):
descend_props(obj)
elif isinstance(obj, (list, tuple)):
for item in obj:
descend(item)
elif isinstance(obj, dict):
for key, value in iteritems(obj):
descend(key); descend(value)
descend(input_objs)
return objs
def references(self):
"""Returns all ``PlotObjects`` that this object has references to. """
return set(self.collect_plot_objects(self))
#---------------------------------------------------------------------
# View Model connection methods
#
# Whereas a rich client rendering framework can maintain view state
# alongside model state, we need an explicit send/receive protocol for
# communicating with a set of view models that reside on the front end.
# Many of the calls one would expect in a rich client map instead to
# batched updates on the M-VM-V approach.
#---------------------------------------------------------------------
def vm_props(self):
""" Returns the ViewModel-related properties of this object. """
props = self.changed_properties_with_values()
props.pop("session", None)
return props
def vm_serialize(self):
""" Returns a dictionary of the attributes of this object, in
a layout corresponding to what BokehJS expects at unmarshalling time.
"""
attrs = self.vm_props()
attrs['id'] = self._id
return attrs
def dump(self, docid=None):
"""convert all references to json
"""
models = self.references()
return dump(models, docid=docid)
def update(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return "%s, ViewModel:%s, ref _id: %s" % (self.__class__.__name__,
self.__view_model__, getattr(self, "_id", None))
def on_change(self, attrname, obj, callbackname=None):
"""when attrname of self changes, call callbackname
on obj
"""
callbacks = self._callbacks.setdefault(attrname, [])
callback = dict(obj=obj, callbackname=callbackname)
if callback not in callbacks:
callbacks.append(callback)
self._callbacks_dirty = True
def _trigger(self, attrname, old, new):
"""attrname of self changed. So call all callbacks
"""
callbacks = self._callbacks.get(attrname)
if callbacks:
for callback in callbacks:
obj = callback.get('obj')
callbackname = callback.get('callbackname')
fn = obj if callbackname is None else getattr(obj, callbackname)
fn(self, attrname, old, new)
# TODO: deprecation warning about args change (static_path)
def create_html_snippet(
self, server=False, embed_base_url="", embed_save_loc=".",
static_path="http://localhost:5006/bokehjs/static"):
"""create_html_snippet is used to embed a plot in an html page.
create_html_snippet returns the embed string to be put in html.
This will be a <script> tag.
To embed a plot dependent on the Bokeh Plot Server, set server=True,
otherwise a file with the data for the plot will be built.
embed_base_url is used for non-server embedding. This is used
as the root of the url where the embed.js file will be saved.
embed_save_loc controls where the embed.js will be actually written to.
static_path controls where the embed snippet looks to find
bokeh.js and the other resources it needs for bokeh.
"""
if server:
from .session import Session
if embed_base_url:
session = Session(root_url=embed_base_url)
else:
session = Session()
return autoload_server(self, session)
from .templates import AUTOLOAD, AUTOLOAD_STATIC
import uuid
js_filename = "%s.embed.js" % self._id
script_path = embed_base_url + js_filename
elementid = str(uuid.uuid4())
js = AUTOLOAD.render(
all_models = serialize_json(self.dump()),
js_url = static_path + "js/bokeh.min.js",
css_files = [static_path + "css/bokeh.min.css"],
elementid = elementid,
)
tag = AUTOLOAD_STATIC.render(
src_path = script_path,
elementid = elementid,
modelid = self._id,
modeltype = self.__view_model__,
)
save_path = os.path.join(embed_save_loc, js_filename)
with open(save_path,"w") as f:
f.write(js)
return tag
def inject_snippet(
self, server=False, embed_base_url="", embed_save_loc=".",
static_path="http://localhost:5006/bokeh/static/"):
warnings.warn("inject_snippet is deprecated, please use create_html_snippet")
return self.create_html_snippet(
server, embed_base_url, embed_save_loc, static_path)
def _build_server_snippet(self, base_url=False):
sess = self._session
modelid = self._id
typename = self.__view_model__
if not base_url:
base_url = sess.root_url
split = urlsplit(base_url)
if split.scheme == 'http':
ws_conn_string = "ws://%s/bokeh/sub" % split.netloc
else:
ws_conn_string = "wss://%s/bokeh/sub" % split.netloc
f_dict = dict(
docid = sess.docid,
ws_conn_string = ws_conn_string,
docapikey = sess.apikey,
root_url = base_url,
modelid = modelid,
modeltype = typename,
script_url = base_url + "bokeh/embed.js")
e_str = '''<script src="%(script_url)s" bokeh_plottype="serverconn"
bokeh_docid="%(docid)s" bokeh_ws_conn_string="%(ws_conn_string)s"
bokeh_docapikey="%(docapikey)s" bokeh_root_url="%(root_url)s"
bokeh_modelid="%(modelid)s" bokeh_modeltype="%(modeltype)s" async="true"></script>
'''
return "", e_str % f_dict
def _build_static_embed_snippet(self, static_path, embed_base_url):
embed_filename = "%s.embed.js" % self._id
full_embed_path = embed_base_url + embed_filename
js_str = self._session.embed_js(self._id, static_path)
sess = self._session
modelid = self._id
typename = self.__view_model__
embed_filename = full_embed_path
f_dict = dict(modelid = modelid, modeltype = typename,
embed_filename=embed_filename)
e_str = '''<script src="%(embed_filename)s" bokeh_plottype="embeddata"
bokeh_modelid="%(modelid)s" bokeh_modeltype="%(modeltype)s" async="true"></script>
'''
return js_str, e_str % f_dict
|
bsd-3-clause
| 4,577,947,309,496,214,000
| 34.007444
| 90
| 0.581372
| false
| 4.148192
| false
| false
| false
|
apierleoni/MyBioDb
|
modules/search_engine.py
|
1
|
20410
|
__author__ = 'pierleonia'
DEBUG=True
import os, traceback
from multiprocessing import Pool
class BioentrySearchEngineBackend(object):
def rebuild(self, bioentry_ids=[], **kwargs):
raise NotImplementedError()
def indexes(self, **kwargs):
raise NotImplementedError()
def after_insert(self, **kwargs):
raise NotImplementedError()
def after_update(self, **kwargs):
raise NotImplementedError()
def get_ids(self, **kwargs):
raise NotImplementedError()
def after_delete(self, **kwargs):
raise NotImplementedError()
def search(self, query, **kwargs):
raise NotImplementedError()
def quick_search(self, query):
raise NotImplementedError()
def create_loading_Pool(self):
self.pool = Pool(processes=getCPUs())
def add_bioentry_id_to_index(self, bioentry_id, counter = 1):
raise NotImplementedError()
def map_to_index(self,handler, bioentry_id):
def add_element(element, container):
if isinstance(element,str):
container.append(unicode(element))
elif isinstance(element,list):
for i in element:
if isinstance(i, list):
container.append(unicode(i[0]))
elif isinstance(i, str):
container.append(unicode(i))
return container
seqrecord = handler._retrieve_seqrecord(bioentry_id)
annotation_types, annotation_values = [],[]
feature_types, feature_values = [],[]
comments = []
accessions = []
keywords = []
pubids, pubauths, pubtitles, pubjournals= [],[],[],[]
taxonomy = [] #TODO: add taxonomy
for k,v in seqrecord.annotations.items():
if k == 'accessions':
accessions = add_element(v,accessions)
elif k.startswith('comment'):
comments = add_element(v,comments)
elif k == 'keywords':
keywords = add_element(v,keywords)
elif k=='references':
if isinstance(v,list):
for ref in v:
pubids.append(ref.pubmed_id)
pubtitles.append(ref.title)
pubauths.append(ref.authors.strip())
pubjournals.append(ref.journal)
else:
annotation_values = add_element(v,annotation_values)
annotation_types = add_element(k,annotation_types)
for feature in seqrecord.features:
feature_types.append(feature.type)
for k,v in feature.qualifiers.items():
feature_values = add_element(v,feature_values)
kwargs = dict(id = unicode(bioentry_id),
db = unicode(handler.adaptor.biodatabase[handler.adaptor.bioentry[bioentry_id].biodatabase_id].name),
name=unicode(seqrecord.name),
accession=accessions,
identifier=unicode(seqrecord.id),
description=unicode(seqrecord.description),
keyword=keywords,
annotation=annotation_values,
annotationtype=annotation_types,
comment=comments,
feature=feature_values,
featuretype=feature_types,
lenght=unicode(len(seqrecord)),
dbxref=seqrecord.dbxrefs,
pubid=pubids,
pubtitle=pubtitles,
pubauth=pubauths,
pubjournal=pubjournals)
return kwargs
class SearchEngineResult(object):
def __init__(self, ids, handler, data = {}):
self.biodb = handler.adaptor
self.db_query = self.biodb.bioentry._id.belongs(ids) # to be used in DAL queries
self.selected_ids = ids
self.count = len(ids)
self.select_sql = self.biodb(self.biodb.bioentry.id.belongs(ids))._select() #raw sql to retrieve data from the bioentry table
self.data = data
def getCPUs():
import multiprocessing
try:
return multiprocessing.cpu_count()
except:
return 1
def picklable_call(instance, name, args=(), kwargs=None):
"indirect caller for instance methods and multiprocessing"
if kwargs is None:
kwargs = {}
return getattr(instance, name)(*args, **kwargs)
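# picklable_call lets multiprocessing workers invoke a bound method by name,
# since bound methods themselves cannot be pickled on Python 2.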
class WhooshBackend(BioentrySearchEngineBackend):
def __init__(self, handler, indexdir):
self.handler = handler
self.biodb = handler.adaptor
self.indexdir = indexdir
if not os.path.exists(indexdir):
os.mkdir(indexdir)
def indexes(self):
try:
from whoosh.index import create_in,open_dir
except ImportError:
raise ImportError("Cannot find Whoosh")
self.indexname =".".join([self.biodb._uri_hash, 'whoosh'])
try:
self.ix = open_dir(self.indexdir, indexname=self.indexname)
except:
self.ix = create_in(self.indexdir, self._get_schema(), indexname=self.indexname)
def rebuild(self, **kwargs):
cpus = getCPUs()
writer = self.ix.writer(procs=cpus, multisegment=True)
bioentries = kwargs.get('bioentry_ids', [])
if DEBUG: print "starting global index rebuilding with %i CPUs"%cpus
if not bioentries:
bioentries = [row.id for row in self.biodb(self.biodb.bioentry.id >0
).select(self.biodb.bioentry.id)]
if DEBUG: print "starting indexing of %i bioentries"%len(bioentries)
#iterate over all bioentries at 1000 max a time
i, m = 0, 1000
while True:
start = i*m
end = (i+1)*m
if DEBUG:
print "searching for round ",start,end
#print "searching query: " + self.biodb(self.biodb.bioentry.id.belongs(bioentries[start:end]))._select()
rows = self.biodb(self.biodb.bioentry.id.belongs(bioentries[start:end])).select(self.biodb.bioentry.id)
#if DEBUG: print "round size found: ",len(rows)
for row in rows:
try:
#if DEBUG: print "Indexing bioentry %s - %i"%(row.name, i+1)
writer.update_document(**self.map_to_index(self.handler,row.id))
#if DEBUG:
# print "Indexed bioentry %s - %i"%(row.name, start)
except:
if DEBUG:
print "error building index for id: ",row.id
traceback.print_exc()
if len(rows)<m: break
i+=1
writer.commit()
def search(self, query, **kwargs):
from whoosh.qparser import QueryParser,MultifieldParser
fieldnames = kwargs.pop('fieldnames', self.ix.schema.names())
qp = MultifieldParser( fieldnames, schema=self.ix.schema)
q = qp.parse(query)
with self.ix.searcher() as s:
results = s.search(q, **kwargs)
if DEBUG: print "found %i hits in %.2fms"%(len(results.top_n),results.runtime*1000)
ids = list(set(long(result['id']) for result in results))
result = SearchEngineResult(ids, self.handler)
return result
#return ids
def _get_schema(self):
from whoosh.fields import Schema, TEXT, ID, KEYWORD, NUMERIC
from whoosh.analysis import StemmingAnalyzer
return Schema(id=ID(unique=True,stored=True),
db=ID(stored=True),
name=ID(stored=True),
accession=KEYWORD(scorable=True),
identifier=ID(stored=True),
description=TEXT(stored=True),
taxonomy=KEYWORD(lowercase=True,
commas=True,
scorable=True),
keyword=KEYWORD(lowercase=True,
commas=True,
scorable=True),
annotation=TEXT(analyzer=StemmingAnalyzer()),
annotationtype=KEYWORD(lowercase=True,
scorable=True),
comment=TEXT(analyzer=StemmingAnalyzer()),
feature=TEXT(analyzer=StemmingAnalyzer()),
featuretype=KEYWORD(lowercase=True,
commas=True,
scorable=True),
lenght=NUMERIC(),
dbxref=KEYWORD(scorable=True),
pubid=KEYWORD(scorable=True),
pubtitle=TEXT(analyzer=StemmingAnalyzer()),
pubauth=KEYWORD(lowercase=True,
commas=True,
scorable=True),
pubjournal=KEYWORD(lowercase=True,
commas=True,
scorable=True),
)
def map_to_index(self, handler, bioentry_id):
documents = super(WhooshBackend, self).map_to_index(handler, bioentry_id)
for k,v in documents.items():
if isinstance(v, list):
documents[k]=unicode(" ".join(v))
return documents
def quick_search(self, query, limit = 0):
if limit > 0:
return self.search(query, limit = limit,
scored=True,
fieldnames = ['accession',
'description',
'name'])
else:
return self.search(query,scored=True,
fieldnames = ['accession',
'description',
'name'])
class SolrBackend(BioentrySearchEngineBackend):
def __init__(self, handler, url="http://localhost:8983",schema=""):
self.handler = handler
self.biodb = handler.adaptor
self.url = url
if not schema:
schema = self._get_default_schema()
self.schemadoc = schema
# if DEBUG: print schema
def _get_default_schema(self):#TODO: update schema.xml to make strings not case sensitive
from gluon import request
return os.path.join(request.folder, 'databases', 'solr_schema.xml')
def indexes(self):
import sunburnt
# import pysolr
# if 1:
try:
self.interface = sunburnt.SolrInterface(self.url, self.schemadoc)
except:
raise RuntimeError("Cannot connect to Solr: %s" % self.url)
# self.interface = pysolr.Solr(self.url)
def rebuild(self, bioentry_ids=[], **kwargs):
bioentries = bioentry_ids or kwargs.get('bioentry_ids', [])
if DEBUG: print "starting global index rebuilding"
if not bioentries:
bioentries = [row.id for row in self.biodb(self.biodb.bioentry.id >0
).select(self.biodb.bioentry.id)]
if DEBUG: print "starting indexing of %i bioentries"%len(bioentries)
#iterate over all bioentries at 100 max a time
i, m = 0, 100
while True:
start = i*m
end = (i+1)*m
if DEBUG:
print "searching for round ",start,end
rows = self.biodb(self.biodb.bioentry.id.belongs(bioentries[start:end])).select(self.biodb.bioentry.id)
documents = []
for row in rows:
try:
documents.append(self.map_to_index(self.handler,row.id))
except:
if DEBUG:
print "error building index for id: ",row.id
traceback.print_exc()
self.interface.add(documents)
# self.interface.add_many(documents)
if len(rows)<m: break
i+=1
self.interface.commit()
def search(self, query, **kwargs):
# results = self.interface.query(**fieldkeys).paginate(0,limit)
# ids = [r['id'] for r in results]
# return ids
fieldnames = kwargs.pop('fieldnames', self.interface.schema.fields.keys())
search_all_fields = kwargs.pop('search_all_fields', False)
if search_all_fields:
fieldnames = self.interface.schema.fields.keys()
qd = dict()
fields = self.interface.schema.fields
for fname in fieldnames:
field = fields[fname]
if getattr(field, "class") == 'solr.StrField' :
qd[fname] = query
elif getattr(field, "class") == 'solr.TriIntField' :
try:
qd[fname] = int(query)
except:
pass
results = self.interface.query(**qd).field_limit("id") .execute()#TODO: modify to get the OR by default
if DEBUG: print "found %i hits in %.2fms"%(len(results),results.QTime)
ids = list(set(long(result['id']) for result in results))
result = SearchEngineResult(ids, self.handler)#TODO: return the stored data to avoid querying the db again if possible. use a Storage object and try to get the required fields, otherwise fallback to db query.
return result
def quick_search(self, query, limit = 0):
if limit > 0:
return self.search(query,rows=limit,
fieldnames = ['accession',
'description',
'name'])
else:
return self.search(query,fieldnames = ['accession',
'description',
'name'])
def map_to_index(self, handler, bioentry_id):
document = super(SolrBackend, self).map_to_index(handler, bioentry_id)
try:
document['lenght'] = int(document['lenght'])
except:
pass
return document
class ElasticSearchBackend(BioentrySearchEngineBackend):
def __init__(self, handler, nodes = [], index_name = 'mybiodb'):
self.handler = handler
self.biodb = handler.adaptor
self.nodes = nodes
self.index_name = index_name
def rebuild(self, bioentry_ids=[], **kwargs):
# self.create_loading_Pool()
bioentries = bioentry_ids or kwargs.get('bioentry_ids', [])
if DEBUG: print "starting global index rebuilding"
if not bioentries:
bioentries = [row.id for row in self.biodb(self.biodb.bioentry.id >0
).select(self.biodb.bioentry.id)]
if DEBUG: print "starting indexing of %i bioentries"%len(bioentries)
# index the bioentries one at a time (the multiprocessing pool below is left disabled)
# self.pool.apply_async(picklable_call, args = (self, 'add_bioentry_id_to_index', zip(bioentries, range(len(bioentries)))))
# self.pool.close()
# self.pool.join()
for i,bioentry_id in enumerate(bioentries):
self.add_bioentry_id_to_index(bioentry_id, i)
def add_bioentry_id_to_index(self, bioentry_id, counter = 1):
if counter%100 ==0 and DEBUG:
print "\tadded %i bioentries to index"%counter
try:
self.interface.index(index=self.index_name,
doc_type="full_bioentry",
id=bioentry_id,
body=self.map_to_index(self.handler,bioentry_id)
)
except:
if DEBUG:
print "error building index for id: ",bioentry_id
traceback.print_exc()
def search(self, query, **kwargs):
if DEBUG:
from datetime import datetime
start_time = datetime.now()
fieldnames = kwargs.pop('fieldnames', "_all")
size = kwargs.pop('limit', 100)
from_ = kwargs.pop('from', 0)
# results = self.interface.search(index=self.index_name, body={"query": {"query_string": {
# "query": query},
# "term": {
# "fields": fieldnames},
#
# 'from': from_arg,
# 'size' : size
# }})
results = self.interface.search(index=self.index_name,
q= query,
from_ =from_,
size = size,
#fields = fieldnames,#TODO: should be the list of fields to return, check!
_source_include = ['id','name','accession','description'])
if DEBUG:
print "found %i hits in %ims"%(results['hits']['total'], results['took'])
ids = []
data = []
if results['hits']['total']:
for r in results['hits']['hits']:
ids.append(r['_id'])
data.append( dict( id = r['_source']['id'],
name = r['_source']['name'],
accession = r['_source']['accession'][0],
description = r['_source']['description']))
return SearchEngineResult(ids, self.handler, data)
def quick_search(self, query, limit = 0):
if limit > 0:
return self.search(query,
size = limit,
fieldnames = ['accession',
'description',
'name'],
**{'from':0 })
else:
return self.search(query,fieldnames = ['accession',
'description',
'name'])
def map_to_index(self, handler, bioentry_id):
return super(ElasticSearchBackend, self).map_to_index(handler, bioentry_id)
def indexes(self, **kwargs):
import elasticsearch
if self.nodes:
try:
self.interface = elasticsearch.Elasticsearch(self.nodes, **kwargs)
except:
raise RuntimeError("Cannot connect to ElasticSearch nodes: %s" % ", ".join(self.nodes))
else:
try:
self.interface = elasticsearch.Elasticsearch(**kwargs)
except:
raise RuntimeError("Cannot connect to ElasticSearch on localhost")
class BioentrySearchEngine(object):
def __init__(self,handler, backend=WhooshBackend,**kwargs):
self.handler = handler
self.backend = backend(handler, **kwargs)
def indexes(self):
'''init indexes '''
self.backend.indexes()
#self.table._after_insert.append(
# lambda fields,id: self.backend.after_insert(fields,id))
#self.table._after_update.append(
# lambda queryset,fields: self.backend.after_update(queryset,fields))
#self.table._after_delete.append(
# lambda queryset: self.backend.after_delete(queryset))
def rebuild(self, **kwargs):
self.backend.rebuild( **kwargs)
def search(self, query, **kwargs):
return self.backend.search(query, **kwargs)
def quick_search(self, query, limit = 0):
return self.backend.quick_search(query, limit)
def add_bioentry_id_to_index(self, bioentry_id):
return self.backend.add_bioentry_id_to_index(bioentry_id)
|
bsd-3-clause
| -3,849,043,284,926,460,400
| 38.941292
| 216
| 0.507937
| false
| 4.511494
| false
| false
| false
|
freedomtan/tensorflow
|
tensorflow/python/keras/preprocessing/timeseries.py
|
1
|
8536
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras timeseries dataset utilities."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.preprocessing.timeseries_dataset_from_array', v1=[])
def timeseries_dataset_from_array(
data,
targets,
sequence_length,
sequence_stride=1,
sampling_rate=1,
batch_size=128,
shuffle=False,
seed=None,
start_index=None,
end_index=None):
"""Creates a dataset of sliding windows over a timeseries provided as array.
This function takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
length of the sequences/windows, spacing between two sequence/windows, etc.,
to produce batches of timeseries inputs and targets.
Arguments:
data: Numpy array or eager tensor
containing consecutive data points (timesteps).
Axis 0 is expected to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
`targets[i]` should be the target
corresponding to the window that starts at index `i`
(see example 2 below).
Pass None if you don't have target data (in this case the dataset will
only yield the input data).
sequence_length: Length of the output sequences (in number of timesteps).
sequence_stride: Period between successive output sequences.
For stride `s`, output samples would
start at index `data[i]`, `data[i + s]`, `data[i + 2 * s]`, etc.
sampling_rate: Period between successive individual timesteps
within sequences. For rate `r`, timesteps
`data[i], data[i + r], ... data[i + sequence_length]`
are used to create a sample sequence.
batch_size: Number of timeseries samples in each batch
(except maybe the last one).
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
seed: Optional int; random seed for shuffling.
start_index: Optional int; data points earlier (exclusive)
than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Optional int; data points later (exclusive) than `end_index`
will not be used in the output sequences.
This is useful to reserve part of the data for test or validation.
Returns:
A tf.data.Dataset instance. If `targets` was passed, the dataset yields
tuple `(batch_of_sequences, batch_of_targets)`. If not, the dataset yields
only `batch_of_sequences`.
Example 1:
Consider indices `[0, 1, ... 99]`.
With `sequence_length=10, sampling_rate=2, sequence_stride=3`,
`shuffle=False`, the dataset will yield batches of sequences
composed of the following indices:
```
First sequence: [0 2 4 6 8 10 12 14 16 18]
Second sequence: [3 5 7 9 11 13 15 17 19 21]
Third sequence: [6 8 10 12 14 16 18 20 22 24]
...
Last sequence: [78 80 82 84 86 88 90 92 94 96]
```
In this case the last 3 data points are discarded since no full sequence
can be generated to include them (the next sequence would have started
at index 81, and thus its last step would have gone over 99).
Example 2: temporal regression. Consider an array `data` of scalar
values, of shape `(steps,)`. To generate a dataset that uses the past 10
timesteps to predict the next timestep, you would use:
```python
input_data = data
offset = 10
targets = data[offset:]
dataset = tf.keras.preprocessing.timeseries_dataset_from_array(
input_data, targets, sequence_length=offset)
for batch in dataset:
inputs, targets = batch
assert np.array_equal(inputs[0], data[:10]) # First sequence: steps [0-9]
assert np.array_equal(targets[0], data[10]) # Corresponding target: step 10
break
```
"""
if start_index and (start_index < 0 or start_index >= len(data)):
raise ValueError('start_index must be higher than 0 and lower than the '
'length of the data. Got: start_index=%s '
'for data of length %s.' % (start_index, len(data)))
if end_index:
if start_index and end_index <= start_index:
raise ValueError('end_index must be higher than start_index. Got: '
'start_index=%s, end_index=%s.' %
(start_index, end_index))
if end_index >= len(data):
raise ValueError('end_index must be lower than the length of the data. '
'Got: end_index=%s' % (end_index,))
if end_index <= 0:
raise ValueError('end_index must be higher than 0. '
'Got: end_index=%s' % (end_index,))
# Validate strides
if sampling_rate <= 0 or sampling_rate >= len(data):
raise ValueError(
'sampling_rate must be higher than 0 and lower than '
'the length of the data. Got: '
'sampling_rate=%s for data of length %s.' % (sampling_rate, len(data)))
if sequence_stride <= 0 or sequence_stride >= len(data):
raise ValueError(
'sequence_stride must be higher than 0 and lower than '
'the length of the data. Got: sequence_stride=%s '
'for data of length %s.' % (sequence_stride, len(data)))
if start_index is None:
start_index = 0
if end_index is None:
end_index = len(data)
# Determine the lowest dtype to store start positions (to lower memory usage).
num_seqs = end_index - start_index - (sequence_length * sampling_rate) + 1
if targets is not None:
num_seqs = min(num_seqs, len(targets))
if num_seqs < 2147483647:
index_dtype = 'int32'
else:
index_dtype = 'int64'
# Generate start positions
start_positions = np.arange(0, num_seqs, sequence_stride, dtype=index_dtype)
if shuffle:
if seed is None:
seed = np.random.randint(1e6)
rng = np.random.RandomState(seed)
rng.shuffle(start_positions)
sequence_length = math_ops.cast(sequence_length, dtype=index_dtype)
sampling_rate = math_ops.cast(sampling_rate, dtype=index_dtype)
positions_ds = dataset_ops.Dataset.from_tensors(start_positions).repeat()
# For each initial window position, generates indices of the window elements
indices = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(len(start_positions)), positions_ds)).map(
lambda i, positions: math_ops.range( # pylint: disable=g-long-lambda
positions[i],
positions[i] + sequence_length * sampling_rate,
sampling_rate),
num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = sequences_from_indices(data, indices, start_index, end_index)
if targets is not None:
indices = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(len(start_positions)), positions_ds)).map(
lambda i, positions: positions[i],
num_parallel_calls=dataset_ops.AUTOTUNE)
target_ds = sequences_from_indices(
targets, indices, start_index, end_index)
dataset = dataset_ops.Dataset.zip((dataset, target_ds))
if shuffle:
# Shuffle locally at each iteration
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
dataset = dataset.batch(batch_size)
return dataset
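# Helper shared by the input and target pipelines above: slices the source
# array to [start_index, end_index) and gathers the window elements at the
# generated indices.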
def sequences_from_indices(array, indices_ds, start_index, end_index):
dataset = dataset_ops.Dataset.from_tensors(array[start_index : end_index])
dataset = dataset_ops.Dataset.zip((dataset.repeat(), indices_ds)).map(
lambda steps, inds: array_ops.gather(steps, inds), # pylint: disable=unnecessary-lambda
num_parallel_calls=dataset_ops.AUTOTUNE)
return dataset
|
apache-2.0
| 8,371,571,566,166,718,000
| 40.843137
| 94
| 0.678304
| false
| 3.894161
| false
| false
| false
|
akintolga/superdesk-aap
|
server/aap/io/feed_parsers/zczc_medianet.py
|
1
|
4102
|
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license*.
from .zczc import ZCZCFeedParser
from superdesk.metadata.item import FORMAT, FORMATS
from superdesk.io.registry import register_feeding_service_error
from superdesk.errors import AlreadyExistsError
from superdesk.io.registry import register_feed_parser
from aap.errors import AAPParserError
import superdesk
from bs4 import BeautifulSoup, NavigableString
from superdesk.io.iptc import subject_codes
class ZCZCMedianetParser(ZCZCFeedParser):
NAME = 'Medianet_zczc'
place_map = {'MNETALL': 'FED',
'MNETNSW': 'NSW',
'MNETQLD': 'QLD',
'MNETVIC': 'VIC',
'MNETSA': 'SA',
'MNETWA': 'WA',
'MNETACT': 'ACT',
'MNETNT': 'NT',
'MNETTAS': 'TAS'}
subject_map = {'MFI': '04000000',
'MEN': '01021000',
'MSP': '15000000',
'MHE': '07007000',
'MIT': '13010000'}
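# place_map translates Medianet distribution selectors into locator qcodes;
# subject_map translates content selectors into IPTC subject codes (resolved
# via superdesk.io.iptc.subject_codes in post_process_item).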
def set_item_defaults(self, item, provider):
super().set_item_defaults(item, provider)
# Medianet
item[FORMAT] = FORMATS.PRESERVED
item['original_source'] = 'Medianet'
item['urgency'] = 5
self.CATEGORY = '$'
self.TAKEKEY = ':'
self.PLACE = '%'
self.header_map = {self.PLACE: self.ITEM_PLACE, self.TAKEKEY: self.ITEM_TAKE_KEY}
def post_process_item(self, item, provider):
InvestorRelease = (len(item.get('anpa_category', [])) and
item['anpa_category'][0].get('qcode', '').lower() == 'k')
if InvestorRelease:
# IRW News Release:
item['slugline'] = 'IRW News Release'
item['headline'] = 'IRW News Release: ' + item.get(self.ITEM_TAKE_KEY, '')
else:
item['slugline'] = 'Media Release'
item['headline'] = 'Media Release: ' + item.get(self.ITEM_TAKE_KEY, '')
# Truncate the take key if required
if len(item.get(self.ITEM_TAKE_KEY, '')) > 24:
item[self.ITEM_TAKE_KEY] = item.get(self.ITEM_TAKE_KEY, '')[0:24]
genre_map = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='genre')
item['genre'] = [x for x in genre_map.get('items', []) if
x['qcode'] == 'Press Release' and x['is_active']]
soup = BeautifulSoup(item.get('body_html', ''), "html.parser")
ptag = soup.find('pre')
if ptag is not None:
if InvestorRelease:
ptag.insert(0, NavigableString(
'{} '.format('Investor Relations news release distributed by AAP Medianet. \r\n\r\n\r\n')))
else:
ptag.insert(0, NavigableString('{} '.format('Media release distributed by AAP Medianet. \r\n\r\n\r\n')))
item['body_html'] = str(soup)
locator_map = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='locators')
place_strs = item.pop('place').split(' ')
for place in place_strs:
if place in self.place_map:
replace = [x for x in locator_map.get('items', []) if
x['qcode'] == self.place_map.get(place, '').upper()]
if replace is not None:
item[self.ITEM_PLACE] = replace
if place in self.subject_map:
if item.get(self.ITEM_SUBJECT) is None:
item[self.ITEM_SUBJECT] = []
item['subject'].append(
{'qcode': self.subject_map.get(place), 'name': subject_codes[self.subject_map.get(place)]})
return item
try:
register_feed_parser(ZCZCMedianetParser.NAME, ZCZCMedianetParser())
except AlreadyExistsError as ex:
pass
register_feeding_service_error('file', AAPParserError.ZCZCParserError().get_error_description())
|
agpl-3.0
| -4,504,097,776,540,009,500
| 39.215686
| 120
| 0.573379
| false
| 3.588801
| false
| false
| false
|
mdworks2016/work_development
|
Python/20_Third_Certification/venv/lib/python3.7/site-packages/django/contrib/gis/db/backends/mysql/operations.py
|
4
|
3816
|
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import (
BaseSpatialOperations,
)
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geos.geometry import GEOSGeometryBase
from django.contrib.gis.geos.prototypes.io import wkb_r
from django.contrib.gis.measure import Distance
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
mysql = True
name = 'mysql'
geom_func_prefix = 'ST_'
Adapter = WKTAdapter
@cached_property
def select(self):
return self.geom_func_prefix + 'AsBinary(%s)'
@cached_property
def from_text(self):
return self.geom_func_prefix + 'GeomFromText'
@cached_property
def gis_operators(self):
return {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # ...
'contained': SpatialOperator(func='MBRWithin'), # ...
'contains': SpatialOperator(func='ST_Contains'),
'crosses': SpatialOperator(func='ST_Crosses'),
'disjoint': SpatialOperator(func='ST_Disjoint'),
'equals': SpatialOperator(func='ST_Equals'),
'exact': SpatialOperator(func='ST_Equals'),
'intersects': SpatialOperator(func='ST_Intersects'),
'overlaps': SpatialOperator(func='ST_Overlaps'),
'same_as': SpatialOperator(func='ST_Equals'),
'touches': SpatialOperator(func='ST_Touches'),
'within': SpatialOperator(func='ST_Within'),
}
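# MySQL has no native support for these geographic aggregate functions, so the
# backend rejects them outright.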
disallowed_aggregates = (
aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
aggregates.MakeLine, aggregates.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGML', 'AsKML', 'AsSVG', 'Azimuth', 'BoundingCircle',
'ForcePolygonCW', 'GeometryDistance', 'LineLocatePoint',
'MakeValid', 'MemSize', 'Perimeter', 'PointOnSurface', 'Reverse',
'Scale', 'SnapToGrid', 'Transform', 'Translate',
}
if self.connection.mysql_is_mariadb:
unsupported.remove('PointOnSurface')
unsupported.update({'GeoHash', 'IsValid'})
if self.connection.mysql_version < (10, 2, 4):
unsupported.add('AsGeoJSON')
elif self.connection.mysql_version < (5, 7, 5):
unsupported.update({'AsGeoJSON', 'GeoHash', 'IsValid'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_distance(self, f, value, lookup_type):
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError(
'Only numeric values of degree units are allowed on '
'geodetic distance queries.'
)
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geometry_converter(self, expression):
read = wkb_r().read
srid = expression.output_field.srid
if srid == -1:
srid = None
geom_class = expression.output_field.geom_class
def converter(value, expression, connection):
if value is not None:
geom = GEOSGeometryBase(read(memoryview(value)), geom_class)
if srid:
geom.srid = srid
return geom
return converter
|
apache-2.0
| 3,364,332,919,509,989,400
| 37.545455
| 95
| 0.622379
| false
| 4.170492
| false
| false
| false
|
dguevel/PyZOGY
|
PyZOGY/test/mock_image_class.py
|
1
|
1822
|
import numpy as np
from astropy.io import fits
class MockImageClass(np.ndarray):
"""Creates a mock version of ImageClass for testing"""
def __new__(cls, image_filename='', psf_filename='', mask_filename=None, n_stamps=1, saturation=np.inf, variance=np.inf, shape=(50,50)):
raw_image, header = np.ones(shape), fits.Header()#fits.getdata(image_filename, header=True)
raw_psf = np.ones(shape)
mask = np.zeros(shape)
background_std, background_counts = np.ones(shape), np.zeros(shape)
image_data = np.ones(shape)
obj = np.asarray(image_data).view(cls)
obj.header = header
obj.raw_image = raw_image
obj.raw_psf = raw_psf
obj.background_std = background_std
obj.background_counts = background_counts
obj.image_filename = image_filename
obj.psf_filename = psf_filename
obj.saturation = saturation
obj.mask = mask
obj.psf = raw_psf
obj.zero_point = 1.
obj.variance = variance
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.raw_image = getattr(obj, 'raw_image', None)
self.header = getattr(obj, 'header', None)
self.raw_psf = getattr(obj, 'raw_psf', None)
self.background_std = getattr(obj, 'background_std', None)
self.background_counts = getattr(obj, 'background_counts', None)
self.image_filename = getattr(obj, 'image_filename', None)
self.psf_filename = getattr(obj, 'psf_filename', None)
self.saturation = getattr(obj, 'saturation', None)
self.mask = getattr(obj, 'mask', None)
self.psf = getattr(obj, 'psf', None)
self.zero_point = getattr(obj, 'zero_point', None)
self.variance = getattr(obj, 'variance', None)
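# Illustrative usage sketch (not part of the original helper); the filenames and
# shape below are arbitrary placeholders, since MockImageClass never reads files.
if __name__ == '__main__':
    mock = MockImageClass(image_filename='image.fits', psf_filename='psf.fits',
                          saturation=50000., shape=(32, 32))
    assert mock.shape == (32, 32) and mock.zero_point == 1.
    # Slicing produces a new MockImageClass; __array_finalize__ carries the
    # attached attributes over to the view.
    sub = mock[:16, :16]
    assert sub.psf_filename == 'psf.fits'
    print(sub.shape, sub.saturation)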
|
mit
| 7,069,680,640,840,340,000
| 40.409091
| 140
| 0.618551
| false
| 3.531008
| false
| false
| false
|
rawdlite/mopidy-beets-local
|
setup.py
|
1
|
1430
|
from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
content = open(filename).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
return metadata['version']
setup(
name='Mopidy-BeetsLocal',
version=get_version('mopidy_beetslocal/__init__.py'),
url='https://github.com/rawdlite/mopidy-beets-local',
license='Apache License, Version 2.0',
author='Tom Roth',
author_email='rawdlite@googlemail.com',
description='Access local beets library',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.0',
'Pykka >= 1.1',
'uritools >= 0.11',
'beets'
],
test_suite='nose.collector',
tests_require=[
'nose',
'mock >= 1.0',
],
entry_points={
'mopidy.ext': [
'beetslocal = mopidy_beetslocal:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
|
apache-2.0
| -1,899,545,129,688,441,900
| 27.039216
| 68
| 0.586713
| false
| 3.575
| false
| false
| false
|
SalesforceFoundation/CumulusCI
|
cumulusci/tasks/metadata_etl/value_sets.py
|
1
|
3136
|
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.metadata_etl import MetadataSingleEntityTransformTask
from cumulusci.utils.xml.metadata_tree import MetadataElement
class AddValueSetEntries(MetadataSingleEntityTransformTask):
entity = "StandardValueSet"
task_options = {
**MetadataSingleEntityTransformTask.task_options,
"entries": {
"description": "Array of standardValues to insert. "
"Each standardValue should contain the keys 'fullName', the API name of the entry, "
"and 'label', the user-facing label. OpportunityStage entries require the additional "
"keys 'closed', 'won', 'forecastCategory', and 'probability'; CaseStatus entries "
"require 'closed'.",
"required": True,
},
"api_names": {
"description": "List of API names of StandardValueSets to affect, "
"such as 'OpportunityStage', 'AccountType', 'CaseStatus'",
"required": True,
},
}
def _transform_entity(self, metadata: MetadataElement, api_name: str):
for entry in self.options.get("entries", []):
if "fullName" not in entry or "label" not in entry:
raise TaskOptionsError(
"Standard value set entries must contain the 'fullName' and 'label' keys."
)
# Check for extra metadata on CaseStatus and OpportunityStage
if api_name == "OpportunityStage":
if not all(
[
"closed" in entry,
"forecastCategory" in entry,
"probability" in entry,
"won" in entry,
]
):
raise TaskOptionsError(
"OpportunityStage standard value set entries require the keys "
"'closed', 'forecastCategory', 'probability', and 'won'"
)
if api_name == "CaseStatus":
if "closed" not in entry:
raise TaskOptionsError(
"CaseStatus standard value set entries require the key 'closed'"
)
existing_entry = metadata.findall(
"standardValue", fullName=entry["fullName"]
)
if not existing_entry:
# Entry doesn't exist. Insert it.
elem = metadata.append(tag="standardValue")
elem.append("fullName", text=entry["fullName"])
elem.append("label", text=entry["label"])
elem.append("default", text="false")
if api_name in ["OpportunityStage", "CaseStatus"]:
elem.append("closed", str(entry["closed"]).lower())
if api_name == "OpportunityStage":
elem.append("won", str(entry["won"]).lower())
elem.append("probability", str(entry["probability"]))
elem.append("forecastCategory", entry["forecastCategory"])
return metadata
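# Illustrative sketch of the options this task consumes (the values below are
# hypothetical, not taken from a real cumulusci.yml):
#
#   options:
#       api_names:
#           - OpportunityStage
#       entries:
#           - fullName: Prospecting
#             label: Prospecting
#             closed: False
#             won: False
#             forecastCategory: Pipeline
#             probability: 10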
|
bsd-3-clause
| -2,364,462,508,562,814,000
| 41.958904
| 98
| 0.546237
| false
| 4.962025
| false
| false
| false
|
mrtukkin/ifp
|
olympic_layer/olympic_data_layer.py
|
1
|
3300
|
import caffe
import numpy as np
from glob import glob
import random
from PIL import Image
from os.path import normpath, basename
from scipy.misc import imresize
from ifp_morris import downsample_segmentation
class OlympicDataLayer(caffe.Layer):
im_factor = 1.0
#label_factor = 0.25
label_factor = 0.5
# im_head = '/export/home/mfrank/data/OlympicSports/clips/'
# label_head = '/export/home/mfrank/results/OlympicSports/segmentations/'
im_head = '/export/home/mfrank/data/OlympicSports/patches/'
label_head = '/export/home/mfrank/results/OlympicSports/segmentation_patches/'
def setup(self, bottom, top):
        print('Setting up the OlympicDataLayer...')
self.top_names = ['data', 'label']
# config
params = eval(self.param_str)
self.path_file = params['path_file']
self.mean = np.array(params['mean'])
self.random = params.get('randomize', False)
self.seed = params.get('seed', None)
self.data_ext = params.get('data_ext', 'jpg')
self.label_ext = params.get('label_ext', 'npy')
# two tops: data and label
if len(top) != 2:
raise Exception("Need to define two tops: data and label.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
self.paths = open(self.path_file, 'r').read().splitlines()
self.idx = 0
def reshape(self, bottom, top):
# load image + label image pair
self.data = self.load_image(self.paths[self.idx])
self.label = self.load_label(self.paths[self.idx])
# while np.min([self.data.shape[1], self.data.shape[2]]) < 340:
# self.data = imresize(self.data, 2.0).transpose((2, 0, 1))
# self.label = self.label.repeat(2, axis=1).repeat(2, axis=2)
# reshape tops to fit (leading 1 is for batch dimension)
top[0].reshape(1, *self.data.shape)
top[1].reshape(1, *self.label.shape)
def forward(self, bottom, top):
# assign output
top[0].data[...] = self.data
top[1].data[...] = self.label
# pick next input
if self.random:
self.idx = random.randint(0, len(self.paths) - 1)
else:
self.idx += 1
if self.idx == len(self.paths):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load_image(self, path):
"""
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
im = Image.open(self.im_head + path + self.data_ext)
if self.im_factor == 1:
in_ = im
else:
in_ = imresize(im, self.im_factor)
in_ = np.array(in_, dtype=np.float32)
in_ = in_[:, :, ::-1]
in_ -= self.mean
in_ = in_.transpose((2, 0, 1))
return in_
def load_label(self, path):
label = np.load(self.label_head + path + self.label_ext).astype('int')
if self.label_factor != 1:
label = downsample_segmentation(label, int(1/self.label_factor))
label = label[np.newaxis, ...]
return label
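# Illustrative sketch of the Caffe python_param this layer expects; the paths and
# values below are hypothetical. param_str is eval()'d into the params dict in
# setup(); data_ext/label_ext fall back to their defaults when omitted:
#
#   layer {
#     name: "data"
#     type: "Python"
#     top: "data"
#     top: "label"
#     python_param {
#       module: "olympic_layer.olympic_data_layer"
#       layer: "OlympicDataLayer"
#       param_str: "{'path_file': '/path/to/train_paths.txt', 'mean': (104.0, 116.7, 122.7), 'randomize': True, 'seed': 1337}"
#     }
#   }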
|
gpl-3.0
| -33,537,435,243,634,360
| 31.352941
| 82
| 0.578788
| false
| 3.473684
| false
| false
| false
|
UCHIC/ODM2Sensor
|
src/sensordatainterface/forms.py
|
1
|
31049
|
# -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.forms import ModelForm, TextInput, Textarea, NumberInput, ModelChoiceField, DateTimeInput, Select, SelectMultiple \
, ModelMultipleChoiceField, FileInput, HiddenInput
from django.forms.models import modelformset_factory
from sensordatainterface.models import *
from django.utils.translation import ugettext_lazy as _
from django import forms
from datetime import datetime
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.forms.util import flatatt
from django.forms.fields import BooleanField
class PrettyCheckboxWidget(forms.widgets.CheckboxInput):
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
final_attrs['value'] = force_text(value)
if 'prettycheckbox-label' in final_attrs:
label = final_attrs.pop('prettycheckbox-label')
else:
label = ''
return format_html('<label class="checkbox-label" for="{0}"><input{1} /> {2}</label>', attrs['id'], flatatt(final_attrs), label.capitalize())
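    # For illustration (hypothetical field name/label): the widget renders roughly
    #   <label class="checkbox-label" for="id_show_all">
    #     <input id="id_show_all" name="show_all" type="checkbox" /> Show all
    #   </label>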
class PrettyCheckboxField(BooleanField):
widget = PrettyCheckboxWidget
def __init__(self, *args, **kwargs):
if kwargs['label']:
kwargs['widget'].attrs['prettycheckbox-label'] = kwargs['label']
kwargs['label'] = ''
super(PrettyCheckboxField, self).__init__(*args, **kwargs)
class SamplingFeatureChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.samplingfeaturename
class OrganizationChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.organizationname
class EquipmentModelChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.modelname
def validate(self, value):
pass
def to_python(self, value):
try:
value = super(EquipmentModelChoiceField, self).to_python(value)
except self.queryset.model.DoesNotExist:
key = self.to_field_name or 'pk'
value = EquipmentModel.objects.filter(**{key: value})
if not value.exists():
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
else:
value = value.first()
return value
class PeopleChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.personfirstname + " " + obj.personlastname
class PeopleMultipleChoice(ModelMultipleChoiceField):
def label_from_instance(self, obj):
return obj.organizationid.organizationname + ": " + obj.personid.personfirstname + " " + obj.personid.personlastname
class DeploymentActionChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
action = obj.actionid
equipment = obj.equipmentid
equipment_model = equipment.equipmentmodelid
feature_actions = action.featureaction.all()
feature_action = feature_actions[0] if feature_actions.count() > 0 else None
manufacturer = equipment_model.modelmanufacturerid if equipment_model is not None else None
info = str(action.begindatetime) + ' '
info += (str(feature_action.samplingfeatureid.samplingfeaturecode) + ' ') if feature_action is not None else ''
info += (str(equipment.equipmentserialnumber) + ' ' + str(equipment.equipmenttypecv.name) + ' ') if equipment is not None else ''
info += (str(manufacturer.organizationname) + ' ') if manufacturer is not None else ''
info += (str(equipment_model.modelpartnumber) + ' ') if equipment_model is not None else ''
return info
class MethodChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.methodname
class UnitChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.unitsname
class ProcessingLevelChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.processinglevelcode
class MultipleEquipmentChoiceField(ModelMultipleChoiceField):
def label_from_instance(self, obj):
return obj.equipmentcode + ": " + obj.equipmentserialnumber + " (" + obj.equipmenttypecv.name + ", " + obj.equipmentmodelid.modelname + ")"
def clean(self, value):
cleaned_value = self._check_values(value)
return cleaned_value
class SiteVisitChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
start_time = str(obj.begindatetime)
sampling_feature_code = obj.featureaction.filter(actionid=obj).get().samplingfeatureid.samplingfeaturecode
return "(" + start_time + ") " + sampling_feature_code
class EquipmentChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.equipmentcode + ": " + obj.equipmentserialnumber + " (" + obj.equipmenttypecv.name + ", " + obj.equipmentmodelid.modelname + ")"
class CalibrationStandardMultipleChoiceField(ModelMultipleChoiceField):
def label_from_instance(self, obj):
if obj.referencematerialvalue.count() > 0:
referencematerialvalue = obj.referencematerialvalue.get()
value_information = ": " + referencematerialvalue.variableid.variablenamecv.name + " " + \
str(referencematerialvalue.referencematerialvalue) + " " + \
referencematerialvalue.unitsid.unitsabbreviation
else:
value_information = ''
return obj.referencematerialmediumcv.name + ' : ' + obj.referencematerialcode + " " + \
obj.referencemateriallotcode + value_information
class VariableChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.variablecode + ": " + obj.variablenamecv.name
class DeploymentChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.methodname
class InstrumentOutputVariableChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.modelid.modelname + ": " + obj.variableid.variablecode + ' ' + obj.variableid.variablenamecv.name
class ActionAnnotationChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return obj.annotationtext
time_zone_choices = (
(-12, '-12:00'),
(-11, '-11:00'),
(-10, '-10:00'),
(-9, '-9:00'),
(-8, '-8:00 PST'),
(-7, '-7:00 MST'),
(-6, '-6:00 CST'),
(-5, '-5:00 EST'),
(-4, '-4:00'),
(-3, '-3:00'),
(-2, '-2:00'),
(-1, '-1:00'),
(0, '±0:00'),
(1, '+1:00'),
(2, '+2:00'),
(3, '+3:00'),
(4, '+4:00'),
(5, '+5:00'),
(6, '+6:00'),
(7, '+7:00'),
(8, '+8:00'),
(9, '+9:00'),
(10, '+10:00'),
(11, '+11:00'),
(12, '+12:00'),
(13, '+13:00'),
(14, '+14:00'),
)
class SamplingFeatureForm(ModelForm):
required_css_class = 'form-required'
class Meta:
model = SamplingFeature
fields = [
'samplingfeaturecode',
'samplingfeaturename',
'samplingfeaturedescription',
'elevation_m',
'elevationdatumcv',
'samplingfeaturegeotypecv',
]
widgets = {
'samplingfeaturecode': TextInput,
'samplingfeaturename': TextInput,
'elevation_m': NumberInput,
}
labels = {
'samplingfeaturecode': _('Site Code'),
'samplingfeaturename': _('Site Name'),
'samplingfeaturedescription': _('Site Description'),
'elevation_m': _('Elevation (m)'),
'elevationdatumcv': _('Elevation Datum'),
'samplingfeaturegeotypecv': _('Geo-Type'),
}
class SiteForm(ModelForm):
required_css_class = 'form-required'
class Meta:
model = Sites
fields = [
'latitude',
'longitude',
'sitetypecv',
'spatialreferenceid'
]
widgets = {
'samplingfeaturename': TextInput,
'latitude': NumberInput,
'longitude': NumberInput,
}
labels = {
'latlondatumid': _('Spatial Reference'),
'latitude': _('Latitude (dec deg)'),
'longitude': _('Longitude (dec deg)'),
'sitetypecv': _('Site Type'),
'spatialreferenceid': _('Spatial Reference'),
}
class EquipmentForm(ModelForm):
required_css_class = 'form-required'
equipmentvendorid = OrganizationChoiceField(queryset=Organization.objects.all(), label='Equipment Vendor', empty_label='Choose an Organization')
equipmentmodelid = EquipmentModelChoiceField(queryset=EquipmentModel.objects.all(), label='Equipment Model', empty_label='Choose a Model')
equipmentpurchasedate = forms.DateTimeField(initial=datetime.now(), label='Purchase Date')
equipmentownerid = PeopleChoiceField(queryset=People.objects.all(), label='Owner', empty_label='Choose an Owner')
class Meta:
model = Equipment
fields = [
'equipmentcode',
'equipmentserialnumber',
'equipmentname',
'equipmenttypecv',
'equipmentpurchaseordernumber',
'equipmentpurchasedate',
'equipmentdescription',
'equipmentownerid',
'equipmentdocumentationlink',
]
widgets = {
'equipmentname': TextInput,
'equipmentcode': TextInput,
'equipmentserialnumber': TextInput,
'equipmentpurchaseordernumber': TextInput,
'equipmentdocumentationlink': FileInput,
}
labels = {
'equipmentname': _('Equipment Name'),
'equipmentcode': _('Equipment Code'),
'equipmentserialnumber': _('Serial Number'),
'equipmenttypecv': _('Equipment Type'),
'equipmentpurchaseordernumber': _('Purchase Order Number'),
'equipmentdescription': _('Description'),
'equipmentdocumentationlink': _('Documentation Link')
}
class EquipmentModelForm(ModelForm):
required_css_class = 'form-required'
modelmanufacturerid = OrganizationChoiceField(queryset=Organization.objects.all(), label='Equipment Manufacturer',
empty_label='Choose a Manufacturer')
class Meta:
model = EquipmentModel
fields = [
'modelname',
'modelpartnumber',
'modeldescription',
'isinstrument',
'modellink',
'modelspecificationsfilelink',
]
widgets = {
'modelpartnumber': TextInput,
'modelname': TextInput,
'modellink': TextInput,
}
labels = {
'modelpartnumber': _('Part Number'),
'modelname': _('Model Name'),
'modeldescription': _('Description'),
'isinstrument': _('Is Instrument'),
'modellink': _('Model Link'),
'modelspecificationsfilelink': _('Specifications File'),
}
class EquipmentUsedForm(ModelForm):
required_css_class = 'form-required'
equipmentid = EquipmentChoiceField(
queryset=Equipment.objects.all(),
label='Equipment',
empty_label='Choose an Equipment'
)
class Meta:
model = EquipmentUsed
exclude = [
'actionid'
]
class PersonForm(ModelForm):
required_css_class = 'form-required'
class Meta:
model = People
fields = [
'personfirstname',
'personlastname',
]
widgets = {
'personfirstname': TextInput,
'personlastname': TextInput,
}
labels = {
'personfirstname': _('First Name'),
'personlastname': _('Last Name')
}
class AffiliationForm(ModelForm):
required_css_class = 'form-required'
organizationid = OrganizationChoiceField(
queryset=Organization.objects.all(),
# this select will show all organizations and an option to create a new one.
label='Organization',
empty_label='Choose an Organization'
)
class Meta:
model = Affiliation
fields = [
'isprimaryorganizationcontact',
'primaryaddress',
            'primaryphone',  # note: the affiliation start date must be set to the current date.
'primaryemail',
]
widgets = {
'primaryaddress': TextInput,
'primaryphone': TextInput,
'primaryemail': TextInput,
}
labels = {
'isprimaryorganizationcontact': _('Is Primary Organization Contact'),
'primaryaddress': _('Address'),
'primaryphone': _('Phone Number'),
'primaryemail': _('Email'),
}
class VendorForm(ModelForm):
required_css_class = 'form-required'
class Meta:
model = Organization
fields = [
'organizationcode',
'organizationname',
'organizationdescription',
'organizationtypecv',
'organizationlink',
]
widgets = {
'organizationcode': TextInput,
'organizationname': TextInput,
'organizationlink': TextInput,
}
labels = {
'organizationcode': _('Code'),
'organizationname': _('Name'),
'organizationdescription': _('Description'),
'organizationtypecv': _('Organization Type'),
'organizationlink': _('Website'),
}
class ReferenceMaterialForm(ModelForm):
required_css_class = 'form-required'
referencematerialorganizationid = OrganizationChoiceField(
queryset=Organization.objects.all(),
label='Organization',
empty_label='Choose an Organization'
)
class Meta:
model = ReferenceMaterial
fields = [
'referencematerialpurchasedate',
'referencemateriallotcode',
'referencematerialexpirationdate',
'referencematerialcertificatelink',
'referencematerialmediumcv'
]
widgets = {
'referencematerialpurchasedate': DateTimeInput,
'referencemateriallotcode': TextInput,
'referencematerialexpirationdate': DateTimeInput,
}
labels = {
'referencematerialpurchasedate': _('Purchase Date'),
'referencemateriallotcode': _('Lot Code'),
'referencematerialexpirationdate': _('Expiration Date'),
'referencematerialcertificatelink': _('Certificate File'),
'referencematerialmediumcv': _('Medium'),
}
class ReferenceMaterialValueForm(ModelForm):
required_css_class = 'form-required'
variableid = VariableChoiceField(
queryset=Variable.objects.all(),
label='Variable',
empty_label='Choose a Variable'
)
unitsid = UnitChoiceField(
queryset=Units.objects.all(),
label='Units',
empty_label='Choose a Unit'
)
class Meta:
model = ReferenceMaterialValue
fields = [
'referencematerialvalue',
'referencematerialaccuracy'
]
widgets = {
'referencematerialvalue': NumberInput,
}
labels = {
'referencematerialvalue': 'Reference Material Value',
'referencematerialaccuracy': 'Accuracy',
}
class MethodForm(ModelForm):
required_css_class = 'form-required'
organizationid = OrganizationChoiceField(
queryset=Organization.objects.all(),
label='Organization',
empty_label='Choose an Organization',
required=False
)
class Meta:
model = Method
fields = [
'methodcode',
'methodname',
'methodtypecv',
'methoddescription',
'methodlink'
]
widgets = {
'methodcode': TextInput,
'methodlink': TextInput,
'methodname': Textarea,
}
labels = {
'methodcode': _('Method Code'),
'methodname': _('Method Name'),
'methodtypecv': _('Method Type'),
'methoddescription': _('Description'),
'methodlink': _('Method Link')
}
class OutputVariableForm(ModelForm):
required_css_class = 'form-required'
instrumentmethodid = MethodChoiceField(
queryset=Method.objects.all(),
label='Method',
empty_label='Choose a Method'
)
variableid = VariableChoiceField(
queryset=Variable.objects.all(),
label='Variable',
empty_label='Choose a Variable'
)
modelid = EquipmentModelChoiceField(
queryset=EquipmentModel.objects.all(),
label='Model',
empty_label='Choose a Model'
)
instrumentrawoutputunitsid = UnitChoiceField(
queryset=Units.objects.all(),
label='Unit',
empty_label='Choose a Unit'
)
class Meta:
model = InstrumentOutputVariable
fields = [
'variableid',
'modelid',
'instrumentresolution',
'instrumentaccuracy',
'instrumentrawoutputunitsid',
]
widgets = {
'instrumentresolution': TextInput,
'instrumentaccuracy': TextInput
}
labels = {
'instrumentresolution': _('Instrument Resolution'),
'instrumentaccuracy': _('Instrument Accuracy')
}
class SiteDeploymentMeasuredVariableForm(ModelForm):
required_css_class = 'form-required'
instrumentmethodid = MethodChoiceField(
queryset=Method.objects.all(),
label='Method',
empty_label='Choose a Method'
)
variableid = VariableChoiceField(
queryset=Variable.objects.all(),
label='Variable',
empty_label='Choose a Variable'
)
instrumentrawoutputunitsid = UnitChoiceField(
queryset=Units.objects.all(),
label='Unit',
empty_label='Choose a Unit'
)
class Meta:
model = InstrumentOutputVariable
fields = [
'variableid',
'instrumentresolution',
'instrumentaccuracy',
'instrumentrawoutputunitsid',
]
widgets = {
'instrumentresolution': TextInput,
'instrumentaccuracy': TextInput,
}
labels = {
'instrumentresolution': _('Instrument Resolution'),
'instrumentaccuracy': _('Instrument Accuracy')
}
class FactoryServiceActionForm(ModelForm):
required_css_class = 'form-required'
methodid = MethodChoiceField(queryset=Method.objects.all(), label='Method',
empty_label='Choose a Method')
class Meta:
model = Action
fields = [
'begindatetime',
'begindatetimeutcoffset',
'enddatetime',
'enddatetimeutcoffset',
'actiondescription',
'actionfilelink',
]
widgets = {
'begindatetime': DateTimeInput,
'begindatetimeutcoffset': Select(choices=time_zone_choices),
'enddatetime': DateTimeInput,
'enddatetimeutcoffset': Select(choices=time_zone_choices),
'actionfilelink': FileInput,
}
labels = {
'begindatetime': _('Begin Date Time'),
'begindatetimeutcoffset': _('Begin UTC Offset'),
'enddatetime': _('End Date Time'),
'enddatetimeutcoffset': _('End UTC Offset'),
'actionfilelink': _('Action File'),
'actiondescription': _('Description')
}
class MaintenanceActionForm(ModelForm):
required_css_class = 'form-required'
class Meta:
model = MaintenanceAction
fields = [
# 'isfactoryservice' YES
'maintenancecode',
'maintenancereason',
]
widgets = {
# 'isfactoryservice': BooleanField,
'maintenancecode': TextInput,
}
labels = {
# 'isfactoryservice': _('Is Factory Service')
'maintenancecode': _('Maintenance Code'),
'maintenancereason': _('Maintenance Reason')
}
class SiteVisitForm(ModelForm):
required_css_class = 'form-required'
class Meta:
model = Action
fields = [
'begindatetime',
'begindatetimeutcoffset',
'enddatetime',
'enddatetimeutcoffset',
'actiondescription',
]
widgets = {
'begindatetimeutcoffset': Select(choices=time_zone_choices),
'enddatetimeutcoffset': Select(choices=time_zone_choices),
}
labels = {
'begindatetime': _('Begin Date Time'),
'begindatetimeutcoffset': _('Begin UTC Offset'),
'enddatetime': _('End Date Time'),
'enddatetimeutcoffset': _('End UTC Offset'),
'actiondescription': _('Description'),
}
class CrewForm(forms.Form):
required_css_class = 'form-required'
affiliationid = PeopleMultipleChoice(queryset=Affiliation.objects.all(), label="Crew")
def __init__(self, *args, **kwargs):
super(CrewForm, self).__init__(*args, **kwargs)
self.fields['affiliationid'].help_text = None
class FeatureActionForm(ModelForm):
required_css_class = 'form-required'
samplingfeatureid = SamplingFeatureChoiceField(
queryset=SamplingFeature.objects.all(),
label='Site',
empty_label="Choose a Site"
)
class Meta:
model = FeatureAction
fields = [
'samplingfeatureid'
]
class SiteVisitChoiceForm(ModelForm):
required_css_class = 'form-required'
actionid = SiteVisitChoiceField(
queryset=Action.objects.filter(actiontypecv='Site Visit').order_by('-begindatetime'),
label='Site Visit',
empty_label='Choose a Site Visit'
)
class Meta:
model = Action
fields = [
'actionid'
]
class SelectWithClassForOptions(Select):
def render_option(self, *args, **kwargs):
option_html = super(SelectWithClassForOptions, self).render_option(*args, **kwargs)
this_method = args[1]
class_value = "class=\"\""
if this_method != "":
class_value = Method.objects.get(pk=this_method).methodtypecv.name.replace(' ', '')
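        # Splice a class attribute into the rendered "<option ..." tag: keep the
        # leading '<option ' (8 characters), insert the class, then resume from the
        # space that precedes the original attributes (index 7).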
after_tag = 8
before_tag_close = 7
return option_html[:after_tag] + "class=\"" + class_value + "\"" + option_html[before_tag_close:]
class ActionForm(ModelForm):
def __init__(self, *args, **kwargs):
actiontype = kwargs.pop('actiontype', None)
super(ActionForm, self).__init__(*args, **kwargs)
self.fields['equipmentused'].help_text = None
self.fields['calibrationstandard'].help_text = None
self.fields['calibrationreferenceequipment'].help_text = None
self.fields['equipmentused'].required = False
required_css_class = 'form-required'
methodid = MethodChoiceField(queryset=Method.objects.all(), label='Method',
empty_label='Choose a Method', widget=SelectWithClassForOptions)
    # Additional fields below; their CSS classes control which ones are shown for each action type.
# fields for equipment maintenance:
equipmentused = MultipleEquipmentChoiceField(
queryset=Equipment.objects.all(), label='Equipment Used', required=False
)
equipment_by_site = PrettyCheckboxField(widget=PrettyCheckboxWidget(
attrs={'class': 'Instrumentcalibration Notype'}), label='Show All Equipment', required=False
)
equipmentusednumber = forms.IntegerField(widget=HiddenInput(), required=False, initial=0)
calibrationstandard = CalibrationStandardMultipleChoiceField(
widget=forms.SelectMultiple(attrs={'class': 'Instrumentcalibration'}),
queryset=ReferenceMaterial.objects.all(), label='Calibration Standards', required=False
)
calibrationstandardnumber = forms.IntegerField(widget=HiddenInput(), required=False, initial=0)
calibrationreferenceequipment = MultipleEquipmentChoiceField(
widget=forms.SelectMultiple(attrs={'class': 'Instrumentcalibration'}),
queryset=Equipment.objects.all(), label='Reference Equipment',
required=False
)
calibrationreferenceequipmentnumber = forms.IntegerField(widget=HiddenInput(), required=False, initial=0)
isfactoryservice = forms.BooleanField(
widget=forms.CheckboxInput(attrs={'class': 'Equipmentmaintenance'}), label='Is Factory Service', required=False)
isfactoryservicebool = forms.BooleanField(
widget=HiddenInput(), initial='False', required=False
)
maintenancecode = forms.CharField(
widget=forms.TextInput(attrs={'class': 'Equipmentmaintenance'}), label='Maintenance Code', required=False)
maintenancereason = forms.CharField(
widget=forms.Textarea(attrs={'class': 'Equipmentmaintenance'}), label='Maintenance Reason', required=False)
# fields for calibration
instrumentoutputvariable = InstrumentOutputVariableChoiceField(
widget=forms.Select(attrs={'class': 'Instrumentcalibration'}),
queryset=InstrumentOutputVariable.objects.all(), label='Instrument Output Variable', required=False)
calibrationcheckvalue = forms.DecimalField(
widget=forms.NumberInput(attrs={'class': 'Instrumentcalibration'}), label='Calibration Check Value', required=False)
calibrationequation = forms.CharField(
widget=forms.TextInput(attrs={'class': 'Instrumentcalibration'}), label='Calibration Equation', required=False)
# fields for retrieval
deploymentaction = DeploymentActionChoiceField(widget=forms.Select(attrs={'class': 'Instrumentretrieval Equipmentretrieval'}), label='Deployment', to_field_name='actionid',
queryset=EquipmentUsed.objects.filter(Q(actionid__actiontypecv__term='equipmentDeployment') | Q(actionid__actiontypecv__term='instrumentDeployment')),
required=False
)
thisactionid = forms.IntegerField(widget=HiddenInput(), required=False, initial=0)
class Meta:
model = Action
fields = [
'actiontypecv',
'deploymentaction',
'begindatetime',
'begindatetimeutcoffset',
'enddatetime',
'enddatetimeutcoffset',
'actiondescription',
'actionfilelink',
'methodid',
]
widgets = {
# 'actiontypecv': Select(choices=[
# ('Field activity', 'Generic'),
# ('Equipment deployment', 'Deployment'),
# ('Instrument calibration', 'Calibration'),
# ('Equipment maintenance', 'Maintenance')
# ]),
'begindatetime': DateTimeInput,
'begindatetimeutcoffset': Select(choices=time_zone_choices),
'enddatetime': DateTimeInput,
'enddatetimeutcoffset': Select(choices=time_zone_choices),
'actionfilelink': FileInput,
# 'methodid': SelectWithClassForOptions,
}
labels = {
'actiontypecv': _('Action Type'),
'begindatetime': _('Begin Date Time'),
'begindatetimeutcoffset': _('Begin UTC Offset'),
'enddatetime': _('End Date Time'),
'enddatetimeutcoffset': _('End UTC Offset'),
'actionfilelink': _('Action File'),
'actiondescription': _('Description')
}
def clean(self):
return super(ActionForm, self).clean()
def clean_equipmentused(self):
equipment = self.data['equipmentused']
action_type = self.data['actiontypecv']
required_types = ['Equipment maintenance', 'Equipment programming', 'Instrument retrieval',
'Instrument calibration', 'Equipment deployment', 'Instrument deployment', 'Equipment retrieval']
if action_type in required_types and len(equipment) == 0:
raise ValidationError(_('This field is required'))
return self.cleaned_data['equipmentused']
class ResultsForm(forms.Form):
required_css_class = 'form-required'
instrumentoutputvariable = InstrumentOutputVariableChoiceField(
widget=forms.Select(attrs={'class': ''}),
queryset=InstrumentOutputVariable.objects.all(), label='Instrument Output Variable', required=True)
unitsid = UnitChoiceField(
widget=forms.Select(attrs={'class': ''}),
queryset=Units.objects.all(), label='Units', required=True)
processing_level_id = ProcessingLevelChoiceField(
widget=forms.Select(attrs={'class': ''}),
queryset=ProcessingLevel.objects.all(), label='Processing Level', required=True)
sampledmediumcv = forms.ModelChoiceField(
widget=forms.Select(attrs={'class': ''}),
queryset=CvMedium.objects.all(), label='Sampled Medium', required=True)
class AnnotationForm(forms.ModelForm):
required_css_class = 'form-required'
annotationid = ActionAnnotationChoiceField(queryset=Annotation.objects.all(),
label='Annotation', empty_label='Choose an Annotation')
class Meta:
model = Annotation
fields = [
'annotationid',
'annotationcode',
'annotationtext',
'annotationdatetime',
'annotationutcoffset'
]
widgets = {
'annotationcode': forms.TextInput,
'annotationtext': forms.TextInput,
'annotationdatetime': DateTimeInput,
'annotationutcoffset': Select(choices=time_zone_choices),
}
labels = {
'annotationid': _('Annotation'),
'annotationcode': _('Annotation Code'),
'annotationtext': _('Annotation Text'),
'annotationdatetime': _('Annotation Date Time'),
'annotationutcoffset': _('Annotation UTC Offset')
}
def get_cv_model_form(form_model, *args, **kwargs):
class CVForm(ModelForm):
required_css_class = 'form-required'
class Meta:
model = form_model
fields = ['term', 'name', 'definition', 'category', 'sourcevocabularyuri']
labels = {'sourcevocabularyuri': 'Source Vocabulary URI'}
widgets = {
'term': TextInput,
'name': TextInput,
'category': TextInput,
'sourcevocabularyuri': TextInput
}
def __init__(self):
super(CVForm, self).__init__(*args, **kwargs)
return CVForm()
|
bsd-3-clause
| -1,623,401,265,978,536,200
| 32.492988
| 176
| 0.60735
| false
| 4.515416
| false
| false
| false
|
zackdever/vsims
|
vsims/nestedstore.py
|
1
|
2886
|
from vsims.block import Block
class NestedStore:
"""Simple key-value store that supports nested transactional blocks."""
def __init__(self):
self.blocks = []
self.store = {}
self.value_counts = {}
def set(self, key, value, doLog=True):
"""Add the key to the store if not already present, and set its value.
key - key to add or update
value - value set for key
doLog - determines if a reverse operation should be logged
"""
has_key = self.has_key(key)
if not self.is_flat() and doLog:
block = self.blocks[-1]
if has_key:
block.log(self.set, key, self.get(key), False)
else:
block.log(self.delete, key, False)
if has_key:
old_value = self.get(key)
if old_value != value:
self._update_value_count_(old_value, -1)
self._update_value_count_(value, 1)
else:
self._update_value_count_(value, 1)
self.store[key] = value
def get(self, key):
"""Returns the value of the given key.
throws: KeyError if key is not present in the store
"""
return self.store[key]
def has_key(self, key):
"""Determines if the store contains the key."""
        return key in self.store
def delete(self, key, doLog=True):
"""Deletes the key from the store if present.
key - key to delete
doLog - determines if a reverse operation should be logged
"""
if self.has_key(key):
if not self.is_flat() and doLog:
self.blocks[-1].log(self.set, key, self.get(key), False)
self._update_value_count_(self.get(key), -1)
del self.store[key]
def nest(self):
"""Start a new transactional block."""
self.blocks.append(Block())
def pop_nest(self):
"""End the currently open transactional block.
throws: IndexError if there are no open transactional blocks.
"""
self.blocks.pop().rollback()
def flatten(self):
"""Permanently stores and closes all open transactional blocks."""
self.blocks = []
def is_flat(self):
"""Returns True if there are no open transactional blocks."""
return len(self.blocks) == 0
def numequalto(self, value):
"""Returns the number of keys set to the provided value."""
        if value not in self.value_counts:
self.value_counts[value] = 0
return 0
return self.value_counts[value]
def _update_value_count_(self, value, count):
"""Set or update the count for the provided value."""
        if value in self.value_counts:
self.value_counts[value] += count
else:
self.value_counts[value] = count
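# Illustrative usage sketch (not part of the original module). It assumes that
# vsims.block.Block.rollback() replays the inverse operations logged in
# set()/delete(), which is what pop_nest() relies on to undo a transactional block.
if __name__ == '__main__':
    store = NestedStore()
    store.set('a', 10)
    store.nest()                 # open a transactional block
    store.set('a', 20)
    print(store.get('a'))        # 20
    store.pop_nest()             # roll the block back
    print(store.get('a'))        # 10 again, assuming rollback replays the log
    print(store.numequalto(10))  # 1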
|
mit
| 7,066,074,681,128,292,000
| 30.369565
| 78
| 0.567221
| false
| 4.053371
| false
| false
| false
|
Venturi/cms
|
env/lib/python2.7/site-packages/aldryn_people/south_migrations/0009_rename_tables_because_of_new_cms.py
|
1
|
14315
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import connection
class Migration(SchemaMigration):
TABLES_MAPPING = {
'cmsplugin_peopleplugin': 'aldryn_people_peopleplugin',
}
REVERSE_TABLES_MAPPING = dict((v, k) for k, v in TABLES_MAPPING.iteritems())
@staticmethod
def rename_tables_by_map(mapper):
tables_names = connection.introspection.table_names()
for table_name in tables_names:
new_table_name = mapper.get(table_name)
if new_table_name:
db.rename_table(table_name, new_table_name)
def forwards(self, orm):
self.rename_tables_by_map(self.TABLES_MAPPING)
def backwards(self, orm):
self.rename_tables_by_map(self.REVERSE_TABLES_MAPPING)
models = {
u'aldryn_people.group': {
'Meta': {'object_name': 'Group'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('phonenumber_field.modelfields.PhoneNumberField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
},
u'aldryn_people.grouptranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'GroupTranslation', 'db_table': "u'aldryn_people_group_translation'"},
'company_description': ('djangocms_text_ckeditor.fields.HTMLField', [], {'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_people.Group']"})
},
u'aldryn_people.peopleplugin': {
'Meta': {'object_name': 'PeoplePlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'group_by_group': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'people': ('sortedm2m.fields.SortedManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['aldryn_people.Person']", 'null': 'True', 'blank': 'True'}),
'show_links': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'aldryn_people.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('phonenumber_field.modelfields.PhoneNumberField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('phonenumber_field.modelfields.PhoneNumberField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
u'aldryn_people.persontranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'PersonTranslation', 'db_table': "u'aldryn_people_person_translation'"},
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'function': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_people.Person']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['aldryn_people']
|
gpl-2.0
| 6,811,224,199,680,188,000
| 80.335227
| 192
| 0.557248
| false
| 3.585922
| false
| false
| false
|
WebCampZg/conference-web
|
people/admin.py
|
1
|
1117
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from people.models import User
from django.utils.translation import ugettext as _
from .forms import CustomUserCreationForm, CustomUserChangeForm
class CustomUserAdmin(UserAdmin):
# Set the add/modify forms
add_form = CustomUserCreationForm
form = CustomUserChangeForm
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal info'), {'fields': (
'first_name', 'last_name', 'twitter', 'github', 'tshirt_size')}),
(_('Permissions'), {'fields': (
'is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
list_display = ('email', 'first_name', 'last_name', 'is_staff', 'is_superuser')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = ('first_name', 'last_name', 'email')
ordering = ('email',)
admin.site.register(User, CustomUserAdmin)
|
bsd-3-clause
| -74,037,275,173,721,460
| 35.032258
| 85
| 0.610564
| false
| 3.735786
| false
| false
| false
|
juju/python-libjuju
|
juju/client/facade.py
|
1
|
29940
|
import argparse
import builtins
import functools
import json
import keyword
import pprint
import re
import textwrap
import typing
import typing_inspect
from collections import defaultdict
from glob import glob
from pathlib import Path
from typing import Any, Mapping, Sequence, TypeVar
from . import codegen
_marker = object()
JUJU_VERSION = re.compile(r'[0-9]+\.[0-9-]+[\.\-][0-9a-z]+(\.[0-9]+)?')
# Workaround for https://bugs.launchpad.net/juju/+bug/1683906
NAUGHTY_CLASSES = ['ClientFacade', 'Client', 'ModelStatusInfo']
# Map basic types to Python's typing with a callable
SCHEMA_TO_PYTHON = {
'string': str,
'integer': int,
'float': float,
'number': float,
'boolean': bool,
'object': Any,
}
# Friendly warning message to stick at the top of generated files.
HEADER = """\
# DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py.
# Changes will be overwritten/lost when the file is regenerated.
"""
# Classes and helper functions that we'll write to _client.py
LOOKUP_FACADE = '''
def lookup_facade(name, version):
"""
Given a facade name and version, attempt to pull that facade out
of the correct client<version>.py file.
"""
for _version in range(int(version), 0, -1):
try:
facade = getattr(CLIENTS[str(_version)], name)
return facade
except (KeyError, AttributeError):
continue
else:
raise ImportError("No supported version for facade: "
"{}".format(name))
'''
TYPE_FACTORY = '''
class TypeFactory:
@classmethod
def from_connection(cls, connection):
"""
Given a connected Connection object, return an initialized and
connected instance of an API Interface matching the name of
this class.
@param connection: initialized Connection object.
"""
facade_name = cls.__name__
if not facade_name.endswith('Facade'):
raise TypeError('Unexpected class name: {}'.format(facade_name))
facade_name = facade_name[:-len('Facade')]
version = connection.facades.get(facade_name)
if version is None:
raise Exception('No facade {} in facades {}'.format(facade_name,
connection.facades))
c = lookup_facade(cls.__name__, version)
c = c()
c.connect(connection)
return c
@classmethod
def best_facade_version(cls, connection):
"""
Returns the best facade version for a given facade. This will help with
trying to provide different functionality for different facade versions.
@param connection: initialized Connection object.
"""
facade_name = cls.__name__
if not facade_name.endswith('Facade'):
raise TypeError('Unexpected class name: {}'.format(facade_name))
facade_name = facade_name[:-len('Facade')]
return connection.facades.get(facade_name)
'''
CLIENT_TABLE = '''
CLIENTS = {{
{clients}
}}
'''
class KindRegistry(dict):
def register(self, name, version, obj):
self[name] = {version: {
"object": obj,
}}
def lookup(self, name, version=None):
"""If version is omitted, max version is used"""
versions = self.get(name)
if not versions:
return None
if version:
return versions[version]
return versions[max(versions)]
def getObj(self, name, version=None):
result = self.lookup(name, version)
if result:
obj = result["object"]
return obj
return None
class TypeRegistry(dict):
def __init__(self, schema):
self.schema = schema
def get(self, name):
# Two way mapping
refname = self.schema.referenceName(name)
if refname not in self:
result = TypeVar(refname)
self[refname] = result
self[result] = refname
return self[refname]
def getRefType(self, ref):
return self.get(ref)
def objType(self, obj):
kind = obj.get('type')
if not kind:
raise ValueError("%s has no type" % obj)
result = SCHEMA_TO_PYTHON.get(kind)
if not result:
raise ValueError("%s has type %s" % (obj, kind))
return result
def refType(self, obj):
return self.getRefType(obj["$ref"])
CLASSES = {}
factories = codegen.Capture()
def booler(v):
if isinstance(v, str):
if v == "false":
return False
return bool(v)
basic_types = [str, bool, int, float]
type_mapping = {
'str': '(bytes, str)',
'Sequence': '(bytes, str, list)',
'Union': 'dict',
'Mapping': 'dict',
}
def name_to_py(name):
result = name.replace("-", "_")
result = result.lower()
if keyword.iskeyword(result) or result in dir(builtins):
result += "_"
return result
def var_type_to_py(kind):
return 'None'
def kind_to_py(kind):
if kind is None or kind is typing.Any:
return 'None', '', False
name = ""
if typing_inspect.is_generic_type(kind):
origin = typing_inspect.get_origin(kind)
name = origin.__name__
else:
name = kind.__name__
if (kind in basic_types or type(kind) in basic_types):
return name, type_mapping.get(name) or name, True
if (name in type_mapping):
return name, type_mapping[name], True
suffix = name.lstrip("~")
return suffix, "(dict, {})".format(suffix), True
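# Worked examples (added for illustration, derived from the mappings above):
#   kind_to_py(str)  -> ('str', '(bytes, str)', True)
#   kind_to_py(None) -> ('None', '', False)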
def strcast(kind, keep_builtins=False):
if (kind in basic_types or
type(kind) in basic_types) and keep_builtins is False:
return kind.__name__
if str(kind).startswith('~'):
return str(kind)[1:]
if kind is typing.Any:
return 'Any'
try:
if issubclass(kind, typing.GenericMeta):
return str(kind)[1:]
except AttributeError:
pass
return kind
class Args(list):
def __init__(self, schema, defs):
self.schema = schema
self.defs = defs
if defs:
rtypes = schema.registry.getObj(schema.types[defs])
if len(rtypes) == 1:
if not self.do_explode(rtypes[0][1]):
for name, rtype in rtypes:
self.append((name, rtype))
else:
for name, rtype in rtypes:
self.append((name, rtype))
def do_explode(self, kind):
if kind in basic_types or type(kind) is typing.TypeVar:
return False
if typing_inspect.is_generic_type(kind) and issubclass(typing_inspect.get_origin(kind), Sequence):
return False
if typing_inspect.is_generic_type(kind) and issubclass(typing_inspect.get_origin(kind), Mapping):
return False
self.clear()
self.extend(Args(self.schema, kind))
return True
def PyToSchemaMapping(self):
m = {}
for n, rt in self:
m[name_to_py(n)] = n
return m
def SchemaToPyMapping(self):
m = {}
for n, tr in self:
m[n] = name_to_py(n)
return m
def _format(self, name, rtype, typed=True):
if typed:
return "{} : {}".format(
name_to_py(name),
strcast(rtype)
)
else:
return name_to_py(name)
def _get_arg_str(self, typed=False, joined=", "):
if self:
parts = []
for item in self:
parts.append(self._format(item[0], item[1], typed))
if joined:
return joined.join(parts)
return parts
return ''
def as_kwargs(self):
if self:
parts = []
for item in self:
var_name = name_to_py(item[0])
var_type = var_type_to_py(item[1])
parts.append('{}={}'.format(var_name, var_type))
return ', '.join(parts)
return ''
def as_validation(self):
"""
as_validation returns a series of validation statements for every item
        in the Args.
"""
parts = []
for item in self:
var_name = name_to_py(item[0])
var_type, var_sub_type, ok = kind_to_py(item[1])
if ok:
parts.append(buildValidation(var_name, var_type, var_sub_type))
return '\n'.join(parts)
def typed(self):
return self._get_arg_str(True)
def __str__(self):
return self._get_arg_str(False)
def get_doc(self):
return self._get_arg_str(True, "\n")
def buildValidation(name, instance_type, instance_sub_type, ident=None):
INDENT = ident or " "
source = """{ident}if {name} is not None and not isinstance({name}, {instance_sub_type}):
{ident} raise Exception("Expected {name} to be a {instance_type}, received: {{}}".format(type({name})))
""".format(ident=INDENT,
name=name,
instance_type=instance_type,
instance_sub_type=instance_sub_type)
return source
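# For illustration (not in the original source): with the default two-space indent,
# buildValidation("foo_", "str", "(bytes, str)") emits roughly
#   if foo_ is not None and not isinstance(foo_, (bytes, str)):
#       raise Exception("Expected foo_ to be a str, received: {}".format(type(foo_)))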
def buildTypes(schema, capture):
INDENT = " "
for kind in sorted((k for k in schema.types if not isinstance(k, str)),
key=lambda x: str(x)):
name = schema.types[kind]
if name in capture and name not in NAUGHTY_CLASSES:
continue
args = Args(schema, kind)
# Write Factory class for _client.py
make_factory(name)
# Write actual class
source = ["""
class {}(Type):
_toSchema = {}
_toPy = {}
def __init__(self{}{}, **unknown_fields):
'''
{}
'''""".format(
name,
# pprint these to get stable ordering across regens
pprint.pformat(args.PyToSchemaMapping(), width=999),
pprint.pformat(args.SchemaToPyMapping(), width=999),
", " if args else "",
args.as_kwargs(),
textwrap.indent(args.get_doc(), INDENT * 2))]
if not args:
source.append("{}self.unknown_fields = unknown_fields".format(INDENT * 2))
else:
# do the validation first, before setting the variables
for arg in args:
arg_name = name_to_py(arg[0])
arg_type = arg[1]
arg_type_name = strcast(arg_type)
if arg_type in basic_types or arg_type is typing.Any:
source.append("{}{}_ = {}".format(INDENT * 2,
arg_name,
arg_name))
elif type(arg_type) is typing.TypeVar:
source.append("{}{}_ = {}.from_json({}) "
"if {} else None".format(INDENT * 2,
arg_name,
arg_type_name,
arg_name,
arg_name))
elif typing_inspect.is_generic_type(arg_type) and issubclass(typing_inspect.get_origin(arg_type), Sequence):
parameters = typing_inspect.get_parameters(arg_type)
value_type = (
parameters[0]
if len(parameters)
else None
)
if type(value_type) is typing.TypeVar:
source.append(
"{}{}_ = [{}.from_json(o) "
"for o in {} or []]".format(INDENT * 2,
arg_name,
strcast(value_type),
arg_name))
else:
source.append("{}{}_ = {}".format(INDENT * 2,
arg_name,
arg_name))
elif typing_inspect.is_generic_type(arg_type) and issubclass(typing_inspect.get_origin(arg_type), Mapping):
parameters = typing_inspect.get_parameters(arg_type)
value_type = (
parameters[0]
if len(parameters)
else None
)
if type(value_type) is typing.TypeVar:
source.append(
"{}{}_ = {{k: {}.from_json(v) "
"for k, v in ({} or dict()).items()}}".format(
INDENT * 2,
arg_name,
strcast(value_type),
arg_name))
else:
source.append("{}{}_ = {}".format(INDENT * 2,
arg_name,
arg_name))
else:
source.append("{}{}_ = {}".format(INDENT * 2,
arg_name,
arg_name))
if len(args) > 0:
source.append('\n{}# Validate arguments against known Juju API types.'.format(INDENT * 2))
for arg in args:
arg_name = "{}_".format(name_to_py(arg[0]))
arg_type, arg_sub_type, ok = kind_to_py(arg[1])
if ok:
source.append('{}'.format(buildValidation(arg_name,
arg_type,
arg_sub_type,
ident=INDENT * 2)))
for arg in args:
arg_name = name_to_py(arg[0])
source.append('{}self.{} = {}_'.format(INDENT * 2, arg_name, arg_name))
# Ensure that we take the kwargs (unknown_fields) and put it on the
# Results/Params so we can inspect it.
source.append("{}self.unknown_fields = unknown_fields".format(INDENT * 2))
source = "\n".join(source)
capture.clear(name)
capture[name].write(source)
capture[name].write("\n\n")
co = compile(source, __name__, "exec")
ns = _getns(schema)
exec(co, ns)
cls = ns[name]
CLASSES[name] = cls
def retspec(schema, defs):
    # Return specs: only one value is returned, so if there were more than one
    # type we would need to include a union. In truth there is only one return:
    # either Error or the expected Type.
if not defs:
return None
if defs in basic_types:
return strcast(defs, False)
return strcast(defs, False)
def ReturnMapping(cls):
# Annotate the method with a return Type
# so the value can be cast
def decorator(f):
@functools.wraps(f)
async def wrapper(*args, **kwargs):
nonlocal cls
reply = await f(*args, **kwargs)
if cls is None:
return reply
if 'error' in reply:
cls = CLASSES['Error']
if typing_inspect.is_generic_type(cls) and issubclass(typing_inspect.get_origin(cls), Sequence):
parameters = typing_inspect.get_parameters(cls)
result = []
item_cls = parameters[0]
for item in reply:
result.append(item_cls.from_json(item))
"""
if 'error' in item:
cls = CLASSES['Error']
else:
cls = item_cls
result.append(cls.from_json(item))
"""
else:
result = cls.from_json(reply['response'])
return result
return wrapper
return decorator
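# Editor's sketch (not in the original): ReturnMapping is applied to every generated
# facade method; conceptually the usage looks like
#
#     @ReturnMapping(MachineResults)            # hypothetical generated result class
#     async def SomeCall(self, ...):
#         reply = await self.rpc(msg)
#         return reply
#
# The wrapper then returns MachineResults.from_json(reply['response']) (or a list of
# decoded items for Sequence return types), substituting CLASSES['Error'] first when
# the reply carries an 'error' key.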
def makeFunc(cls, name, description, params, result, _async=True):
INDENT = " "
args = Args(cls.schema, params)
assignments = []
toschema = args.PyToSchemaMapping()
for arg in args._get_arg_str(False, False):
assignments.append("{}_params[\'{}\'] = {}".format(INDENT,
toschema[arg],
arg))
assignments = "\n".join(assignments)
res = retspec(cls.schema, result)
source = """
@ReturnMapping({rettype})
{_async}def {name}(self{argsep}{args}):
'''
{docstring}
Returns -> {res}
'''
{validation}
# map input types to rpc msg
_params = dict()
msg = dict(type='{cls.name}',
request='{name}',
version={cls.version},
params=_params)
{assignments}
reply = {_await}self.rpc(msg)
return reply
"""
if description != "":
description = "{}\n\n".format(description)
doc_string = "{}{}".format(description, args.get_doc())
fsource = source.format(_async="async " if _async else "",
name=name,
argsep=", " if args else "",
args=args.as_kwargs(),
res=res,
validation=args.as_validation(),
rettype=result.__name__ if result else None,
docstring=textwrap.indent(doc_string, INDENT),
cls=cls,
assignments=assignments,
_await="await " if _async else "")
ns = _getns(cls.schema)
exec(fsource, ns)
func = ns[name]
return func, fsource
def buildMethods(cls, capture):
properties = cls.schema['properties']
for methodname in sorted(properties):
method, source = _buildMethod(cls, methodname)
setattr(cls, methodname, method)
capture["{}Facade".format(cls.__name__)].write(source, depth=1)
def _buildMethod(cls, name):
params = None
result = None
method = cls.schema['properties'][name]
description = ""
if 'description' in method:
description = method['description']
if 'properties' in method:
prop = method['properties']
spec = prop.get('Params')
if spec:
params = cls.schema.types.get(spec['$ref'])
spec = prop.get('Result')
if spec:
if '$ref' in spec:
result = cls.schema.types.get(spec['$ref'])
else:
result = SCHEMA_TO_PYTHON[spec['type']]
return makeFunc(cls, name, description, params, result)
def buildFacade(schema):
cls = type(schema.name, (Type,), dict(name=schema.name,
version=schema.version,
schema=schema))
source = """
class {name}Facade(Type):
name = '{name}'
version = {version}
schema = {schema}
""".format(name=schema.name,
version=schema.version,
schema=textwrap.indent(pprint.pformat(schema), " "))
return cls, source
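# Editor's sketch (not in the original): for a hypothetical schema named 'Client'
# at version 2, buildFacade returns a dynamically built class plus source text like
#
#     class ClientFacade(Type):
#         name = 'Client'
#         version = 2
#         schema = {...}      # pretty-printed full schema dict
#
# buildMethods() then attaches one generated method per entry in schema['properties'].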
class TypeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Type):
return obj.serialize()
return json.JSONEncoder.default(self, obj)
class Type:
def connect(self, connection):
self.connection = connection
def __repr__(self):
return "{}({})".format(self.__class__, self.__dict__)
def __eq__(self, other):
if not isinstance(other, Type):
return NotImplemented
return self.__dict__ == other.__dict__
async def rpc(self, msg):
result = await self.connection.rpc(msg, encoder=TypeEncoder)
return result
@classmethod
def from_json(cls, data):
if isinstance(data, cls):
return data
if isinstance(data, str):
try:
data = json.loads(data)
except json.JSONDecodeError:
raise
d = {}
for k, v in (data or {}).items():
d[cls._toPy.get(k, k)] = v
try:
return cls(**d)
except TypeError:
raise
def serialize(self):
d = {}
for attr, tgt in self._toSchema.items():
d[tgt] = getattr(self, attr)
return d
def to_json(self):
return json.dumps(self.serialize(), cls=TypeEncoder, sort_keys=True)
# treat subscript gets as JSON representation
def __getitem__(self, key):
attr = self._toPy[key]
return getattr(self, attr)
# treat subscript sets as JSON representation
def __setitem__(self, key, value):
attr = self._toPy[key]
setattr(self, attr, value)
# legacy: generated definitions used to not correctly
# create typed objects and would use dict instead (from JSON)
# so we emulate some dict methods.
def get(self, key, default=None):
try:
attr = self._toPy[key]
except KeyError:
return default
return getattr(self, attr, default)
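# Editor's sketch (not in the original): subclasses generated by buildTypes() carry
# _toSchema/_toPy tables, so a round trip looks roughly like
#
#     class Point(Type):                       # hypothetical hand-written example
#         _toSchema = {'x': 'X', 'y': 'Y'}
#         _toPy = {'X': 'x', 'Y': 'y'}
#         def __init__(self, x=None, y=None, **unknown_fields):
#             self.x = x
#             self.y = y
#
#     p = Point.from_json({'X': 1, 'Y': 2})    # keys renamed via _toPy
#     p.serialize()                            # -> {'X': 1, 'Y': 2} via _toSchema
#     p['X']                                   # -> 1, subscript access uses _toPy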
class Schema(dict):
def __init__(self, schema):
self.name = schema['Name']
self.version = schema['Version']
self.update(schema['Schema'])
self.registry = KindRegistry()
self.types = TypeRegistry(self)
def referenceName(self, ref):
if ref.startswith("#/definitions/"):
ref = ref.rsplit("/", 1)[-1]
return ref
def buildDefinitions(self):
        # Here we build the types out: anything in definitions is a type, but
        # these may contain references themselves, so we DFS to the bottom and
        # build upwards, skipping any type that is already in the registry.
defs = self.get('definitions')
if not defs:
return
definitions = {}
for d, data in defs.items():
if d in self.registry and d not in NAUGHTY_CLASSES:
continue
if data.get("type") != "object":
continue
definitions[d] = data
for d, definition in definitions.items():
node = self.buildObject(definition, d)
self.registry.register(d, self.version, node)
self.types.getRefType(d)
def buildObject(self, node, name=None):
# we don't need to build types recursively here
# they are all in definitions already
# we only want to include the type reference
# which we can derive from the name
struct = []
add = struct.append
props = node.get("properties")
pprops = node.get("patternProperties")
if props:
# Sort these so the __init__ arg list for each Type remains
# consistently ordered across regens of client.py
for p in sorted(props):
prop = props[p]
if "$ref" in prop:
add((p, self.types.refType(prop)))
else:
kind = prop['type']
if kind == "array":
add((p, self.buildArray(prop)))
elif kind == "object":
struct.extend(self.buildObject(prop, p))
else:
add((p, self.types.objType(prop)))
if pprops:
if ".*" not in pprops:
raise ValueError(
"Cannot handle actual pattern in patternProperties %s" %
pprops)
pprop = pprops[".*"]
if "$ref" in pprop:
add((name, Mapping[str, self.types.refType(pprop)]))
return struct
ppkind = pprop["type"]
if ppkind == "array":
add((name, Mapping[str, self.buildArray(pprop)]))
else:
add((name, Mapping[str, SCHEMA_TO_PYTHON[ppkind]]))
if not struct and node.get('additionalProperties', False):
add((name, SCHEMA_TO_PYTHON.get('object')))
return struct
def buildArray(self, obj):
# return a sequence from an array in the schema
if "$ref" in obj:
return Sequence[self.types.refType(obj)]
else:
kind = obj.get("type")
if kind and kind == "array":
items = obj['items']
return self.buildArray(items)
else:
return Sequence[self.types.objType(obj)]
def _getns(schema):
ns = {'Type': Type,
'typing': typing,
'ReturnMapping': ReturnMapping
}
# Copy our types into the globals of the method
for facade in schema.registry:
ns[facade] = schema.registry.getObj(facade)
return ns
def make_factory(name):
if name in factories:
del factories[name]
factories[name].write("class {}(TypeFactory):\n pass\n\n".format(name))
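# Editor's note (not in the original): make_factory only emits a stub into the
# `factories` capture; e.g. for the name "ClientFacade" (illustrative) the written
# text is simply
#
#     class ClientFacade(TypeFactory):
#         pass
#
# which write_client() later dumps into _client.py so callers can look up the
# versioned facade implementations.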
def write_facades(captures, options):
"""
Write the Facades to the appropriate _client<version>.py
"""
for version in sorted(captures.keys()):
filename = "{}/_client{}.py".format(options.output_dir, version)
with open(filename, "w") as f:
f.write(HEADER)
f.write("from juju.client.facade import Type, ReturnMapping\n")
f.write("from juju.client._definitions import *\n\n")
for key in sorted(
[k for k in captures[version].keys() if "Facade" in k]):
print(captures[version][key], file=f)
# Return the last (most recent) version for use in other routines.
return version
def write_definitions(captures, options):
"""
    Write auxiliary (non-versioned) classes to
    _definitions.py. The auxiliary classes currently get
    written redundantly into each capture object, so we can look in
    one of them -- we just use the last one from the loop above.
"""
with open("{}/_definitions.py".format(options.output_dir), "w") as f:
f.write(HEADER)
f.write("from juju.client.facade import Type, ReturnMapping\n\n")
for key in sorted(
[k for k in captures.keys() if "Facade" not in k]):
print(captures[key], file=f)
def write_client(captures, options):
"""
Write the TypeFactory classes to _client.py, along with some
imports and tables so that we can look up versioned Facades.
"""
with open("{}/_client.py".format(options.output_dir), "w") as f:
f.write(HEADER)
f.write("from juju.client._definitions import *\n\n")
clients = ", ".join("_client{}".format(v) for v in captures)
f.write("from juju.client import " + clients + "\n\n")
f.write(CLIENT_TABLE.format(clients=",\n ".join(
['"{}": _client{}'.format(v, v) for v in captures])))
f.write(LOOKUP_FACADE)
f.write(TYPE_FACTORY)
for key in sorted([k for k in factories.keys() if "Facade" in k]):
print(factories[key], file=f)
def generate_definitions(schemas):
    # Build all of the auxiliary (unversioned) classes
# TODO: get rid of some of the excess trips through loops in the
# called functions.
definitions = codegen.Capture()
for juju_version in sorted(schemas.keys()):
for schema in schemas[juju_version]:
schema.buildDefinitions()
# ensure we write the latest ones first, so that earlier revisions
# get dropped.
for juju_version in sorted(schemas.keys(), reverse=True):
for schema in schemas[juju_version]:
buildTypes(schema, definitions)
return definitions
def generate_facades(schemas):
captures = defaultdict(codegen.Capture)
# Build the Facade classes
for juju_version in sorted(schemas.keys()):
for schema in schemas[juju_version]:
cls, source = buildFacade(schema)
cls_name = "{}Facade".format(schema.name)
captures[schema.version].clear(cls_name)
# Make the factory class for _client.py
make_factory(cls_name)
# Make the actual class
captures[schema.version][cls_name].write(source)
# Build the methods for each Facade class.
buildMethods(cls, captures[schema.version])
# Mark this Facade class as being done for this version --
# helps mitigate some excessive looping.
CLASSES[schema.name] = cls
return captures
def load_schemas(options):
schemas = {}
for p in sorted(glob(options.schema)):
if 'latest' in p:
juju_version = 'latest'
else:
try:
juju_version = re.search(JUJU_VERSION, p).group()
except AttributeError:
print("Cannot extract a juju version from {}".format(p))
print("Schemas must include a juju version in the filename")
raise SystemExit(1)
new_schemas = json.loads(Path(p).read_text("utf-8"))
schemas[juju_version] = [Schema(s) for s in new_schemas]
return schemas
def setup():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--schema", default="juju/client/schemas*")
parser.add_argument("-o", "--output_dir", default="juju/client")
options = parser.parse_args()
return options
def main():
options = setup()
schemas = load_schemas(options)
# Generate some text blobs
definitions = generate_definitions(schemas)
captures = generate_facades(schemas)
# ... and write them out
write_definitions(definitions, options)
write_facades(captures, options)
write_client(captures, options)
if __name__ == '__main__':
main()
|
apache-2.0
| -4,172,417,565,822,012,000
| 31.472885
| 124
| 0.52682
| false
| 4.21809
| false
| false
| false
|
artursmet/django-prices-openexchangerates
|
setup.py
|
1
|
1578
|
#! /usr/bin/env python
import os
from setuptools import setup
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_settings')
CLASSIFIERS = [
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules']
setup(
name='django-prices-openexchangerates',
author='Mirumee Software',
author_email='hello@mirumee.com',
description='openexchangerates.org support for django-prices',
license='BSD',
version='0.1.11',
    url='https://github.com/mirumee/django-prices-openexchangerates',
packages=[
'django_prices_openexchangerates',
'django_prices_openexchangerates.management',
'django_prices_openexchangerates.management.commands',
'django_prices_openexchangerates.migrations',
'django_prices_openexchangerates.templatetags'],
include_package_data=True,
classifiers=CLASSIFIERS,
install_requires=['Django>=1.4', 'django-prices>=0.3.4', 'prices>=0.5.2'],
platforms=['any'],
tests_require=['mock==1.0.1'],
test_suite='django_prices_openexchangerates.tests',
zip_safe=False)
|
bsd-3-clause
| 7,590,473,101,946,400,000
| 36.571429
| 78
| 0.676806
| false
| 3.730496
| false
| false
| false
|
Yangqing/caffe2
|
caffe2/python/layers/sparse_lookup.py
|
1
|
13787
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package sparse_lookup
# Module caffe2.python.layers.sparse_lookup
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.helpers.arg_scope import get_current_scope
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
get_key,
IdList,
IdScoreList,
LayerPsParam,
ModelLayer,
)
import collections
import functools
import math
import numpy as np
import operator
def get_sparse_lookup_predictor_version(version):
assert version in {'fp32', 'fp16', 'uint8rowwise', 'fused_uint8rowwise'},\
"Unexpected version of sparse_lookup layer {0}".format(version)
return version
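# Editor's note (not in the original): this is a pure validator/passthrough, e.g.
#
#     get_sparse_lookup_predictor_version('fp16')  # -> 'fp16'
#     get_sparse_lookup_predictor_version('int4')  # -> AssertionError
#
# add_ops() below pulls the requested version out of the current arg_scope and
# falls back to 'fp32' when 8-bit row-wise storage is not supported.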
def _is_id_list(input_record):
return schema.equal_schemas(input_record, IdList)
def _is_id_score_list(input_record):
return schema.equal_schemas(input_record,
IdScoreList,
check_field_types=False)
class SparseLookup(ModelLayer):
_id_list_supported_reducers = [
'LogMeanExp', 'LogSumExp', 'Max', 'Mean', 'Sum',
'WeightedSum', 'WeightedMean', 'Sqrt', 'None']
_id_score_list_supported_reducers = [
'PositionWeighted', 'Mean', 'Sum', 'WeightedSum', 'WeightedMean', 'None']
def __init__(self, model, input_record, inner_shape, reducer,
weight_init=None, weight_optim=None,
name='sparse_lookup', regularizer=None, **kwargs):
super(SparseLookup, self).__init__(model, name, input_record, **kwargs)
# TODO Add some asserts about input type
if isinstance(inner_shape, int):
inner_shape = [inner_shape]
assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
"Unexpected type for inner_shape, expected list or tuple, got {0}".\
format(type(inner_shape))
if reducer == "PositionWeighted":
assert _is_id_score_list(self.input_record), (
"PositionWeighted only support IdScoreList, but got {} " +
"please use PositionWeighted layer to convert IdList " +
"to IdScoreList").format(repr(self.input_record))
self.external_weights = input_record.values()
self.reducer = reducer
input_dim = get_categorical_limit(input_record)
assert input_dim > 0, (
"{} should have categorical limit > 0, but got {}".format(
get_key(input_record)(), input_dim))
scale = math.sqrt(1.0 / input_dim)
self.shape = [input_dim] + inner_shape
self.weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
if _is_id_list(self.input_record):
sparse_key = self.input_record.items()
elif _is_id_score_list(self.input_record):
sparse_key = self.input_record.keys()
else:
raise NotImplementedError()
if self.input_record.lengths.metadata:
avg_length = self.input_record.lengths.metadata.expected_value
else:
avg_length = None
self.w = self.create_param(
param_name='w',
shape=self.shape,
initializer=self.weight_init,
optimizer=weight_optim,
ps_param=LayerPsParam(
sparse_key=sparse_key,
average_length=avg_length),
regularizer=regularizer
)
self.scale_bias_init = ('ConstantFill', {'value': 0.0})
self.scale_bias = self.create_param(
param_name='scale_bias',
shape=[],
initializer=self.scale_bias_init,
optimizer=model.NoOptim,
)
self.output_schema = schema.Scalar(
(np.float32, inner_shape),
self.get_next_blob_reference('output'),
)
def get_memory_usage(self):
return functools.reduce(operator.mul, self.shape) * 4
def get_fp16_compatible_parameters(self):
return [self.w]
def support_8bit(self):
        # Rowwise quantization makes sense only if the shape is a 2D matrix
        # with second dimension >= 8
if len(self.shape) != 2 or self.shape[1] < 8:
return False
return True
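    # Editor's note (not in the original): with the default 2D embedding shape the
    # check above behaves like
    #
    #     shape == [10000, 64]  -> True    (row-wise 8-bit quantization possible)
    #     shape == [10000, 4]   -> False   (second dimension < 8)
    #     shape == [10000]      -> False   (not a 2D matrix)
    #
    # The concrete shapes are illustrative only.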
def get_8bits_compatible_parameters(self, fused=True):
if not self.support_8bit():
return []
if fused:
RowwiseQuantized8BitsWeight = collections.namedtuple(
'RowwiseQuantized8BitsWeight', 'w'
)
return [RowwiseQuantized8BitsWeight(self.w)]
else:
RowwiseQuantized8BitsWeight = collections.namedtuple(
'RowwiseQuantized8BitsWeight', 'w, scale_bias'
)
return [RowwiseQuantized8BitsWeight(self.w, self.scale_bias)]
def _gather_wrapper(self, net, version, in_indices, out):
# Gather can work on all kinds of input data types, and output
# data with the same type. Convert the output of Gather to float,
# because the follow-up Ops expect fp32.
if version == 'fp32':
return net.Gather([self.w, in_indices], out)
elif version == 'fp16':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.HalfToFloat(gathered_w, out)
elif version == 'uint8rowwise':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
gathered_scale_bias = net.Gather(
[self.scale_bias, in_indices],
'gathered_scale_bias'
)
return net.Rowwise8BitQuantizedToFloat(
[gathered_w, gathered_scale_bias], out)
elif version == 'fused_uint8rowwise':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.Fused8BitRowwiseQuantizedToFloat(gathered_w, out)
else:
raise "Unsupported version of operators in SparseLookup " +\
"layer: {0}".format(version)
def _sparse_lengths_weighted_reducer(
self, in_indices, weights, reducer,
net, version, grad_on_weights=0):
op_input = [
self.w,
weights,
in_indices,
self.input_record.lengths()
]
layer_name = 'SparseLengths' + reducer
if version in ['fp32', 'fp16']:
# SparseLengths* Ops will accept either fp16 or fp32 embedding
# matrix and output fp32 pooled embedding
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
grad_on_weights=grad_on_weights,
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
# deal with sparse features of id_list type
def _add_ops_id_list(self, net, version):
assert self.reducer in self._id_list_supported_reducers, (
"Unsupported reducer: {} for ID_LIST".format(self.reducer)
)
if self.reducer in ['Sum', 'Mean', 'WeightedSum', 'WeightedMean']:
op_input = [self.w,
self.input_record.items(),
self.input_record.lengths()]
# For id list features, the behaviors of 'Sum' and
# 'WeightedSum' are identical, since we can regard the weight on each
# id as 1. Similarly, for 'Mean' and 'WeightedMean'.
if self.reducer == 'WeightedSum':
self.reducer = 'Sum'
elif self.reducer == 'WeightedMean':
self.reducer = 'Mean'
layer_name = 'SparseLengths' + self.reducer
if version in ['fp32', 'fp16']:
# SparseLengths* Ops will accept either fp16 or fp32 embedding
# matrix and output fp32 pooled embedding
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
elif self.reducer == 'Sqrt':
sqrt_weight = net.LengthsToWeights(
[self.input_record.lengths()],
[net.NextScopedBlob('lengths_sqrt')],
power=0.5,
)
self._sparse_lengths_weighted_reducer(
self.input_record.items(),
sqrt_weight,
'WeightedSum', net, version)
elif self.reducer == 'None':
# Gather operator will gather the embedding for each id of
# each IdList.
self._gather_wrapper(net, version, self.input_record.items(),
self.output_schema.field_blobs())
else:
table_rows = self._gather_wrapper(
net, version, self.input_record.items(), 'table_rows')
segment_ids = net.LengthsToSegmentIds(
self.input_record.lengths(),
self.input_record.lengths() + '_sid')
net.__getattr__('SortedSegmentRange' + self.reducer)(
[table_rows, segment_ids],
self.output_schema.field_blobs(),
)
# deal with sparse features of id_score_list type
def _add_ops_id_score_list(self, net, version):
assert self.reducer in self._id_score_list_supported_reducers, (
"Unsupported reducer: {} for ID_SCORE_LIST".format(self.reducer)
)
if self.reducer in ['WeightedSum', 'WeightedMean']:
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.input_record.values(),
self.reducer, net, version)
elif self.reducer in ['Sum', 'Mean']:
op_input = [self.w,
self.input_record.keys(),
self.input_record.lengths()]
layer_name = 'SparseLengths' + self.reducer
if version in ['fp32', 'fp16']:
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
)
elif version == 'uint8rowwise':
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
elif self.reducer == 'PositionWeighted':
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.external_weights,
'WeightedSum', net, version, grad_on_weights=1)
elif self.reducer == 'None':
# Gather operator will gather the embedding for each id of
# each IdList.
self._gather_wrapper(net, version, self.input_record.keys(),
self.output_schema.field_blobs())
else:
raise "Only Sum, Mean, None are supported for IdScoreList input." +\
"Trying to create with {}".format(self.reducer)
def add_ops(self, net):
cur_scope = get_current_scope()
version = get_sparse_lookup_predictor_version(
**cur_scope.get(get_sparse_lookup_predictor_version.__name__,
{'version': 'fp32'}))
# TODO(amalevich): Layer should not be responsible for decision about
# quantization.
if not self.support_8bit() and version in {'uint8rowwise',
'fused_uint8rowwise'}:
version = 'fp32'
if _is_id_list(self.input_record):
self._add_ops_id_list(net, version=version)
elif _is_id_score_list(self.input_record):
self._add_ops_id_score_list(net, version=version)
else:
raise "Unsupported input type {0}".format(self.input_record)
|
apache-2.0
| 2,902,705,643,870,155,000
| 38.846821
| 81
| 0.565895
| false
| 4.013683
| false
| false
| false
|
odahoda/noisicaa
|
noisicaa/builtin_nodes/instrument/model.py
|
1
|
2817
|
#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import logging
from typing import Any, Optional, Callable
from noisicaa import core
from noisicaa import audioproc
from noisicaa import node_db
from noisicaa import instrument_db
from noisicaa.music import node_connector
from . import node_description
from . import processor_messages
from . import _model
logger = logging.getLogger(__name__)
class Connector(node_connector.NodeConnector):
_node = None # type: Instrument
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.__node_id = self._node.pipeline_node_id
self.__listeners = core.ListenerMap[str]()
self.add_cleanup_function(self.__listeners.cleanup)
def _init_internal(self) -> None:
self.__change_instrument(self._node.instrument_uri)
self.__listeners['instrument_uri'] = self._node.instrument_uri_changed.add(
lambda change: self.__change_instrument(change.new_value))
def __change_instrument(self, instrument_uri: str) -> None:
try:
instrument_spec = instrument_db.create_instrument_spec(instrument_uri)
except instrument_db.InvalidInstrumentURI as exc:
logger.error("Invalid instrument URI '%s': %s", instrument_uri, exc)
return
self._emit_message(processor_messages.change_instrument(
self.__node_id, instrument_spec))
class Instrument(_model.Instrument):
def create(self, *, instrument_uri: Optional[str] = None, **kwargs: Any) -> None:
super().create(**kwargs)
self.instrument_uri = instrument_uri
def create_node_connector(
self, message_cb: Callable[[audioproc.ProcessorMessage], None],
audioproc_client: audioproc.AbstractAudioProcClient,
) -> Connector:
return Connector(
node=self, message_cb=message_cb, audioproc_client=audioproc_client)
@property
def description(self) -> node_db.NodeDescription:
return node_description.InstrumentDescription
|
gpl-2.0
| 1,434,338,962,164,650,500
| 34.2125
| 85
| 0.699681
| false
| 3.934358
| false
| false
| false
|
cisalhante/matricula-o-matic
|
test_bench_disciplina.py
|
1
|
1134
|
__author__ = "William Batista Aguiar Motta"
__email__ = "william.b.motta@aluno.unb.br"
__license__ = "GPL"
import matricula_web_web_crawler as MT
import sys
if len(sys.argv)>1:
for a in sys.argv[1:]:
# print('\n')
D = MT.Disciplina(a)
print(D.codigo)
print(D.nome)
print(D.creditos)
print(D.departamento)
print(D.tipo)
print(D.periodo)
print(D.requisitos)
for t in D.turmas: # BUG
print(t.dias)
print(t.professores)
print(t.cod_disciplina)
print(t.id)
print(t.vagas)
print(t.ocupadas)
print(t.disponiveis)
print(t.locais)
else:
D = MT.Disciplina(167657)
print(D.codigo)
print(D.nome)
print(D.creditos)
print(D.departamento)
print(D.tipo)
print(D.periodo)
print(D.requisitos)
for t in D.turmas:
print(t.dias)
print(t.professores)
print(t.cod_disciplina)
print(t.id)
print(t.vagas)
print(t.ocupadas)
print(t.disponiveis)
print(t.locais)
|
gpl-3.0
| -4,359,458,083,531,792,400
| 23.652174
| 44
| 0.538801
| false
| 2.712919
| false
| false
| false
|
ptressel/sahana-eden-madpub
|
modules/s3/s3validators.py
|
1
|
20338
|
# -*- coding: utf-8 -*-
""" Custom Validators
@requires: U{B{I{gluon}} <http://web2py.com>}
@author: Fran Boon <fran[at]aidiq.com>
@author: Dominic König <dominic[at]aidiq.com>
@author: sunneach
@copyright: (c) 2010-2011 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["IS_LAT",
"IS_LON",
"IS_HTML_COLOUR",
"THIS_NOT_IN_DB",
"IS_UTC_OFFSET",
"IS_UTC_DATETIME",
"IS_ONE_OF",
"IS_ONE_OF_EMPTY",
"IS_NOT_ONE_OF",
"IS_ACL"]
import time
import uuid
import re
from datetime import datetime, timedelta
from gluon.validators import Validator, IS_MATCH, IS_NOT_IN_DB, IS_IN_SET
def options_sorter(x, y):
return (str(x[1]).upper() > str(y[1]).upper() and 1) or -1
class IS_LAT(object):
"""
example:
INPUT(_type="text", _name="name", requires=IS_LAT())
latitude has to be in degrees between -90 & 90
"""
def __init__(self,
error_message = "Latitude/Northing should be between -90 & 90!"):
self.minimum = -90
self.maximum = 90
self.error_message = error_message
def __call__(self, value):
try:
value = float(value)
if self.minimum <= value <= self.maximum:
return (value, None)
except ValueError:
pass
return (value, self.error_message)
class IS_LON(object):
"""
example:
INPUT(_type="text", _name="name" ,requires=IS_LON())
longitude has to be in degrees between -180 & 180
"""
def __init__(self,
error_message = "Longitude/Easting should be between -180 & 180!"):
self.minimum = -180
self.maximum = 180
self.error_message = error_message
def __call__(self, value):
try:
value = float(value)
if self.minimum <= value <= self.maximum:
return (value, None)
except ValueError:
pass
return (value, self.error_message)
# -----------------------------------------------------------------------------
class IS_HTML_COLOUR(IS_MATCH):
"""
example::
INPUT(_type="text", _name="name", requires=IS_HTML_COLOUR())
"""
def __init__(self, error_message="must be a 6 digit hex code!"):
IS_MATCH.__init__(self, "^[0-9a-fA-F]{6}$", error_message)
# -----------------------------------------------------------------------------
class THIS_NOT_IN_DB(object):
"""
    Currently unused since it doesn't quite work.
See: http://groups.google.com/group/web2py/browse_thread/thread/27b14433976c0540
"""
def __init__(self, dbset, field, this,
error_message = "value already in database!"):
if hasattr(dbset, "define_table"):
self.dbset = dbset()
else:
self.dbset = dbset
self.field = field
self.value = this
self.error_message = error_message
self.record_id = 0
def set_self_id(self, id):
self.record_id = id
def __call__(self, value):
tablename, fieldname = str(self.field).split(".")
field = self.dbset._db[tablename][fieldname]
rows = self.dbset(field == self.value).select(limitby=(0, 1))
if len(rows)>0 and str(rows[0].id) != str(self.record_id):
return (self.value, self.error_message)
return (value, None)
regex1 = re.compile("[\w_]+\.[\w_]+")
regex2 = re.compile("%\((?P<name>[^\)]+)\)s")
# IS_ONE_OF_EMPTY -------------------------------------------------------------------
# by sunneach 2010-02-03
# copy of nursix's IS_ONE_OF with removed 'options' method
class IS_ONE_OF_EMPTY(Validator):
"""
Filtered version of IS_IN_DB():
validates a given value as key of another table, filtered by the 'filterby'
field for one of the 'filter_opts' options (=a selective IS_IN_DB())
NB Filtering isn't active in GQL.
For the dropdown representation:
'label' can be a string template for the record, or a set of field
names of the fields to be used as option labels, or a function or lambda
to create an option label from the respective record (which has to return
a string, of course). The function will take the record as an argument
        No 'options' method, since this validator is designed to be used next to an
        Autocomplete field, so we don't download a large dropdown unnecessarily.
"""
def __init__(
self,
dbset,
field,
label=None,
filterby=None,
filter_opts=None,
error_message="invalid value!",
orderby=None,
groupby=None,
cache=None,
multiple=False,
zero="",
sort=False,
_and=None,
):
if hasattr(dbset, "define_table"):
self.dbset = dbset()
else:
self.dbset = dbset
self.field = field
(ktable, kfield) = str(self.field).split(".")
if not label:
label = "%%(%s)s" % kfield
if isinstance(label, str):
if regex1.match(str(label)):
label = "%%(%s)s" % str(label).split(".")[-1]
ks = regex2.findall(label)
if not kfield in ks:
ks += [kfield]
fields = ["%s.%s" % (ktable, k) for k in ks]
else:
ks = [kfield]
fields =[str(f) for f in self.dbset._db[ktable]]
self.fields = fields
self.label = label
self.ktable = ktable
if not kfield or not len(kfield):
self.kfield = "id"
else:
self.kfield = kfield
self.ks = ks
self.error_message = error_message
self.theset = None
self.orderby = orderby
self.groupby = groupby
self.cache = cache
self.multiple = multiple
self.zero = zero
self.sort = sort
self._and = _and
self.filterby = filterby
self.filter_opts = filter_opts
def set_self_id(self, id):
if self._and:
self._and.record_id = id
def build_set(self):
if self.ktable in self.dbset._db:
_table = self.dbset._db[self.ktable]
if self.dbset._db._dbname != "gql":
orderby = self.orderby or ", ".join(self.fields)
groupby = self.groupby
dd = dict(orderby=orderby, groupby=groupby, cache=self.cache)
if "deleted" in _table:
query = (_table["deleted"] == False)
else:
query = (_table["id"] > 0)
if self.filterby and self.filterby in _table:
if self.filter_opts:
query = query & (_table[self.filterby].belongs(self.filter_opts))
if not self.orderby:
dd.update(orderby=_table[self.filterby])
records = self.dbset(query).select(*self.fields, **dd)
else:
import contrib.gql
orderby = self.orderby\
or contrib.gql.SQLXorable("|".join([k for k in self.ks
if k != "id"]))
dd = dict(orderby=orderby, cache=self.cache)
records = \
self.dbset.select(self.dbset._db[self.ktable].ALL, **dd)
self.theset = [str(r[self.kfield]) for r in records]
#labels = []
label = self.label
try:
labels = map(label, records)
except TypeError:
if isinstance(label, str):
labels = map(lambda r: label % dict(r), records)
elif isinstance(label, (list, tuple)):
labels = map(lambda r: \
" ".join([r[l] for l in label if l in r]),
records)
elif hasattr(label, '__call__'):
# Is a function
labels = map(label, records)
elif "name" in _table:
labels = map(lambda r: r.name, records)
else:
labels = map(lambda r: r[self.kfield], records)
self.labels = labels
else:
self.theset = None
self.labels = None
#def options(self):
# "Removed as we don't want any options downloaded unnecessarily"
def __call__(self, value):
try:
_table = self.dbset._db[self.ktable]
deleted_q = ("deleted" in _table) and (_table["deleted"] == False) or False
filter_opts_q = False
if self.filterby and self.filterby in _table:
if self.filter_opts:
filter_opts_q = _table[self.filterby].belongs(self.filter_opts)
if self.multiple:
if isinstance(value, list):
values = value
elif isinstance(value, basestring) and \
value[0] == "|" and value[-1] == "|":
values = value[1:-1].split("|")
elif value:
values = [value]
else:
values = []
if self.theset:
if not [x for x in values if not x in self.theset]:
return ("|%s|" % "|".join(values), None)
else:
return (value, self.error_message)
else:
                    query = None
                    for v in values:
                        q = (_table[self.kfield] == v)
                        query = query is not None and query | q or q
if filter_opts_q != False:
query = query is not None and \
(filter_opts_q & (query)) or filter_opts_q
if deleted_q != False:
query = query is not None and \
(deleted_q & (query)) or deleted_q
if self.dbset(query).count() < 1:
return (value, self.error_message)
return ("|%s|" % "|".join(values), None)
elif self.theset:
if value in self.theset:
if self._and:
return self._and(value)
else:
return (value, None)
else:
values = [value]
query = None
for v in values:
q = (_table[self.kfield] == v)
query = query is not None and query | q or q
if filter_opts_q != False:
query = query is not None and \
(filter_opts_q & (query)) or filter_opts_q
if deleted_q != False:
query = query is not None and \
(deleted_q & (query)) or deleted_q
if self.dbset(query).count():
if self._and:
return self._and(value)
else:
return (value, None)
except:
pass
return (value, self.error_message)
# IS_ONE_OF -------------------------------------------------------------------
# added 2009-08-23 by nursix
# converted to subclass 2010-02-03 by sunneach: NO CHANGES in the method bodies
class IS_ONE_OF(IS_ONE_OF_EMPTY):
"""
Extends IS_ONE_OF_EMPTY by restoring the 'options' method.
"""
def options(self):
self.build_set()
items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)]
if self.sort:
items.sort(options_sorter)
if self.zero != None and not self.multiple:
items.insert(0,("", self.zero))
return items
# -----------------------------------------------------------------------------
class IS_NOT_ONE_OF(IS_NOT_IN_DB):
"""
Filtered version of IS_NOT_IN_DB()
- understands the 'deleted' field.
- makes the field unique (amongst non-deleted field)
Example:
- INPUT(_type="text", _name="name", requires=IS_NOT_ONE_OF(db, db.table))
"""
def __call__(self, value):
if value in self.allowed_override:
return (value, None)
(tablename, fieldname) = str(self.field).split(".")
_table = self.dbset._db[tablename]
field = _table[fieldname]
query = (field == value)
if "deleted" in _table:
query = (_table["deleted"] == False) & query
rows = self.dbset(query).select(limitby=(0, 1))
if len(rows) > 0 and str(rows[0].id) != str(self.record_id):
return (value, self.error_message)
return (value, None)
# -----------------------------------------------------------------------------
class IS_UTC_OFFSET(Validator):
"""
Validates a given string value as UTC offset in the format +/-HHMM
@author: nursix
@param error_message: the error message to be returned
@note:
all leading parts of the string (before the trailing offset specification)
will be ignored and replaced by 'UTC ' in the return value, if the string
passes through.
"""
def __init__(self,
error_message="invalid UTC offset!"
):
self.error_message = error_message
@staticmethod
def get_offset_value(offset_str):
if offset_str and len(offset_str) >= 5 and \
(offset_str[-5] == "+" or offset_str[-5] == "-") and \
offset_str[-4:].isdigit():
offset_hrs = int(offset_str[-5] + offset_str[-4:-2])
offset_min = int(offset_str[-5] + offset_str[-2:])
offset = 3600*offset_hrs + 60*offset_min
return offset
else:
return None
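    # Editor's note (not in the original): a worked example of the arithmetic above,
    # using illustrative inputs:
    #
    #     get_offset_value("UTC +0530")  ->  3600*5  + 60*30  =  19800 seconds
    #     get_offset_value("UTC -0530")  ->  3600*-5 + 60*-30 = -19800 seconds
    #     get_offset_value("foo")        ->  None (no trailing +/-HHMM)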
def __call__(self,value):
if value and isinstance(value, str):
_offset_str = value.strip()
offset = self.get_offset_value(_offset_str)
if offset is not None and offset>-86340 and offset <86340:
# Add a leading 'UTC ',
# otherwise leading '+' and '0' will be stripped away by web2py
return ("UTC " + _offset_str[-5:], None)
return (value, self.error_message)
# -----------------------------------------------------------------------------
#
class IS_UTC_DATETIME(Validator):
"""
Validates a given value as datetime string and returns the corresponding
UTC datetime.
Example:
- INPUT(_type="text", _name="name", requires=IS_UTC_DATETIME())
@author: nursix
@param format: strptime/strftime format template string, for
directives refer to your strptime implementation
@param error_message: dict of error messages to be returned
@param utc_offset: offset to UTC in seconds, if not specified, the value
is considered to be UTC
@param allow_future: whether future date/times are allowed or not, if
set to False, all date/times beyond now+max_future
will fail
@type allow_future: boolean
@param max_future: the maximum acceptable future time interval in
seconds from now for unsynchronized local clocks
@note:
datetime has to be in the ISO8960 format YYYY-MM-DD hh:mm:ss, with an
optional trailing UTC offset specified as +/-HHMM (+ for eastern, - for
western timezones)
"""
isodatetime = "%Y-%m-%d %H:%M:%S"
def __init__(self,
format=None,
error_message=None,
utc_offset=None,
allow_future=True,
max_future=900):
self.format = format or self.isodatetime
self.error_message = dict(
format = "Required format: YYYY-MM-DD HH:MM:SS!",
offset = "Invalid UTC offset!",
future = "Future times not allowed!")
if error_message and isinstance(error_message, dict):
self.error_message["format"] = error_message.get("format", None) or self.error_message["format"]
self.error_message["offset"] = error_message.get("offset", None) or self.error_message["offset"]
self.error_message["future"] = error_message.get("future", None) or self.error_message["future"]
elif error_message:
self.error_message["format"] = error_message
validate = IS_UTC_OFFSET()
offset, error = validate(utc_offset)
if error:
self.utc_offset = "UTC +0000" # fallback to UTC
else:
self.utc_offset = offset
self.allow_future = allow_future
self.max_future = max_future
def __call__(self, value):
_dtstr = value.strip()
if len(_dtstr) > 6 and \
(_dtstr[-6:-4] == " +" or _dtstr[-6:-4] == " -") and \
_dtstr[-4:].isdigit():
# UTC offset specified in dtstr
dtstr = _dtstr[0:-6]
_offset_str = _dtstr[-5:]
else:
# use default UTC offset
dtstr = _dtstr
_offset_str = self.utc_offset
offset_hrs = int(_offset_str[-5] + _offset_str[-4:-2])
offset_min = int(_offset_str[-5] + _offset_str[-2:])
offset = 3600 * offset_hrs + 60 * offset_min
# Offset must be in range -1439 to +1439 minutes
if offset < -86340 or offset > 86340:
            return (value, self.error_message["offset"])
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = time.strptime(dtstr, str(self.format))
dt = datetime(y, m, d, hh, mm, ss)
except:
try:
(y, m, d, hh, mm, ss, t0, t1, t2) = time.strptime(dtstr+":00", str(self.format))
dt = datetime(y, m, d, hh, mm, ss)
except:
return(value, self.error_message["format"])
if self.allow_future:
return (dt, None)
else:
latest = datetime.utcnow() + timedelta(seconds=self.max_future)
dt_utc = dt - timedelta(seconds=offset)
if dt_utc > latest:
return (dt_utc, self.error_message["future"])
else:
return (dt_utc, None)
def formatter(self, value):
# Always format with trailing UTC offset
return value.strftime(str(self.format)) + " +0000"
# -----------------------------------------------------------------------------
class IS_ACL(IS_IN_SET):
"""
Validator for ACLs
        @attention: Incomplete! Does not validate yet, but just converts.
@author: Dominic König <dominic@aidiq.com>
"""
def __call__(self, value):
"""
Validation
@param value: the value to validate
"""
if not isinstance(value, (list, tuple)):
value = [value]
acl = 0x0000
for v in value:
try:
flag = int(v)
except (ValueError, TypeError):
flag = 0x0000
else:
acl |= flag
return (acl, None)
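        # Editor's note (not in the original): the call above just ORs the submitted
        # flags together; e.g. a submitted value of ["1", "2", "4"] converts to
        # (0x1 | 0x2 | 0x4, None) == (7, None), and any unparseable flag contributes 0x0000.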
# -----------------------------------------------------------------------------
|
mit
| 7,582,023,155,462,627,000
| 33.526316
| 132
| 0.511703
| false
| 4.105794
| false
| false
| false
|
fakusb/FiVES-Nao-Visualisation
|
WebClient/resources/models/v11/modify.py
|
1
|
21395
|
# vim: fenc=utf-8 foldmethod=marker
# call this script with nao-dummy.html as argument.
import os
import sys
import math
from subprocess import call, Popen, PIPE
if not len(sys.argv) > 1:
print("No file argument given.")
sys.exit()
infile = sys.argv[1]
if not os.path.isfile(infile):
print("No valid file argument given.")
sys.exit()
vals = {}
HeadYaw = 42
HeadPitch = -23
RShoulderPitch = -50
RShoulderRoll = -50
RElbowRoll = 60
RElbowYaw = 45
RWristYaw = 68
RHand = 0
LShoulderPitch = 0
LShoulderRoll = 0
LElbowYaw = 0
LElbowRoll = -40
LWristYaw = 50
LHand = 0
RHipYawPitch = -65
RHipPitch = -19
RHipRoll = 13
RKneePitch = 55
RAnklePitch = -16
RAnkleRoll = 0
LHipYawPitch = -65
LHipPitch = 0
LHipRoll = 23
LKneePitch = 0
LAnklePitch = 13
LAnkleRoll = -23
fmtstr = "{:.6f}"
# chest & head {{{
vals['chest_1_1'] = '0.010000'
vals['chest_1_2'] = '0.000000'
vals['chest_1_3'] = '0.000000'
vals['chest_2_1'] = '0.000000'
vals['chest_2_2'] = '0.010000'
vals['chest_2_3'] = '0.000000'
vals['chest_3_1'] = '0.000000'
vals['chest_3_2'] = '0.000000'
vals['chest_3_3'] = '0.010000'
vals['neck_1_1'] = fmtstr.format(math.cos(math.radians(-HeadYaw)))#'1.000000'
vals['neck_1_2'] = fmtstr.format(-math.sin(math.radians(-HeadYaw)))#'0.000000'
vals['neck_1_3'] = '0.000000'
vals['neck_2_1'] = fmtstr.format(math.sin(math.radians(-HeadYaw)))#'0.000000'
vals['neck_2_2'] = fmtstr.format(math.cos(math.radians(-HeadYaw)))#'1.000000'
vals['neck_2_3'] = '0.000000'
vals['neck_3_1'] = '0.000000'
vals['neck_3_2'] = '0.000000'
vals['neck_3_3'] = '1.000000'
vals['head_1_1'] = fmtstr.format(math.cos(math.radians(-HeadPitch)))#'1.000000'
vals['head_1_2'] = '0.000000'
vals['head_1_3'] = fmtstr.format(math.sin(math.radians(-HeadPitch)))#'0.000000'
vals['head_2_1'] = '0.000000'
vals['head_2_2'] = '1.000000'
vals['head_2_3'] = '0.000000'
vals['head_3_1'] = fmtstr.format(-math.sin(math.radians(-HeadPitch)))#'0.000000'
vals['head_3_2'] = '0.000000'
vals['head_3_3'] = fmtstr.format(math.cos(math.radians(-HeadPitch)))#'1.000000'
# }}}
# right arm {{{
vals['rshoulder_1_1'] = fmtstr.format(math.cos(math.radians(-RShoulderPitch)))#'1.000000'
vals['rshoulder_1_2'] = '0.000000'
vals['rshoulder_1_3'] = fmtstr.format(math.sin(math.radians(-RShoulderPitch)))#'0.000000'
vals['rshoulder_2_1'] = '0.000000'
vals['rshoulder_2_2'] = '1.000000'
vals['rshoulder_2_3'] = '0.000000'
vals['rshoulder_3_1'] = fmtstr.format(-math.sin(math.radians(-RShoulderPitch)))#'0.000000'
vals['rshoulder_3_2'] = '0.000000'
vals['rshoulder_3_3'] = fmtstr.format(math.cos(math.radians(-RShoulderPitch)))#'1.000000'
vals['rbiceps_1_1'] = fmtstr.format(math.cos(math.radians(-RShoulderRoll)))#'1.000000'
vals['rbiceps_1_2'] = fmtstr.format(-math.sin(math.radians(-RShoulderRoll)))#'0.000000'
vals['rbiceps_1_3'] = '0.000000'
vals['rbiceps_2_1'] = fmtstr.format(math.sin(math.radians(-RShoulderRoll)))#'0.000000'
vals['rbiceps_2_2'] = fmtstr.format(math.cos(math.radians(-RShoulderRoll)))#'1.000000'
vals['rbiceps_2_3'] = '0.000000'
vals['rbiceps_3_1'] = '0.000000'
vals['rbiceps_3_2'] = '0.000000'
vals['rbiceps_3_3'] = '1.000000'
rym11 = 1.0
rym12 = 0.0
rym13 = 0.0
rym21 = 0.0
rym22 = math.cos(math.radians(-RElbowYaw))
rym23 = -math.sin(math.radians(-RElbowYaw))
rym31 = 0.0
rym32 = math.sin(math.radians(-RElbowYaw))
rym33 = math.cos(math.radians(-RElbowYaw))
rrm11 = math.cos(math.radians(-RElbowRoll))
rrm12 = -math.sin(math.radians(-RElbowRoll))
rrm13 = 0.0
rrm21 = math.sin(math.radians(-RElbowRoll))
rrm22 = math.cos(math.radians(-RElbowRoll))
rrm23 = 0.0
rrm31 = 0.0
rrm32 = 0.0
rrm33 = 1.0
# first yaw, then roll
vals['rforearm_1_1'] = fmtstr.format(rrm11*rym11+rrm12*rym21+rrm13*rym31)###'1.000000'
vals['rforearm_1_2'] = fmtstr.format(rrm11*rym12+rrm12*rym22+rrm13*rym32)###'0.000000'
vals['rforearm_1_3'] = fmtstr.format(rrm11*rym13+rrm12*rym23+rrm13*rym33)###'0.000000'
vals['rforearm_2_1'] = fmtstr.format(rrm21*rym11+rrm22*rym21+rrm23*rym31)###'0.000000'
vals['rforearm_2_2'] = fmtstr.format(rrm21*rym12+rrm22*rym22+rrm23*rym32)###'1.000000'
vals['rforearm_2_3'] = fmtstr.format(rrm21*rym13+rrm22*rym23+rrm23*rym33)###'0.000000'
vals['rforearm_3_1'] = fmtstr.format(rrm31*rym11+rrm32*rym21+rrm33*rym31)###'0.000000'
vals['rforearm_3_2'] = fmtstr.format(rrm31*rym12+rrm32*rym22+rrm33*rym32)###'0.000000'
vals['rforearm_3_3'] = fmtstr.format(rrm31*rym13+rrm32*rym23+rrm33*rym33)###'1.000000'
vals['rhand_1_1'] = '1.000000'
vals['rhand_1_2'] = '0.000000'
vals['rhand_1_3'] = '0.000000'
vals['rhand_2_1'] = '0.000000'
vals['rhand_2_2'] = fmtstr.format(math.cos(math.radians(-RWristYaw)))#'1.000000'
vals['rhand_2_3'] = fmtstr.format(-math.sin(math.radians(-RWristYaw)))#'0.000000'
vals['rhand_3_1'] = '0.000000'
vals['rhand_3_2'] = fmtstr.format(math.sin(math.radians(-RWristYaw)))#'0.000000'
vals['rhand_3_3'] = fmtstr.format(math.cos(math.radians(-RWristYaw)))#'1.000000'
vals['rphalanx7_1_1'] = '1.000000'
vals['rphalanx7_1_2'] = '0.000000'
vals['rphalanx7_1_3'] = '0.000000'
vals['rphalanx7_2_1'] = '0.000000'
vals['rphalanx7_2_2'] = '1.000000'
vals['rphalanx7_2_3'] = '0.000000'
vals['rphalanx7_3_1'] = '0.000000'
vals['rphalanx7_3_2'] = '0.000000'
vals['rphalanx7_3_3'] = '1.000000'
vals['rphalanx8_1_1'] = '1.000000'
vals['rphalanx8_1_2'] = '0.000000'
vals['rphalanx8_1_3'] = '0.000000'
vals['rphalanx8_2_1'] = '0.000000'
vals['rphalanx8_2_2'] = '1.000000'
vals['rphalanx8_2_3'] = '0.000000'
vals['rphalanx8_3_1'] = '0.000000'
vals['rphalanx8_3_2'] = '0.000000'
vals['rphalanx8_3_3'] = '1.000000'
vals['rphalanx4_1_1'] = '1.000000'
vals['rphalanx4_1_2'] = '0.000000'
vals['rphalanx4_1_3'] = '0.000000'
vals['rphalanx4_2_1'] = '0.000000'
vals['rphalanx4_2_2'] = '1.000000'
vals['rphalanx4_2_3'] = '0.000000'
vals['rphalanx4_3_1'] = '0.000000'
vals['rphalanx4_3_2'] = '0.000000'
vals['rphalanx4_3_3'] = '1.000000'
vals['rphalanx5_1_1'] = '1.000000'
vals['rphalanx5_1_2'] = '0.000000'
vals['rphalanx5_1_3'] = '0.000000'
vals['rphalanx5_2_1'] = '0.000000'
vals['rphalanx5_2_2'] = '1.000000'
vals['rphalanx5_2_3'] = '0.000000'
vals['rphalanx5_3_1'] = '0.000000'
vals['rphalanx5_3_2'] = '0.000000'
vals['rphalanx5_3_3'] = '1.000000'
vals['rphalanx6_1_1'] = '1.000000'
vals['rphalanx6_1_2'] = '0.000000'
vals['rphalanx6_1_3'] = '0.000000'
vals['rphalanx6_2_1'] = '0.000000'
vals['rphalanx6_2_2'] = '1.000000'
vals['rphalanx6_2_3'] = '0.000000'
vals['rphalanx6_3_1'] = '0.000000'
vals['rphalanx6_3_2'] = '0.000000'
vals['rphalanx6_3_3'] = '1.000000'
vals['rphalanx1_1_1'] = '1.000000'
vals['rphalanx1_1_2'] = '0.000000'
vals['rphalanx1_1_3'] = '0.000000'
vals['rphalanx1_2_1'] = '0.000000'
vals['rphalanx1_2_2'] = '1.000000'
vals['rphalanx1_2_3'] = '0.000000'
vals['rphalanx1_3_1'] = '0.000000'
vals['rphalanx1_3_2'] = '0.000000'
vals['rphalanx1_3_3'] = '1.000000'
vals['rphalanx2_1_1'] = '1.000000'
vals['rphalanx2_1_2'] = '0.000000'
vals['rphalanx2_1_3'] = '0.000000'
vals['rphalanx2_2_1'] = '0.000000'
vals['rphalanx2_2_2'] = '1.000000'
vals['rphalanx2_2_3'] = '0.000000'
vals['rphalanx2_3_1'] = '0.000000'
vals['rphalanx2_3_2'] = '0.000000'
vals['rphalanx2_3_3'] = '1.000000'
vals['rphalanx3_1_1'] = '1.000000'
vals['rphalanx3_1_2'] = '0.000000'
vals['rphalanx3_1_3'] = '0.000000'
vals['rphalanx3_2_1'] = '0.000000'
vals['rphalanx3_2_2'] = '1.000000'
vals['rphalanx3_2_3'] = '0.000000'
vals['rphalanx3_3_1'] = '0.000000'
vals['rphalanx3_3_2'] = '0.000000'
vals['rphalanx3_3_3'] = '1.000000'
# }}}
# left arm {{{
vals['lshoulder_1_1'] = fmtstr.format(math.cos(math.radians(-LShoulderPitch)))#'1.000000'
vals['lshoulder_1_2'] = '0.000000'
vals['lshoulder_1_3'] = fmtstr.format(math.sin(math.radians(-LShoulderPitch)))#'0.000000'
vals['lshoulder_2_1'] = '0.000000'
vals['lshoulder_2_2'] = '1.000000'
vals['lshoulder_2_3'] = '0.000000'
vals['lshoulder_3_1'] = fmtstr.format(-math.sin(math.radians(-LShoulderPitch)))#'0.000000'
vals['lshoulder_3_2'] = '0.000000'
vals['lshoulder_3_3'] = fmtstr.format(math.cos(math.radians(-LShoulderPitch)))#'1.000000'
vals['lbiceps_1_1'] = fmtstr.format(math.cos(math.radians(-LShoulderRoll)))#'1.000000'
vals['lbiceps_1_2'] = fmtstr.format(-math.sin(math.radians(-LShoulderRoll)))#'0.000000'
vals['lbiceps_1_3'] = '0.000000'
vals['lbiceps_2_1'] = fmtstr.format(math.sin(math.radians(-LShoulderRoll)))#'0.000000'
vals['lbiceps_2_2'] = fmtstr.format(math.cos(math.radians(-LShoulderRoll)))#'1.000000'
vals['lbiceps_2_3'] = '0.000000'
vals['lbiceps_3_1'] = '0.000000'
vals['lbiceps_3_2'] = '0.000000'
vals['lbiceps_3_3'] = '1.000000'
lym11 = 1.0
lym12 = 0.0
lym13 = 0.0
lym21 = 0.0
lym22 = math.cos(math.radians(-LElbowYaw))
lym23 = -math.sin(math.radians(-LElbowYaw))
lym31 = 0.0
lym32 = math.sin(math.radians(-LElbowYaw))
lym33 = math.cos(math.radians(-LElbowYaw))
lrm11 = math.cos(math.radians(-LElbowRoll))
lrm12 = -math.sin(math.radians(-LElbowRoll))
lrm13 = 0.0
lrm21 = math.sin(math.radians(-LElbowRoll))
lrm22 = math.cos(math.radians(-LElbowRoll))
lrm23 = 0.0
lrm31 = 0.0
lrm32 = 0.0
lrm33 = 1.0
# first yaw, then roll
vals['lforearm_1_1'] = fmtstr.format(lrm11*lym11+lrm12*lym21+lrm13*lym31)###'1.000000'
vals['lforearm_1_2'] = fmtstr.format(lrm11*lym12+lrm12*lym22+lrm13*lym32)###'0.000000'
vals['lforearm_1_3'] = fmtstr.format(lrm11*lym13+lrm12*lym23+lrm13*lym33)###'0.000000'
vals['lforearm_2_1'] = fmtstr.format(lrm21*lym11+lrm22*lym21+lrm23*lym31)###'0.000000'
vals['lforearm_2_2'] = fmtstr.format(lrm21*lym12+lrm22*lym22+lrm23*lym32)###'1.000000'
vals['lforearm_2_3'] = fmtstr.format(lrm21*lym13+lrm22*lym23+lrm23*lym33)###'0.000000'
vals['lforearm_3_1'] = fmtstr.format(lrm31*lym11+lrm32*lym21+lrm33*lym31)###'0.000000'
vals['lforearm_3_2'] = fmtstr.format(lrm31*lym12+lrm32*lym22+lrm33*lym32)###'0.000000'
vals['lforearm_3_3'] = fmtstr.format(lrm31*lym13+lrm32*lym23+lrm33*lym33)###'1.000000'
vals['lhand_1_1'] = '1.000000'
vals['lhand_1_2'] = '0.000000'
vals['lhand_1_3'] = '0.000000'
vals['lhand_2_1'] = '0.000000'
vals['lhand_2_2'] = fmtstr.format(math.cos(math.radians(-LWristYaw)))#'1.000000'
vals['lhand_2_3'] = fmtstr.format(-math.sin(math.radians(-LWristYaw)))#'0.000000'
vals['lhand_3_1'] = '0.000000'
vals['lhand_3_2'] = fmtstr.format(math.sin(math.radians(-LWristYaw)))#'0.000000'
vals['lhand_3_3'] = fmtstr.format(math.cos(math.radians(-LWristYaw)))#'1.000000'
vals['lphalanx7_1_1'] = '1.000000'
vals['lphalanx7_1_2'] = '0.000000'
vals['lphalanx7_1_3'] = '0.000000'
vals['lphalanx7_2_1'] = '0.000000'
vals['lphalanx7_2_2'] = '1.000000'
vals['lphalanx7_2_3'] = '0.000000'
vals['lphalanx7_3_1'] = '0.000000'
vals['lphalanx7_3_2'] = '0.000000'
vals['lphalanx7_3_3'] = '1.000000'
vals['lphalanx8_1_1'] = '1.000000'
vals['lphalanx8_1_2'] = '0.000000'
vals['lphalanx8_1_3'] = '0.000000'
vals['lphalanx8_2_1'] = '0.000000'
vals['lphalanx8_2_2'] = '1.000000'
vals['lphalanx8_2_3'] = '0.000000'
vals['lphalanx8_3_1'] = '0.000000'
vals['lphalanx8_3_2'] = '0.000000'
vals['lphalanx8_3_3'] = '1.000000'
vals['lphalanx4_1_1'] = '1.000000'
vals['lphalanx4_1_2'] = '0.000000'
vals['lphalanx4_1_3'] = '0.000000'
vals['lphalanx4_2_1'] = '0.000000'
vals['lphalanx4_2_2'] = '1.000000'
vals['lphalanx4_2_3'] = '0.000000'
vals['lphalanx4_3_1'] = '0.000000'
vals['lphalanx4_3_2'] = '0.000000'
vals['lphalanx4_3_3'] = '1.000000'
vals['lphalanx5_1_1'] = '1.000000'
vals['lphalanx5_1_2'] = '0.000000'
vals['lphalanx5_1_3'] = '0.000000'
vals['lphalanx5_2_1'] = '0.000000'
vals['lphalanx5_2_2'] = '1.000000'
vals['lphalanx5_2_3'] = '0.000000'
vals['lphalanx5_3_1'] = '0.000000'
vals['lphalanx5_3_2'] = '0.000000'
vals['lphalanx5_3_3'] = '1.000000'
vals['lphalanx6_1_1'] = '1.000000'
vals['lphalanx6_1_2'] = '0.000000'
vals['lphalanx6_1_3'] = '0.000000'
vals['lphalanx6_2_1'] = '0.000000'
vals['lphalanx6_2_2'] = '1.000000'
vals['lphalanx6_2_3'] = '0.000000'
vals['lphalanx6_3_1'] = '0.000000'
vals['lphalanx6_3_2'] = '0.000000'
vals['lphalanx6_3_3'] = '1.000000'
vals['lphalanx1_1_1'] = '1.000000'
vals['lphalanx1_1_2'] = '0.000000'
vals['lphalanx1_1_3'] = '0.000000'
vals['lphalanx1_2_1'] = '0.000000'
vals['lphalanx1_2_2'] = '1.000000'
vals['lphalanx1_2_3'] = '0.000000'
vals['lphalanx1_3_1'] = '0.000000'
vals['lphalanx1_3_2'] = '0.000000'
vals['lphalanx1_3_3'] = '1.000000'
vals['lphalanx2_1_1'] = '1.000000'
vals['lphalanx2_1_2'] = '0.000000'
vals['lphalanx2_1_3'] = '0.000000'
vals['lphalanx2_2_1'] = '0.000000'
vals['lphalanx2_2_2'] = '1.000000'
vals['lphalanx2_2_3'] = '0.000000'
vals['lphalanx2_3_1'] = '0.000000'
vals['lphalanx2_3_2'] = '0.000000'
vals['lphalanx2_3_3'] = '1.000000'
vals['lphalanx3_1_1'] = '1.000000'
vals['lphalanx3_1_2'] = '0.000000'
vals['lphalanx3_1_3'] = '0.000000'
vals['lphalanx3_2_1'] = '0.000000'
vals['lphalanx3_2_2'] = '1.000000'
vals['lphalanx3_2_3'] = '0.000000'
vals['lphalanx3_3_1'] = '0.000000'
vals['lphalanx3_3_2'] = '0.000000'
vals['lphalanx3_3_3'] = '1.000000'
# }}}
# right leg {{{
rhux = 0
rhuy = -1/math.sqrt(2)
rhuz = -1/math.sqrt(2)
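# rhl* and rhr* below are the two halves of the Rodrigues rotation formula for a
# rotation of RHipYawPitch degrees about the axis u = (rhux, rhuy, rhuz):
#   rhl = cos(a)*I + sin(a)*[u]x    (identity plus skew-symmetric cross-product part)
#   rhr = (1 - cos(a)) * u*u^T      (outer-product part)
# so rhip = rhl + rhr is the full rotation matrix about the 45-degree hip axis.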
rhl11 = math.cos(math.radians(RHipYawPitch))
# note: no leading minus on RHipYawPitch here (the left leg below uses -LHipYawPitch)
rhl12 = math.sin(math.radians(RHipYawPitch)) * (-rhuz)
rhl13 = math.sin(math.radians(RHipYawPitch)) * (rhuy)
rhl21 = math.sin(math.radians(RHipYawPitch)) * (rhuz)
rhl22 = math.cos(math.radians(RHipYawPitch))
rhl23 = math.sin(math.radians(RHipYawPitch)) * (-rhux)
rhl31 = math.sin(math.radians(RHipYawPitch)) * (-rhuy)
rhl32 = math.sin(math.radians(RHipYawPitch)) * (rhux)
rhl33 = math.cos(math.radians(RHipYawPitch))
rhr11 = (1 - math.cos(math.radians(RHipYawPitch))) * rhux * rhux
rhr12 = (1 - math.cos(math.radians(RHipYawPitch))) * rhux * rhuy
rhr13 = (1 - math.cos(math.radians(RHipYawPitch))) * rhux * rhuz
rhr21 = (1 - math.cos(math.radians(RHipYawPitch))) * rhuy * rhux
rhr22 = (1 - math.cos(math.radians(RHipYawPitch))) * rhuy * rhuy
rhr23 = (1 - math.cos(math.radians(RHipYawPitch))) * rhuy * rhuz
rhr31 = (1 - math.cos(math.radians(RHipYawPitch))) * rhuz * rhux
rhr32 = (1 - math.cos(math.radians(RHipYawPitch))) * rhuz * rhuy
rhr33 = (1 - math.cos(math.radians(RHipYawPitch))) * rhuz * rhuz
vals['rhip_1_1'] = fmtstr.format(rhl11 + rhr11)#'1.000000'
vals['rhip_1_2'] = fmtstr.format(rhl12 + rhr12)#'0.000000'
vals['rhip_1_3'] = fmtstr.format(rhl13 + rhr13)#'0.000000'
vals['rhip_2_1'] = fmtstr.format(rhl21 + rhr21)#'0.000000'
vals['rhip_2_2'] = fmtstr.format(rhl22 + rhr22)#'1.000000'
vals['rhip_2_3'] = fmtstr.format(rhl23 + rhr23)#'0.000000'
vals['rhip_3_1'] = fmtstr.format(rhl31 + rhr31)#'0.000000'
vals['rhip_3_2'] = fmtstr.format(rhl32 + rhr32)#'0.000000'
vals['rhip_3_3'] = fmtstr.format(rhl33 + rhr33)#'1.000000'
vals['rupperthigh_1_1'] = '1.000000'
vals['rupperthigh_1_2'] = '0.000000'
vals['rupperthigh_1_3'] = '0.000000'
vals['rupperthigh_2_1'] = '0.000000'
vals['rupperthigh_2_2'] = fmtstr.format(math.cos(math.radians(-RHipRoll)))#'1.000000'
vals['rupperthigh_2_3'] = fmtstr.format(-math.sin(math.radians(-RHipRoll)))#'0.000000'
vals['rupperthigh_3_1'] = '0.000000'
vals['rupperthigh_3_2'] = fmtstr.format(math.sin(math.radians(-RHipRoll)))#'0.000000'
vals['rupperthigh_3_3'] = fmtstr.format(math.cos(math.radians(-RHipRoll)))#'1.000000'
vals['rthigh_1_1'] = fmtstr.format(math.cos(math.radians(-RHipPitch)))#'1.000000'
vals['rthigh_1_2'] = '0.000000'
vals['rthigh_1_3'] = fmtstr.format(math.sin(math.radians(-RHipPitch)))#'0.000000'
vals['rthigh_2_1'] = '0.000000'
vals['rthigh_2_2'] = '1.000000'
vals['rthigh_2_3'] = '0.000000'
vals['rthigh_3_1'] = fmtstr.format(-math.sin(math.radians(-RHipPitch)))#'0.000000'
vals['rthigh_3_2'] = '0.000000'
vals['rthigh_3_3'] = fmtstr.format(math.cos(math.radians(-RHipPitch)))#'1.000000'
vals['rshinebone_1_1'] = fmtstr.format(math.cos(math.radians(-RKneePitch)))#'1.000000'
vals['rshinebone_1_2'] = '0.000000'
vals['rshinebone_1_3'] = fmtstr.format(math.sin(math.radians(-RKneePitch)))#'0.000000'
vals['rshinebone_2_1'] = '0.000000'
vals['rshinebone_2_2'] = '1.000000'
vals['rshinebone_2_3'] = '0.000000'
vals['rshinebone_3_1'] = fmtstr.format(-math.sin(math.radians(-RKneePitch)))#'0.000000'
vals['rshinebone_3_2'] = '0.000000'
vals['rshinebone_3_3'] = fmtstr.format(math.cos(math.radians(-RKneePitch)))#'1.000000'
vals['rankle_1_1'] = fmtstr.format(math.cos(math.radians(-RAnklePitch)))#'1.000000'
vals['rankle_1_2'] = '0.000000'
vals['rankle_1_3'] = fmtstr.format(math.sin(math.radians(-RAnklePitch)))#'0.000000'
vals['rankle_2_1'] = '0.000000'
vals['rankle_2_2'] = '1.000000'
vals['rankle_2_3'] = '0.000000'
vals['rankle_3_1'] = fmtstr.format(-math.sin(math.radians(-RAnklePitch)))#'0.000000'
vals['rankle_3_2'] = '0.000000'
vals['rankle_3_3'] = fmtstr.format(math.cos(math.radians(-RAnklePitch)))#'1.000000'
vals['rfoot_1_1'] = '1.000000'
vals['rfoot_1_2'] = '0.000000'
vals['rfoot_1_3'] = '0.000000'
vals['rfoot_2_1'] = '0.000000'
vals['rfoot_2_2'] = fmtstr.format(math.cos(math.radians(-RAnkleRoll)))#'1.000000'
vals['rfoot_2_3'] = fmtstr.format(-math.sin(math.radians(-RAnkleRoll)))#'0.000000'
vals['rfoot_3_1'] = '0.000000'
vals['rfoot_3_2'] = fmtstr.format(math.sin(math.radians(-RAnkleRoll)))#'0.000000'
vals['rfoot_3_3'] = fmtstr.format(math.cos(math.radians(-RAnkleRoll)))#'1.000000'
# }}}
# left leg {{{
lhux = 0
lhuy = 1/math.sqrt(2)
lhuz = -1/math.sqrt(2)
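# Same Rodrigues construction as the right leg, but with the mirrored axis
# u = (0, 1/sqrt(2), -1/sqrt(2)) and the sign-flipped angle -LHipYawPitch.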
lhl11 = math.cos(math.radians(-LHipYawPitch))
lhl12 = math.sin(math.radians(-LHipYawPitch)) * (-lhuz)
lhl13 = math.sin(math.radians(-LHipYawPitch)) * (lhuy)
lhl21 = math.sin(math.radians(-LHipYawPitch)) * (lhuz)
lhl22 = math.cos(math.radians(-LHipYawPitch))
lhl23 = math.sin(math.radians(-LHipYawPitch)) * (-lhux)
lhl31 = math.sin(math.radians(-LHipYawPitch)) * (-lhuy)
lhl32 = math.sin(math.radians(-LHipYawPitch)) * (lhux)
lhl33 = math.cos(math.radians(-LHipYawPitch))
lhr11 = (1 - math.cos(math.radians(-LHipYawPitch))) * lhux * lhux
lhr12 = (1 - math.cos(math.radians(-LHipYawPitch))) * lhux * lhuy
lhr13 = (1 - math.cos(math.radians(-LHipYawPitch))) * lhux * lhuz
lhr21 = (1 - math.cos(math.radians(-LHipYawPitch))) * lhuy * lhux
lhr22 = (1 - math.cos(math.radians(-LHipYawPitch))) * lhuy * lhuy
lhr23 = (1 - math.cos(math.radians(-LHipYawPitch))) * lhuy * lhuz
lhr31 = (1 - math.cos(math.radians(-LHipYawPitch))) * lhuz * lhux
lhr32 = (1 - math.cos(math.radians(-LHipYawPitch))) * lhuz * lhuy
lhr33 = (1 - math.cos(math.radians(-LHipYawPitch))) * lhuz * lhuz
vals['lhip_1_1'] = fmtstr.format(lhl11 + lhr11)#'1.000000'
vals['lhip_1_2'] = fmtstr.format(lhl12 + lhr12)#'0.000000'
vals['lhip_1_3'] = fmtstr.format(lhl13 + lhr13)#'0.000000'
vals['lhip_2_1'] = fmtstr.format(lhl21 + lhr21)#'0.000000'
vals['lhip_2_2'] = fmtstr.format(lhl22 + lhr22)#'1.000000'
vals['lhip_2_3'] = fmtstr.format(lhl23 + lhr23)#'0.000000'
vals['lhip_3_1'] = fmtstr.format(lhl31 + lhr31)#'0.000000'
vals['lhip_3_2'] = fmtstr.format(lhl32 + lhr32)#'0.000000'
vals['lhip_3_3'] = fmtstr.format(lhl33 + lhr33)#'1.000000'
vals['lupperthigh_1_1'] = '1.000000'
vals['lupperthigh_1_2'] = '0.000000'
vals['lupperthigh_1_3'] = '0.000000'
vals['lupperthigh_2_1'] = '0.000000'
vals['lupperthigh_2_2'] = fmtstr.format(math.cos(math.radians(-LHipRoll)))#'1.000000'
vals['lupperthigh_2_3'] = fmtstr.format(-math.sin(math.radians(-LHipRoll)))#'0.000000'
vals['lupperthigh_3_1'] = '0.000000'
vals['lupperthigh_3_2'] = fmtstr.format(math.sin(math.radians(-LHipRoll)))#'0.000000'
vals['lupperthigh_3_3'] = fmtstr.format(math.cos(math.radians(-LHipRoll)))#'1.000000'
vals['lthigh_1_1'] = fmtstr.format(math.cos(math.radians(-LHipPitch)))#'1.000000'
vals['lthigh_1_2'] = '0.000000'
vals['lthigh_1_3'] = fmtstr.format(math.sin(math.radians(-LHipPitch)))#'0.000000'
vals['lthigh_2_1'] = '0.000000'
vals['lthigh_2_2'] = '1.000000'
vals['lthigh_2_3'] = '0.000000'
vals['lthigh_3_1'] = fmtstr.format(-math.sin(math.radians(-LHipPitch)))#'0.000000'
vals['lthigh_3_2'] = '0.000000'
vals['lthigh_3_3'] = fmtstr.format(math.cos(math.radians(-LHipPitch)))#'1.000000'
vals['lshinebone_1_1'] = fmtstr.format(math.cos(math.radians(-LKneePitch)))#'1.000000'
vals['lshinebone_1_2'] = '0.000000'
vals['lshinebone_1_3'] = fmtstr.format(math.sin(math.radians(-LKneePitch)))#'0.000000'
vals['lshinebone_2_1'] = '0.000000'
vals['lshinebone_2_2'] = '1.000000'
vals['lshinebone_2_3'] = '0.000000'
vals['lshinebone_3_1'] = fmtstr.format(-math.sin(math.radians(-LKneePitch)))#'0.000000'
vals['lshinebone_3_2'] = '0.000000'
vals['lshinebone_3_3'] = fmtstr.format(math.cos(math.radians(-LKneePitch)))#'1.000000'
vals['lankle_1_1'] = fmtstr.format(math.cos(math.radians(-LAnklePitch)))#'1.000000'
vals['lankle_1_2'] = '0.000000'
vals['lankle_1_3'] = fmtstr.format(math.sin(math.radians(-LAnklePitch)))#'0.000000'
vals['lankle_2_1'] = '0.000000'
vals['lankle_2_2'] = '1.000000'
vals['lankle_2_3'] = '0.000000'
vals['lankle_3_1'] = fmtstr.format(-math.sin(math.radians(-LAnklePitch)))#'0.000000'
vals['lankle_3_2'] = '0.000000'
vals['lankle_3_3'] = fmtstr.format(math.cos(math.radians(-LAnklePitch)))#'1.000000'
vals['lfoot_1_1'] = '1.000000'
vals['lfoot_1_2'] = '0.000000'
vals['lfoot_1_3'] = '0.000000'
vals['lfoot_2_1'] = '0.000000'
vals['lfoot_2_2'] = fmtstr.format(math.cos(math.radians(-LAnkleRoll)))#'1.000000'
vals['lfoot_2_3'] = fmtstr.format(-math.sin(math.radians(-LAnkleRoll)))#'0.000000'
vals['lfoot_3_1'] = '0.000000'
vals['lfoot_3_2'] = fmtstr.format(math.sin(math.radians(-LAnkleRoll)))#'0.000000'
vals['lfoot_3_3'] = fmtstr.format(math.cos(math.radians(-LAnkleRoll)))#'1.000000'
# }}}
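# Build one sed script of s/_key_/value/ substitutions so each placeholder in the
# template gets its computed matrix entry, then write the result to <infile>.mod.html.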
s = "{"
for key in vals:
s += "s/_"+key+"_/"+vals[key]+"/ "+os.linesep
s += "}"
# print(s)
with open(os.path.splitext(infile)[0]+".mod.html",'w') as f:
p = call(['sed', '-e', s, infile], stdout=f)
|
lgpl-3.0
| 1,122,742,542,264,320,800
| 37.273703
| 90
| 0.665483
| false
| 2.036843
| false
| false
| false
|
jadhavhninad/-CSE_515_MWD_Analytics-
|
Phase 1/Project Code/phase1_code/print_actor_vector.py
|
1
|
4823
|
from mysqlConn import DbConnect
import argparse
import operator
from math import log
import pprint
#DB connector and cursor
db = DbConnect()
db_conn = db.get_connection()
cur2 = db_conn.cursor();
#Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("ACTOR_ID")
parser.add_argument("MODEL")
args = parser.parse_args()
#print args.ACTOR_ID
#TF MODEL
#Subtask:1 - Get tags and movieRank weight for an actor id
#a. A dictionary to store the returned data.
data_dictionary_tf = {}
data_dictionary_tf_idf = {}
#Get sum of the rank_weights for calculating the idf value (sum of all rankweights/ sum of rank_weights for a specific tag)
cur2.execute("SELECT SUM(rank_wt_norm) FROM `movie-actor`")
result0 = cur2.fetchone()
total_rank_weight = result0[0]
total_tag_newness_weight = 0
#Get total movie-actor count for idf calculation. Here every movie-actor row is a document, i.e. a combination
#for which a particular tag occurs.
cur2.execute("SELECT COUNT(distinct movieid,actorid) FROM `movie-actor`")
result0 = cur2.fetchone()
total_documents = float(result0[0])
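# Model summary, as implemented below:
#   tf(tag)  = sum over this actor's movies of (tag newness_wt_norm_nolog * movie rank_wt_norm),
#              normalised by total_tag_newness_weight
#   idf(tag) = ln(total_documents / total_wt_movie_actor_count of the tag)
#   tf-idf   = tf(tag) * idf(tag)   (computed only when MODEL is not "tf")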
#print total_rank_weight
cur2.execute("SELECT movieid,rank_wt_norm FROM `movie-actor` where actorid = %s",[args.ACTOR_ID])
result1 = cur2.fetchall()
for data1 in result1:
#print data1
act_movie_id = data1[0]
act_movie_rank_wt = data1[1]
actor_tag_id=""
final_tag_wt=""
#Select the tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[act_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
#_size_count = _size_count + 1
actor_tag_id = data2[0]
actor_tag_newness = data2[1]
#Get the tag name for the tagID. Each tag weight is combined with the movie's rank weight as well.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [actor_tag_id])
result2_sub = cur2.fetchall()
tagName = result2_sub[0]
#tagWeight = round(((float(actor_tag_newness)/ float(total_tag_newness_weight)) * float(act_movie_rank_wt)),10)
tagWeight = round((float(actor_tag_newness) * float(act_movie_rank_wt)), 10)
total_tag_newness_weight = total_tag_newness_weight + tagWeight
if tagName in data_dictionary_tf:
data_dictionary_tf[tagName] = round((data_dictionary_tf[tagName] + tagWeight), 10)
else:
data_dictionary_tf[tagName] = tagWeight
#Set the weight of all remaining tags to zero and, for the tags already present, compute the TF by dividing by total_tag_newness_weight
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for key in tagName:
if key in data_dictionary_tf:
#print 'curval',key
data_dictionary_tf[key] = round((float(data_dictionary_tf[key]) / float(total_tag_newness_weight)),10)
else:
data_dictionary_tf[key] = 0
actor_model_value_tf = sorted(data_dictionary_tf.items(), key=operator.itemgetter(1), reverse=True)
#IDF CALCULATION.
if args.MODEL == "tf":
pprint.pprint(actor_model_value_tf)
else:
#TF-IDF CALCULATION
cur2.execute("SELECT movieid FROM `movie-actor` where actorid = %s", [args.ACTOR_ID])
result3 = cur2.fetchall()
for data1 in result3:
# print data1
act_movie_id = data1[0]
# Select distinct tagIDs for the movieID; we choose distinct since the total_wt_movie_actor_count is already precomputed.
cur2.execute("SELECT distinct(tagid) FROM mltags WHERE movieid = %s", [act_movie_id])
result4 = cur2.fetchall()
for data2 in result4:
actor_tag_id = data2[0]
cur2.execute("SELECT tag,total_wt_movie_actor_count FROM `genome-tags` WHERE tagID = %s", [actor_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tag_movie_actor_count = result2_sub[1]
if tagName in data_dictionary_tf_idf:
continue
else:
data_dictionary_tf_idf[tagName] = float(tag_movie_actor_count)
#Once all the tag data has been recorded, calculate the idf and tfidf for each tag.
#Set the weight of all other tags to zero.
cur2.execute("SELECT tag FROM `genome-tags`")
tgName = cur2.fetchall()
for key in tgName:
keyval = key[0]
if keyval in data_dictionary_tf_idf:
data_dictionary_tf_idf[keyval] = round((float(log((total_documents / data_dictionary_tf_idf[keyval]), 2.71828))),10)
data_dictionary_tf_idf[keyval] = round(float(float(data_dictionary_tf[key]) * float(data_dictionary_tf_idf[keyval])), 10)
else:
data_dictionary_tf_idf[keyval] = 0
actor_model_value_tf_idf = sorted(data_dictionary_tf_idf.items(), key=operator.itemgetter(1), reverse=True)
pprint.pprint(actor_model_value_tf_idf)
|
gpl-3.0
| -3,575,886,717,172,771,000
| 33.697842
| 133
| 0.671574
| false
| 3.29215
| false
| false
| false
|
philippj/python-burningseries
|
notifier.py
|
1
|
3494
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
#python-burningseries Copyright (C) 2015 Philipp "freaK"
'''
YOWSUP 2.0
https://github.com/tgalal/yowsup
'''
'''
python-burningseries
# This sends notifications to either an email address or a WhatsApp number using yowsup-cli
'''
#Example for notify params
'''
# Yowsup
{
'phones': ['12309123892139'],
'configFile': '/home/mayowsupconfigfile.config' #must be absolute
}
# Email
{
'addresses': ['test@example.com'],
'senderAddress': 'test@example.com',
'sendmail_location': '/usr/sbin/sendmail'
}
#others will come
'''
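# A minimal usage sketch (the phone number and config path are the placeholder
# examples from above, not real values):
'''
notifier = BurningSeriesNotifier(
    notifyType='yowsup',
    notifyParams={
        'phones': ['12309123892139'],
        'configFile': '/home/mayowsupconfigfile.config'  # must be absolute
    })
'''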
import os
import time
import threading
import smtplib
from base import BurningSeriesBase
class BurningSeriesNotifier(threading.Thread):
def __init__(self, notifyType='email', notifyParams={'addresses': ['test@example.com'], 'senderAddress': 'test@example.com', 'sendmail_location': '/usr/sbin/sendmail'}):
threading.Thread.__init__(self)
bsBase = BurningSeriesBase(True, False)
self.notifyType = notifyType
self.notifyParams = notifyParams
bsBase.callbacks['newEpisode'] = self.newEpisode
bsBase.callbacks['newSeason'] = self.newSeason
bsBase.callbacks['newEpisodeInLanguage'] = self.newEpisodeInLanguage
self.notify = None
#Not using the yowsup library directly, only its console commands, to keep things simple
if notifyType == 'yowsup':
if not os.path.isfile(notifyParams['configFile']):
notifyType = 'email'
else:
self.notify = self.sendYowsupCommand
if notifyType == 'email':
self.notify = self.sendEmailCommand
def newEpisode(self, series, config, params):
string = config['title'] + ' | Season ' + str(params['season']) + ' | Episode ' + str(params['episode']) + ' is now available! Titles: (DE) ' + config['episodes'][params['season']][params['episode']]['titles']['german'] + ' / (EN) ' + config['episodes'][params['season']][params['episode']]['titles']['english']
self.notify(string)
def newSeason(self, series, config, params):
string = config['title'] + ' | Season ' + str(params['newSeason']) + ' is now available!'
self.notify(string)
def newEpisodeInLanguage(self, series, config, params):
string = config['title'] + ' | An episode is now available in - ' + params['language'] + ' - Title: ' + params['title']
self.notify(string)
def sendEmailCommand(self, message):
#self.smtplibServer.sendmail(self.notifyParams['senderAddress'], self.notifyParams['addresses'], final_message)
p = os.popen("%s -t" % self.notifyParams['sendmail_location'], "w")
p.write("From: %s\n" % self.notifyParams['senderAddress'])
p.write("To: %s\n" % ",".join(self.notifyParams['addresses']))
p.write("Subject: BurningSeriesNotifier\n")
p.write("\n")
p.write(message)
p.close()
def sendYowsupCommand(self, message):
for number in self.notifyParams['phones']:
print os.system('yowsup-cli demos -s ' + number + ' "' + message + '" -c ' + self.notifyParams['configFile'])
x = BurningSeriesNotifier()
while True:
try:
time.sleep(1)
except:
raise SystemExit
|
gpl-2.0
| -1,798,651,883,595,741,000
| 32.932039
| 321
| 0.59731
| false
| 3.793702
| true
| false
| false
|
ipa-led/airbus_coop
|
airbus_docgen/src/airbus_docgen/docgen/pkg/__init__.py
|
1
|
7020
|
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from airbus_docgen import env
from airbus_docgen.common import html
from airbus_docgen.common.html import HtmlElement, HtmlElementTree
from airbus_docgen.docgen.pkg.summary import PackageSummary
from airbus_docgen.docgen.pkg.description import PackageDescription
from airbus_docgen.docgen.pkg.dependencies import PackageDependencies
from airbus_docgen.docgen.pkg.generations import PackageGenerations
from airbus_docgen.docgen.pkg.node import RosNode
class AgiDoc(HtmlElement):
def __init__(self):
HtmlElement.__init__(self,
tag=html.Sections.section,
attrib={"class":"nodes"})
def read(self, pkgdir, agi_xml, index):
index_node=0
for node_xml in agi_xml.iter('node'):
index_node+=1
title = HtmlElement(html.Sections.h3)
node_name = node_xml.attrib['name']
title.text = "%i.%i. %s"%(index, index_node, node_name)
self.append(title)
try:
ros_node = RosNode()
if ros_node.read(node_name, node_xml, index, index_node) is True:
self.append(ros_node)
except Exception as ex:
html.HTMLException(ex, self)
if index_node == 0:
return False
else:
return True
class RosPackage(HtmlElement):
def __init__(self, pkgdir):
HtmlElement.__init__(self,
tag=html.Sections.section,
attrib={"class":"package"})
self._h2_index = 0
self._dep_pkg = None
pkg_xml = None
# Load and read package.xml resource
pkg_xml_dir = pkgdir+'/package.xml'
if os.access(pkg_xml_dir, os.R_OK):
pkg_xml = html.loadHtml(pkg_xml_dir)
self._read_pkg_xml(pkgdir, pkg_xml)
else:
html.HTMLException("Cannot find %s!"%pkg_xml_dir, self)
# Load and read CMakeLists.txt resource
cmakelists_dir = pkgdir+'/CMakeLists.txt'
if os.access(cmakelists_dir, os.R_OK):
with open(cmakelists_dir) as fp:
cmakelists = fp.read()
self._read_cmakelists(pkgdir, cmakelists)
else:
html.HTMLException("Cannot find %s!"%cmakelists_dir, self)
if pkg_xml is not None:
self._read_agi_doc_xml(pkgdir, pkg_xml)
def _read_pkg_xml(self, pkgdir, pkg_xml):
pkg_name = HtmlElement(html.Sections.h1)
pkg_name.text = pkg_xml.find("./name").text
self.append(pkg_name)
p = HtmlElement(html.Grouping.p)
p.set("align","center")
img = HtmlElement(html.EmbeddedContent.img)
img.set("src","../dot/gen/%s.png"%pkg_xml.find("./name").text)
p.append(img)
self.append(p)
pkg_summary_title = HtmlElement(html.Sections.h2)
pkg_summary_title.text = "%i. Package Summary"%self.index_h2()
self.append(pkg_summary_title)
try:
self.append(PackageSummary(pkgdir, pkg_xml))
except Exception as ex:
html.HTMLException(ex, self)
pkg_desc_title = HtmlElement(html.Sections.h2)
pkg_desc_title.text = "%i. Package description"%self.index_h2()
self.append(pkg_desc_title)
try:
self.append(PackageDescription(pkgdir, pkg_xml))
except Exception as ex:
html.HTMLException(ex, self)
pkg_dep_title = HtmlElement(html.Sections.h2)
pkg_dep_title.text = "%i. Package dependencies"%self.index_h2()
self.append(pkg_dep_title)
try:
self._dep_pkg = PackageDependencies(pkgdir, pkg_xml)
self.append(self._dep_pkg)
except Exception as ex:
html.HTMLException(ex, self)
def _read_cmakelists(self, pkgdir, cmakefile):
try:
pkg = PackageGenerations()
dep_list = self._dep_pkg.get_dependencies_lists()
if pkg.read(pkgdir, cmakefile, dep_list) is True:
pkg_build_title = HtmlElement(html.Sections.h2)
pkg_build_title.text = "%i. Package generation(s)"%self.index_h2()
self.append(pkg_build_title)
self.append(pkg)
except Exception as ex:
html.HTMLException(ex, self)
def _read_agi_doc_xml(self, pkgdir, pkg_xml):
agidoc_elem = pkg_xml.find("./export/agidoc")
if agidoc_elem is not None:
if 'src' in agidoc_elem.attrib:
fdoc = os.path.join(pkgdir, agidoc_elem.attrib['src'])
if os.path.isfile(fdoc):
agi = AgiDoc()
if agi.read(pkgdir, html.loadHtml(fdoc), self._h2_index+1) is True:
title = HtmlElement(html.Sections.h2)
title.text = "%i. More description"%self.index_h2()
self.append(title)
self.append(agi)
else:
html.HTMLException("Cannot open agidoc '%s'"%fdoc, self)
else:
html.HTMLException("AGI documentation not found !", self)
def index_h2(self):
self._h2_index+=1
return self._h2_index
class HtmlPkgFileGenerator(HtmlElementTree):
def __init__(self, index, pkg_dir, pkg_name):
HtmlElementTree.__init__(self, index.getroot())
self._pkg_name = pkg_name
div = self.getroot().find("./body/div")
try:
pkg = RosPackage(pkg_dir)
div.append(pkg)
except Exception as ex:
html.HTMLException(ex, div)
def save(self):
html.indent(self.getroot())
#print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!",os.path.join(env.ROSDOC_ROOT, "%s.html"%self._pkg_name)
self.write(os.path.join(env.ROSDOC_GEN, "%s.html"%self._pkg_name),
encoding="utf8",
method="xml")
def __str__(self):
html.indent(self.getroot())
return html.tostring(self.getroot())
|
apache-2.0
| 3,991,856,374,120,932,000
| 35.753927
| 135
| 0.562963
| false
| 3.855025
| false
| false
| false
|
chrys87/fenrir
|
src/fenrirscreenreader/commands/commands/review_next_line.py
|
1
|
1627
|
#!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributors.
from fenrirscreenreader.core import debug
from fenrirscreenreader.utils import line_utils
class command():
def __init__(self):
pass
def initialize(self, environment):
self.env = environment
def shutdown(self):
pass
def getDescription(self):
return _('moves review to the next line ')
def run(self):
self.env['screen']['oldCursorReview'] = self.env['screen']['newCursorReview']
if not self.env['screen']['newCursorReview']:
self.env['screen']['newCursorReview'] = self.env['screen']['newCursor'].copy()
self.env['screen']['newCursorReview']['x'], self.env['screen']['newCursorReview']['y'], nextLine, endOfScreen = \
line_utils.getNextLine(self.env['screen']['newCursorReview']['x'], self.env['screen']['newCursorReview']['y'], self.env['screen']['newContentText'])
if nextLine.isspace():
self.env['runtime']['outputManager'].presentText(_("blank"), soundIcon='EmptyLine', interrupt=True, flush=False)
else:
self.env['runtime']['outputManager'].presentText(nextLine, interrupt=True, flush=False)
if endOfScreen:
if self.env['runtime']['settingsManager'].getSettingAsBool('review', 'endOfScreen'):
self.env['runtime']['outputManager'].presentText(_('end of screen'), interrupt=True, soundIcon='EndOfScreen')
def setCallback(self, callback):
pass
|
lgpl-3.0
| 8,339,499,634,889,436,000
| 43.194444
| 158
| 0.610326
| false
| 4.077694
| false
| false
| false
|
lipis/life-line
|
main/control/user.py
|
1
|
12057
|
# coding: utf-8
import copy
from flask.ext import login
from flask.ext import wtf
from flask.ext.babel import gettext as __
from flask.ext.babel import lazy_gettext as _
from google.appengine.ext import ndb
import flask
import wtforms
import auth
import cache
import config
import i18n
import model
import task
import util
from main import app
###############################################################################
# User List
###############################################################################
@app.route('/admin/user/')
@auth.admin_required
def user_list():
user_dbs, cursors = model.User.get_dbs(
email=util.param('email'), prev_cursor=True,
)
permissions = list(UserUpdateForm._permission_choices)
permissions += util.param('permissions', list) or []
return flask.render_template(
'user/user_list.html',
html_class='user-list',
title=_('User List'),
user_dbs=user_dbs,
next_url=util.generate_next_url(cursors['next']),
prev_url=util.generate_next_url(cursors['prev']),
api_url=flask.url_for('api.user.list'),
permissions=sorted(set(permissions)),
)
###############################################################################
# User Update
###############################################################################
class UserUpdateForm(i18n.Form):
username = wtforms.StringField(
model.User.username._verbose_name,
[wtforms.validators.required(), wtforms.validators.length(min=2)],
filters=[util.email_filter],
)
name = wtforms.StringField(
model.User.name._verbose_name,
[wtforms.validators.required()], filters=[util.strip_filter],
)
email = wtforms.StringField(
model.User.email._verbose_name,
[wtforms.validators.optional(), wtforms.validators.email()],
filters=[util.email_filter],
)
locale = wtforms.SelectField(
model.User.locale._verbose_name,
choices=config.LOCALE_SORTED, filters=[util.strip_filter],
)
admin = wtforms.BooleanField(model.User.admin._verbose_name)
active = wtforms.BooleanField(model.User.active._verbose_name)
verified = wtforms.BooleanField(model.User.verified._verbose_name)
permissions = wtforms.SelectMultipleField(
model.User.permissions._verbose_name,
filters=[util.sort_filter],
)
_permission_choices = set()
def __init__(self, *args, **kwds):
super(UserUpdateForm, self).__init__(*args, **kwds)
self.permissions.choices = [
(p, p) for p in sorted(UserUpdateForm._permission_choices)
]
@auth.permission_registered.connect
def _permission_registered_callback(sender, permission):
UserUpdateForm._permission_choices.add(permission)
@app.route('/admin/user/create/', methods=['GET', 'POST'])
@app.route('/admin/user/<int:user_id>/update/', methods=['GET', 'POST'])
@auth.admin_required
def user_update(user_id=0):
if user_id:
user_db = model.User.get_by_id(user_id)
else:
user_db = model.User(name='', username='')
if not user_db:
flask.abort(404)
form = UserUpdateForm(obj=user_db)
for permission in user_db.permissions:
form.permissions.choices.append((permission, permission))
form.permissions.choices = sorted(set(form.permissions.choices))
if form.validate_on_submit():
if not util.is_valid_username(form.username.data):
form.username.errors.append(_('This username is invalid.'))
elif not model.User.is_username_available(form.username.data, user_db.key):
form.username.errors.append(_('This username is already taken.'))
else:
form.populate_obj(user_db)
if auth.current_user_key() == user_db.key:
user_db.admin = True
user_db.active = True
user_db.put()
return flask.redirect(flask.url_for(
'user_list', order='-modified', active=user_db.active,
))
return flask.render_template(
'user/user_update.html',
title=user_db.name or _('New User'),
html_class='user-update',
form=form,
user_db=user_db,
api_url=flask.url_for('api.user', user_key=user_db.key.urlsafe()) if user_db.key else ''
)
###############################################################################
# User Verify
###############################################################################
@app.route('/user/verify/<token>/')
@auth.login_required
def user_verify(token):
user_db = auth.current_user_db()
if user_db.token != token:
flask.flash(__('That link is either invalid or expired.'), category='danger')
return flask.redirect(flask.url_for('profile'))
user_db.verified = True
user_db.token = util.uuid()
user_db.put()
flask.flash(__('Hooray! Your email is now verified.'), category='success')
return flask.redirect(flask.url_for('profile'))
###############################################################################
# User Forgot
###############################################################################
class UserForgotForm(i18n.Form):
email = wtforms.StringField(
'Email',
[wtforms.validators.required(), wtforms.validators.email()],
filters=[util.email_filter],
)
recaptcha = wtf.RecaptchaField()
@app.route('/user/forgot/', methods=['GET', 'POST'])
def user_forgot(token=None):
if not config.CONFIG_DB.has_email_authentication:
flask.abort(418)
form = auth.form_with_recaptcha(UserForgotForm(obj=auth.current_user_db()))
if form.validate_on_submit():
cache.bump_auth_attempt()
email = form.email.data
user_dbs, cursors = util.get_dbs(
model.User.query(), email=email, active=True, limit=2,
)
count = len(user_dbs)
if count == 1:
task.reset_password_notification(user_dbs[0])
return flask.redirect(flask.url_for('welcome'))
elif count == 0:
form.email.errors.append('This email was not found')
elif count == 2:
task.email_conflict_notification(email)
form.email.errors.append(
'''We are sorry but it looks like there is a conflict with your
account. Our support team is already informed and we will get back to
you as soon as possible.'''
)
if form.errors:
cache.bump_auth_attempt()
return flask.render_template(
'user/user_forgot.html',
title=_('Forgot Password?'),
html_class='user-forgot',
form=form,
)
###############################################################################
# User Reset
###############################################################################
class UserResetForm(i18n.Form):
new_password = wtforms.StringField(
_('New Password'),
[wtforms.validators.required(), wtforms.validators.length(min=6)],
)
@app.route('/user/reset/<token>/', methods=['GET', 'POST'])
@app.route('/user/reset/')
def user_reset(token=None):
user_db = model.User.get_by('token', token)
if not user_db:
flask.flash(__('That link is either invalid or expired.'), category='danger')
return flask.redirect(flask.url_for('welcome'))
if auth.is_logged_in():
login.logout_user()
return flask.redirect(flask.request.path)
form = UserResetForm()
if form.validate_on_submit():
user_db.password_hash = util.password_hash(user_db, form.new_password.data)
user_db.token = util.uuid()
user_db.verified = True
user_db.put()
flask.flash(__('Your password was changed successfully.'), category='success')
return auth.signin_user_db(user_db)
return flask.render_template(
'user/user_reset.html',
title='Reset Password',
html_class='user-reset',
form=form,
user_db=user_db,
)
###############################################################################
# User Activate
###############################################################################
class UserActivateForm(i18n.Form):
name = wtforms.StringField(
model.User.name._verbose_name,
[wtforms.validators.required()], filters=[util.strip_filter],
)
password = wtforms.StringField(
_('Password'),
[wtforms.validators.required(), wtforms.validators.length(min=6)],
)
@app.route('/user/activate/<token>/', methods=['GET', 'POST'])
def user_activate(token):
if auth.is_logged_in():
login.logout_user()
return flask.redirect(flask.request.path)
user_db = model.User.get_by('token', token)
if not user_db:
flask.flash(__('That link is either invalid or expired.'), category='danger')
return flask.redirect(flask.url_for('welcome'))
form = UserActivateForm(obj=user_db)
if form.validate_on_submit():
form.populate_obj(user_db)
user_db.password_hash = util.password_hash(user_db, form.password.data)
user_db.token = util.uuid()
user_db.verified = True
user_db.put()
return auth.signin_user_db(user_db)
return flask.render_template(
'user/user_activate.html',
title='Activate Account',
html_class='user-activate',
user_db=user_db,
form=form,
)
###############################################################################
# User Merge
###############################################################################
class UserMergeForm(i18n.Form):
user_key = wtforms.HiddenField('User Key', [wtforms.validators.required()])
user_keys = wtforms.HiddenField('User Keys', [wtforms.validators.required()])
username = wtforms.StringField(_('Username'), [wtforms.validators.optional()])
name = wtforms.StringField(
_('Name (merged)'),
[wtforms.validators.required()], filters=[util.strip_filter],
)
email = wtforms.StringField(
_('Email (merged)'),
[wtforms.validators.optional(), wtforms.validators.email()],
filters=[util.email_filter],
)
@app.route('/admin/user/merge/', methods=['GET', 'POST'])
@auth.admin_required
def user_merge():
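# Merge flow: load all selected user entities, keep the oldest one (or the one
# explicitly chosen via the 'user_key' parameter) as the merged account, give it
# the union of auth_ids and permissions, and finally deactivate the remaining
# accounts in merge_user_dbs() below.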
user_keys = util.param('user_keys', list)
if not user_keys:
flask.abort(400)
user_db_keys = [ndb.Key(urlsafe=k) for k in user_keys]
user_dbs = ndb.get_multi(user_db_keys)
if len(user_dbs) < 2:
flask.abort(400)
user_dbs.sort(key=lambda user_db: user_db.created)
merged_user_db = user_dbs[0]
auth_ids = []
permissions = []
is_admin = False
is_active = False
for user_db in user_dbs:
auth_ids.extend(user_db.auth_ids)
permissions.extend(user_db.permissions)
is_admin = is_admin or user_db.admin
is_active = is_active or user_db.active
if user_db.key.urlsafe() == util.param('user_key'):
merged_user_db = user_db
auth_ids = sorted(list(set(auth_ids)))
permissions = sorted(list(set(permissions)))
merged_user_db.permissions = permissions
merged_user_db.admin = is_admin
merged_user_db.active = is_active
merged_user_db.verified = False
form_obj = copy.deepcopy(merged_user_db)
form_obj.user_key = merged_user_db.key.urlsafe()
form_obj.user_keys = ','.join(user_keys)
form = UserMergeForm(obj=form_obj)
if form.validate_on_submit():
form.populate_obj(merged_user_db)
merged_user_db.auth_ids = auth_ids
merged_user_db.put()
deprecated_keys = [k for k in user_db_keys if k != merged_user_db.key]
merge_user_dbs(merged_user_db, deprecated_keys)
return flask.redirect(
flask.url_for('user_update', user_id=merged_user_db.key.id()),
)
return flask.render_template(
'user/user_merge.html',
title=_('Merge Users'),
html_class='user-merge',
user_dbs=user_dbs,
merged_user_db=merged_user_db,
form=form,
auth_ids=auth_ids,
api_url=flask.url_for('api.user.list', user_keys=','.join(user_keys)),
)
@ndb.transactional(xg=True)
def merge_user_dbs(user_db, deprecated_keys):
# TODO: Merge possible user data before handling deprecated users
deprecated_dbs = ndb.get_multi(deprecated_keys)
for deprecated_db in deprecated_dbs:
deprecated_db.auth_ids = []
deprecated_db.active = False
deprecated_db.verified = False
if not deprecated_db.username.startswith('_'):
deprecated_db.username = '_%s' % deprecated_db.username
ndb.put_multi(deprecated_dbs)
|
mit
| -6,274,276,460,085,191,000
| 31.763587
| 94
| 0.606121
| false
| 3.680403
| false
| false
| false
|
jschornick/i2c_device
|
setup.py
|
1
|
1721
|
import os
from glob import glob
from setuptools import setup, find_packages
# Setup flags and parameters
pkg_name = 'i2c_device' # top-level package name
# Cache readme contents for use as long_description
readme = open('readme.md').read()
# Call setup()
setup(
name=pkg_name,
version='0.1',
description='I2C device configuration library',
long_description=readme,
url='https://github.com/jschornick/i2c_device',
author='Jeff Schornick',
author_email='jeff@schornick.org',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
data_files = [ ('configs', glob("configs/*")) ],
scripts = glob("examples/*"),
# NOTE: This module has been most thoroughly tested using the python-smbus
# library, which is NOT available via PyPI. Install separately
# via your favorite package manager or from the source:
# http://www.lm-sensors.org/browser/i2c-tools/trunk/py-smbus/
#
# Alternately, try using smbus-cffi below, which just might work, but
# is definitely slower.
install_requires=[
#'smbus-cffi',
'PyYAML'
],
test_suite=(pkg_name + '.tests'),
tests_require=['mock'],
platforms='any',
keywords='i2c device abstraction development utilities tools',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
])
|
mit
| -3,040,005,242,851,160,000
| 32.096154
| 77
| 0.677513
| false
| 3.717063
| false
| false
| false
|
alexandrosstergiou/The-Drivers-Assistant-Traffic-Sign-Recognition
|
show.py
|
1
|
1041
|
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
from os import listdir, getcwd
from os import chdir
from PIL import Image
import matplotlib.gridspec as gridspec
import matplotlib.image as mimage
from matplotlib.backends.backend_pdf import PdfPages
files = listdir('CNN_run2/Visualisations_w_folders/max_pooling_3')
chdir('CNN_run2/Visualisations_w_folders/max_pooling_3')
images = [Image.open(f).convert('LA') for f in files]
"""
fig = plt.figure()
grid = ImageGrid(fig, 111, # similar to subplot(111)
nrows_ncols = (2, 5), # creates 2x2 grid of axes
axes_pad=0.1, # pad between axes in inch.
)
"""
num_rows = 1
num_cols = 128
fig = plt.figure()
gs = gridspec.GridSpec(num_rows, num_cols, wspace=0.0)
i = 0
for g in gs:
ax = plt.subplot(g)
ax.imshow(images[i])
ax.set_xticks([])
ax.set_yticks([])
i = i + 1
# ax.set_aspect('auto')
plt.axis('off')
plt.show()
|
mit
| 2,028,056,922,246,869,200
| 21.148936
| 66
| 0.691643
| false
| 2.965812
| false
| false
| false
|
andyneff/python-plyfile
|
examples/plot.py
|
1
|
1337
|
'''
Example script illustrating plotting of PLY data using Mayavi. Mayavi
is not a dependency of plyfile, but you will need to install it in order
to run this script. Failing to do so will immediately result in
ImportError.
'''
from argparse import ArgumentParser
import numpy
from mayavi import mlab
from plyfile import PlyData
def main():
parser = ArgumentParser()
parser.add_argument('ply_filename')
args = parser.parse_args()
plot(PlyData.read(args.ply_filename))
mlab.show()
def plot(ply):
'''
Plot vertices and triangles from a PlyData instance. Assumptions:
`ply' has a 'vertex' element with 'x', 'y', and 'z'
properties;
`ply' has a 'face' element with an integral list property
'vertex_indices', all of whose elements have length 3.
'''
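# A PLY header matching these assumptions would look roughly like this
# (illustrative sketch, not taken from a specific file):
#   element vertex <N>   with float properties x, y, z
#   element face <M>     with property list vertex_indices (3 indices per face)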
vertex = ply['vertex']
(x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
mlab.points3d(x, y, z, color=(1, 1, 1), mode='point')
if 'face' in ply:
tri_idx = ply['face']['vertex_indices']
idx_dtype = tri_idx[0].dtype
triangles = numpy.fromiter(tri_idx, [('data', idx_dtype, (3,))],
count=len(tri_idx))['data']
mlab.triangular_mesh(x, y, z, triangles,
color=(1, 0, 0.4), opacity=0.5)
main()
|
gpl-3.0
| -5,918,219,094,796,757,000
| 23.759259
| 72
| 0.59985
| false
| 3.445876
| false
| false
| false
|
Akuli/porcupine
|
tests/test_filetypes_plugin.py
|
1
|
3057
|
import logging
import pathlib
import sys
from tkinter import filedialog
import pytest
from porcupine import dirs, filedialog_kwargs, get_main_window
from porcupine.plugins import filetypes
@pytest.fixture
def custom_filetypes():
# We don't overwrite the user's file because porcupine.dirs is monkeypatched
assert not dirs.user_config_dir.startswith(str(pathlib.Path.home()))
user_filetypes = pathlib.Path(dirs.user_config_dir) / "filetypes.toml"
user_filetypes.write_text(
"""
['Mako template']
filename_patterns = ["mako-templates/*.html"]
pygments_lexer = 'pygments.lexers.MakoHtmlLexer'
"""
)
filetypes.load_filetypes()
filetypes.set_filedialog_kwargs()
yield
user_filetypes.unlink()
filetypes.filetypes.clear()
filetypes.load_filetypes()
filetypes.set_filedialog_kwargs()
def test_filedialog_patterns_got_stripped():
python_patterns = dict(filedialog_kwargs["filetypes"])["Python"]
assert "*.py" not in python_patterns
assert ".py" in python_patterns
@pytest.mark.skipif(sys.platform != "linux", reason="don't know how filedialog works on non-Linux")
def test_actually_running_filedialog(custom_filetypes):
# Wait and then press Esc. That's done as Tcl code because the Tk widget
# representing the dialog can't be used with tkinter.
root = get_main_window().nametowidget(".")
root.after(1000, root.eval, "event generate [focus] <Escape>")
# If filedialog_kwargs are wrong, then this errors.
filedialog.askopenfilename(**filedialog_kwargs)
def test_bad_filetype_on_command_line(run_porcupine):
output = run_porcupine(["-n", "FooBar"], 2)
assert "no filetype named 'FooBar'" in output
def test_unknown_filetype(filetab, tmp_path):
# pygments does not know graphviz, see how it gets handled
filetab.textwidget.insert(
"end",
"""\
digraph G {
Hello->World;
}
""",
)
filetab.path = tmp_path / "graphviz-hello-world.gvz"
filetab.save()
lexer_class_name = filetypes.get_filetype_for_tab(filetab)["pygments_lexer"]
assert lexer_class_name.endswith(".TextLexer")
def test_slash_in_filename_patterns(custom_filetypes, caplog, tmp_path):
def lexer_name(path):
return filetypes.guess_filetype_from_path(path)["pygments_lexer"]
assert lexer_name(tmp_path / "foo" / "bar.html") == "pygments.lexers.HtmlLexer"
assert lexer_name(tmp_path / "lol-mako-templates" / "bar.html") == "pygments.lexers.HtmlLexer"
with caplog.at_level(logging.WARNING):
assert (
lexer_name(tmp_path / "mako-templates" / "bar.html") == "pygments.lexers.MakoHtmlLexer"
)
assert len(caplog.records) == 1
assert "2 file types match" in caplog.records[0].message
assert str(tmp_path) in caplog.records[0].message
assert "HTML, Mako template" in caplog.records[0].message
# filedialog doesn't support slashes in patterns
for filetype_name, patterns in filedialog_kwargs["filetypes"]:
for pattern in patterns:
assert "/" not in pattern
|
mit
| -3,899,674,040,829,050,400
| 32.228261
| 99
| 0.697089
| false
| 3.550523
| true
| false
| false
|