repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
trondhindenes/ansible | contrib/inventory/foreman.py | Python | gpl-3.0 | 17,060 | 0.000997 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>,
# Daniel Lobato Garcia <dlobatog@redhat.com>
#
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with it. If not, see <http://www.gnu.org/licenses/>.
#
# This is somewhat based on cobbler inventory
# Stdlib imports
# __future__ imports must occur at the beginning of file
from __future__ import print_function
try:
# Python 2 version
import ConfigParser
except ImportError:
# Python 3 version
import configparser as ConfigParser
import json
import argparse
import copy
import os
import re
import sys
from time import time
from collections import defaultdict
from distutils.version import LooseVersion, StrictVersion
# 3rd party imports
import requests
if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
print('This script requires python-requests 1.1 as a minimum version')
sys.exit(1)
from requests.auth import HTTPBasicAuth
from ansible.module_utils._text import to_text
def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string"""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
class ForemanInventory(object):
def __init__(self):
self.inventory = defaultdict(list) # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
self.params = dict() # Params of each host
self.facts = dict() # Facts of each host
self.hostgroups = dict() # host groups
self.hostcollections = dict() # host collections
self.session = None # Requests session
self.config_paths = [
"/etc/ansible/foreman.ini",
os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini',
]
env_value = os.environ.get('FOREMAN_INI_PATH')
if env_value is not None:
self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
def read_settings(self):
"""Reads the settings from the foreman.ini file"""
config = ConfigParser.SafeConfigParser()
config.read(self.config_paths)
# Foreman API related
try:
self.foreman_url = config.get('foreman', 'url')
self.foreman_user = config.get('foreman', 'user')
self.foreman_pw = config.get('foreman', 'password', raw=True)
self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
print("Error parsing configuration: %s" % e, file=sys.stderr)
return False
# Ansible related
try:
group_patterns = config.get('ansible', 'group_patterns')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
group_patterns = "[]"
self.group_patterns = json.loads(group_patterns)
try:
self.group_prefix = config.get('ansible', 'group_prefix')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.group_prefix = "foreman_"
try:
self.want_facts = config.getboolean('ansible', 'want_facts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_facts = True
try:
self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_hostcollections = False
# Do we want parameters to be interpreted if possible as JSON? (no by default)
try:
self.rich_params = config.getboolean('ansible', 'rich_params')
except (ConfigParser.NoOptionError | , ConfigParser.NoSectionError):
self.rich_params = False
try:
self.host_filters = config.get('foreman', 'host_filters')
except (ConfigParser.No | OptionError, ConfigParser.NoSectionError):
self.host_filters = None
# Cache related
try:
cache_path = os.path.expanduser(config.get('cache', 'path'))
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
cache_path = '.'
(script, ext) = os.path.splitext(os.path.basename(__file__))
self.cache_path_cache = cache_path + "/%s.cache" % script
self.cache_path_inventory = cache_path + "/%s.index" % script
self.cache_path_params = cache_path + "/%s.params" % script
self.cache_path_facts = cache_path + "/%s.facts" % script
self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
try:
self.cache_max_age = config.getint('cache', 'max_age')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.cache_max_age = 60
try:
self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.scan_new_hosts = False
return True
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to foreman (default: False - use cache files)')
self.args = parser.parse_args()
def _get_session(self):
if not self.session:
self.session = requests.session()
self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw)
self.session.verify = self.foreman_ssl_verify
return self.session
def _get_json(self, url, ignore_errors=None, params=None):
if params is None:
params = {}
params['per_page'] = 250
page = 1
results = []
s = self._get_session()
while True:
params['page'] = page
ret = s.get(url, params=params)
if ignore_errors and ret.status_code in ignore_errors:
break
ret.raise_for_status()
json = ret.json()
# /hosts/:id has not results key
if 'results' not in json:
return json
# Facts are returned as dict in results not list
if isinstance(json['results'], dict):
return json['results']
# List of all hosts is returned paginaged
results = results + json['results']
if len(results) >= json['subtotal']:
break
page += 1
if len(json['results']) == 0:
print("Did not make any progress during loop. "
"expected %d got %d" % (json['total'], len(results)),
file=sys.stderr)
break
return results
def _get_hosts(self):
url = "%s/api/v2/hosts" % self.foreman_url
params = {}
if self.host_filters:
params['search'] = self.host_filters
return self._get_json(url, params=params)
def _get_host_data_by_id(self, hid):
url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
|
yuyuyu101/VirtualBox-NetBSD | src/libs/xpcom18a4/python/test/pyxpcom_test_tools.py | Python | gpl-2.0 | 4,286 | 0.008166 | # test tools for the pyxpcom bindings
from xpcom import _xpcom
import unittest
# export a "getmemusage()" function that returns a useful "bytes used" count
# for the current process. Growth in this when doing the same thing over and
# over implies a leak.
try:
import win32api
import win32pdh
import win32pdhutil
have_pdh = 1
except ImportError:
have_pdh = 0
# XXX - win32pdh is slow, particularly finding our current process.
# A better way would be good.
# Our win32pdh specific functions - they can be at the top-level on all
# platforms, but will only actually be called if the modules are available.
def FindMyCounter():
pid_me = win32api.GetCurrentProcessId()
object = "Process"
items, instances = win32pdh.EnumObjectItems(None,None,object, -1)
for instance in instances:
# We use 2 counters - "ID Process" and "Working Set"
counter = "ID Process"
format = win32pdh.PDH_FMT_LONG
hq = win32pdh.OpenQuery()
path = win32pdh.MakeCounterPath( (None,object,instance, None, -1,"ID Process") )
hc1 = win32pdh.AddCounter(hq, path)
path = win32pdh.MakeCounterPath( (None,object,instance, None, -1,"Working Set") )
hc2 = win32pdh.AddCounter(hq, path)
win32pdh.CollectQueryData(hq)
type, pid = win32pdh.GetFormattedCounterValue(hc1, format)
if pid==pid_me:
win32pdh.RemoveCounter(hc1) # not needed any more
return hq, hc2
# Not mine - close the query and try again
win32pdh.RemoveCounter(hc1)
win32pdh.RemoveCounter(hc2)
win32pdh.CloseQuery(hq)
else:
raise RuntimeError, "Can't find myself!?"
def CloseCounter(hq, hc):
win32pdh.RemoveCounter(hc)
win32pdh.CloseQuery(hq)
def GetCounterValue(hq, hc):
win32pdh.CollectQueryData(hq)
format = win32pdh.PDH_FMT_LONG
type, val = win32pdh.GetFormattedCounterValue(hc, format)
return val
g_pdh_data = None
# The pdh function that does the work
def pdh_getmemusage():
global g_pdh_data
if g_pdh_data is None:
hq, hc = FindMyCounter()
g_pdh_data = hq, hc
hq, hc = g_pdh_data
return GetCounterValue(hq, hc)
# The public bit
if have_pdh:
getmemusage = pdh_getmemusage
else:
def getmemusage():
return 0
# Test runner utilities, including some support for builti | n leak tests.
class TestLoader(unittest.TestLoader):
def loadTestsFromTestCase(self, testCaseClass):
"""Ret | urn a suite of all tests cases contained in testCaseClass"""
leak_tests = []
for name in self.getTestCaseNames(testCaseClass):
real_test = testCaseClass(name)
leak_test = self._getTestWrapper(real_test)
leak_tests.append(leak_test)
return self.suiteClass(leak_tests)
def _getTestWrapper(self, test):
# later! see pywin32's win32/test/util.py
return test
def loadTestsFromModule(self, mod):
if hasattr(mod, "suite"):
ret = mod.suite()
else:
ret = unittest.TestLoader.loadTestsFromModule(self, mod)
assert ret.countTestCases() > 0, "No tests in %r" % (mod,)
return ret
def loadTestsFromName(self, name, module=None):
test = unittest.TestLoader.loadTestsFromName(self, name, module)
if isinstance(test, unittest.TestSuite):
pass # hmmm? print "Don't wrap suites yet!", test._tests
elif isinstance(test, unittest.TestCase):
test = self._getTestWrapper(test)
else:
print "XXX - what is", test
return test
# A base class our tests should derive from (well, one day it will be)
TestCase = unittest.TestCase
def suite_from_functions(*funcs):
suite = unittest.TestSuite()
for func in funcs:
suite.addTest(unittest.FunctionTestCase(func))
return suite
def testmain(*args, **kw):
new_kw = kw.copy()
if not new_kw.has_key('testLoader'):
new_kw['testLoader'] = TestLoader()
try:
unittest.main(*args, **new_kw)
finally:
_xpcom.NS_ShutdownXPCOM()
ni = _xpcom._GetInterfaceCount()
ng = _xpcom._GetGatewayCount()
if ni or ng:
print "********* WARNING - Leaving with %d/%d objects alive" % (ni,ng)
|
Stracksapp/stracks_api | stracks_api/middleware.py | Python | bsd-2-clause | 2,170 | 0.005069 |
try:
from django.conf import settings
STRACKS_CONNECTOR = settings.STRACKS_CONNECTOR
except (ImportError, AttributeError):
STRACKS_CONNECTOR = None
STRACKS_API = None
from stracks_api.api import API
from stracks_api import client
import django.http
STRACKS_API = None
if STRACKS_CONNECTOR:
STRACKS_API = API()
class StracksMiddleware(object):
def process_request(self, request):
if not STRACKS_API:
return
##
## get useragent, ip, path
## fetch session, create one if necessary
## create reque | st, store it in local thread storage
useragent = request.META.get('HTTP_USER_AGENT', 'unknown')
ip = request.META.get('REMOTE_ADDR', '<none>')
path = request.get_full_path()
sess = | request.session.get('stracks-session')
if sess is None:
sess = STRACKS_API.session()
request.session['stracks-session'] = sess
request = sess.request(ip, useragent, path)
client.set_request(request)
def process_response(self, request, response):
if not STRACKS_API:
return response
r = client.get_request()
if r:
if not request.user.is_anonymous():
## if there's an active user then he owns
## the request. We need to map it to an
## entity
from django.utils.importlib import import_module
ueb = getattr(settings, 'USER_ENTITY_BUILDER', None)
if ueb:
## XXX error handling
modstr, func = settings.USER_ENTITY_BUILDER.rsplit('.', 1)
mod = import_module(modstr)
f = getattr(mod, func)
r.set_owner(f(request.user))
r.end()
client.set_request(None)
return response
def process_exception(self, request, exception):
if not STRACKS_API:
return
## do not log 404 exceptions, see issue #356
if isinstance(exception, django.http.Http404):
return
client.exception("Crash: %s" % exception)
|
dmlc/tvm | tests/python/relay/op/annotation/test_annotation.py | Python | apache-2.0 | 2,947 | 0.001357 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for annotations."""
import tvm
from tvm import relay
import pytest
def test_on_device_via_string():
x = relay.Var("x")
call = relay.annotation.on_device(x, "cuda")
assert isinstance(call, relay.Call)
assert len(call.args) == 1
assert call.args[0] == x
assert call.attrs.virtual_device.device_type_int == 2 # ie kDLCUDA
assert call.attrs.virtual_device.virtual_device_id == 0
assert call.attrs.virtual_device.target is None
assert call.attrs.virtual_device.memory_scope == ""
assert call.attrs.constr | ain_body
assert not call.attrs.constrain_result
def test_on_device_via_device():
x = relay.Var("x")
call = relay.annotation.on_device(x, tvm.device("cpu"))
assert call.attrs.virtual_device.device_type_int == 1 # ie kDLCPU
def test_on_device_invalid_device():
x = relay.Var("x")
pytest.raises(ValueError, lambda: relay.annotation.on_device(x, "bogus"))
def test_on_device_fixed():
x = relay.Var("x")
call = relay.annotation.on_device(x, "cuda", constrain_result=Tr | ue)
assert call.attrs.virtual_device.device_type_int == 2 # ie kDLCUDA
assert call.attrs.constrain_body
assert call.attrs.constrain_result
def test_on_device_free():
x = relay.Var("x")
call = relay.annotation.on_device(x, "cuda", constrain_result=False, constrain_body=False)
assert call.attrs.virtual_device.device_type_int == -1 # ie kInvalidDeviceType
assert not call.attrs.constrain_body
assert not call.attrs.constrain_result
def test_function_on_device():
x = relay.Var("x")
y = relay.Var("y")
f = relay.Function([x, y], relay.add(x, y))
func = relay.annotation.function_on_device(f, ["cpu", "cuda"], "cuda")
assert isinstance(func, relay.Function)
assert len(func.attrs["param_virtual_devices"]) == 2
assert func.attrs["param_virtual_devices"][0].device_type_int == 1 # ie kDLCPU
assert func.attrs["param_virtual_devices"][1].device_type_int == 2 # ie kDLCUDA
assert func.virtual_device_.device_type_int == 2 # ie KDLCUDA
if __name__ == "__main__":
import sys
sys.exit(pytest.main([__file__] + sys.argv[1:]))
|
interop-dev/django-netjsongraph | tests/settings.py | Python | mit | 3,592 | 0.001949 | import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'django_netjsongraph.db',
}
}
SECRET_KEY = 'fn)t*+$)ugeyip6-#txyy$5wf2ervc0d2n#h)qb)y5@ly$t*@w'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'openwisp_utils.admin_theme',
'django_netjsongraph',
'django.contrib.admin',
# rest framework
'rest_framework',
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'openwisp_utils.staticfiles.DependencyFinder',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
TIME_ZONE = 'Europe/Rome'
LANGUAGE_CODE = 'en-gb'
USE_TZ = True
USE_I18N = False
USE_L10N = False
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path. | join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
} | ,
},
]
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {'()': 'django.utils.log.RequireDebugFalse',},
'require_debug_true': {'()': 'django.utils.log.RequireDebugTrue',},
},
'formatters': {
'simple': {'format': '[%(levelname)s] %(message)s'},
'verbose': {
'format': '\n\n[%(levelname)s %(asctime)s] module: %(module)s, process: %(process)d, thread: %(thread)d\n%(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'filters': ['require_debug_true'],
'formatter': 'simple',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
},
'main_log': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': os.path.join(BASE_DIR, 'error.log'),
'maxBytes': 5242880.0,
'backupCount': 3,
'formatter': 'verbose',
},
},
'root': {'level': 'INFO', 'handlers': ['main_log', 'console', 'mail_admins'],},
'loggers': {'py.warnings': {'handlers': ['console'],}},
}
TEST_RUNNER = "django_netjsongraph.tests.utils.LoggingDisabledTestRunner"
# local settings must be imported before test runner otherwise they'll be ignored
try:
from local_settings import *
except ImportError:
pass
|
maartenbreddels/vaex | tests/getattr_test.py | Python | mit | 3,825 | 0.005229 | from common import *
def test_column_subset(ds_local):
ds = ds_local
dss = ds[['x', 'y']]
assert dss.get_column_names() == ['x', 'y']
np.array(dss) # test if all columns can be put in arrays
def test_column_subset_virtual(ds_local):
ds = ds_local
ds['r'] = ds.x + ds.y
dss = ds[['r']]
assert dss.get_column_names() == ['r']
assert set(dss.get_column_names(hidden=True)) == set(['__x', '__y', 'r'])
np.array(dss) # test if all columns can be put in arrays
def test_column_subset_virtual_recursive(df_local_non_arrow):
df = df_local_non_arrow
df['r'] = df.x + df.y
df['q'] = df.r/2
dfs = df[['q']]
assert dfs.get_column_names() == ['q']
all_columns = set(dfs.get_column_names(hidden=True))
assert all_columns == set(['__x', '__y', '__r', 'q'])
np.array(dfs) # test if all columns can be put in arrays
def test_column_subset_virtual(ds_filtered):
ds = ds_filtered
dss = ds[['y']]
assert dss.get_column_names() == ['y']
all_columns = set(dss.get_column_names(hidden=True))
assert all_columns == set(['__x', 'y'])
np.array(dss) # test if all columns can be put in arrays, with the possible filter copied as hidden
# 'nested' filter
ds = ds[ds.y > 2]
dss = ds[['m']]
assert dss.get_column_names() == ['m']
assert set(dss.get_column_names(hidden=True)) == set(['__x', '__y', 'm'])
def test_column_order(ds_local):
ds = ds_local
dss = ds[['x', 'y']]
assert dss.get_column_names() == ['x', 'y']
assert np.array(dss).T.tolist() == [ds.x.values.tolist(), ds.y.values.tolist()]
dss = ds[['y', 'x']]
assert dss.get_column_names() == ['y', 'x']
assert np.array(dss).T.tolist() == [ds.y.values.tolist(), ds.x.values.tolist()]
def test_column_order_virtual(ds_local):
ds = ds_local
# this will do some name mangling, but we don't care about the names
ds['r'] = ds.y + 10
ds = ds_local
dss = ds[['x', 'r']]
assert dss.get_column_names() == ['x', 'r']
assert np.array(dss).T.tolist() == [ds.x.values.tolist(), ds.r.values.tolist()]
dss = ds[['r', 'x']]
assert dss.get_column_names() == ['r', 'x']
assert np.array(dss).T.tolist() == [ds.r.values.tolist(), ds.x.values.tolist()]
def test_expression(ds_local):
ds = ds_local
# this will do some name mangling, but we don't care about the names
dss = ds[['y/10', 'x/5']]
assert 'y' in dss.get_column_names()[0]
assert 'x' in dss.get_column_names()[1]
assert np.array(dss).T.tolist() == [(ds.y/10).values.tolist(), (ds.x/5).values.tolist()]
@pytest.mark.skip(reason="Not implemented yet, should work, might need refactoring of copy")
def test_expression_virtual(ds_local):
ds = ds_local
# this will do some name mangling, but we don't care about the names
ds['r'] = ds.y + 10
dss = ds[['r/10', 'x/5']]
assert 'r' in dss.get_column_names()[0]
assert 'x' in dss.get_column_names()[1]
assert np.array(dss).T.tolist() == [(ds.r/10).values.tolist(), (ds.x/5).values.tolist()]
dss = ds[['x/5', 'r/10']]
assert 'r' in dss.get_column_names()[0]
assert 'x' in dss.get_column_names()[1]
assert np.array(dss).T.tolist() == [(ds.x/5).values.tolist(), (ds.r/10).values.tolist()]
def test_access_data_after_virtual_column_creation(ds_local):
ds = ds_local
# we can access the x column
assert ds[['x']].values[:,0].tolist() == ds.x.values.tolist()
ds['virtual'] = ds.x * 2
# it should also work after we adde | d a virtual column
assert ds[['x']].values[:,0].tolist() == | ds.x.values.tolist()
def test_non_existing_column(df_local):
df = df_local
with pytest.raises(NameError, match='.*Did you.*'):
df['x_']
def test_alias(df_local):
df = df_local
df2 = df[['123456']]
assert '123456' in df2
|
atvcaptain/enigma2 | lib/python/Components/Renderer/AnalogClockLCD.py | Python | gpl-2.0 | 3,341 | 0.034122 | from __future__ import absolute_import
# original code is from openmips gb Team: [OMaClockLcd] Renderer #
# Thx to arn354 #
import math
from Components.Renderer.Renderer import Renderer
from skin import parseColor
from enigma import eCanvas, eSize, gRGB, eRect
from Components.VariableText import VariableText
from Components.config import config
class AnalogClockLCD(Renderer):
def __init__(self):
Renderer.__init__(self)
self.fColor = gRGB(255, 255, 255, 0)
self.fColors = gRGB(255, 0, 0, 0)
self.fColorm = gRGB(255, 0, 0, 0)
self.fColorh = gRGB(255, 255, 255, 0)
self.bColor = gRGB(0, 0, 0, 255)
self.forend = -1
self.linewidth = 1
self.positionheight = 1
self.positionwidth = 1
self.linesize = 1
GUI_WIDGET = eCanvas
def applySkin(self, desktop, parent):
attribs = []
for (attrib, what,) in self.skinAttributes:
if (attrib == 'hColor'):
self.fColorh = parseColor(what)
elif (attrib == 'mColor'):
self.fColorm = parseColor(what)
elif (attrib == 'sColor'):
self.fColors = parseColor(what)
elif (attrib == 'linewidth'):
self.linewidth = int(what)
elif (attrib == 'positionheight'):
self.positionheight = int(what)
elif (attrib == 'positionwidth'):
self.positionwidth = int(what)
elif (attrib == 'linesize'):
self.linesize = int(what)
else:
attribs.append((attrib, what))
self.skinAttributes = attribs
return Renderer.applySkin(self, desktop, parent)
def calc(self, w, r, m, m1):
a = (w * 6)
z = (math.pi / 180)
x = int(round((r * math.sin((a * z)))))
y = int(round((r * math.cos((a * z)))))
return ((m + x), (m1 - y))
def hand(self, opt):
width = self.positionwidth
height = self.positionheight
r = (width / 2)
r1 = (height / 2)
l = self.linesize
if opt == 'sec':
l = self.linesize
self.fColor = self.fColors
elif opt == 'min':
l = self.linesize
self.fColor = self.fColorm
else:
self.fColor = self.fColorh
(endX, endY,) = self.calc(self.forend, l, r, r1)
self.line_draw(r, r1, endX, endY)
def line_draw(self, x0, y0, x1, y1):
steep = (abs((y1 - y0)) > abs((x1 - x0)))
if steep:
x0, y0 = y0, x0
x1, y1 = y1, x1
if (x0 > x1):
x0, x1 = x1, x0
y0, y1 = y1, y0
if (y0 < y1):
ystep = 1
else:
ystep = -1
deltax = (x1 - x0)
deltay = abs((y1 - y0))
error = (-deltax / 2)
y = y0
for x in range(x0, (x1 + 1)):
if steep:
self.instance.fillRect(eRect(y, x, self.linewidth, self.linewidth), self.fColor)
else:
self.instance.fillRect(eRect(x, | y, self.linewidth, self.linewidth), self.fColor)
error = (error + deltay)
if (error > 0):
y = (y + ystep) |
error = (error - deltax)
def changed(self, what):
opt = (self.source.text).split(',')
try:
sopt = int(opt[0])
if len(opt) < 2:
opt.append('')
except Exception as e:
return
if (what[0] == self.CHANGED_CLEAR):
pass
elif self.instance:
self.instance.show()
if (self.forend != sopt):
self.forend = sopt
self.instance.clear(self.bColor)
self.hand(opt[1])
def parseSize(self, str):
(x, y,) = str.split(',')
return eSize(int(x), int(y))
def postWidgetCreate(self, instance):
for (attrib, value,) in self.skinAttributes:
if ((attrib == 'size') and self.instance.setSize(self.parseSize(value))):
pass
self.instance.clear(self.bColor)
|
DemocracyClub/yournextrepresentative | ynr/apps/uk_results/migrations/0023_auto_20160505_1636.py | Python | agpl-3.0 | 322 | 0 | from django.db import migrations
class Migration(migrations.Mig | ration):
dependencies = [("uk_results", "0022_postresult_confirmed_resultset")]
operations = [
migrations.AlterModelOptions(
name="candidateresult",
options={"ordering": | ("-num_ballots_reported",)},
)
]
|
somethingnew2-0/distadmin | distadmin/users/migrations/0002_set_site_domain_and_name.py | Python | mit | 4,351 | 0.007125 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.conf import settings
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Set site domain and name."""
Site = orm['sites.Site']
site = Site.objects.get(id=settings.SITE_ID)
site.domain = "example.com"
site.name = "distadmin"
site.save()
def backwards(self, orm):
"""Revert site domain and name to default."""
Site = orm['sites.Site']
site = Site.objects.get(id=settings.SITE_ID)
site.domain = 'example.com'
site.name = 'example.com'
site.save()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
| 'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'users.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTime | Field', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['sites', 'users']
symmetrical = True |
xinghai-sun/models | ssd/config/pascal_voc_conf.py | Python | apache-2.0 | 2,573 | 0 | from easydict import EasyDict as edict
import numpy as np
__C = edict()
cfg = __C
__C.TRAIN = edict()
__C.IMG_WIDTH = 300
__C.IMG_HEIGHT = 300
__C.IMG_CHANNEL = 3
__C.CLASS_NUM = 21
__C.BACKGROUND_ID = 0
# training settings
__C.TRAIN.LEARNING_RATE = 0.001 / 4
__C.TRAIN.MOMENTUM = 0.9
__C.TRAIN.BATCH_SIZE = 32
__C.TRAIN.NUM_PASS = 200
__C.TRAIN.L2REGULARIZATION = 0.0005 * 4
__C.TRAIN.LEARNING_RATE_DECAY_A = 0.1
__C.TRAIN.LEARNING_RATE_DECAY_B = 16551 * 80
__C.TRAIN.LEARNING_RATE_SCHEDULE = 'discexp'
__C.NET = edict()
# configuration for multibox_loss_layer
__C.NET.MBLOSS = edict()
__C.NET.MBLOSS.OVERLAP_THRESHOLD = 0.5
__C.NET.MBLOSS.NEG_POS_RATIO = 3.0
__C.NET.MBLOSS.NEG_OVERLAP = 0.5
# configuration for detection_map
__C.NET.DETMAP = edict()
__C.NET.DETMAP.OVERLAP_THRESHOLD = 0.5
__C.NET.DETMAP.EVAL_DIFFICULT = False
__C.NET.DETMAP.AP_TYPE = "11point"
# configuration for detection_output_layer
__C.NET.DETOUT = edict()
__C.NET.DETOUT.CONFIDENCE_THRESHOLD = 0.01
__C.NET.DETOUT.NMS_THRESHOLD = 0.45
__C.NET.DETOUT.NMS_TOP_K = 400
__C.NET.DETOUT.KEEP_TOP_K = 200
# configuration for priorbox_layer from conv4_3
__C.NET.CONV4 = edict()
__C.NET.CONV4.PB = edict()
__C.NET.CONV4.PB.MIN_SIZE = [30]
__C.NET.CONV4.PB.ASPECT_RATIO = [2.]
__C.NET.CONV4.PB.VARIANCE = [0.1, 0.1, 0.2, 0.2]
# configuration for priorbox_layer fr | om fc7
__C | .NET.FC7 = edict()
__C.NET.FC7.PB = edict()
__C.NET.FC7.PB.MIN_SIZE = [60]
__C.NET.FC7.PB.MAX_SIZE = [114]
__C.NET.FC7.PB.ASPECT_RATIO = [2., 3.]
__C.NET.FC7.PB.VARIANCE = [0.1, 0.1, 0.2, 0.2]
# configuration for priorbox_layer from conv6_2
__C.NET.CONV6 = edict()
__C.NET.CONV6.PB = edict()
__C.NET.CONV6.PB.MIN_SIZE = [114]
__C.NET.CONV6.PB.MAX_SIZE = [168]
__C.NET.CONV6.PB.ASPECT_RATIO = [2., 3.]
__C.NET.CONV6.PB.VARIANCE = [0.1, 0.1, 0.2, 0.2]
# configuration for priorbox_layer from conv7_2
__C.NET.CONV7 = edict()
__C.NET.CONV7.PB = edict()
__C.NET.CONV7.PB.MIN_SIZE = [168]
__C.NET.CONV7.PB.MAX_SIZE = [222]
__C.NET.CONV7.PB.ASPECT_RATIO = [2., 3.]
__C.NET.CONV7.PB.VARIANCE = [0.1, 0.1, 0.2, 0.2]
# configuration for priorbox_layer from conv8_2
__C.NET.CONV8 = edict()
__C.NET.CONV8.PB = edict()
__C.NET.CONV8.PB.MIN_SIZE = [222]
__C.NET.CONV8.PB.MAX_SIZE = [276]
__C.NET.CONV8.PB.ASPECT_RATIO = [2., 3.]
__C.NET.CONV8.PB.VARIANCE = [0.1, 0.1, 0.2, 0.2]
# configuration for priorbox_layer from pool6
__C.NET.POOL6 = edict()
__C.NET.POOL6.PB = edict()
__C.NET.POOL6.PB.MIN_SIZE = [276]
__C.NET.POOL6.PB.MAX_SIZE = [330]
__C.NET.POOL6.PB.ASPECT_RATIO = [2., 3.]
__C.NET.POOL6.PB.VARIANCE = [0.1, 0.1, 0.2, 0.2]
|
Nic30/hwtLib | hwtLib/examples/errors/errors_test.py | Python | mit | 2,037 | 0 | import unittest
from hwt.synthesizer.exceptions import TypeConversionErr
from hwt.synthesizer.rtlLevel.signalUtils.exceptions import \
SignalDriverErr
from hwt.synthesizer.utils import to_rtl_str |
from hwtLib.examples.errors.accessingSubunitInternalIntf import \
AccessingSubunitInternalIntf
from hwtLib.examples.errors.inconsistentIntfDirection import \
InconsistentIntfDirection
f | rom hwtLib.examples.errors.invalidTypeConnetion import InvalidTypeConnetion
from hwtLib.examples.errors.multipleDriversOfChildNet import \
MultipleDriversOfChildNet, MultipleDriversOfChildNet2
from hwtLib.examples.errors.unusedSubunit import UnusedSubunit, UnusedSubunit2
class ErrorsTC(unittest.TestCase):
    """Each intentionally broken example design must fail RTL synthesis."""

    def _expect_synthesis_error(self, unit, err_cls):
        # Shared check: the unit is constructed by the caller (so a
        # constructor failure surfaces as a test error, not a pass) and
        # only the RTL conversion itself is expected to raise.
        with self.assertRaises(err_cls):
            to_rtl_str(unit)

    def test_invalidTypeConnetion(self):
        self._expect_synthesis_error(InvalidTypeConnetion(), TypeConversionErr)

    def test_inconsistentIntfDirection(self):
        self._expect_synthesis_error(InconsistentIntfDirection(),
                                     SignalDriverErr)

    def test_multipleDriversOfChildNet(self):
        self._expect_synthesis_error(MultipleDriversOfChildNet(),
                                     (SignalDriverErr, AssertionError))

    def test_multipleDriversOfChildNet2(self):
        self._expect_synthesis_error(MultipleDriversOfChildNet2(),
                                     SignalDriverErr)

    def test_unusedSubunit(self):
        self._expect_synthesis_error(UnusedSubunit(), SignalDriverErr)

    def test_unusedSubunit2(self):
        self._expect_synthesis_error(UnusedSubunit2(), SignalDriverErr)

    def test_accessingSubunitInternalIntf(self):
        self._expect_synthesis_error(AccessingSubunitInternalIntf(),
                                     AssertionError)
if __name__ == '__main__':
    suite = unittest.TestSuite()
    # suite.addTest(ErrorsTC('testBitAnd'))
    # unittest.makeSuite() was deprecated in Python 3.2 and removed in 3.13;
    # TestLoader.loadTestsFromTestCase() is the supported replacement.
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ErrorsTC))
    runner = unittest.TextTestRunner(verbosity=3)
    runner.run(suite)
|
simark/simulavr | regress/test_opcodes/test_BLD.py | Python | gpl-2.0 | 2,630 | 0.018251 | #! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002 Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation | , Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
#
# $Id: test_BLD.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the BLD opcode.
"""
import base_test
from registers | import Reg, SREG
class BLD_TestFail(base_test.TestFail): pass
class base_BLD(base_test.opcode_test):
    """Generic test case for the BLD opcode (bit load from SREG.T).

    Encoding is '1111 100d dddd 0bbb' where d selects the register and b
    the bit within it.  Subclasses supply ``reg``, ``bit`` and ``T``.
    Only PC and Rd are expected to change.
    """
    def setup(self):
        """Program SREG.T, preload Rd with ~T bits, return the opcode word."""
        # SREG carries only the T flag under test.
        self.setup_regs[Reg.SREG] = self.T << SREG.T
        # Preload the register with the complement of T so a successful
        # BLD visibly flips the addressed bit.
        self.setup_regs[self.reg] = 0xff if self.T == 0 else 0x0
        # 1111 100d dddd 0bbb
        return 0xF800 | (self.reg << 4) | self.bit

    def analyze_results(self):
        """Verify that bit ``bit`` of Rd now mirrors the T flag."""
        self.reg_changed.append(self.reg)
        bit_mask = 1 << self.bit
        # Expected image: only the addressed bit follows T, the rest keep
        # their preloaded complement value.
        expect = bit_mask if self.T else 0xff & ~bit_mask
        got = self.anal_regs[self.reg]
        if expect != got:
            self.fail('r%02d bit %d not T(%d): expect=%02x, got=%02x' % (
                self.reg, self.bit, self.T, expect, got))
#
# Template code for test cases.  The generated fail() raises a
# test-specific exception so a failure identifies the exact
# (register, bit, T) combination.
#
template = """
class BLD_r%02d_bit%d_T%d_TestFail(BLD_TestFail): pass

class test_BLD_r%02d_bit%d_T%d(base_BLD):
    reg = %d
    bit = %d
    T = %d
    def fail(self, s):
        # raise-by-call works under both Python 2 and 3; the old
        # ``raise Exc, s`` form was Python 2 only.
        raise BLD_r%02d_bit%d_T%d_TestFail(s)
"""

#
# automagically generate the test_BLD_rNN_bitN_T[01] class definitions
#
code = ''
for t in (0, 1):
    for r in range(32):
        for b in range(8):
            code += template % (r, b, t, r, b, t, r, b, t, r, b, t)
# exec as a call is valid in both Python 2 and 3 (bare ``exec code`` is a
# SyntaxError under Python 3).
exec(code)
|
malcolmpl/fleetpanel | FleetPanel.py | Python | mit | 83,136 | 0.015276 | #!/usr/bin/python
# vim:sw=4:softtabstop=4:expandtab:set fileencoding=ISO8859-2
#
# FleetPanel.py, part of the FleetPanel
#
# Copyright (c) 2008-2009 Pawe³ 'Reef' Polewicz
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution. The terms
# are also available at http://www.opensource.org/licenses/mit-license.php.
print "start of imports"
import BaseHTTPServer
from BaseHTTPServer import HTTPServer
import cgi, random, sys
import httplib, urllib # for api
import sys
import pickle
import time
import os
from datetime import datetime, timedelta, date
from threading import Lock
from SocketServer import ThreadingMixIn
from string import capwords
import threading
import string
import copy
import Drawing
from Signature import Signature
from JumpBridge import JumpBridge
from Db_connection import Db_connection
from Roman import Roman
from Player import Player
from PilotList import PilotList
from CacheObject import CacheObject
from Stats import Stats
print "end of imports"
# --- Deployment configuration ------------------------------------------------
# NOTE: ``global`` statements at module scope are no-ops; they are kept only
# to preserve the original layout.
global g_allowed_alliance_ids
g_allowed_alliance_ids = ["-1"] # YOU MUST FILL THIS
global g_allowed_corp_ids
g_allowed_corp_ids = ["-1"] # YOU MUST FILL THIS
global global_refresh_rate
global_refresh_rate = 600 # how often should the panel autorefresh (apart from refresh after jump/undock etc)
global g_home_system
g_home_system = "Jita"
# Access passwords by clearance level (1=user, 2=fleet commander, 3=superadmin).
global g_password
g_password = {}
g_password[1] = "password1" # normal usage if the panel is restricted
g_password[2] = "password2" # fleet commanders
g_password[3] = "password3" # superadmin
global g_force_draw_jump_bridges
g_force_draw_jump_bridges = True
global g_print_headers_when_alliance_invalid
g_print_headers_when_alliance_invalid = True
###### NO NEED TO EDIT BELOW THIS LINE (apart from corporation api stuff) ######
def getMemberDataCache(charid, userid, apikey, corpname, cacheObject):
    """Return member data for *corpname*, refreshing the cache on miss/expiry."""
    cache_is_fresh = cacheObject.has(corpname) and not cacheObject.expired(corpname)
    if cache_is_fresh:
        return cacheObject.get(corpname)
    # Cache miss or stale entry: fetch from the API and store it with the
    # expiration time the API reported.
    fresh_data, expires_at = getMemberData(charid, userid, apikey, corpname)
    cacheObject.set(corpname, fresh_data, expires_at)
    return fresh_data
def getMemberData(charid, userid, apikey, corpname):
    """Fetch the corp member-tracking roster from the EVE Online API.

    Returns (pilotlist, cachedUntil): a list of per-pilot dicts parsed from
    the CSV endpoint, plus the cache-expiry string scraped from the XML
    endpoint.  NOTE(review): two separate HTTP requests are made, so the
    data and its cachedUntil stamp may come from slightly different moments.
    """
    # based on the Eve online api example
    params = urllib.urlencode( {
        'characterID': charid,
        'userid': userid,
        'apikey': apikey,
    } )
    headers = { "Content-type": "application/x-www-form-urlencoded" }
    conn = httplib.HTTPConnection("api.eve-online.com")
    conn.request("POST", "/corp/MemberTracking.csv.aspx", params, headers)
    response = conn.getresponse()
    rawdata = response.read()
    # Dump the raw response for debugging/auditing, keyed by epoch seconds.
    # Assumes the ./getMemberDataRaw/ directory already exists -- TODO confirm.
    dump = open("getMemberDataRaw/" + str( int( time.time() ) ), "w")
    dump.write(rawdata)
    dump.close()
    pilotlist = []
    for row in rawdata.split('\n'):
        rowlist = row.split(',') # character,start date,last logon,last logoff,base,location,ship,title,roles,grantable roles
        if len(rowlist)<7: # newline on the very end
            continue
        nick = rowlist[0]
        # Skip blank rows and the CSV header line ("character,...").
        if nick=="" or nick=="character":
            continue
        pilotDict = {}
        pilotDict['nick'] = rowlist[0]
        pilotDict['startdate'] = rowlist[1]
        pilotDict['lastlogon'] = rowlist[2]
        pilotDict['lastlogoff'] = rowlist[3]
        pilotDict['base'] = rowlist[4]
        pilotDict['location'] = rowlist[5]
        pilotDict['ship'] = rowlist[6]
        pilotDict['title'] = rowlist[7]
        pilotDict['roles'] = rowlist[8]
        pilotDict['grantableroles'] = rowlist[9].strip()
        pilotDict['corpname'] = corpname
        pilotlist.append(pilotDict)
    # FIXME: api cache must be implemented
    #cachedUntil = time.time()
    # Second request (XML flavour) only to scrape the <cachedUntil> value
    # from the second-to-last line of the response body.
    conn.request("POST", "/corp/MemberTracking.xml.aspx", params, headers)
    response = conn.getresponse()
    cachedUntil = response.read().split('\n')[-2].split('>')[1].split('<')[0]
    #print "CACHED UNTIL:", cachedUntil
    conn.close()
    return pilotlist, cachedUntil
def unpickle(filename):
    """Best-effort load of a pickled object from *filename*.

    Returns a (success, data) pair: (True, obj) on success, or
    (False, None) when the file is missing/unreadable or its contents
    cannot be unpickled.
    """
    try:
        # ``with`` guarantees the handle is closed even when pickle.load
        # raises (the old version leaked the handle in that case).
        with open(filename, 'rb') as pkl_file:
            data = pickle.load(pkl_file)
    except Exception:
        # Deliberately broad: a missing or corrupt cache file is not fatal.
        # Unlike the previous bare ``except:``, this still lets
        # KeyboardInterrupt/SystemExit propagate.
        return False, None
    return True, data
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTP server that serves each request on its own daemon thread."""
    daemon_threads = True
    allow_reuse_address = True
# /////////////////////////////////////// GENERAL \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
CACHED = 2
li_shiptypes_ordered = [ "Unknown", "TITAN", "Mothership", "DREAD", "CAPS", "Black Ops", "Marauder", "BS", "Command", "BC", "HAC", "HIC", "Recon", "Logistic", "Cruiser", "DICTOR", "Destroyer", "Covert", "CEP", "Frigate", "NOOBSHIP", "Capital Industrial Ship", "Jump Freighter", "Freighter", "Transport", "Industrial", "Electronic Attack", "Assault", "Mining Barge", "Exhumer", "Shuttle", "EGG" ]
    def do_GET(self):
        """BaseHTTPRequestHandler hook for GET; all rendering is shared with POST."""
        self.decide_panel("GET")
    def do_POST(self):
        """BaseHTTPRequestHandler hook for POST; delegates to the shared dispatcher."""
        self.decide_panel("POST")
    def decide_panel(self, method):
        """Dispatch one request: parse the password, check trust, pick a page.

        *method* is "GET" or "POST".  Images (map/stats PNGs) skip the HTML
        footer; everything else falls through to the war panel.
        """
        if self.path=="/favicon.ico":
            self.send_response(404)
            return
        # Ignore requests without a Host header.
        if not self.headers.has_key('Host'):
            return
        self.host = self.headers['Host']
        self.generate_start_time = time.time()
        self.out_init()
        global allow_global_refresh_rate_change
        # TODO: get_password()
        # The password is passed as the query string: /path?password
        self.password = ""
        q_mark = self.path.find("?")
        self.refresh_path = self.path
        if q_mark != -1:
            self.password = self.path[q_mark+1:]
            self.path = self.path[:q_mark]
        self.clearance = self.security_clearance()
        # ``image`` suppresses the HTML footer for binary responses.
        image = False
        # Only the EVE in-game browser in "trusted" mode sends the headers
        # the panel relies on.
        if not self.headers.has_key('Eve.trusted') or self.headers['Eve.trusted']=="no":
            self.handle_untrusted()
        elif allow_global_refresh_rate_change and self.path.startswith("/set_global_refresh_rate/"):
            self.handle_global_refresh_rate_change()
        elif self.path=="/img/map.png":
            if not self.security_query("map_image_data"):
                self.send_response(403)
                return
            self.handle_map()
            image = True
        elif self.path=="/img/stats.png":
            if not self.security_query("stats_image_data"):
                self.send_response(403)
                return
            self.handle_stats()
            image = True
        else: # war panel
            self.handle_war_panel(method)
        if not image:
            self.makefooter()
        self.out_commit()
    def send_headers_html(self):
        """Send a 200 + no-cache response header block for HTML pages."""
        self.send_headers_by_type("text/html")
    def send_headers_png(self):
        """Send a 200 + no-cache response header block for PNG images."""
        self.send_headers_by_type("image/png")
def send_headers_by_type(self, content_type):
self.send_response(200)
self.send_header("Pragma", "no-cache")
self.send_header("Cache-Control", "no-cache")
self.send_header("Content-type", content_type)
def handle_war_panel(self, method):
self.send_headers_html()
out = self.out
global global_refresh_rate
self.refresh_rate = global_refresh_rate
global g_allowed_alliance_ids
global g_allowed_corp_ids
if not self.headers.has_key('Eve.trusted'):
self.end_headers()
self.makeheader()
out("<br>Use Eve InGameBrowser!")
elif method=="POST" and not self.headers.has_key('Eve.Charid'):
self.end_headers()
self.makeheader()
out("<br>NOT EVE-TRUSTED! PLEASE ADD THIS SITE TO TRUSTED in OPTIONS/TRUSTED SITES!")
#for h in self.headers:
# out("<br>" + h + ": " + self.headers[h])
#elif not self.headers.has_key('eve.allianceid') or self.headers['eve.allianceid'] not in g_allowed_alliance_ids: # not our alliance?
elif not self.headers.has_key('eve.corpid') or self.headers['eve.corpid'] not in g_allowed_corp_ids: # TODO: FIXME: HACK
self.end_headers()
self.ma |
stallmanifold/cs229-machine-learning-stanford-fall-2016 | src/homework2/q3/svm_train.py | Python | apache-2.0 | 3,651 | 0.006574 | import numpy as np
import pandas as pd
def train(df_train, tau, max_iters):
    """Train an RBF-kernel SVM classifier from a labelled DataFrame.

    Column 0 of *df_train* holds the 0/1 class labels; the remaining
    columns are the token-count features.  *tau* is the kernel bandwidth
    and *max_iters* the number of SGD epochs.
    """
    # .values works on every pandas version; .as_matrix() was deprecated in
    # 0.23 and removed in pandas 1.0.
    Xtrain = df_train.iloc[:, 1:].values
    ytrain = df_train.iloc[:, 0].values
    classifier = SVM(Xtrain, ytrain, tau, max_iters)
    return classifier
class SVM:
    """RBF-kernel SVM trained by averaged stochastic subgradient descent."""

    def __init__(self, X, y, tau, max_iters):
        """Train on the (num_train_docs x num_tokens) matrix *X*.

        X: row i describes document i; entry (i, j) > 0 iff token j appears
           in the document (binarized below).
        y: length num_train_docs vector of 0/1 labels (1 == spam).
        tau: RBF kernel bandwidth.
        max_iters: number of passes (epochs) over the training set.

        The averaged dual weights are stored in ``self.alpha``.
        """
        # Make y be a vector of +/-1 labels and X be a {0, 1} matrix.
        Xtrain = 1 * (X > 0)
        ytrain = (2 * y - 1).T
        num_train_docs = Xtrain.shape[0]
        Xtrain_squared = np.sum(Xtrain * Xtrain, axis=1).reshape((num_train_docs, 1))
        gram_train = np.dot(Xtrain, Xtrain.T)
        # Vectorized RBF kernel: K[i, j] = exp(-||x_i - x_j||^2 / (2 tau^2)).
        Ktrain = np.exp(-(np.tile(Xtrain_squared, (1, num_train_docs))
                          + np.tile(Xtrain_squared.T, (num_train_docs, 1))
                          - 2 * gram_train) / (2 * np.power(tau, 2)))

        # Regularization constant.  Written as 1.0/... so the result is a
        # float even under Python 2 integer division; the old
        # ``1 / (64 * num_train_docs)`` evaluated to 0 there, silently
        # disabling regularization.
        lam = 1.0 / (64 * num_train_docs)

        alpha = np.zeros(num_train_docs)
        average_alpha = np.zeros(num_train_docs)

        t = 0
        for _ in range(max_iters * num_train_docs):
            t += 1
            # Pick one training example uniformly at random.
            idx = np.random.randint(num_train_docs)
            margin = ytrain[idx] * Ktrain[idx, :].dot(alpha)
            # Hinge-loss subgradient (active only when margin < 1) plus the
            # regularization term.
            grad = -((margin < 1) * ytrain[idx] * Ktrain[:, idx]) + \
                num_train_docs * lam * (Ktrain[:, idx] * alpha[idx])
            eta = 1.0 / np.sqrt(t)
            alpha = alpha - eta * grad
            average_alpha = average_alpha + alpha
        # Averaging the iterates reduces SGD noise in the final model.
        average_alpha = average_alpha / (max_iters * num_train_docs)

        self.Xtrain = Xtrain
        self.yTrain = ytrain
        self.Ktrain = Ktrain
        self.alpha = average_alpha
        self.tau = tau
        self.lam = lam

    def classify(self, X):
        """Return a +/-1 prediction for each row of the test matrix *X*."""
        num_test_docs = X.shape[0]
        num_train_docs = self.Xtrain.shape[0]
        alpha = self.alpha
        tau = self.tau
        Xtrain = self.Xtrain
        Xtest = 1 * (X > 0)
        Xtest_squared = np.sum(Xtest * Xtest, axis=1).reshape((num_test_docs, 1))
        Xtrain_squared = np.sum(Xtrain * Xtrain, axis=1).reshape((num_train_docs, 1))
        gram_test = np.dot(Xtest, Xtrain.T)
        # Vectorized test-vs-train RBF kernel matrix.
        Ktest = np.exp(-(np.tile(Xtest_squared, (1, num_train_docs))
                         + np.tile(Xtrain_squared.T, (num_test_docs, 1))
                         - 2 * gram_test) / (2 * np.power(tau, 2)))
        predictions = Ktest.dot(alpha)
        # Threshold the decision values at zero onto {-1, +1}.
        predictions = 2 * (predictions > 0) - 1
        return predictions
|
adityaduggal/erpnext | erpnext/config/desktop.py | Python | gpl-3.0 | 11,448 | 0.047182 | # coding=utf-8
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Item",
"_doctype": "Item",
"color": "#f39c12",
"icon": "octicon octicon-package",
"type": "link",
"link": "List/Item"
},
{
"module_name": "Customer",
"_doctype": "Customer",
"color": "#1abc9c",
"icon": "octicon octicon-tag",
"type": "link",
"link": "List/Customer"
},
{
"module_name": "Supplier",
"_doctype": "Supplier",
"color": "#c0392b",
"icon": "octicon octicon-briefcase",
"type": "link",
"link": "List/Supplier"
},
{
"_doctype": "Employee",
"module_name": "Employee",
"color": "#2ecc71",
"icon": "octicon octicon-organization",
"type": "link",
"link": "List/Employee"
},
{
"module_name": "Project",
"_doctype": "Project",
"color": "#8e44ad",
"icon": "octicon octicon-rocket",
"type": "link",
"li | nk": "List/Project"
},
{
"module_name": "Is | sue",
"color": "#2c3e50",
"icon": "octicon octicon-issue-opened",
"_doctype": "Issue",
"type": "link",
"link": "List/Issue"
},
{
"module_name": "Lead",
"icon": "octicon octicon-broadcast",
"type": "module",
"_doctype": "Lead",
"type": "link",
"link": "List/Lead"
},
{
"module_name": "Profit and Loss Statement",
"_doctype": "Account",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "link",
"link": "query-report/Profit and Loss Statement"
},
# old
{
"module_name": "Accounts",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "module",
"hidden": 1
},
{
"module_name": "Stock",
"color": "#f39c12",
"icon": "fa fa-truck",
"icon": "octicon octicon-package",
"type": "module",
"hidden": 1
},
{
"module_name": "CRM",
"color": "#EF4DB6",
"icon": "octicon octicon-broadcast",
"type": "module",
"hidden": 1
},
{
"module_name": "Selling",
"color": "#1abc9c",
"icon": "fa fa-tag",
"icon": "octicon octicon-tag",
"type": "module",
"hidden": 1
},
{
"module_name": "Buying",
"color": "#c0392b",
"icon": "fa fa-shopping-cart",
"icon": "octicon octicon-briefcase",
"type": "module",
"hidden": 1
},
{
"module_name": "HR",
"color": "#2ecc71",
"icon": "fa fa-group",
"icon": "octicon octicon-organization",
"label": _("Human Resources"),
"type": "module",
"hidden": 1
},
{
"module_name": "Manufacturing",
"color": "#7f8c8d",
"icon": "fa fa-cogs",
"icon": "octicon octicon-tools",
"type": "module",
"hidden": 1
},
{
"module_name": "POS",
"color": "#589494",
"icon": "octicon octicon-credit-card",
"type": "page",
"link": "pos",
"label": _("POS")
},
{
"module_name": "Leaderboard",
"color": "#589494",
"icon": "octicon octicon-graph",
"type": "page",
"link": "leaderboard",
"label": _("Leaderboard")
},
{
"module_name": "Projects",
"color": "#8e44ad",
"icon": "fa fa-puzzle-piece",
"icon": "octicon octicon-rocket",
"type": "module",
"hidden": 1
},
{
"module_name": "Support",
"color": "#2c3e50",
"icon": "fa fa-phone",
"icon": "octicon octicon-issue-opened",
"type": "module",
"hidden": 1
},
{
"module_name": "Learn",
"color": "#FF888B",
"icon": "octicon octicon-device-camera-video",
"type": "module",
"is_help": True,
"label": _("Learn"),
"hidden": 1
},
{
"module_name": "Maintenance",
"color": "#FF888B",
"icon": "octicon octicon-tools",
"type": "module",
"label": _("Maintenance"),
"hidden": 1
},
{
"module_name": "Student",
"color": "#c0392b",
"icon": "octicon octicon-person",
"label": _("Student"),
"link": "List/Student",
"_doctype": "Student",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Group",
"color": "#d59919",
"icon": "octicon octicon-organization",
"label": _("Student Group"),
"link": "List/Student Group",
"_doctype": "Student Group",
"type": "list",
"hidden": 1
},
{
"module_name": "Course Schedule",
"color": "#fd784f",
"icon": "octicon octicon-calendar",
"label": _("Course Schedule"),
"link": "Calendar/Course Schedule",
"_doctype": "Course Schedule",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Attendance Tool",
"color": "#C0392B",
"icon": "octicon octicon-checklist",
"label": _("Student Attendance Tool"),
"link": "List/Student Attendance Tool",
"_doctype": "Student Attendance Tool",
"type": "list",
"hidden": 1
},
{
"module_name": "Course",
"color": "#8e44ad",
"icon": "octicon octicon-book",
"label": _("Course"),
"link": "List/Course",
"_doctype": "Course",
"type": "list",
"hidden": 1
},
{
"module_name": "Program",
"color": "#9b59b6",
"icon": "octicon octicon-repo",
"label": _("Program"),
"link": "List/Program",
"_doctype": "Program",
"type": "list",
"hidden": 1
},
{
"module_name": "Student Applicant",
"color": "#4d927f",
"icon": "octicon octicon-clippy",
"label": _("Student Applicant"),
"link": "List/Student Applicant",
"_doctype": "Student Applicant",
"type": "list",
"hidden": 1
},
{
"module_name": "Fees",
"color": "#83C21E",
"icon": "fa fa-money",
"label": _("Fees"),
"link": "List/Fees",
"_doctype": "Fees",
"type": "list",
"hidden": 1
},
{
"module_name": "Instructor",
"color": "#a99e4c",
"icon": "octicon octicon-broadcast",
"label": _("Instructor"),
"link": "List/Instructor",
"_doctype": "Instructor",
"type": "list",
"hidden": 1
},
{
"module_name": "Room",
"color": "#f22683",
"icon": "fa fa-map-marker",
"label": _("Room"),
"link": "List/Room",
"_doctype": "Room",
"type": "list",
"hidden": 1
},
{
"module_name": "Education",
"color": "#428B46",
"icon": "octicon octicon-mortar-board",
"type": "module",
"label": _("Education"),
"hidden": 1
},
{
"module_name": "Healthcare",
"color": "#FF888B",
"icon": "fa fa-heartbeat",
"type": "module",
"label": _("Healthcare"),
"hidden": 1
},
{
"module_name": "Patient",
"color": "#6BE273",
"icon": "fa fa-user",
"doctype": "Patient",
"type": "link",
"link": "List/Patient",
"label": _("Patient"),
"hidden": 1
},
{
"module_name": "Patient Appointment",
"color": "#934F92",
"icon": "fa fa-calendar-plus-o",
"doctype": "Patient Appointment",
"type": "link",
"link": "List/Patient Appointment",
"label": _("Patient Appointment"),
"hidden": 1
},
{
"module_name": "Consultation",
"color": "#2ecc71",
"icon": "fa fa-stethoscope",
"doctype": "Consultation",
"type": "link",
"link": "List/Consultation",
"label": _("Consultation"),
"hidden": 1
},
{
"module_name": "Lab Test",
"color": "#7578f6",
"icon": "octicon octicon-beaker",
"doctype": "Lab Test",
"type": "list",
"link": "List/Lab Test",
"label": _("Lab Test"),
"hidden": 1
},
{
"module_name": "Hub",
"color": "#009248",
"icon": "/assets/erpnext/images/hub_logo.svg",
"type": "page",
"link": "hub",
"label": _("Hub")
},
{
"module_name": "Data Import",
"color": "#FFF168",
"reverse": 1,
"doctype": "Data Import",
"icon": "octicon octicon-cloud-upload",
"label": _("Data Import"),
"link": "List/Data Import",
"type": "list"
},
{
"module_name": "Restaurant",
"color": "#EA81E8",
"icon": "🍔",
"_doctype": "Restaurant",
"type": "list",
"link": "List/Restaurant",
"label": _("Restaurant"),
"hidden": 1
},
{
"module_name": "Agriculture",
"color": "#8BC34A",
"icon": "octicon octicon-globe",
"type": "module",
"label": _("Agriculture"),
"hidden": 1
},
{
"module_name": "Crop",
"_doctype": "Crop",
"label": _("Crop"),
"color": "#8BC34A",
"icon": "fa fa-tree",
"type": "list",
"link": "List/Crop",
"hidden": 1
},
{
"module_name": "Crop Cycle",
"_doctype": "Crop Cycle",
|
thethythy/Mnemopwd | mnemopwd/server/util/__init__.py | Python | bsd-2-clause | 1,500 | 0.002667 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# a | re permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# T | HIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Package metadata for mnemopwd.server.util (values kept verbatim).
__author__ = "Thierry Lemeunier <thierry at lemeunier dot net>"
__date__ = "$9 sept. 2015 17:10:29$"
|
chirpradio/chirpradio-machine | chirp/common/mp3_frame.py | Python | apache-2.0 | 8,719 | 0.000803 | """
Convert a stream into a sequence of MPEG audio frames.
"""
import sys

from chirp.common import id3_header
from chirp.common import mp3_header
# The size of the chunks of data we read from file_obj.  The largest
# possible MP3 frame is 1045 bytes; this value must be larger than that
# so a complete frame always fits in one refill.  Keeping the value low
# helps performance by minimizing the number of string copies.
_READ_SIZE = 4 << 10  # 4 KiB
def split(file_obj, expected_hdr=None):
    """Extract a sequence of MPEG audio frames from a file-like object.

    Args:
      file_obj: A file-like object
      expected_hdr: If given, only yield frames matching this MP3Header
        template

    Yields:
      A (hdr, data_buffer) pair.
      If 'hdr' is None, data buffer contains non-MPEG-audio junk that
      was found inside the stream.  Otherwise 'hdr' is an MP3Header object
      and 'data_buffer' contains the MP3 frame.
    """
    def _read_chunks():
        # Pull fixed-size chunks until the stream is exhausted.
        chunk = file_obj.read(_READ_SIZE)
        while chunk:
            yield chunk
            chunk = file_obj.read(_READ_SIZE)

    # All of the real parsing work happens in split_blocks; this function
    # merely adapts a file object into a stream of blocks.
    for pair in split_blocks(_read_chunks(), expected_hdr=expected_hdr):
        yield pair
def split_blocks(block_iter, expected_hdr=None):
    """Extract a sequence of MPEG audio frames from a stream of data blocks.

    Args:
      block_iter: An iterator that yields a sequence of data blocks.
      expected_hdr: If given, only yield frames matching this MP3Header
        template

    Yields:
      A (hdr, data_buffer) pair.
      If 'hdr' is None, data buffer contains non-MPEG-audio junk that
      was found inside the stream.  Otherwise 'hdr' is an MP3Header object
      and 'data_buffer' contains the MP3 frame.
    """
    buffered = ''
    current_hdr = None
    at_end_of_stream = False
    to_be_skipped = 0
    while True:
        # First we skip data if necessary.
        while to_be_skipped > 0:
            assert current_hdr is None
            # If we don't have anything in our buffer, pull in the
            # next block.
            if not buffered:
                try:
                    # next(it) instead of the Python-2-only it.next();
                    # works on Python 2.6+ and Python 3.
                    buffered = next(block_iter)
                except StopIteration:
                    sys.stderr.write(
                        "Stream ended while skipping data "
                        "between frames (probably ID3 headers).\n")
                    at_end_of_stream = True
                    break
            # If the buffer contains less than the amount of data to
            # be skipped, yield it all and update to_be_skipped.
            # Otherwise slice the amount to be skipped off of the
            # front of the buffer.
            if len(buffered) <= to_be_skipped:
                yield None, buffered
                to_be_skipped -= len(buffered)
                buffered = ''
            else:
                yield None, buffered[:to_be_skipped]
                buffered = buffered[to_be_skipped:]
                to_be_skipped = 0
        # We try to have at least _READ_SIZE bytes of data buffered.
        if len(buffered) < _READ_SIZE:
            # To avoid excess string copies, we collect data in a list
            # until we have the desired amount, then concatenate it all
            # at the end.
            buffered_list = [ buffered ]
            buffered_size = len(buffered)
            while buffered_size < _READ_SIZE:
                try:
                    next_block = next(block_iter)
                except StopIteration:
                    at_end_of_stream = True
                    break
                buffered_list.append(next_block)
                buffered_size += len(next_block)
            buffered = ''.join(buffered_list)
        # Are we at the end of the file?  If so, break out of the
        # "while True:" loop
        if not buffered:
            break
        # Do we have an MP3 header?  If so, yield the frame and then
        # slice it off of our buffer.
        if current_hdr:
            current_frame = buffered[:current_hdr.frame_size]
            # If we found a full-length frame, yield it.  Otherwise
            # return the truncated frame as junk.  (We can be sure not
            # to throw away a valid frame since we buffer at least the
            # next _READ_SIZE bytes, and _READ_SIZE is larger than any
            # possible MP3 frame.
            if len(current_frame) != current_hdr.frame_size:
                current_hdr = None
            yield current_hdr, current_frame
            current_hdr = None
            buffered = buffered[len(current_frame):]
        # Look for the next ID3 header.
        id3_size, id3_offset = id3_header.find_size(buffered)
        # Look for the next MP3 header.
        next_hdr, offset = mp3_header.find(buffered, expected_hdr=expected_hdr)
        # If we see an ID3 header before the next MP3 header, skip past the
        # ID3.  We do this out of paranoia, since an ID3 header might contain
        # false synch.
        if id3_size is not None and id3_offset < offset:
            to_be_skipped = id3_offset + id3_size
            continue
        # We are starting on this header.
        current_hdr = next_hdr
        # If we cannot make any progress and are at the end of the
        # stream, just return what we have buffered as junk and then
        # break out of the loop
        if (current_hdr, offset) == (None, 0) and at_end_of_stream:
            if buffered:
                yield None, buffered
            break
        # Did we find junk before the next frame?  If so, yield it.
        if offset > 0:
            yield None, buffered[:offset]
            buffered = buffered[offset:]
def split_one_block(data, expected_hdr=None):
    """Extract a sequence of MPEG audio frames from a single block of data.

    Args:
      data: A data buffer containing the data to be split into MPEG frames.

    Returns:
      A list of (hdr, data_buffer) pairs, as produced by split_blocks:
      'hdr' is None for junk spans, otherwise an MP3Header object with
      'data_buffer' holding the frame bytes.
    """
    # split_blocks consumes a block *iterator*, so wrap the single buffer.
    single_block_stream = iter([data])
    return [pair for pair in split_blocks(single_block_stream,
                                          expected_hdr=expected_hdr)]
# This is a 44.1Khz, 112Kbps joint stereo MPEG frame of digitized
# silence.  It was produced by sampling from a Barix with no active
# input.  Like any 44.1Khz MPEG frame, it has a duration of
# 26.12245ms.
# The literal below is exactly one MP3 frame; note the "\xff\xfa" sync
# word at the start.  Do not edit the bytes -- consumers rely on this
# being a valid, decodable frame.
_DEAD_AIR_DATA = (
    "\xff\xfa\x80\x48\x80\x3d\x00\x00\x02\x5f\x09\x4f\x79\x26\x31\x20"
    "\x4b\x81\x29\xcf\x24\xc2\x24\x09\x94\x23\x3f\xe3\x18\x44\x81\x1f"
    "\x84\x67\x3c\x92\x8c\x90\x8b\xbb\xab\xaa\x77\x6d\x76\x91\xb4\x54"
    "\x46\x74\x4e\xa8\xa1\x57\x75\x20\xa3\x8b\x98\xb3\x1e\xd1\xea\x78"
    "\x71\x86\xd2\x6d\x49\x71\x93\x93\x91\x45\xaa\x38\x73\xe2\xab\x26"
    "\xd8\xe9\xed\xa1\x0b\xb5\xc5\x6f\x36\xb6\x9f\x16\xba\xc4\x8a\x9e"
    "\x26\x7d\x75\x54\xf5\xa7\x2c\xb6\x1c\x41\x8a\x75\xf6\xb2\x0d\xac"
    "\x06\x2e\xd3\x55\x53\x30\xec\xb6\x59\x23\x44\x4b\x4f\x9a\x0f\x1a"
    "\x07\x03\x22\x38\xf1\xa1\xc3\x80\xc8\x25\x81\xe2\xe8\x11\x15\x87"
    "\x25\xf2\xeb\x4e\x31\xfd\x41\x6a\xa2\xf5\x20\x28\xbb\x07\x10\x0d"
    "\xac\xdb\xcb\x29\xe9\x1f\xd8\x86\xd6\xfa\x48\xe8\x1a\xa8\x9a\xeb"
    "\x90\xe1\xe7\x9e\x28\xe3\xe8\x15\x2f\xc0\x8f\xa5\x22\xd1\x79\x95"
    "\x75\x50\xcf\xbe\xda\xd8\xcd\x70\x00\xd0\x12\xc0\x21\x41\xc4\xa2"
    "\x40\xf1\x9c\x10\x9c\x12\xd8\x2a\x94\xcc\xa4\x09\x6c\xe9\x7a\x98"
    "\xe6\x15\x06\x5e\x96\xcf\x2b\xd6\xb6\xbb\x16\x68\xd4\xa5\xa2\xdc"
    "\x4f\x31\x02\xf4\x91\x50\x49\x4f\x58\xc2\xf3\xa6\x49\x0a\xb0\x3f"
    "\x1e\x2f\xdd\x7a\xca\x3d\xc3\x03\x54\x1b\x6a\xa9\x0a\x97\x74\x49"
    "\x24\xb1\xa2\x2b\x8e\x09\x08\x15\x81\xb1\xc4\x02\x82\x44\xa1\x30"
    "\x10\xc4\x21\xe5\x92\xb9\xfa\x49\xa0\x9a\xec\xf5\xbc\x51\x62\xe3"
    "\xd3\x60\x55\xac\x78\x77\x27\x4d\xe6\xda\x80\x71\x76\x54\x93\x2f"
    "\x52\xe0\x0f\xa9\xee\xb1\x54\x86\x0b\x2d\xf6\xd5\x53\x9a\x2d\x9c"
    "\x72\x90\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
_DEAD_AIR_DURATION_MS = mp3_header._SAMPLES_PER_FRA |
pglotov/django-rest-framework-gis | setup.py | Python | mit | 2,039 | 0.000981 | import sys
import os
from setuptools import setup, find_packages
from rest_framework_gis import get_version
def get_install_requires():
    """
    Parse requirements.txt: skip comments, blank lines and URL/VCS links.

    The previous ``line == ''`` check never matched because readlines()
    keeps the trailing newline, so blank lines leaked into the result and
    every requirement carried a trailing '\\n'.  Stripping each line fixes
    both problems.
    """
    requirements = []
    with open('requirements.txt') as req_file:
        for line in req_file:
            line = line.strip()
            # skip to next iteration if comment, blank line, or link
            if not line or line.startswith(('#', 'http', 'git')):
                continue
            # add line to requirements
            requirements.append(line)
    return requirements
# Convenience release path: ``python setup.py publish`` uploads an sdist and
# reminds the maintainer to tag the release.
if sys.argv[-1] == 'publish':
    os.system("python setup.py sdist upload")
    args = {'version': get_version()}
    print("You probably want to also tag the version now:")
    print("  git tag -a %(version)s -m 'version %(version)s'" % args)
    print("  git push --tags")
    sys.exit()

setup(
    name='djangorestframework-gis',
    version=get_version(),
    license='BSD',
    author='Douglas Meehan',
    author_email='dmeehan@gmail.com',
    description='Geographic add-ons for Django Rest Framework',
    url='https://github.com/djangonauts/django-rest-framework-gis',
    download_url='https://github.com/djangonauts/django-rest-framework-gis/releases',
    # fixed typo: was 'Platform Indipendent'
    platforms=['Platform Independent'],
    keywords=['django', 'rest-framework', 'gis', 'geojson'],
    packages=find_packages(exclude=['tests', 'tests.*']),
    install_requires=get_install_requires(),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Topic :: Internet :: WWW/HTTP',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Framework :: Django',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ]
)
|
Linguistics-DTU/DTU_8th_Sem_Project | code/bunch-of-taggers.py | Python | gpl-3.0 | 10,340 | 0.017505 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 24 08:43:26 2016
@author: user
"""
#import nltk
# eo_words = nltk.corpus.udhr.word_tokenizer('Esperanto-UTF8')
# eo_words = nltk.corpus.udhr.raw('Esperanto-UTF8')
# eo_words
#print(eo_words)
######################################
"""
English
English-Latin1
"""
import nltk

# One UDHR corpus fileid per language (first usable encoding variant from
# the original notebook-style script).
# NOTE(review): nltk.pos_tag ships an *English* tagger, so the tags
# produced for the non-English corpora are not meaningful POS annotations
# (the original author already observed this for Farsi and Chinese).
UDHR_FILEIDS = [
    'English-Latin1',
    'Esperanto-UTF8',
    'German_Deutsch-Latin1',
    'French_Francais-Latin1',
    'Russian-UTF8',
    'Farsi_Persian-UTF8',        # "something is wrong as most words are just NNP"
    'Finnish_Suomi-Latin1',
    'Hungarian_Magyar-Latin1',
    'Turkish_Turkce-Turkish',
    'Mongolian_Khalkha-Cyrillic',
    'Chinese_Mandarin-GB2312',   # "SEEMS WRONG!" per the original notes
    'Japanese_Nihongo-EUC',
    'Korean_Hankuko-UTF8',
    'Hebrew_Ivrit-Hebrew',
    'Hindi-UTF8',
    'Kazakh-UTF8',
    'Swedish_Svenska-Latin1',
    'Icelandic_Yslenska-Latin1',
    'Sanskrit-UTF8',
    'Latin_Latina-Latin1',
    'Greek_Ellinika-Greek',
    'Swahili_Kiswahili-Latin1',
]

# FIX: the original repeated the same three-line cell 22 times (once per
# language) and two of those cells contained ' | ' corruption tokens that
# broke the syntax.  A single data-driven loop preserves the behaviour
# (results were discarded in the original as well).
for fileid in UDHR_FILEIDS:
    nltk.pos_tag(nltk.corpus.udhr.words(fileid))

# Other corpora available but not tagged here (kept from the original
# notes): Chechewa_Nyanja-Latin1, Mayan_Yucateco-Latin1,
# Miskito_Miskito-Latin1, Mixteco-Latin1, Nahuatl-Latin1, Quechua-Latin1
# Hand-tagged Esperanto sample in "word/TAG" (Penn Treebank style) form.
tagged_token = '''Sur/IN tiu/DT dolca/JJ tero/NNS vivas/VBP jam/RB de/IN miljaroj/NNP unu/CD el/IN plej/RB malnovaj/JJ gentoj/NNP de/IN
la/DT aria/JJ mondo./NNS En/IN la/DT norda/JJ parto/NNS estas/VBP parolata/JJ ankorau/RB la/DT antikva/JJ
lingvo/NNS litova,/JJ proksima/JJ a /IN la/DT sanskrita./JJ En/IN puraj/JJ moroj/NNP kaj/CC popolaj/JJ
kantoj/NNP iel/RB regas/VBP atmosfero/NNS mistera/JJ kun/IN influoj/NNP pensigaj/JJ al/IN Hindujo/NNS pratempa./JJ'''
# Convert the annotated text into (word, tag) tuples.
[nltk.tag.str2tuple(t) for t in tagged_token.split()]
#############################
############################
##############################
##############################
############################
# Second, longer hand-tagged sample, converted the same way.
text = '''
Longe/RB vivadis/VBD en/IN paco/NNS tiu/DT gento/NNS trankvila,/JJ de/IN Kristanismo/NNS netusita/JJ gis/IN
dek-tria/dek-tria jarcento./NNS De/IN la/DT cetera/JJ mondo/NNS forkasita/JJ per/IN marcoj/NNP kaj/CC densaj/JJ
arbaregoj,/NNP kie/RB kuras/VBP gis/IN nun/VB sovagaj/JJ urbovoj,/NNP la/DT popolo/NNS dauris/VBD adori/ii la/DT
fortojn/NNP de/IN la/DT naturo/NNS sub/IN gigantaj/JJ kverkoj,/NNP vivanta/JJ templo/NNS de/IN la/DT dioj./NNP
Tie/RB tamen/RB ekbatalis/VBD okcidenta/JJ volo/NNS kun/IN orienta/JJ pacienco./NNS En/IN la/DT mezepoko/NNS
teutonaj/JJ kavaliroj/NNP tiun/DT landon/NNS almilitis,/VBD polaj/JJ nobeloj/NNP gin/PRP ligis/VBD al/IN sia/PRP$
stato,/NNS moskova/JJ caro/NNS gin/PRP atakis./VBD Dume/RB alkuradis/VBD el/IN tuta/JJ mondo/NNS
persekutataj/JJ Hebreoj/NNP por/IN starigi/ii manlaboron/NNS kaj/CC komercon/NNS lau/IN invito/NNS
rega./JJ Tiel/RB alia/JJ gento/NNS tre/RB maljuna/JJ trovis/VBD tie/RB novan/JJ Palestinon/NNS kaj/CC fondis/VBD
urbojn/NNP au/CC plenigis/VBD ilin./PRP'''
[nltk.tag.str2tuple(t) for t in text.split()]
#############################################
import nltk

# NOTE(review): despite the ``en_`` prefix these variables hold the
# *Icelandic* corpus tagged with nltk's English tagger; names kept to
# avoid churn, but the tags are only indicative.
en_words = nltk.pos_tag(nltk.corpus.udhr.words('Icelandic_Yslenska-Latin1'))
# FIX: replaced the ``for i in range(len(...))`` + append loop with a
# comprehension over the (word, tag) pairs.
en_words_types = [tag for _word, tag in en_words]
print(set(en_words_types))

# (language name, available UDHR corpus file variants)
list_of_languages = [
    ['English', ['English-Latin1'] ]
    ,['Esperanto', ['Esperanto-UTF8']]
    ,['German', ['German_Deutsch-Latin1']]
    ,['French', ['French_Francais-Latin1']]
    ,['Russian', ['Russian-UTF8','Russian_Russky-Cyrillic','Russian_Russky-UTF8']]
    ,['Farsi', ['Farsi_Persian-UTF8', 'Farsi_Persian-v2-UTF8']]
    ,['Finnish',['Finnish_Suomi-Latin1']]
    ,['Hungarian',['Hungarian_Magyar-Latin1','Hungarian_Magyar-Latin2','Hungarian_Magyar-UTF8']]
    ,['Turkish', ['Turkish_Turkce-Turkish','Turkish_Turkce-UTF8']]
    ,['Mongolian', ['Mongolian_Khalkha-Cyrillic','Mongolian_Khalkha-UTF8']]
    ,['Chinese',['Chinese_Mandarin-GB2312']]
    ,['Japanese',['Japanese_Nihongo-EUC','Japanese_Nihongo-SJIS','Japanese_Nihongo-UTF8']]
    ,['Korean',['Korean_Hankuko-UTF8']]
    ,['Hebrew',['Hebrew_Ivrit-Hebrew','Hebrew_Ivrit-UTF8']]
    ,['Hindi',['Hindi-UTF8','Hindi_web-UTF8']]
    ,['Kazakh',['Kazakh-Cyrillic','Kazakh-UTF8']]
    ,['Swedish',['Swedish_Svenska-Latin1']]
    ,['Icelandic' ,['Icelandic_Yslenska-Latin1']]
    ,['Sanskrit ' ,['Sanskrit-UTF8']]
    ,['Latin',['Latin_Latina-Latin1', 'Latin_Latina-v2-Latin1']]
    ,['Greek', ['Greek_Ellinika-Greek', 'Greek_Ellinika-UTF8']]
    ,['Swahili', ['Swaheli-Latin1','Swahili_Kiswahili-Latin1']]
]

# FIX: iterate the entries directly instead of indexing by range(len(...)).
for language_name, _variants in list_of_languages:
    print(language_name)
####################################
|
dkarchmer/django-aws-template | server/apps/main/tests.py | Python | mit | 4,010 | 0.002743 | #import unittest
import json
from django.contrib.auth import get_user_model
from django.core import mail
from django.test import Client, TestCase
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APIClient, APIRequestFactory
from .models import *
user_model = get_user_model()
class MainTestCase(TestCase):
    """
    Smoke tests for the main app: page availability and (currently
    disabled) contact-message API permissions.
    Fixture includes: (none loaded; users are created in setUp)
    """
    #fixtures = ['testdb_main.json']
    def setUp(self):
        # One superuser and two regular users, all activated.
        self.u1 = user_model.objects.create_superuser(username='User1', email='user1@foo.com', password='pass')
        self.u1.is_active = True
        self.u1.save()
        self.u2 = user_model.objects.create_user(username='User2', email='user2@foo.com', password='pass')
        self.u2.is_active = True
        self.u2.save()
        self.u3 = user_model.objects.create_user(username='User3', email='user3@foo.com', password='pass')
        self.u3.is_active = True
        self.u3.save()
        return
    def tearDown(self):
        user_model.objects.all().delete()
    def testPages(self):
        # Anonymous access to the public pages.
        response = self.client.get('/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self.client.get('/api/v1/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self.client.get('/robots.txt')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Same pages while authenticated.
        response = self.client.login(email='user1@foo.com', password='pass')
        response = self.client.get('/', {})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self.client.get('/api/v1/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.client.logout()
    def testPostContactMessage(self):
        '''
        Disabled test (kept as documentation of the intended behaviour):
        anyone may POST a message; only staff/superusers may read them.

        resp = self.client.post('/api/v1/message', {'name':'M1', 'email':'foo@bar.com',
                                                    'phone':'(650)555-1234',
                                                    'message':'This is a test'},
                                format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        resp = self.client.post('/api/v1/message', {'name':'M1', 'email':'foo@bar.com',
                                                    'message':'This is another test from same user'},
                                format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        resp = self.client.post('/api/v1/message', {'name':'M2', 'email':'foo@foo.com',
                                                    'message':'This is a test'},
                                format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        # Nobody should be able to read
        resp = self.client.get('/api/v1/message', data={'format': 'json'})
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
        resp = self.client.get('/api/v1/message/1', data={'format': 'json'})
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
        # even if logged in but not staff
        self.client.login(email='user2@foo.com', password='pass')
        resp = self.client.get('/api/v1/message', data={'format': 'json'})
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
        resp = self.client.get('/api/v1/message/1', data={'format': 'json'})
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
        self.client.logout()
        # SuperUser or Staff can access it
        self.client.login(email='user1@foo.com', password='pass')
        resp = self.client.get('/api/v1/message', data={'format': 'json'})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        deserialized = json.loads(resp.content)
        self.assertEqual(deserialized['count'], 3)
        resp = self.client.get('/api/v1/message/1', data={'format': 'json'})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.client.logout()
        '''
|
tbabej/astropy | astropy/extern/configobj/configobj.py | Python | bsd-3-clause | 88,262 | 0.001496 | # configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
import os
import re
import sys
import collections
from codecs import BOM_UTF8, B | OM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
from ...extern import six
from ...extern.six.moves import range, zip, map
# from __future__ import __version__
# imported lazily to avoid startup performance hit if it isn't used
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf | _16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
def match_utf8(encoding):
    """Return True if *encoding* is any recognised spelling of UTF-8."""
    normalized = encoding.lower()
    return BOM_LIST.get(normalized) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
# Characters that force a value to be quoted when written.
wspace_plus = ' \r\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
__all__ = (
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'flatten_errors',
'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
def getObj(s):
    # NOTE(review): relies on the Python-2-only ``compiler`` package
    # (removed in Python 3); this legacy path is presumably only reachable
    # on Python 2 — confirm before removing.
    global compiler
    if compiler is None:
        import compiler
    # Wrap the value in an assignment so it parses as a module.
    s = "a=" + s
    p = compiler.parse(s)
    # Drill down to the right-hand side of the assignment.
    return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
    # Raised by the legacy ``getObj``/``Builder`` path for AST nodes it
    # cannot rebuild into a value.
    pass
class Builder(object):
def build(self, o):
if m is None:
raise UnknownType(o.__class__.__name__)
return m(o)
def build_List(self, o):
return list(map(self.build, o.getChildren()))
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = next(i)
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
if o.name == 'None':
return None
if o.name == 'True':
return True
if o.name == 'False':
return False
# An undefined Name
raise UnknownType('Undefined Name')
def build_Add(self, o):
real, imag = list(map(self.build_Const, o.getChildren()))
try:
real = float(real)
except TypeError:
raise UnknownType('Add')
if not isinstance(imag, complex) or imag.real != 0.0:
raise UnknownType('Add')
return real+imag
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_UnarySub(self, o):
return -self.build_Const(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
    """Turn a string representation back into a Python value.

    Empty input is returned unchanged; everything else is parsed with
    ``ast.literal_eval``, which only accepts literal syntax and is
    therefore safe on untrusted text.
    """
    if s:
        # this is supposed to be safe
        import ast
        return ast.literal_eval(s)
    return s
class ConfigObjError(SyntaxError):
    """
    This is the base class for all errors that ConfigObj raises.
    It is a subclass of SyntaxError.
    """
    def __init__(self, message='', line_number=None, line=''):
        # Keep the offending line and its number for error reporting.
        self.line = line
        self.line_number = line_number
        SyntaxError.__init__(self, message)


class NestingError(ConfigObjError):
    """
    This error indicates a level of nesting that doesn't match.
    """


class ParseError(ConfigObjError):
    """
    This error indicates that a line is badly written.
    It is neither a valid ``key = value`` line,
    nor a valid section marker line.
    """


class ReloadError(IOError):
    """
    A 'reload' operation failed.
    This exception is a subclass of ``IOError``.
    """
    def __init__(self):
        IOError.__init__(self, 'reload failed, filename is not set.')


class DuplicateError(ConfigObjError):
    """
    The keyword or section specified already exists.
    """


class ConfigspecError(ConfigObjError):
    """
    An error occured whilst parsing a configspec.
    """


class InterpolationError(ConfigObjError):
    """Base class for the two interpolation errors."""


class InterpolationLoopError(InterpolationError):
    """Maximum interpolation depth exceeded in string interpolation."""
    def __init__(self, option):
        InterpolationError.__init__(
            self,
            'interpolation loop detected in value "%s".' % option)


class RepeatSectionError(ConfigObjError):
    """
    This error indicates additional sections in a section with a
    ``__many__`` (repeated) section.
    """


class MissingInterpolationOption(InterpolationError):
    """A value specified for interpolation was missing."""
    def __init__(self, option):
        msg = 'missing option "%s" in interpolation.' % option
        InterpolationError.__init__(self, msg)


class UnreprError(ConfigObjError):
    """An error parsing in unrepr mode."""
class InterpolationEngine(object):
"""
A helper class to help perform string interpolation.
This class is an abstract base class; its descendants perform
the actual work.
"""
# compiled regexp to use in self.interpolate()
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
_cookie = '%'
def __init__(self, section):
# the Section instance that "owns" this engine
self.section = section
def interpolate(self, key, value):
# short-cut
if not self._cookie in value:
return value
def recursive_interpolate(key, value, section, backtrail):
"""The function that does the actual work.
``value``: the string we're trying to interpolate.
``section``: the section in which that string was found
``backtrail``: a dict to keep track of where we've been,
to detect and prevent infinite recursion loops
This is similar to a depth-first-search algorithm.
"""
# Have we been here already?
if (key, section.name) in backtrail:
# Yes - infinite loop detected
raise InterpolationLoopError(key)
# Place a marker on our backtrail so we won't come back here again
|
tellybug/dynamic-dynamodb | dynamic_dynamodb/statistics/gsi.py | Python | apache-2.0 | 6,518 | 0 | # -*- coding: utf-8 -*-
""" This module returns stats about the DynamoDB table """
import math
from datetime import datetime, timedelta
from boto.exception import JSONResponseError, BotoServerError
from retrying import retry
from dynamic_dynamodb.aws import dynamodb
from dynamic_dynamodb.log_handler import LOGGER as logger
from dynamic_dynamodb.aws.cloudwatch import (
__get_connection_cloudwatch as cloudwatch_connection)
def get_consumed_read_units_percent(
        table_name, gsi_name, lookback_window_start=15):
    """ Returns the number of consumed read units in percent

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type lookback_window_start: int
    :param lookback_window_start: How many minutes back to look
        (passed through to ``__get_aws_metric``, which uses minutes)
    :returns: int -- Consumed read units as a percentage of provisioned
    """
    # FIX: removed stray ' | ' corruption tokens that broke the division
    # expression and the log statement below.
    try:
        metrics = __get_aws_metric(
            table_name,
            gsi_name,
            lookback_window_start,
            'ConsumedReadCapacityUnits')
    except BotoServerError:
        raise
    if metrics:
        # CloudWatch returns the Sum over a 300 second period; convert to
        # average units per second, rounded up.
        consumed_read_units = int(
            math.ceil(float(metrics[0]['Sum'])/float(300)))
    else:
        consumed_read_units = 0
    try:
        consumed_read_units_percent = int(
            math.ceil(
                float(consumed_read_units) /
                float(dynamodb.get_provisioned_gsi_read_units(
                    table_name, gsi_name)) * 100))
    except JSONResponseError:
        raise
    logger.info('{0} - GSI: {1} - Consumed read units: {2:d}%'.format(
        table_name, gsi_name, consumed_read_units_percent))
    return consumed_read_units_percent
def get_throttled_read_event_count(
        table_name, gsi_name, lookback_window_start=15):
    """ Returns the number of throttled read events during a given time frame

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type lookback_window_start: int
    :param lookback_window_start: How many seconds to look at
    :returns: int -- Number of throttled read events
    """
    try:
        datapoints = __get_aws_metric(
            table_name, gsi_name, lookback_window_start, 'ReadThrottleEvents')
    except BotoServerError:
        raise
    # An empty result means no throttling was recorded in the window.
    throttled_read_events = int(datapoints[0]['Sum']) if datapoints else 0
    logger.info('{0} - GSI: {1} - Read throttle count: {2:d}'.format(
        table_name, gsi_name, throttled_read_events))
    return throttled_read_events
def get_consumed_write_units_percent(
        table_name, gsi_name, lookback_window_start=15):
    """ Returns the number of consumed write units in percent

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type lookback_window_start: int
    :param lookback_window_start: How many seconds to look at
    :returns: int -- Number of consumed writes
    """
    try:
        datapoints = __get_aws_metric(
            table_name,
            gsi_name,
            lookback_window_start,
            'ConsumedWriteCapacityUnits')
    except BotoServerError:
        raise
    # CloudWatch returns the Sum over a 300 second period; average it out,
    # rounding up. An empty result means nothing was consumed.
    consumed_write_units = (
        int(math.ceil(float(datapoints[0]['Sum']) / float(300)))
        if datapoints else 0)
    try:
        provisioned = float(dynamodb.get_provisioned_gsi_write_units(
            table_name, gsi_name))
        consumed_write_units_percent = int(
            math.ceil(float(consumed_write_units) / provisioned * 100))
    except JSONResponseError:
        raise
    logger.info('{0} - GSI: {1} - Consumed write units: {2:d}%'.format(
        table_name, gsi_name, consumed_write_units_percent))
    return consumed_write_units_percent
def get_throttled_write_event_count(
        table_name, gsi_name, lookback_window_start=15):
    """ Returns the number of throttled write events during a given time frame

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type lookback_window_start: int
    :param lookback_window_start: How many seconds to look at
    :returns: int -- Number of throttled write events
    """
    try:
        datapoints = __get_aws_metric(
            table_name, gsi_name, lookback_window_start, 'WriteThrottleEvents')
    except BotoServerError:
        raise
    # An empty result means no throttling was recorded in the window.
    throttled_write_events = int(datapoints[0]['Sum']) if datapoints else 0
    logger.info('{0} - GSI: {1} - Write throttle count: {2:d}'.format(
        table_name, gsi_name, throttled_write_events))
    return throttled_write_events
# Retried with exponential backoff (1s doubling up to 10s, 10 attempts)
# because CloudWatch calls are occasionally throttled.
@retry(
    wait='exponential_sleep',
    wait_exponential_multiplier=1000,
    wait_exponential_max=10000,
    stop_max_attempt_number=10)
def __get_aws_metric(table_name, gsi_name, lookback_window_start, metric_name):
    """ Returns a metric list from the AWS CloudWatch service, may return
    None if no metric exists

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of a GSI on the given DynamoDB table
    :type lookback_window_start: int
    :param lookback_window_start: How many minutes to look at
    :type metric_name str
    :param metric_name Name of the metric to retrieve from CloudWatch
    :returns: list --
        A list of time series data for the given metric, may be None if
        there was no data
    """
    try:
        now = datetime.utcnow()
        # Query a single 5-minute window that starts
        # ``lookback_window_start`` minutes in the past.
        start_time = now-timedelta(minutes=lookback_window_start)
        end_time = now-timedelta(minutes=lookback_window_start-5)
        return cloudwatch_connection().get_metric_statistics(
            period=300,  # Always look at 5 minutes windows
            start_time=start_time,
            end_time=end_time,
            metric_name=metric_name,
            namespace='AWS/DynamoDB',
            statistics=['Sum'],
            dimensions={
                'TableName': table_name,
                'GlobalSecondaryIndexName': gsi_name
            },
            unit='Count')
    except BotoServerError as error:
        # Log enough context to diagnose the AWS failure, then let the
        # @retry decorator decide whether to try again.
        logger.error(
            'Unknown boto error. Status: "{0}". '
            'Reason: "{1}". Message: {2}'.format(
                error.status,
                error.reason,
                error.message))
        raise
|
kkirstein/proglang-playground | Python/benchmark/tests/test_benchmark.py | Python | mit | 90 | 0 | from be | nchmark import __version__
def test_version():
    # Guard against the package's reported __version__ drifting from the
    # released version string.
    assert __version__ == '0.1.0'
|
ShyamSS-95/Bolt | example_problems/nonrelativistic_boltzmann/linear_modes/linear_modes_euler/1D/entropy_mode/run_cases.py | Python | gpl-3.0 | 1,686 | 0.002966 | import arrayfire as af
import numpy as np
from bolt.lib.physical_system import physical_system
from bolt.l | ib.nonlinear.nonlinear_solver import nonlinear_solver
from bolt.lib.linear.linear_solver import linear_solver
import input_files.domain as domain
import input_f | iles.boundary_conditions as boundary_conditions
import input_files.params as params
import input_files.initialize as initialize
import bolt.src.nonrelativistic_boltzmann.advection_terms as advection_terms
import bolt.src.nonrelativistic_boltzmann.collision_operator as collision_operator
import bolt.src.nonrelativistic_boltzmann.moments as moments
# Grid resolutions for the convergence study: N = 32, 64, 128, 256, 512.
N = 2**np.arange(5, 10)

for i in range(N.size):
    # Resolution along q1 for this run.
    domain.N_q1 = int(N[i])
    # Defining the physical system to be solved:
    system = physical_system(domain,
                             boundary_conditions,
                             params,
                             initialize,
                             advection_terms,
                             collision_operator.BGK,
                             moments
                            )
    N_g_q = system.N_ghost_q
    nls = nonlinear_solver(system)
    # Timestep as set by the CFL condition:
    dt = params.N_cfl * min(nls.dq1, nls.dq2) \
       / max(domain.p1_end, domain.p2_end, domain.p3_end)
    time_array = np.arange(0, params.t_final + dt, dt)
    # Checking that time array doesn't cross final time:
    if(time_array[-1]>params.t_final):
        time_array = np.delete(time_array, -1)
    # March the solution to t_final with Strang-split steps.
    for time_index, t0 in enumerate(time_array[1:]):
        print('Computing For Time =', t0)
        nls.strang_timestep(dt)
    # Persist the moments for this resolution.
    nls.dump_moments('dump/N_%04d'%(int(N[i])))
|
Kaggle/docker-python | tests/test_lightgbm.py | Python | apache-2.0 | 2,382 | 0.002939 | import unittest
import lightgbm as lgb
import pandas as pd
from common import gpu_test
class TestLightgbm(unittest.TestCase):
    """Smoke tests that LightGBM can train one boosting round on CPU/GPU."""
    # Based on the "simple_example" from their documentation:
    # https://github.com/Microsoft/LightGBM/blob/master/examples/python-guide/simple_example.py
    def test_cpu(self):
        lgb_train, lgb_eval = self.load_datasets()

        params = {
            'task': 'train',
            'boosting_type': 'gbdt',
            'objective': 'regression',
            'metric': {'l2', 'auc'},
            'num_leaves': 31,
            'learning_rate': 0.05,
            'feature_fraction': 0.9,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'force_row_wise': True,
            'verbose': 0
        }

        # Run only one round for faster test
        gbm = lgb.train(params,
                        lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        early_stopping_rounds=1)

        self.assertEqual(1, gbm.best_iteration)

    @gpu_test
    def test_gpu(self):
        lgb_train, lgb_eval = self.load_datasets()

        params = {
            'boosting_type': 'gbdt',
            'objective': 'regression',
            'metric': 'auc',
            'num_leaves': 31,
            'learning_rate': 0.05,
            'feature_fraction': 0.9,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'force_row_wise': True,
            'verbose': 1,
            'device': 'gpu'
        }

        # Run only one round for faster test
        # FIX: removed stray ' | ' corruption tokens that split the
        # num_boost_round / early_stopping_rounds keyword arguments.
        gbm = lgb.train(params,
                        lgb_train,
                        num_boost_round=1,
                        valid_sets=lgb_eval,
                        early_stopping_rounds=1)

        self.assertEqual(1, gbm.best_iteration)

    def load_datasets(self):
        """Load the tab-separated fixtures (label in column 0)."""
        df_train = pd.read_csv('/input/tests/data/lgb_train.csv', header=None, sep='\t')
        df_test = pd.read_csv('/input/tests/data/lgb_test.csv', header=None, sep='\t')

        y_train = df_train[0]
        y_test = df_test[0]
        X_train = df_train.drop(0, axis=1)
        X_test = df_test.drop(0, axis=1)

        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

        return (lgb_train, lgb_eval)
|
zestrada/nova-cs498cc | nova/api/openstack/compute/contrib/multiple_create.py | Python | apache-2.0 | 1,025 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
# under the License
from nova.api.openstack import extensions
class Multiple_create(extensions.ExtensionDescriptor):
    """Allow multiple create in the Create Server v1.1 API."""
    # Extension metadata consumed by Nova's extension framework; the class
    # itself adds no behaviour.
    name = "MultipleCreate"
    alias = "os-multiple-create"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "multiplecreate/api/v1.1")
    updated = "2012-08-07T00:00:00+00:00"
yashchandak/GNN | Sample_Run/level2/Eval_MLP.py | Python | mit | 9,173 | 0.013845 | from __future__ import print_function
import tensorflow as tf
import numpy as np
import time, sys
from Eval_Data import Data
from Eval_Config import Config
import Eval_Calculate_Performance as perf
import Eval_utils as utils
class Network:
    """A small MLP (zero or one hidden layer) with sigmoid outputs for
    multi-label classification."""

    def __init__(self, cfg):
        self.cfg = cfg

    def loss(self, y_pred, y):
        """Mean multi-label binary cross-entropy plus a (disabled) L2 term."""
        # Elementwise log-likelihood; 1e-10 guards against log(0).
        cross_loss = tf.add(tf.log(1e-10 + y_pred)*y, tf.log(1e-10 + (1-y_pred))*(1-y))
        cross_entropy = -1*tf.reduce_mean(tf.reduce_sum(cross_loss,1))
        vars = tf.trainable_variables()
        lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars])*0.000 #macro-F! improves when L2 loss is set to 0
        total_loss = cross_entropy + lossL2
        return total_loss

    def weight_variable(self, name, shape):
        # stddev scaled by fan-in (1/shape[0])
        initial = tf.truncated_normal(shape, stddev=1.0/shape[0])
        return tf.Variable(initial, name=name)

    def bias_variable(self, name, shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial, name=name)

    def prediction(self, x, keep_prob):
        """Build the forward graph; returns (and caches) sigmoid outputs."""
        with tf.variable_scope('Network'):
            if self.cfg.hidden > 0:
                W1 = self.weight_variable('weight1', [self.cfg.input_len, self.cfg.hidden])
                b1 = self.bias_variable( 'bias1', [self.cfg.hidden])
                W2 = self.weight_variable('weight2', [self.cfg.hidden, self.cfg.label_len] )
                b2 = self.bias_variable( 'bias2', [self.cfg.label_len])
                hidden = tf.nn.tanh(tf.matmul(x, W1) + b1)
                hidden_drop = tf.nn.dropout(hidden, keep_prob)
                self.y_pred = tf.sigmoid(tf.matmul(hidden_drop, W2) + b2)
            else:
                W1 = self.weight_variable('weight1',[self.cfg.input_len, self.cfg.label_len] )
                b1 = self.bias_variable( 'bias1', [self.cfg.label_len])
                self.y_pred = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
        return self.y_pred

    def train(self, loss, optimizer):
        # FIX: removed a stray ' | ' corruption token that split this
        # assignment ("train_st | ep").
        train_step = optimizer.minimize(loss)
        return train_step
class Model:
    """Wires the Data pipeline and Network together and drives training,
    validation-based early stopping and final test evaluation."""

    def __init__(self, config):
        # FIX: removed a stray ' | ' corruption token that broke the
        # ``self.optimizer`` assignment.
        self.config = config
        self.data = Data(config)
        self.add_placeholders()
        self.net = Network(self.config)
        self.y_pred = self.net.prediction(self.x, self.keep_prob)
        self.optimizer = self.config.optimizer
        self.loss = self.net.loss(self.y_pred, self.y)
        self.train = self.net.train(self.loss, self.optimizer)
        self.saver = tf.train.Saver()
        self.init = tf.global_variables_initializer()

    def add_placeholders(self):
        # x: input features, y: multi-hot labels, keep_prob: dropout keep rate
        self.x = tf.placeholder(tf.float32, shape=[None, self.config.input_len])
        self.y = tf.placeholder(tf.float32, shape=[None, self.config.label_len])
        self.keep_prob = tf.placeholder(tf.float32)

    def run_epoch(self, sess):
        """Run one full pass over the training data; returns mean batch loss."""
        err = []
        i = 0
        while self.data.has_more:
            #Mini-batch execute
            inputs, labels = self.data.next_batch()
            feed_dict = {self.x:inputs, self.y:labels, self.keep_prob:self.config.drop}
            _, loss, y_ = sess.run([self.train, self.loss, self.y_pred], feed_dict=feed_dict)
            err.append(loss)
            sys.stdout.write("\rRun: {}:: Loss = {}".format(i,np.mean(err)))
            sys.stdout.flush()
            i += 1
        self.data.reset()
        return np.mean(err)

    def run_eval(self, sess, test=False, metrics=False):
        """Evaluate on the validation (or test) split; optionally compute metrics."""
        #check Evaluation dataset
        inputs_valid, labels_valid = self.data.get_validation(test)
        # NOTE(review): keep_prob is fed the *training* dropout rate here;
        # evaluation usually wants 1.0 — confirm this is intentional.
        feed_dict = {self.x:inputs_valid, self.y:labels_valid, self.keep_prob:self.config.drop}
        y_, loss = sess.run([self.y_pred, self.loss], feed_dict=feed_dict)
        if metrics:
            metrics = perf.evaluate(y_, labels_valid, self.config.threshold)
            return loss, metrics
        else:
            return loss

    def print_metrics(self, inp):
        for idx, item in enumerate(inp):
            print(self.config.metrics[idx], ": ", item)

    def fit(self, sess):
        """Train with patience-based early stopping; returns
        (per-epoch losses, best step, test metrics)."""
        #define parametrs for early stopping early stopping
        max_epochs = self.config.max_epochs
        patience = self.config.patience  # look as this many examples regardless
        patience_increase = self.config.patience_increase  # wait this much longer when a new best is found
        improvement_threshold = self.config.improvement_threshold  # a relative improvement of this much is considered significant
        validation_loss = 1e5
        step = 1
        best_step = -1
        losses = []
        learning_rate = self.config.learning_rate
        while step <= self.config.max_epochs:
            start_time = time.time()
            average_loss = self.run_epoch(sess)
            duration = time.time() - start_time
            if (step % self.config.val_epochs_freq == 0):
                val_loss = self.run_eval(sess)
                sys.stdout.write('\n Epoch %d: tr_loss = %.2f, val_loss = %.2f (%.3f sec)'% (step, average_loss, val_loss, duration))
                sys.stdout.flush()
                # Save model only if the improvement is significant
                if (val_loss < validation_loss * improvement_threshold):
                    #patience = max(patience, step * patience_increase)
                    validation_loss = val_loss
                    best_step = step
                    patience = step + max(self.config.val_epochs_freq,self.config.patience_increase)
                    print('best step %d'%(best_step))
                    self.saver.save(sess, 'last-best')
                elif val_loss > validation_loss * improvement_threshold:
                    patience = step - 1
            else:
                # Print status to stdout.
                sys.stdout.write('Epoch %d: loss = %.2f (%.3f sec)' % (step, average_loss, duration))
                sys.stdout.flush()
            if (patience <= step):
                # Patience exhausted: decay the learning rate and rebuild the
                # optimizer; stop entirely once the rate is negligible.
                #config.val_epochs_freq = 2
                learning_rate = learning_rate / 10
                self.optimizer = tf.train.AdamOptimizer(learning_rate)
                patience = step + max(self.config.val_epochs_freq,self.config.patience_increase)
                print('--------- Learning rate dropped to: %f'%(learning_rate))
                if learning_rate <= 0.0000001:
                    print('Stopping by patience method')
                    break
            losses.append(average_loss)
            step += 1
        print("Test Results")
        #Reload the best state
        tfconfig = tf.ConfigProto(allow_soft_placement=True)
        tfconfig.gpu_options.allow_growth = True
        sess2 = tf.Session(config = tfconfig)
        new_saver = tf.train.import_meta_graph('last-best.meta')
        new_saver.restore(sess2, tf.train.latest_checkpoint('./'))
        test_loss, metrics = self.run_eval(sess2, test=True, metrics=True)
        self.print_metrics(metrics)
        return losses, best_step, metrics
def evaluate(cfg):
#with tf.variable_scope('Evaluation', reuse=None) as scope:
print("=====Configurations=====\n", cfg.__dict__)
all_results = {}
for train_percent in cfg.training_percents:
all_results[train_percent] = {}
for shuf in range(1,cfg.num_shuffles+1):
with tf.Graph().as_default():
model = Model(cfg)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config = tfconfig)
sess.run(model.init)
model.data.set_training_validation(train_percent, shuf)
losses, best_step, metrics = model.fit(sess)
all_results[train_percent][shuf] = metrics
for train_percent in sorted(all_results.keys()):
print ('Train percent:', train_percent)
micro, macro = [], []
#print numpy.mean(all_results[train_percent])
x = all_results[train_percent]
for v in x.values():
micro.append(v[3])
macro.append(v[4])
print |
machaharu/odenos | apps/mininet_examples/single_network_control/start_mininet.py | Python | apache-2.0 | 1,901 | 0.000526 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by | applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.node import RemoteController, OVSSwitch
def start_of13_switches(controller, switches):
    """Start every switch against *controller* and pin it to OpenFlow 1.3.

    (The original ``def`` line was garbled by a data-extraction separator;
    reconstructed here.)
    """
    for switch in switches:
        switch.start([controller])
        switch.sendCmd('ovs-vsctl set bridge %s protocols=OpenFlow13' % switch)
if '__main__' == __name__:
    # Topology: four OVS switches in a ring (s1-s2-s3-s4-s1) with one host
    # on s1 and one on s3, driven by a remote controller on localhost:6653.
    net = Mininet(controller=RemoteController, autoStaticArp=True, switch=OVSSwitch)
    c1 = net.addController('c1', ip='127.0.0.1', port=6653)
    s1 = net.addSwitch('s1')
    s2 = net.addSwitch('s2')
    s3 = net.addSwitch('s3')
    s4 = net.addSwitch('s4')
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    # Ring links between the switches.
    s1.linkTo(s2)
    s2.linkTo(s3)
    s3.linkTo(s4)
    s4.linkTo(s1)
    # Host attachment points.
    s1.linkTo(h1)
    s3.linkTo(h2)
    net.build()
    c1.start()
    # Force every switch to speak OpenFlow 1.3 to the controller.
    start_of13_switches(c1, [s1, s2, s3, s4])
    # Drop into the interactive Mininet CLI; tear everything down on exit.
    CLI(net)
    net.stop()
|
eirki/script.service.koalahbonordic | tests/test_library.py | Python | mit | 9,102 | 0.003516 | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import, division)
import unittest
import os
from os.path import isfile
import shutil
from lib import constants as const
from lib import utils
from lib import library
from tests import mock_constants
from tests import mock_kodi
from tests import mock_scraper
import main
# ## in mock watchlist:
# The Big Lebowski - to be added
# True Detective S01E01, S01E02 - to be added
# John Adams S01E02, S01E03 - already added
# Boardwalk Empire - to remain excluded
# Eternal Sunshine of the Spotless Mind - to remain excluded
# ## in mock shows:
# Flight of the Conchords, S02E01, S02E02 - to be removed
# John Adams S01E01, S01E02 - to be updated
# ## in mock movies:
# Jurassic Park III - to be removed
# ## in movk excluded shows:
# Boardwalk Empire - to remain excluded
# ## in mock excluded movies:
# Eternal Sunshine of the Spotless Mind - to remain excluded
# Testing get_watchlist with mutiple results:
# Remove movie: "Jurassic Park III", playcount 1
# Remove show: "Flight of the Conchords", remove S02E01 and S02E02 (playcount 2),
# Add movie: "The Big Lebowski"
# Add show: "True Detective", add 2 episodes, S01E01 (needs nfo), S01E02 (with json)
# Update show: "John Adams", remove 1 episode, add 1 episode, 1 episode remains
def setUpModule():
    """Point every database at the mock userdata folder, install the mock
    kodi/scraper modules, restore a pristine fixture copy of the userdata
    folder, and run the library startup action once for the whole module."""
    db = library.databases
    for store in (db.stored_movies, db.excluded_movies, db.stored_shows,
                  db.excluded_shows, db.prioritized_shows):
        store.filepath = utils.os_join(mock_constants.userdatafolder,
                                       "%s.json" % store.name)
    db.mediatypes.const = mock_constants
    db.mediatypes.kodi = mock_kodi
    library.kodi = mock_kodi
    library.scraper = mock_scraper
    if os.path.exists(mock_constants.userdatafolder):
        # delete mock userdata folder
        shutil.rmtree(mock_constants.userdatafolder)
    # copy mock userdata folder to userdata so it can be modified
    shutil.copytree(utils.os_join(const.addonpath, "tests", "mock userdata"),
                    mock_constants.userdatafolder)
    main.main(argv={"mode": "library", "action": "startup"})
def check_movie(name, ext, season=None, episode=None):
    """Return the expected library path of a movie file.

    ``season``/``episode`` are accepted (and ignored) so the signature
    mirrors :func:`check_episode`.
    """
    return utils.os_join(mock_constants.libpath, "%s movies" % const.provider,
                         "%s.%s" % (name, ext))
def check_episode(name, ext, season=None, episode=None):
    """Return the expected library path of an episode file."""
    filename = "%s S%02dE%02d.%s" % (name, season, episode, ext)
    return utils.os_join(mock_constants.libpath, "%s shows" % const.provider,
                         name, "Season %s" % season, filename)
class AddMovie(unittest.TestCase):
    """The watchlisted movie (The Big Lebowski) should now be in the library."""
    def test_add_movie_htm(self):
        """Was movie (The Big Lebowski) added, with HTM created?"""
        target = check_movie(name="The Big Lebowski", ext="htm")
        self.assertTrue(isfile(target), msg="File not created:\n%s" % target)
    def test_add_movie_nfo(self):
        """Was movie (The Big Lebowski) added, with NFO created?"""
        target = check_movie(name="The Big Lebowski", ext="nfo")
        self.assertTrue(isfile(target), msg="File not created:\n%s" % target)
    def test_add_movie_json(self):
        """Was movie (The Big Lebowski) added, with JSON deleted?"""
        target = check_movie(name="The Big Lebowski", ext="json")
        self.assertFalse(isfile(target), msg="File not removed:\n%s" % target)
class RemoveMovie(unittest.TestCase):
    """The watched movie (Jurassic Park III) should have been removed."""
    def test_remove_movie_htm(self):
        """Was movie (Jurassic Park III) removed, with HTM deleted?"""
        target = check_movie(name="Jurassic Park III", ext="htm")
        self.assertFalse(isfile(target), msg="File not removed:\n%s" % target)
    def test_remove_movie_json(self):
        """Was movie (Jurassic Park III) removed, with JSON created?"""
        target = check_movie(name="Jurassic Park III", ext="json")
        self.assertTrue(isfile(target), msg="File not created:\n%s" % target)
class RemoveShow(unittest.TestCase):
    """The watched show (Flight of the Conchords) should have been removed.

    (The last assertion line was garbled by a data-extraction separator;
    reconstructed here.)
    """
    def test_remove_show_S02E01_htm(self):
        """Was show (Flight of the Conchords) removed, with S02E01 htm deleted?"""
        path = check_episode(name="Flight of the Conchords", ext="htm", season=2, episode=1)
        self.assertFalse(isfile(path), msg="File not removed:\n%s" % path)
    def test_remove_show_S02E01_json(self):
        """Was show (Flight of the Conchords) removed, with S02E01 json not created?"""
        path = check_episode(name="Flight of the Conchords", ext="json", season=2, episode=1)
        self.assertFalse(isfile(path), msg="File created:\n%s" % path)
    def test_remove_show_S02E02_htm(self):
        """Was show (Flight of the Conchords) removed, with S02E02 htm deleted?"""
        path = check_episode(name="Flight of the Conchords", ext="htm", season=2, episode=2)
        self.assertFalse(isfile(path), msg="File not removed:\n%s" % path)
    def test_remove_show_S02E02_json(self):
        """Was show (Flight of the Conchords) removed, with S02E02 json created?"""
        path = check_episode(name="Flight of the Conchords", ext="json", season=2, episode=2)
        self.assertTrue(isfile(path), msg="File not created:\n%s" % path)
class AddShow(unittest.TestCase):
    """The watchlisted show (True Detective) should have been added."""
    def test_add_show_S01E01_htm(self):
        """Was show (True Detective) added, with S01E01 htm created?"""
        path = check_episode(name="True Detective", ext="htm", season=1, episode=1)
        self.assertTrue(isfile(path), msg="File not created:\n%s" % path)
    def test_add_show_S01E01_nfo(self):
        """Was show (True Detective) added, with S01E01 nfo created?"""
        path = check_episode(name="True Detective", ext="nfo", season=1, episode=1)
        self.assertTrue(isfile(path), msg="File not created:\n%s" % path)
    def test_add_show_S01E02_json(self):
        """Was show (True Detective) added, with S01E02 json deleted?"""
        # Renamed from test_add_show_S01E01_json: the body (and docstring)
        # check S01E02, so the old method name was misleading.
        path = check_episode(name="True Detective", ext="json", season=1, episode=2)
        self.assertFalse(isfile(path), msg="File not removed:\n%s" % path)
    def test_add_show_S01E02_htm(self):
        """Was show (True Detective) added, with S01E02 htm created?"""
        path = check_episode(name="True Detective", ext="htm", season=1, episode=2)
        self.assertTrue(isfile(path), msg="File not created:\n%s" % path)
class UpdateShow(unittest.TestCase):
def test_update_show_S01E01_htm(self):
"""Was show (John Adams) updated, with S01E01 htm deleted?"""
path = check_episode(name="John Adams", ext="htm", season=1, episode=1)
self.assertFalse(isfile(path), msg="File not removed:\n%s" % path)
def test_update_show_S01E01_json(self):
"""Was show (John Adams) updated, with S01E01 json created?"""
path = check_episode(name="John Adams", ext="json", season=1, episode=1)
self.assertTrue(isfile(path), msg="File not created:\n%s" % path)
def test_update_show_S01E02_htm(self):
"""Was show (John Adams) updated, with S01E02 htm retained?"""
path = check_episode(name="John Adams", ext="htm", season=1, episode=2)
self.assertTrue(isfile(path), msg="File not retained:\n%s" % path)
def test_update_show_S01E03_htm(self):
"""Was show (John Adams) updated, with S01E03 htm created?"""
path = check_episode(name="John Adams", ext="htm", season=1, episode=3)
self.assertTrue(isfile(path), msg="File not created:\n%s" % path)
def test_update_show_S01E03_json(self |
Vauxoo/account-financial-tools | partner_report_open_invoices/models/__init__.py | Python | agpl-3.0 | 174 | 0 | # -*- coding: utf-8 -*-
# Copyrig | ht 2016 Serpent Consulting Services Pvt. Ltd
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import res_ | company
|
gaufung/PythonStandardLibrary | Network/Selector/selector_echo_server.py | Python | mit | 1,269 | 0.00788 | import selector
import selectors  # fixed: stdlib module is 'selectors', not 'selector'
import socket

mysel = selectors.DefaultSelector()
keep_running = True


def read(connection, mask):
    '''
    callback for read events
    '''
    global keep_running
    client_address = connection.getpeername()
    print('read({})'.format(client_address))
    data = connection.recv(1024)
    if data:
        # A readable socket with data: echo it straight back to the client.
        print(' received {!r}'.format(data))
        connection.sendall(data)
    else:
        # Empty read means the client closed the connection; stop the loop.
        print(' closing')
        mysel.unregister(connection)  # fixed typo: was 'conneciton'
        connection.close()
        keep_running = False


def accept(sock, mask):
    '''
    callback for new connection
    '''
    new_connection, addr = sock.accept()
    print('accept({})'.format(addr))
    new_connection.setblocking(False)
    mysel.register(new_connection, selectors.EVENT_READ, read)


server_address = ('localhost', 10000)  # fixed: was a call, not an assignment
print('starting up on {} port {}'.format(*server_address))
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(False)  # fixed: socket has no 'setblock' method
server.bind(server_address)
server.listen(5)

mysel.register(server, selectors.EVENT_READ, accept)
while keep_running:
    print('waiting for I/O')
    for key, mask in mysel.select(timeout=1):
        callback = key.data
        callback(key.fileobj, mask)
print('shutting down')
mysel.close()
danirus/django-comments-xtd | example/custom/mycomments/forms.py | Python | bsd-2-clause | 659 | 0.001517 | from django import forms
from django.utils.translation import gettext_lazy as _
from django_comments_xtd.forms import XtdCommentFor | m
from django_comments_xtd.models import TmpXtdComment
class MyCommentForm(XtdCommentForm):
    """django-comments-xtd comment form extended with a 'title' field."""
    title = forms.CharField(max_length=256,
                            widget=forms.TextInput(
                                attrs={'placeholder': _('title'),
                                       'class': 'form-control'}))

    def get_comment_create_data(self, site_id=None):
        """Return the comment-creation data, including the extra title.

        Fix: forward ``site_id`` to the parent implementation instead of
        silently dropping it.
        """
        data = super(MyCommentForm, self).get_comment_create_data(
            site_id=site_id)
        data.update({'title': self.cleaned_data['title']})
        return data
|
goldhand/django-nupages | docs/conf.py | Python | bsd-3-clause | 8,131 | 0.007625 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import nupages
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-nupages'
copyright = u'2014, Will Farley'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = nupages.__version__
# The full version, including alpha/beta/rc tags.
release = nupages.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-nupagesdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-nupages.tex', u'django-nupages Documentation',
u'Will Farley', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-nupages', u'django-nupages Documentation',
[u'Will Farley'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-nupages', u'django-nupages Documentation',
u'Will Farley', 'django-nupages', 'P | ages for django.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, | do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False |
CI-WATER/gsshapy | gsshapy/base/__init__.py | Python | bsd-3-clause | 379 | 0 | """
*** | *****************************************************************************
* Name: Base Classes
* Author: Nathan Swain
* Created On: July 31, 2014
* Copyright: (c) Brigham Young University 2014
* License: BSD 2-Clause
********************************************************************************
"""
from .rast import *
from .geom import *
from .file_base im | port *
|
lakshmi-kannan/st2 | st2common/st2common/util/templating.py | Python | apache-2.0 | 3,501 | 0.001143 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from st2common.util.jinja import get_jinja_environment
from st2common.constants.keyvalue import DATASTORE_PARENT_SCOPE
from st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE
from st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE
from st2common.services.keyvalues import KeyValueLookup
from st2common.services.keyvalues import UserKeyValueLookup
__all__ = [
'render_template',
'render_template_with_system_context',
'render_template_with_system_and_user_context'
]
def render_template(value, context=None):
    """Render the Jinja template string ``value`` with ``context``.

    :param value: Template string.
    :type value: ``str``

    :param context: Template context.
    :type context: ``dict``
    """
    assert isinstance(value, six.string_types)

    env = get_jinja_environment(allow_undefined=False)  # nosec
    return env.from_string(value).render(context or {})
def render_template_with_system_context(value, context=None, prefix=None):
    """
    Render provided template with a default system context.

    :param value: Template string.
    :type value: ``str``

    :param context: Template context (optional).
    :type context: ``dict``

    :param prefix: Datastore key prefix (optional).
    :type prefix: ``str``

    :rtype: ``str``
    """
    context = context or {}
    context[SYSTEM_SCOPE] = KeyValueLookup(prefix=prefix, scope=SYSTEM_SCOPE)
    # Fix: the parent-scope lookup must use the FULL_SYSTEM_SCOPE constant,
    # consistent with render_template_with_system_and_user_context below.
    context[DATASTORE_PARENT_SCOPE] = {
        SYSTEM_SCOPE: KeyValueLookup(prefix=prefix, scope=FULL_SYSTEM_SCOPE)
    }

    rendered = render_template(value=value, context=context)
    return rendered
def render_template_with_system_and_user_context(value, user, context=None, prefix=None):
    """
    Render provided template with a default system context and user context for the provided user.
    :param value: Template string.
    :type value: ``str``
    :param user: Name (PK) of the user for the user scope.
    :type user: ``str``
    :param context: Template context (optional).
    :type context: ``dict``
    :param prefix: Datastore key prefix (optional).
    :type prefix: ``str``
    :rtype: ``str``
    """
    context = context or {}
    # Top-level lookups use the short scope names; the parent-scope mapping
    # uses the FULL_* scope constants.
    context[SYSTEM_SCOPE] = KeyValueLookup(prefix=prefix, scope=SYSTEM_SCOPE)
    context[USER_SCOPE] = UserKeyValueLookup(prefix=prefix, user=user, scope=USER_SCOPE)
    context[DATASTORE_PARENT_SCOPE] = {
        SYSTEM_SCOPE: KeyValueLookup(prefix=prefix, scope=FULL_SYSTEM_SCOPE),
        USER_SCOPE: UserKeyValueLookup(prefix=prefix, user=user, scope=FULL_USER_SCOPE)
    }
    rendered = render_template(value=value, context=context)
    return rendered
|
scrapinghub/scrapinghub-buildpack-scrapy | vendor/sh_scrapy/sh_scrapy/log.py | Python | mit | 5,397 | 0.001297 | import sys, os, logging, warnings
from twisted.python import log as txlog
from scrapy import log, __version__, optional_features
from scrapy.utils.python import unicode_to_str
from sh_scrapy.hsref import hsref
# keep a global reference to stderr as it is redirected on log initialization
_stderr = sys.stderr
def _logfn(*args, **kwargs):
    """Wraps HS job logging function
    Prevents errors writing to a closed batchuploader writer.
    It happens when the log writer is closed but the batchuploader is still
    sending batches.
    """
    logs = hsref.job.logs
    # Drop the message only when the writer exists and is already closed.
    w = logs._writer
    if not (w and w.closed):
        logs.log(*args, **kwargs)
def initialize_logging():
    """Initialize logging to send messages to Hubstorage job logs
    it initializes:
    - Python logging
    - Twisted logging
    - Scrapy logging
    - Redirects standard output and stderr to job log at INFO level
    This duplicates some code with Scrapy log.start(), but it's required in
    order to avoid scrapy from starting the log twice.
    """
    # General python logging: route everything at INFO+ through the HS handler.
    root = logging.getLogger()
    root.setLevel(logging.NOTSET)
    hdlr = HubstorageLogHandler()
    hdlr.setLevel(logging.INFO)
    hdlr.setFormatter(logging.Formatter('[%(name)s] %(message)s'))
    root.addHandler(hdlr)
    # Silence commonly used noisy libraries
    try:
        import boto # boto overrides its logger at import time
    except ImportError:
        pass
    nh = logging.NullHandler()
    for ln in ('boto', 'requests', 'hubstorage'):
        lg = logging.getLogger(ln)
        lg.propagate = 0
        lg.addHandler(nh)
    # Redirect standard output and error to HS log
    sys.stdout = StdoutLogger(0, 'utf-8')
    sys.stderr = StdoutLogger(1, 'utf-8')
    # Twisted specifics (includes Scrapy)
    obs = HubstorageLogObserver(hdlr)
    # startLoggingWithObserver replaces warnings.showwarning; restore the
    # previous hook afterwards so warning formatting is left untouched.
    _oldshowwarning = warnings.showwarning
    txlog.startLoggingWithObserver(obs.emit, setStdout=False)
    warnings.showwarning = _oldshowwarning
    # Scrapy specifics
    if 'SCRAPY_JOB' in os.environ:
        log.msg("Scrapy %s started" % __version__)
        log.msg("Optional features available: %s" % ", ".join(optional_features), level=log.DEBUG)
    log.start = _dummy # ugly but needed to prevent scrapy re-opening the log
    return hdlr
def _dummy(*a, **kw):
"""Scrapy log.start dummy monkeypatch"""
pass
class HubstorageLogHandler(logging.Handler):
    """Python logging handler that writes log records to HubStorage.

    (One line of ``handleError`` was garbled by a data-extraction separator;
    reconstructed here.)
    """

    def emit(self, record):
        """Format *record* and ship it to the HS job log at its own level."""
        try:
            msg = self.format(record)
            _logfn(msg, level=record.levelno)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Anything else is reported through logging's own error path.
            self.handleError(record)

    def handleError(self, record):
        """Report handler errors on the *original* stderr.

        sys.stderr may have been redirected to the HS log on initialization;
        the saved ``_stderr`` is temporarily restored so the error report does
        not go back through this handler.
        """
        cur = sys.stderr
        try:
            sys.stderr = _stderr
            logging.Handler.handleError(self, record)
        finally:
            sys.stderr = cur
class HubstorageLogObserver(object):
    """Twisted log observer with Scrapy specifics that writes to HubStorage"""
    def __init__(self, loghdlr):
        # The handler instance is kept so the *current* minimum log level can
        # be read at emit time (it may change after construction).
        self._hs_loghdlr = loghdlr
    def emit(self, ev):
        logitem = self._get_log_item(ev)
        if logitem:
            _logfn(**logitem)
    def _get_log_item(self, ev):
        """Get HubStorage log item for the given Twisted event, or None if no
        document should be inserted
        """
        # Scrapy events carry their own level; plain Twisted events map
        # isError -> ERROR, everything else -> INFO.
        if ev['system'] == 'scrapy':
            level = ev['logLevel']
        else:
            if ev['isError']:
                level = logging.ERROR
            else:
                level = logging.INFO
        # It's important to access level through the handler instance,
        # min log level can change at any moment.
        if level < self._hs_loghdlr.level:
            return
        msg = ev.get('message')
        if msg:
            msg = unicode_to_str(msg[0])
        # A failure's traceback (optionally prefixed by 'why') replaces the
        # plain message.
        failure = ev.get('failure', None)
        if failure:
            msg = failure.getTraceback()
        why = ev.get('why', None)
        if why:
            msg = "%s\n%s" % (why, msg)
        fmt = ev.get('format')
        if fmt:
            try:
                msg = fmt % ev
            except:
                msg = "UNABLE TO FORMAT LOG MESSAGE: fmt=%r ev=%r" % (fmt, ev)
                level = logging.ERROR
        # NOTE(review): if the event carries none of message/failure/why/format,
        # msg is still None here and .replace() will raise -- confirm events
        # always include one of them.
        msg = msg.replace('\n', '\n\t') # to replicate typical scrapy log appearance
        return {'message': msg, 'level': level}
class StdoutLogger(txlog.StdioOnnaStick):
    """This works like Twisted's StdioOnnaStick but prepends standard
    output/error messages with [stdout] and [stderr]
    """
    def __init__(self, isError=0, encoding=None, loglevel=logging.INFO):
        txlog.StdioOnnaStick.__init__(self, isError, encoding)
        # Tag every forwarded line with the stream it came from.
        self.prefix = "[stderr] " if isError else "[stdout] "
        self.loglevel = loglevel
    def _logprefixed(self, msg):
        # Forward a single complete line to the HS job log.
        _logfn(message=self.prefix + msg, level=self.loglevel)
    def write(self, data):
        # Python 2 only: normalize unicode to bytes in the configured encoding.
        if isinstance(data, unicode):
            data = data.encode(self.encoding)
        # self.buf (from StdioOnnaStick) holds the trailing partial line;
        # only complete lines are emitted, the remainder stays buffered.
        d = (self.buf + data).split('\n')
        self.buf = d[-1]
        messages = d[0:-1]
        for message in messages:
            self._logprefixed(message)
    def writelines(self, lines):
        for line in lines:
            if isinstance(line, unicode):
                line = line.encode(self.encoding)
            self._logprefixed(line)
|
gkc1000/pyscf | examples/grad/06-tddft_gradients.py | Python | apache-2.0 | 948 | 0.018987 | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
TDDFT analytical nuclear gradients.
'''
from pyscf import gto, scf, dft, tddft
mol = gto.M(
atom = [
['O' , 0. , 0. , 0],
['H' , 0. , -0.757 , 0.587],
['H' , 0. , 0.757 , 0.587]],
basis = 'ccpvdz')
mf = scf.RHF(mol).run()
postmf = tddft.TDHF(mf).run()
g = postmf.nuc_grad_method()
g.kernel(state=1)
mf = dft.RKS(mol).x2c().set(xc='pbe0').run()
# Switch to xcfun because 3rd order GGA functional derivative is not
# available in libxc
mf._num | int.libxc = dft.xcfun
postmf = tddft.TDDFT(mf).run()
# PySCF-1.6.1 and newer supports the .Gradients method to create a gra | d
# object after grad module was imported. It is equivalent to call the
# .nuc_grad_method method.
from pyscf import grad
g = postmf.Gradients()
g.kernel(state=1)
#mf = scf.UHF(mol).x2c().run()
#postmf = tddft.TDHF(mf).run()
#g = postmf.nuc_grad_method()
#g.kernel()
|
migglu/life | src/life.py | Python | gpl-2.0 | 2,013 | 0.035271 | from cell import Cell
from twodarray import TwoDArray
from time import sleep
from configparser import config
import random
class Life(object):
    """Conway's Game of Life grid backed by two TwoDArray buffers."""
    def __init__(self, height, width):
        # Double buffering: ``area`` is the live grid, ``buffer_area`` is the
        # scratch grid written during evolve().
        self.area = TwoDArray(width, height)
        self.buffer_area = TwoDArray(width, height)
        self.rows = height
        self.cols = width
        for x in range(self.area.width):
            for y in range(self.area.height):
                self.area.set(x, y, Cell())
                self.buffer_area.set(x, y, Cell())
    @staticmethod
    def copy_cells(from_, to_):
        # Copy every cell state from one grid into the other.
        for x in range(from_.width):
            for y in range(from_.height):
                to_.get(x, y).set_state(from_.get(x, y).get_state())
    def __repr__(self):
        # NOTE(review): passes ``self`` to the *bound* method -- this raises
        # TypeError at runtime; it should be ``self.__str__()``.
        return self.__str__(self);
    def __str__(self):
        result = []
        for cell in self.area:
            result.append(str(cell))
            result.append(' ') # TODO: not here...
        return ''.join(result)
    def get_alive_neighbours(self, area, x, y):
        # NOTE(review): the ``area`` parameter is ignored; the method always
        # reads ``self.area``.
        neighbours = 0
        for offset_x in range(-1, 2):
            for offset_y in range(-1, 2):
                if offset_x == offset_y == 0:
                    continue
                try:
                    current_cell = self.area.get(x + offset_x, y + offset_y)
                    if current_cell.is_alive():
                        neighbours += 1
                # NOTE(review): ' | ' below is a data-extraction artifact; the
                # original (Python 2) line reads ``except IndexError, e:``.
                except I | ndexError, e:
                    pass
        return neighbours
    def evolve(self):
        """Advance the grid one generation (birth on 3, survive on 2 or 3)."""
        Life.copy_cells(self.area, self.buffer_area)
        for cell_num_x in range(self.area.width):
            for cell_num_y in range(self.area.height):
                neighbours = self.get_alive_neighbours(self.area, cell_num_x, cell_num_y)
                curr_cell = self.buffer_area.get(cell_num_x, cell_num_y)
                # NOTE(review): ' | ' below is a data-extraction artifact
                # inside the identifier ``neighbours``.
                if ( neighbours == 3 and curr_cell.is_dead() ) or ( curr_cell.is_alive() and ( neighbours < 2 or n | eighbours > 3 ) ):
                    curr_cell.flip_state()
        Life.copy_cells(self.buffer_area, self.area)
    def randomize(self):
        # Seed each cell alive with probability config.random_alive_chance.
        for cell in self.area:
            if random.random() < float(config.random_alive_chance):
                cell.set_alive()
            else:
                cell.set_dead()
    def play_forever(self):
        # Python 2 print statements: a blank line, then the grid, each tick.
        while 1:
            print
            print self
            self.evolve()
            #for debugging only, comment otherwise
            #sys.exit(0)
            sleep(float(config.evolve_interval))
|
JacobCallahan/rizza | rizza/helpers/prune.py | Python | gpl-3.0 | 2,971 | 0 | # -*- encoding: utf-8 -*-
"""A utility that tries saved genetic tests and removes those failing"""
import asyncio
import yaml
from pathlib import Path
from logzero import logger
from rizza import entity_tester
from rizza import genetic_tester
def genetic_prune(conf, entity='All'):
    """Check all saved genetic_tester tests for an entity, prune failures.

    For 'All', recurse over every known entity. Otherwise load the entity's
    saved test file, re-run each 'positive' test, drop the ones that now
    fail, and rewrite (or delete, when effectively empty) the file.
    """
    if entity == 'All':
        for target in list(entity_tester.EntityTester.pull_entities()):
            genetic_prune(conf, target)
    else:
        test_file = conf.base_dir.joinpath(
            'data/genetic_tests/{}.yaml'.format(entity))
        logger.debug('Current target file: {}'.format(test_file))
        to_remove = []
        # Files of 10 bytes or fewer are treated as effectively empty.
        if test_file.exists() and test_file.stat().st_size > 10:
            logger.debug('Beginning tests for {}'.format(entity))
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input; yaml.safe_load would be preferable here.
            tests = yaml.load(test_file.open('r'))
            for test in tests:
                # Keys look like "<entity> <method> <mode>".
                ent, method, mode = test.split(' ')
                if mode == 'positive':
                    logger.debug('Running test {}'.format(method))
                    result = genetic_tester.GeneticEntityTester(
                        conf, entity, method
                    ).run_best()
                    if result == -1:
                        logger.debug('{} failed.'.format(test))
                        to_remove.append(test)
                    else:
                        logger.debug('{} passed.'.format(test))
            for test in to_remove:
                logger.warning('Removing {} from {}'.format(test, test_file))
                del tests[test]
            # Recreate the file from scratch with the surviving tests.
            logger.debug('Deleting file {}'.format(test_file))
            test_file.unlink()
            logger.debug('Writing tests to {}'.format(test_file))
            yaml.dump(tests, test_file.open('w+'), default_flow_style=False)
            logger.info('Done pruning {}'.format(entity))
        if test_file.exists() and test_file.stat().st_size < 10:
            logger.warning('Deleting empty file {}'.format(test_file))
            test_file.unlink()
async def _async_prune(conf, entity, loop, sem):
    """Run an individual prune task"""
    # Bound concurrency with the shared semaphore, then run the blocking
    # genetic_prune call in the loop's default thread-pool executor.
    async with sem:
        await loop.run_in_executor(
            None, # use default executor
            genetic_prune, conf, entity # function and args
        )
async def _async_prune_all(conf, loop, sem):
    """Construct all the prune tasks, and await them"""
    # One task per known entity; asyncio.wait blocks until all complete.
    tasks = [
        asyncio.ensure_future(_async_prune(conf, entity, loop, sem))
        for entity in list(entity_tester.EntityTester.pull_entities())
    ]
    await asyncio.wait(tasks)
def async_genetic_prune(conf, entity='All', async_limit=100):
    """Asynchronously perform a genetic prune for all entities"""
    # A single named entity does not benefit from fan-out; prune it inline.
    if entity != 'All':
        genetic_prune(conf, entity)
        return
    # The semaphore caps how many prunes run concurrently at async_limit.
    sem = asyncio.Semaphore(async_limit)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(_async_prune_all(conf, loop, sem))
    loop.close()
|
dwrpayne/zulip | zerver/management/commands/deactivate_realm.py | Python | apache-2.0 | 789 | 0.001267 | from __future__ import absolute_import
from __future__ import print_function
from django.core.management.base import BaseCommand
import sys
from zerver.lib.actions import do_deactivate_realm
from zerver.models import get_realm
class Command(BaseCommand):
    help = """Script to deactivate a realm."""

    def add_arguments(self, parser):
        # The realm is identified by its domain, passed as a positional arg.
        parser.add_argument('domain', metavar='<domain>', type=str,
                            help='domain of realm to deactivate')

    def handle(self, *args, **options):
        """Look up the realm by domain and deactivate it; exit(1) if absent."""
        realm = get_realm(options["domain"])
        if realm is None:
            print("Could not find realm %s" % (options["domain"],))
            sys.exit(1)
        print("Deactivating", options["domain"])
        do_deactivate_realm(realm)
        print("Done!")
|
FederatedAI/FATE | examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-normal.py | Python | apache-2.0 | 2,733 | 0.001829 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
cur_path = os.path.realpath(__file__)
for i in range(4):
cur_path = os.path.dirname(cur_path)
print(f'fate_path: {cur_path}')
sys.path.append(cur_path)
from examples.pipeline.hetero_logistic_regression import common_tools
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Build and fit a hetero logistic-regression pipeline, then print the
    LR and evaluation component summaries.

    :param config: path to a job-config YAML file, or an already-loaded
        job-config object.
    :param namespace: optional namespace suffix for the job.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    # Hyper-parameters for the hetero-LR component.
    lr_param = {
        "name": "hetero_lr_0",
        "penalty": "L2",
        "optimizer": "rmsprop",
        "tol": 0.0001,
        "alpha": 0.01,
        "max_iter": 30,
        "early_stop": "diff",
        "batch_size": 320,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros"
        },
        "sqn_param": {
            "update_interval_L": 3,
            "memory_M": 5,
            "sample_size": 5000,
            "random_seed": None
        },
        "cv_param": {
            "n_splits": 5,
            "shuffle": False,
            "random_seed": 103,
            "need_cv": False
        },
        "callback_param": {
            "callbacks": ["ModelCheckpoint"],
            "save_freq": "epoch"
        }
    }
    pipeline = common_tools.make_normal_dsl(config, namespace, lr_param)
    # fit model
    pipeline.fit()
    # query component summary
    common_tools.prettify(pipeline.get_component("hetero_lr_0").get_summary())
    common_tools.prettify(pipeline.get_component("evaluation_0").get_summary())
if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    # Fall back to the default config path when -config is not supplied.
    if args.config is not None:
        main(args.config)
    else:
        main()
|
tranlyvu/autonomous-vehicle-projects | Traffic Sign Classifier/src/second_attempt.py | Python | apache-2.0 | 8,997 | 0.018784 | import pickle
import pandas as pd
import numpy as np
import random
import cv2
import glob
import tensorflow as tf
from tensorflow.contrib.layers import flatten
from tensorflow.contrib.layers import flatten
from sklearn.utils import shuffle
def grayscale(input_image):
    """Convert each RGB image in *input_image* to single-channel grayscale."""
    return [cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) for frame in input_image]
def normalization(input_image):
    """Scale pixel values into roughly [-1, 1] by centering on 128.

    Each image becomes a float32 array computed as (pixels - 128.0) / 128.0.
    (Per forum advice, using the float literal 128.0 matters here:
    https://discussions.udacity.com/t/accuracy-is-not-going-over-75-80/314938/22)
    """
    return [np.array((frame - 128.0) / 128.0, dtype=np.float32)
            for frame in input_image]
def get_weights(input_shape):
    # Truncated-normal initialisation (mean 0, stddev 0.1) for conv/FC weights.
    return tf.Variable(tf.truncated_normal(shape = input_shape, mean = 0.0, stddev = 0.1))
def get_biases(length):
    # Zero-initialised bias vector of the given length.
    return tf.Variable(tf.zeros(length))
#NOTE: number of filter is output channel
def convolution_layer(input_image,
                      filter_size,
                      input_channel,
                      number_of_filters,
                      padding_choice = 'VALID'):
    """2-D convolution with stride 1 plus bias; weights/biases are created here."""
    shape = [filter_size, filter_size, input_channel, number_of_filters]
    weights = get_weights(input_shape = shape)
    biases = get_biases(length = number_of_filters)
    layer = tf.nn.conv2d(input = input_image,
                         filter = weights,
                         strides = [1, 1, 1, 1],
                         padding = padding_choice) + biases
    return layer
def activation_relu(input_layer):
    # Thin wrapper kept for naming consistency with the other layer helpers.
    return tf.nn.relu(input_layer)
def max_spooling(input_layer, padding_choice):
    # 2x2 max pooling with stride 2 (halves the spatial dimensions).
    # NOTE(review): name looks like a typo for "max_pooling"; kept to avoid
    # breaking callers.
    return tf.nn.max_pool(value = input_layer,
                          ksize = [1, 2, 2, 1],
                          strides = [1, 2, 2, 1],
                          padding= padding_choice)
def flatten_layer(input_layer):
    # Collapse each example to a 1-D feature vector (keeps the batch axis).
    return flatten(input_layer)
def fully_connected_layer(input_layer,
                          number_of_inputs,
                          number_of_outputs):
    """Dense layer: input @ weights + biases (no activation applied here)."""
    weights = get_weights(input_shape = [number_of_inputs, number_of_outputs])
    biases = get_biases(length = number_of_outputs)
    layer = tf.matmul(input_layer, weights) + biases
    return layer
def dropout_layer(layer, keep_prob):
    # keep_prob is the probability each unit is KEPT (1.0 disables dropout).
    layer = tf.nn.dropout(layer, keep_prob)
    return layer
"""Pre-processing data"""
def pre_process_second_attempt(input_image):
gray_image = grayscale(input_image)
normalization_image = normalization(gray_image)
output = np.expand_dims(normalization_image, 3)
return output
# NOTE(review): X_train_original / X_valid_original are only assigned inside
# the __main__ guard below, so these module-level calls would raise NameError
# on a plain import -- confirm intended execution order.
X_train_final_2 = pre_process_second_attempt(X_train_original)
X_valid_final_2 = pre_process_second_attempt(X_valid_original)
"""Pre-processing data"""
def preprocess_data(input_image):
gray_image = grayscale(input_image)
output = normalization(gray_image)
output = np.expand_dims(output, 3)
return output
# NOTE(review): depends on X_train_original / X_valid_original, which are
# assigned only under the __main__ guard further down -- verify import order.
X_train_final = preprocess_data(X_train_original)
X_valid_final = preprocess_data(X_valid_original)
print(X_train_final[0].shape)
"""Model design"""
def Lenet_5_model(input_image):
# Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x10.
conv1 = convolution_layer(input_image, 5, 1, 10, 'VALID')
conv1 = activation_relu(conv1)
# Layer 2: Convolutional. Input = 28x28x10. Output = 24x24x20.
conv2 = convolution_layer(conv1, 5, 10, 20, 'VALID')
conv2 = activation_relu(conv2)
# drop-out
conv2 = dropout_layer(conv2, keep_prob)
# Layer 3: Convolutional. Input = 24x24x20. Output = 20x20x30.
conv3 = convolution_layer(conv2, 5, 20, 30, 'VALID')
conv3 = activation_relu(conv3)
# drop-out
conv3 = dropout_layer(conv3, keep_prob)
# Layer 4: Convolutional. Input = 20x20x30. Output = 16x16x40.
conv4 = convolution_layer(conv3, 5, 30, 40, 'VALID')
conv4 = tf.nn.relu(conv4)
# max_pool: output = 8x8x40
conv4 = max_spooling(conv4, 'VALID')
# drop-out
conv4 = dropout_layer(conv4, keep_prob)
# Flatten. Input = 8x8x40. Output = 2560.
fc0 = flatten_layer(conv4)
# Layer 5: Fully Connected. Input = 2560. Output = 1280.
fc1 = fully_connected_layer(fc0, 2560, 1280)
fc1 = tf.nn.relu(fc1)
# Layer 6: Fully Connected. Input = 1280. Output = 640.
fc2 = fully_connected_layer(fc1, 1280, 640)
fc2 = tf.nn.relu(fc2)
# Layer 7: Fully Connected. Input = 640. Output = 320
fc3 = fully_connected_layer(fc2, 640, 320)
fc3 = tf.nn.relu(fc3)
# Layer 8: Fully Connected. Input = 320. Output = 160
fc4 = fully_connected_layer(fc3, 320, 160)
fc4 = tf.nn.relu(fc4)
# Layer 9: Fully Connected. Input = 160. Output = 80
fc5 = fully_connected_layer(fc4, 160, 80)
fc5 = tf.nn.relu(fc5)
# Layer 10: Fully Connected. Input = 80. Output = 43
logits = fully_connected_layer(fc5, 80, 43)
return logits
"""Evaluation function"""
def evaluate(X_data, y_data, my_keep_prob):
num_examples = len(X_data)
total_accuracy = 0
total_loss = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset : offset + BATCH_SIZE], y_data[offset : offset + BATCH_SIZE]
loss, accuracy = sess.run([loss_operation, accuracy_operation], feed_dict={x: batch_x,
y: batch_y,
keep_prob: my_keep_prob})
total_accuracy += (accuracy * len(batch_x))
total_loss += (loss * len(batch_x))
return total_loss / num_examples, total_accuracy / num_examples
"""Training data"""
if __name__ == "__main__":
''' Pre-processing pipeline
- graysclale
- normalize
- reshape input data to (32,32,1)
'''
# The data is probably in order RGB
# type uint8
training_file = '../../../train.p'
validation_file= '../../../valid.p'
testing_file = '../../../test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train_original, y_train_original = train['features'], train['labels']
X_valid_original, y_valid_original = valid['features'], valid['labels']
X_test_original, y_test_original = test['features'], test['labels']
"""Parameters setting"""
EPOCHS = 40
BATCH_SIZE = 128
LEARNING_RATE = 0.0001
'''Training and save'''
keep_prob = tf.placeholder(tf.float32)
# x is a placeholder for a batch of input images. y is a placeholder for a batch of output labels.
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
# convert to 1 hot-coded data
one_hot_y = tf.one_hot(y, 43)
logits = Lenet_5_model(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = LEARNING_RATE)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.e | qual(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
train_loss_history = []
valid_loss_history = []
#Start running tensor flow
with tf.Session() as sess:
sess.run(tf.global_variables_initial | izer())
num_examples = len(X_train_final)
print("Training...")
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train_final, y_train_original)
for offset in range(0, num_examples, BATC |
nomad-vino/SPSE-1 | Module 5/x5_7.py | Python | gpl-3.0 | 1,696 | 0.014741 | #!/usr/bin/python
print " __ "
print " |__|____ ___ __ "
print " | \__ \\\\ \/ / "
print " | |/ __ \\\\ / "
print " /\__| (____ /\_/ "
print " \______| \/ "
print " "
print 'Module 5'
print 'Exploitation Techniques'
print
"""
Write a pyCommand script to find if the DEP, ASLR, SafeSEH modules are enabled
"""
import immlib
import struct
DESC = "DEP, ASLR and SafeSEH Detection in all Modules"
# More information
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
# How to detect presence of security mechanisms
IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100 # DEP compatible
IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040 # ASLR
def main(args) :
    """List every loaded module and report whether it was linked with the
    DEP and ASLR DllCharacteristics flags."""
    imm = immlib.Debugger()
    # code borrowed from safeseh pycommand
    allmodules=imm.getAllModules()
    for key in allmodules.keys():
        dep = aslr = "NO"
        module = imm.getModule(key)
        module_baseAddress = module.getBaseAddress()
        # e_lfanew at DOS-header offset 0x3c points to the PE header.
        pe_offset = struct.unpack('<L',imm.readMemory(module_baseAddress + 0x3c,4))[0]
        pebase = module_baseAddress + pe_offset
        # DllCharacteristics is a WORD at PE-header offset 0x5e.
        flags = struct.unpack('<H',imm.readMemory(pebase + 0x5e,2))[0]
        if (flags & IMAGE_DLLCHARACTERISTICS_NX_COMPAT != 0) :
            dep = "YES"
        if (flags & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE != 0) :
            aslr = "YES"
        imm.log("---- %s ----" %key)
        imm.log("DEP: %s ASLR: %s" %(dep, aslr))
        imm.log("--------------")
    return "[+] Executed Successfully"
|
abdhaleegit/avocado-misc-tests | toolchain/gdb.py | Python | gpl-2.0 | 3,103 | 0 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Pavithra <pavrampu@linux.vnet.ibm.com>
import os
import re
from avocado import Test
from avocado.utils import archive
from avocado.utils import build
from avocado.utils import distro
from avocado.utils import process
from avocado.utils.software_manager import SoftwareManager
class GDB(Test):
    """Build GDB (upstream release tarball or distro sources) and run its
    DejaGnu test suite, failing when unexpected failures are reported."""

    def setUp(self):
        """Install prerequisites, obtain the sources, configure and build."""
        sm = SoftwareManager()
        dist = distro.detect()
        packages = ['gcc', 'dejagnu', 'flex',
                    'bison', 'texinfo', 'make', 'makeinfo']
        if dist.name == 'Ubuntu':
            packages.extend(['g++', 'binutils-dev'])
        # FIXME: "redhat" as the distro name for RHEL is deprecated
        # on Avocado versions >= 50.0. This is a temporary compatibility
        # enabler for older runners, but should be removed soon
        elif dist.name in ['rhel', 'fedora', 'redhat']:
            packages.extend(['gcc-c++', 'binutils-devel', 'texi2html'])
        elif dist.name == 'SuSE':
            packages.extend(['gcc-c++', 'binutils-devel',
                             'glibc-devel', 'glibc-devel-static'])
        else:
            self.fail('no packages list for your distro.')
        for package in packages:
            if not sm.check_installed(package) and not sm.install(package):
                self.cancel("Fail to install %s required for this test." %
                            package)
        test_type = self.params.get('type', default='upstream')
        if test_type == 'upstream':
            gdb_version = self.params.get('gdb_version', default='10.2')
            tarball = self.fetch_asset(
                "http://ftp.gnu.org/gnu/gdb/gdb-%s.tar.gz" % gdb_version)
            archive.extract(tarball, self.workdir)
            sourcedir = os.path.join(
                self.workdir, os.path.basename(tarball.split('.tar')[0]))
        elif test_type == 'distro':
            sourcedir = os.path.join(self.workdir, 'gdb-distro')
            if not os.path.exists(sourcedir):
                os.makedirs(sourcedir)
            sourcedir = sm.get_source("gdb", sourcedir)
        os.chdir(sourcedir)
        process.run('./configure', ignore_status=True, sudo=True)
        build.make(sourcedir)

    def test(self):
        """Run `make check-gdb` and fail if the log shows unexpected failures."""
        process.run("make check-gdb", ignore_status=True, sudo=True)
        logfile = os.path.join(self.logdir, "stdout")
        with open(logfile, 'r') as f:
            for line in f.readlines():
                # Matches the DejaGnu summary when the count is non-zero.
                for match in re.finditer("of unexpected failures[1-9]", line):
                    self.log.info(line)
                    self.fail("Few gdb tests have failed")
|
Vicyorus/BattleTank | src/Bullet.py | Python | gpl-3.0 | 5,190 | 0.010405 | import pygame
from Explosion import Explosion
class Bullet(object):
    """A projectile fired by the player or an enemy tank.

    Travels in a straight line, explodes on the world borders and resolves
    collisions against blocks, tanks and other bullets via the manager.
    """
    PLAYER, ENEMY = 1, 0

    def __init__(self, manager, parent, init_pos, direction, speed=3):
        # parent is Bullet.PLAYER or Bullet.ENEMY; it decides friendly fire.
        self.manager = manager
        self.parent = parent
        self.image = pygame.image.load("res/tanks/bullet.png")
        self.explosion = pygame.image.load("res/explosions/bullet_explosion.png")
        self.rect = self.calculate_init_point(direction, init_pos)
        self.speed = self.calculate_speed(direction, speed)

    def calculate_speed(self, direction, speed):
        """Return the (dx, dy) velocity and rotate the sprite to match."""
        if direction == 0: # Up
            return (0, -speed)
        if direction == 1: # Down
            self.image = pygame.transform.rotate(self.image, 180)
            return (0, speed)
        if direction == 2: # Left
            self.image = pygame.transform.rotate(self.image, 90)
            return (-speed, 0)
        if direction == 3: # Right
            self.image = pygame.transform.rotate(self.image, -90)
            return (speed, 0)

    def calculate_init_point(self, direction, init_pos):
        """Place the bullet rect just outside the firing tank's muzzle."""
        rect = self.image.get_rect()
        posX = init_pos[0]
        posY = init_pos[1]
        if direction == 0:
            rect.x = posX + 12
            rect.y = posY - 14
        if direction == 1:
            rect.x = posX + 12
            rect.y = posY + 32
        if direction == 2:
            rect.x = posX - 14
            rect.y = posY + 12
        if direction == 3:
            rect.x = posX + 32
            rect.y = posY + 12
        return rect

    def update(self, blocks):
        """Advance one frame: move, explode at world edges, resolve hits."""
        posX = self.speed[0]
        posY = self.speed[1]
        self.rect.x += posX
        self.rect.y += posY
        # If we are about to leave the world, explode at the border.
        if self.rect.x < 0:
            self.rect.x = 0
            self.explode()
        if self.rect.x > 632:
            self.rect.x = 632
            self.explode()
        if self.rect.y < 0:
            self.rect.y = 0
            self.explode()
        if self.rect.y > 568:
            self.rect.y = 568
            self.explode()
        crashed = False
        # Check if we crashed with another block
        for block in blocks:
            # We can't crash with ourselves... can we?
            if block == self:
                pass
            # If we do crash, we tell the manager to destroy said block
            elif self.rect.colliderect(block):
                # Right after we check if we can destroy said block
                block_name = type(block).__name__
                if block_name in ["Block", "Heart", "Bullet"]:
                    self.impact_side(block)
                    if self.manager.destroy_element(block): # Block tells us if it destroyed
                        crashed = True
                    else: # Else, we explode
                        self.explode()
                elif block_name == "Enemy" and self.parent: # Player bullet against enemy
                    self.impact_side(block)
                    # If enemy tells us it destroyed, it's a kill
                    if self.manager.destroy_element(block):
                        self.manager.increment_kills()
                        crashed = True
                    else: # Else, we explode
                        self.explode()
                elif block_name == "Enemy" and not self.parent: # Enemy bullet hitting enemy
                    crashed = True
                elif block_name == "Jugador" and not self.parent: # Enemy bullet hitting the player
                    self.impact_side(block)
                    # If the player destroys, we destroy
                    if self.manager.destroy_element(block):
                        crashed = True
                    else: # Else, we explode
                        self.explode()
                else:
                    pass
        if crashed: # If we crashed, we destroy ourselves
            self.destroy()

    def destroy(self):
        """Remove this bullet silently (no explosion sprite)."""
        if self.parent == self.PLAYER:
            self.manager.remove_player_bullet()
        self.manager.remove_bullet(self)
        return True

    def explode(self):
        """Spawn an explosion at the bullet's position, then remove it."""
        if self.parent == self.PLAYER:
            self.manager.remove_player_bullet()
        # Create the explosion
        Explosion(self.manager, self.rect)
        self.manager.remove_bullet(self)
        return True

    def impact_side(self, block):
        """Snap the bullet flush against the side of *block* it hit."""
        posX = self.speed[0]
        posY = self.speed[1]
        if posX > 0: # Left side
            self.rect.right = block.rect.left
        if posX < 0: # Right side
            self.rect.left = block.rect.right
        if posY > 0: # Upper side
            self.rect.bottom = block.rect.top
        if posY < 0: # Lower side
            self.rect.top = block.rect.bottom
googleapis/python-resource-manager | google/cloud/resourcemanager_v3/services/tag_keys/pagers.py | Python | apache-2.0 | 5,731 | 0.001396 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.resourcemanager_v3.types import tag_keys
class ListTagKeysPager:
    """A pager for iterating through ``list_tag_keys`` requests.
    This class thinly wraps an initial
    :class:`google.cloud.resourcemanager_v3.types.ListTagKeysResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``tag_keys`` field.
    If there are more pages, the ``__iter__`` method will make additional
    ``ListTagKeys`` requests and continue to iterate
    through the ``tag_keys`` field on the
    corresponding responses.
    All the usual :class:`google.cloud.resourcemanager_v3.types.ListTagKeysResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(
        self,
        method: Callable[..., tag_keys.ListTagKeysResponse],
        request: tag_keys.ListTagKeysRequest,
        response: tag_keys.ListTagKeysResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.
        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.resourcemanager_v3.types.ListTagKeysRequest):
                The initial request object.
            response (google.cloud.resourcemanager_v3.types.ListTagKeysResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = tag_keys.ListTagKeysRequest(request)
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)
    @property
    def pages(self) -> Iterator[tag_keys.ListTagKeysResponse]:
        # Lazily fetch follow-up pages by replaying the original request
        # with the next_page_token from the previous response.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response
    def __iter__(self) -> Iterator[tag_keys.TagKey]:
        # Flatten all pages into a single stream of TagKey items.
        for page in self.pages:
            yield from page.tag_keys
    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListTagKeysAsyncPager:
    """A pager for iterating through ``list_tag_keys`` requests.
    This class thinly wraps an initial
    :class:`google.cloud.resourcemanager_v3.types.ListTagKeysResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``tag_keys`` field.
    If there are more pages, the ``__aiter__`` method will make additional
    ``ListTagKeys`` requests and continue to iterate
    through the ``tag_keys`` field on the
    corresponding responses.
    All the usual :class:`google.cloud.resourcemanager_v3.types.ListTagKeysResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(
        self,
        method: Callable[..., Awaitable[tag_keys.ListTagKeysResponse]],
        request: tag_keys.ListTagKeysRequest,
        response: tag_keys.ListTagKeysResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiates the pager.
        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.resourcemanager_v3.types.ListTagKeysRequest):
                The initial request object.
            response (google.cloud.resourcemanager_v3.types.ListTagKeysResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = tag_keys.ListTagKeysRequest(request)
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)
    @property
    async def pages(self) -> AsyncIterator[tag_keys.ListTagKeysResponse]:
        # Async variant of the sync pager: each follow-up page is awaited.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response
    def __aiter__(self) -> AsyncIterator[tag_keys.TagKey]:
        # Flatten all pages into a single async stream of TagKey items.
        async def async_generator():
            async for page in self.pages:
                for response in page.tag_keys:
                    yield response
        return async_generator()
    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
vmg/hg-stable | mercurial/localrepo.py | Python | gpl-2.0 | 96,365 | 0.000903 | # localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap
propertycache = util.propertycache
filecache = scmutil.filecache
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """
    # Each descriptor access is redirected to the unfiltered repo so the
    # cached value is shared across every filtered view of the repository.
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # Resolve fname relative to the store directory via repo.sjoin.
        return obj.sjoin(fname)
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""
    def __get__(self, repo, type=None):
        # Always compute/cache against the unfiltered repository.
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""
    def cachevalue(self, obj, value):
        # Store the value directly on the (possibly filtered) instance so
        # each filtered view keeps its own cached copy.
        object.__setattr__(obj, self.name, value)
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfiltered_attrs = vars(repo.unfiltered())
    return name in unfiltered_attrs
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # Swap the (possibly filtered) repo for its unfiltered view before
        # delegating to the wrapped method.
        unfiltered = repo.unfiltered()
        return orig(unfiltered, *args, **kwargs)
    return wrapper
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''
    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        # Expose the 'served' filtered view so a local peer sees the same
        # changesets a remote client would.
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats
    def close(self):
        self._repo.close()
    def _capabilities(self):
        return self._caps
    def local(self):
        return self._repo
    def canpush(self):
        return True
    def url(self):
        return self._repo.url()
    def lookup(self, key):
        return self._repo.lookup(key)
    def branchmap(self):
        return self._repo.branchmap()
    def heads(self):
        return self._repo.heads()
    def known(self, nodes):
        return self._repo.known(nodes)
    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        # NOTE(review): the received bundlecaps argument is not forwarded;
        # the underlying repo is always called with bundlecaps=None.
        return self._repo.getbundle(source, heads=heads, common=common,
                                    bundlecaps=None)
    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.
    def lock(self):
        return self._repo.lock()
    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)
    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)
    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''
    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)
    # Legacy wire-protocol methods delegate straight to the wrapped repo.
    def branches(self, nodes):
        return self._repo.branches(nodes)
    def between(self, pairs):
        return self._repo.between(pairs)
    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)
    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)
class localrepository(object):
supportedformats = set(('revlogv1', 'generaldelta'))
supported = supportedformats | set(('store', 'fncache', 'shared',
'dotencode'))
openerreqs = set(('revlogv1', 'generaldelta'))
requirements = ['revlogv1']
filtername = None
def _baserequirements(self, create):
return self.requirements[:]
def __init__(self, baseui, path=None, create=False):
self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
self.wopener = self.wvfs
self.root = self.wvfs.base
self.path = self.wvfs.join(".hg")
self.origroot = path
self.auditor = scmutil.pathauditor(self.root, self._checknested)
self.vfs = scmutil.vfs(self.path)
self.opener = self.vfs
self.baseui = baseui
self.ui = baseui.copy()
# A list of callback to shape the phase if no data were found.
# Callback are in the form: func(repo, roots) --> processed root.
# This list it to be filled by extension during repo setup
self._phasedefaults = []
try:
self.ui.readconfig(self.join("hgrc"), self.root)
extensions.loadall(self.ui)
except IOError:
pass
if not self.vfs.isdir():
if create:
if not self.wvfs.exists():
self.wvfs.makedirs()
self.vfs.makedir(notindexed=True)
requirements = self._baserequirements(create)
if self.ui.configbool('format', 'usestore', True):
self.vfs.mkdir("store")
requirements.append("store")
if self.ui.configbool('format', 'usefncache', True):
requirements.append("fncache")
if self.ui.configbool('format', 'dotencode', True):
requirements.append('dotencode')
# create an invalid changelog
self.vfs.append(
"00changelog.i",
'\0\0\0\2' # represents revlogv2
' dummy changelog to prevent using the old repo layout'
)
if self.ui.configbool('format', 'generaldelta', False):
requirements.append("generaldelta")
requirements = set(requirements)
else:
raise error.RepoError(_("repository %s not found") % path)
elif create:
raise error.RepoError(_("repository %s already exists") % path)
else:
try:
requirements = scmutil.readrequires(self.vfs, self.supported)
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
requiremen | ts = set()
self.sharedpath = self.path
try:
vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
realpath=True)
s = vfs.base
if not vfs.exists():
raise error.RepoError(
_('.hg/sharedpath points to nonexistent directory %s') % s)
self.sharedpath = s
| except IOError, inst:
if inst.errno != errno.ENOENT:
raise
self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
self.spath = self.store.path
self.svfs = self.store.vfs
self.sopener = self.svf |
Pointedstick/ReplicatorG | skein_engines/skeinforge-44/fabmetheus_utilities/geometry/manipulation_paths/bevel.py | Python | gpl-2.0 | 2,387 | 0.028069 | """
Add material to support overhang or remove material at the overhang angle.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/ | 05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalExecutionOrder = 20
def getBevelPath( begin, center, close, end, radius ):
"Get bevel path."
beginComplex = begin.dropAxis()
centerComplex = center.dropAxis()
endComplex = end.dropAxis()
beginComplexSegmentLength = abs( centerComplex - beginComplex )
endComplexSegmentLength = abs( center | Complex - endComplex )
minimumRadius = lineation.getMinimumRadius( beginComplexSegmentLength, endComplexSegmentLength, radius )
if minimumRadius <= close:
return [ center ]
beginBevel = center + minimumRadius / beginComplexSegmentLength * ( begin - center )
endBevel = center + minimumRadius / endComplexSegmentLength * ( end - center )
if radius > 0.0:
return [ beginBevel, endBevel ]
midpointComplex = 0.5 * ( beginBevel.dropAxis() + endBevel.dropAxis() )
spikeComplex = centerComplex + centerComplex - midpointComplex
return [ beginBevel, Vector3( spikeComplex.real, spikeComplex.imag, center.z ), endBevel ]
def getManipulatedPaths(close, elementNode, loop, prefix, sideLength):
"Get bevel loop."
if len(loop) < 3:
return [loop]
radius = lineation.getRadiusByPrefix(elementNode, prefix, sideLength)
if radius == 0.0:
return loop
bevelLoop = []
for pointIndex in xrange(len(loop)):
begin = loop[(pointIndex + len(loop) - 1) % len(loop)]
center = loop[pointIndex]
end = loop[(pointIndex + 1) % len(loop)]
bevelLoop += getBevelPath( begin, center, close, end, radius )
return [ euclidean.getLoopWithoutCloseSequentialPoints( close, bevelLoop ) ]
def processElementNode(elementNode):
"Process the xml element."
lineation.processElementNodeByFunction(elementNode, getManipulatedPaths)
|
rangertaha/salt-manager | salt-manager/webapp/apps/fabric/fabhistory/urls.py | Python | mit | 1,051 | 0.010466 | #!/usr/bin/env python
"""
"""
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from views import *
urlpatterns = patterns('',
url(r'^index/$', FabricIndex.as_view(), name='fabric_index'),
# Users
url(r'^access/$', FabricAccessList.as_view(), name=' | fabric_access'),
url(r'^access/create/$', FabricAccessCreate.as_view(), name='fabric_access_creat | e'),
url(r'^access/(?P<pk>\d+)/$', FabricAccessDetail.as_view(), name='fabric_access_details'),
url(r'^access/delete/(?P<pk>\d+)/$', FabricAccessDelete.as_view(), name='fabric_access_delete'),
url(r'^access/update/(?P<pk>\d+)/$', FabricAccessUpdate.as_view(), name='fabric_access_update'),
url(r'^history/$', ExecutionHistoryList.as_view(), name='fabric_exe_history'),
url(r'^history/(?P<pk>\d+)/$', ExecutionHistoryDetail.as_view(), name='fabric_exe_history_details'),
#url(r'^cmd/$', ExecutionHistoryList.as_view(), name='command'),
#url(r'^cmd/(?P<pk>\d+)/$', ExecutionHistoryDetail.as_view(), name='command_details'),
) |
dadavidson/Python_Lab | Python Examples/functions-inmodule.py | Python | mit | 144 | 0.020833 | import math # Imports the math module
everything = dir(math) # Sets everything to a list of things f | rom math
print everything # Prints | 'em all!
|
arizvisa/syringe | lib/ndk/pstypes.py | Python | bsd-2-clause | 57,087 | 0.002575 | import ptypes, pecoff
from ptypes import *
from . import error, ldrtypes, rtltypes, umtypes, ketypes, Ntddk, heaptypes, sdkddkver
from .datatypes import *
class PEB_FREE_BLOCK(pstruct.type): pass
class PPEB_FREE_BLOCK(P(PEB_FREE_BLOCK)): pass
PEB_FREE_BLOCK._fields_ = [(PPEB_FREE_BLOCK, 'Next'), (ULONG, 'Size')]
class _Win32kCallbackTable(pstruct.type, versioned):
_fields_ = [
(PVOID, 'fnCOPYDATA'),
(PVOID, 'fnCOPYGLOBALDATA'),
(PVOID, 'fnDWORD'),
(PVOID, 'fnNCDESTROY'),
(PVOID, 'fnDWORDOPTINLPMSG'),
(PVOID, 'fnINOUTDRAG'),
(PVOID, 'fnGETTEXTLENGTHS'),
(PVOID, 'fnINCNTOUTSTRING'),
(PVOID, 'fnPOUTLPINT'),
(PVOID, 'fnINLPCOMPAREITEMSTRUCT'),
(PVOID, 'fnINLPCREATESTRUCT'),
(PVOID, 'fnINLPDELETEITEMSTRUCT'),
(PVOID, 'fnINLPDRAWITEMSTRUCT'),
(PVOID, 'fnPOPTINLPUINT'),
(PVOID, 'fnPOPTINLPUINT2'),
(PVOID, 'fnINLPMDICREATESTRUCT'),
| (PVOID, 'fnINOUTLPMEASUREITEMSTRUCT'),
(PVOID, 'fnINLPWINDOWPOS'),
(PVOID, 'fnINOUTLPPOINT5'),
(PVOID, 'fnINOUTLPSCROLLINFO'),
(PVOID, 'fnINOUTLPRECT'),
(PVOID, 'fnINOUTNCCALCSIZE'),
(PVOID, 'fnINOUTLPPOINT5_'),
(PVOID, 'fnINPAINTCLIPBRD'),
(PVOID, 'fnINSIZECLIPBRD'),
(PVOID, 'fnINDESTROYCLIPBRD'),
(PVOID, 'fnINSTRING'),
(PVOID, 'fnINSTRINGNULL'),
(PVOID, 'fnINDEVICEC | HANGE'),
(PVOID, 'fnPOWERBROADCAST'),
(PVOID, 'fnINLPUAHDRAWMENU'),
(PVOID, 'fnOPTOUTLPDWORDOPTOUTLPDWORD'),
(PVOID, 'fnOPTOUTLPDWORDOPTOUTLPDWORD_'),
(PVOID, 'fnOUTDWORDINDWORD'),
(PVOID, 'fnOUTLPRECT'),
(PVOID, 'fnOUTSTRING'),
(PVOID, 'fnPOPTINLPUINT3'),
(PVOID, 'fnPOUTLPINT2'),
(PVOID, 'fnSENTDDEMSG'),
(PVOID, 'fnINOUTSTYLECHANGE'),
(PVOID, 'fnHkINDWORD'),
(PVOID, 'fnHkINLPCBTACTIVATESTRUCT'),
(PVOID, 'fnHkINLPCBTCREATESTRUCT'),
(PVOID, 'fnHkINLPDEBUGHOOKSTRUCT'),
(PVOID, 'fnHkINLPMOUSEHOOKSTRUCTEX'),
(PVOID, 'fnHkINLPKBDLLHOOKSTRUCT'),
(PVOID, 'fnHkINLPMSLLHOOKSTRUCT'),
(PVOID, 'fnHkINLPMSG'),
(PVOID, 'fnHkINLPRECT'),
(PVOID, 'fnHkOPTINLPEVENTMSG'),
(PVOID, 'xxxClientCallDelegateThread'),
(PVOID, 'ClientCallDummyCallback'),
(PVOID, 'fnKEYBOARDCORRECTIONCALLOUT'),
(PVOID, 'fnOUTLPCOMBOBOXINFO'),
(PVOID, 'fnINLPCOMPAREITEMSTRUCT2'),
(PVOID, 'xxxClientCallDevCallbackCapture'),
(PVOID, 'xxxClientCallDitThread'),
(PVOID, 'xxxClientEnableMMCSS'),
(PVOID, 'xxxClientUpdateDpi'),
(PVOID, 'xxxClientExpandStringW'),
(PVOID, 'ClientCopyDDEIn1'),
(PVOID, 'ClientCopyDDEIn2'),
(PVOID, 'ClientCopyDDEOut1'),
(PVOID, 'ClientCopyDDEOut2'),
(PVOID, 'ClientCopyImage'),
(PVOID, 'ClientEventCallback'),
(PVOID, 'ClientFindMnemChar'),
(PVOID, 'ClientFreeDDEHandle'),
(PVOID, 'ClientFreeLibrary'),
(PVOID, 'ClientGetCharsetInfo'),
(PVOID, 'ClientGetDDEFlags'),
(PVOID, 'ClientGetDDEHookData'),
(PVOID, 'ClientGetListboxString'),
(PVOID, 'ClientGetMessageMPH'),
(PVOID, 'ClientLoadImage'),
(PVOID, 'ClientLoadLibrary'),
(PVOID, 'ClientLoadMenu'),
(PVOID, 'ClientLoadLocalT1Fonts'),
(PVOID, 'ClientPSMTextOut'),
(PVOID, 'ClientLpkDrawTextEx'),
(PVOID, 'ClientExtTextOutW'),
(PVOID, 'ClientGetTextExtentPointW'),
(PVOID, 'ClientCharToWchar'),
(PVOID, 'ClientAddFontResourceW'),
(PVOID, 'ClientThreadSetup'),
(PVOID, 'ClientDeliverUserApc'),
(PVOID, 'ClientNoMemoryPopup'),
(PVOID, 'ClientMonitorEnumProc'),
(PVOID, 'ClientCallWinEventProc'),
(PVOID, 'ClientWaitMessageExMPH'),
(PVOID, 'ClientWOWGetProcModule'),
(PVOID, 'ClientWOWTask16SchedNotify'),
(PVOID, 'ClientImmLoadLayout'),
(PVOID, 'ClientImmProcessKey'),
(PVOID, 'fnIMECONTROL'),
(PVOID, 'fnINWPARAMDBCSCHAR'),
(PVOID, 'fnGETTEXTLENGTHS2'),
(PVOID, 'fnINLPKDRAWSWITCHWND'),
(PVOID, 'ClientLoadStringW'),
(PVOID, 'ClientLoadOLE'),
(PVOID, 'ClientRegisterDragDrop'),
(PVOID, 'ClientRevokeDragDrop'),
(PVOID, 'fnINOUTMENUGETOBJECT'),
(PVOID, 'ClientPrinterThunk'),
(PVOID, 'fnOUTLPCOMBOBOXINFO2'),
(PVOID, 'fnOUTLPSCROLLBARINFO'),
(PVOID, 'fnINLPUAHDRAWMENU2'),
(PVOID, 'fnINLPUAHDRAWMENUITEM'),
(PVOID, 'fnINLPUAHDRAWMENU3'),
(PVOID, 'fnINOUTLPUAHMEASUREMENUITEM'),
(PVOID, 'fnINLPUAHDRAWMENU4'),
(PVOID, 'fnOUTLPTITLEBARINFOEX'),
(PVOID, 'fnTOUCH'),
(PVOID, 'fnGESTURE'),
(PVOID, 'fnPOPTINLPUINT4'),
(PVOID, 'fnPOPTINLPUINT5'),
(PVOID, 'xxxClientCallDefaultInputHandler'),
(PVOID, 'fnEMPTY'),
(PVOID, 'ClientRimDevCallback'),
(PVOID, 'xxxClientCallMinTouchHitTestingCallback'),
(PVOID, 'ClientCallLocalMouseHooks'),
(PVOID, 'xxxClientBroadcastThemeChange'),
(PVOID, 'xxxClientCallDevCallbackSimple'),
(PVOID, 'xxxClientAllocWindowClassExtraBytes'),
(PVOID, 'xxxClientFreeWindowClassExtraBytes'),
(PVOID, 'fnGETWINDOWDATA'),
(PVOID, 'fnINOUTSTYLECHANGE2'),
(PVOID, 'fnHkINLPMOUSEHOOKSTRUCTEX2'),
]
class PEB(pstruct.type, versioned):
'''
0x0098 NT 3.51
0x0150 NT 4.0
0x01E8 Win2k
0x020C XP
0x0230 WS03
0x0238 Vista
0x0240 Win7_BETA
0x0248 Win6
0x0250 Win8
0x045C Win10
'''
class BitField(pbinary.flags):
_fields_ = [
(1, 'ImageUsesLargePages'),
(1, 'IsProtectedProcess'),
(1, 'IsLegacyProcess'),
(1, 'IsImageDynamicallyRelocated'),
(1, 'SkipPatchingUser32Forwarders'),
(1, 'SpareBits'),
]
class CrossProcessFlags(pbinary.flags):
_fields_ = [
(1, 'ProcessInJob'),
(1, 'ProcessInitializing'),
(1, 'ProcessUsingVEH'),
(1, 'ProcessUsingVCH'),
(1, 'ProcessUsingFTH'),
(27, 'ReservedBits0'),
]
class NtGlobalFlag(pbinary.flags):
def __init__(self, **attrs):
super(PEB.NtGlobalFlag, self).__init__(**attrs)
f = []
f.extend([
(1, 'FLG_STOP_ON_EXCEPTION'), # 0x00000001
(1, 'FLG_SHOW_LDR_SNAPS'), # 0x00000002
(1, 'FLG_DEBUG_INITIAL_COMMAND'), # 0x00000004
(1, 'FLG_STOP_ON_HUNG_GUI'), # 0x00000008
(1, 'FLG_HEAP_ENABLE_TAIL_CHECK'), # 0x00000010
(1, 'FLG_HEAP_ENABLE_FREE_CHECK'), # 0x00000020
(1, 'FLG_HEAP_VALIDATE_PARAMETERS'), # 0x00000040
(1, 'FLG_HEAP_VALIDATE_ALL'), # 0x00000080
(1, 'FLG_POOL_ENABLE_TAIL_CHECK'), # 0x00000100
(1, 'FLG_POOL_ENABLE_FREE_CHECK'), # 0x00000200
(1, 'FLG_POOL_ENABLE_TAGGING'), # 0x00000400
(1, 'FLG_HEAP_ENABLE_TAGGING'), # 0x00000800
(1, 'FLG_USER_STACK_TRACE_DB'), # 0x00001000
(1, 'FLG_KERNEL_STACK_TRACE_DB'), # 0x00002000
(1, 'FLG_MAINTAIN_OBJECT_TYPELIST'), # 0x00004000
(1, 'FLG_HEAP_ENABLE_TAG_BY_DLL'), # 0x00008000
(1, 'FLG_IGNORE_DEBUG_PRIV'), # 0x00010000
(1, 'FLG_ENABLE_CSRDEBUG'), # 0x00020000
(1, 'FLG_ENABLE_KDEBUG_SYMBOL_LOAD'), # 0x00040000
(1, 'FLG_DISABLE_PAGE_KERNEL_STACKS'), # 0x00080000
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) < sdkddkver.NTDDI_WINXP:
f.append((1, 'FLG_HEAP_ENABLE_CALL_TRACING')) #0x00100000
else:
f.append((1, 'FLG_ENABLE_SYSTEM_CRIT_BREAKS')) #0x00100000
f.extend([
(1, 'FLG_HEAP_DISABLE_COALESCING'), # 0x00200000
(1, 'FL |
drolando/SoftDev | tests/fife_test/tests/MultiPathfinderTest.py | Python | lgpl-2.1 | 7,856 | 0.032332 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
import random
from fife import fife
from fife.extensions import pychan
from fife.extensions.pychan.tools import callbackWithArguments as cbwa
from fife.extensions.fife_timer import Timer
import scripts.test as test
class KeyListener(fife.IKeyListener):
def __init__(self, test):
self._engine = test._engine
self._test = test
self._eventmanager = self._engine.getEventManager()
fife.IKeyListener.__init__(self)
def keyPressed(self, evt):
keyval = evt.getKey().getValue()
keystr = evt.getKey().getAsString().lower()
if keystr == "t":
r = self._test._camera.getRenderer('GridRenderer')
r.setEnabled(not r.isEnabled())
def keyReleased(self, evt):
pass
class MouseListener(fife.IMouseListener):
def __init__(self, test):
self._engine = test._engine
self._test = test
self._eventmanager = self._engine.getEventManager()
fife.IMouseListener.__init__(self)
def mousePressed(self, event):
if event.isConsumedByWidgets():
return
clickpoint = fife.ScreenPoint(event.getX(), event.getY())
self._test.movePlayer(clickpoint)
def mouseReleased(self, event):
pass
def mouseMoved(self, event):
self._test.mouseMoved(event)
def mouseEntered(self, event):
pass
def mouseExited(self, event):
pass
def mouseClicked(self, event):
pass
def mouseWheelMovedUp(self, event):
self._test.setZoom(-0.1)
def mouseWheelMovedDown(self, event):
self._test.setZoom(0.1)
def mouseDragged(self, event):
pass
class InstanceActionListener(fife.InstanceActionListener):
def __init__(self, test):
self._engine = te | st._engine
self._test = test
fife.InstanceActionListener.__init__(self)
def onInstanceActionFinished(self, instance, action):
instance.move('walk', self._test.createRandomTarget(), 4.0)
def onInstanceActionCancelled(self, instance, action):
pass
def onInstanceActionFr | ame(self, instance, action, frame):
pass
class MultiPathfinderTest(test.Test):
def create(self, engine, application):
self._application = application
self._engine = engine
self._running = False
self._loader = fife.MapLoader(self._engine.getModel(),
self._engine.getVFS(),
self._engine.getImageManager(),
self._engine.getRenderBackend())
self._eventmanager = self._engine.getEventManager()
self._imagemanager = self._engine.getImageManager()
def destroy(self):
#any left over cleanup here
pass
def run(self):
random.seed()
self._running = True
self._mouselistener = MouseListener(self)
self._eventmanager.addMouseListener(self._mouselistener)
self._keylistener = KeyListener(self)
self._eventmanager.addKeyListener(self._keylistener)
self._actionlistener = InstanceActionListener(self)
self._font = pychan.internal.get_manager().createFont("data/fonts/rpgfont.png")
if self._font is None:
raise InitializationError("Could not load font %s" % name)
self.loadMap("data/maps/multipathfinder_grassland.xml")
def stop(self):
self._running = False
self._engine.getModel().deleteMap(self._map)
self._engine.getModel().deleteObjects()
self._eventmanager.removeMouseListener(self._mouselistener)
self._eventmanager.removeKeyListener(self._keylistener)
del self._mouselistener
del self._keylistener
def isRunning(self):
return self._running
def getName(self):
return "MultiPathfinderTest"
def getAuthor(self):
return "helios"
def getDescription(self):
return "Use this as a template for more complicated tests."
def getHelp(self):
return open( 'data/help/MultiPathfinderTest.txt', 'r' ).read()
def pump(self):
"""
This gets called every frame that the test is running. We have nothing
to do here for this test.
"""
pass
def loadMap(self, filename):
"""
Simple function to load and display a map file. We could of course
have passed in the map filename but I'll leave that up to you.
@param filename The filename.
"""
self._mapfilename = filename
if self._loader.isLoadable(self._mapfilename):
self._map = self._loader.load(self._mapfilename)
self._mapLoaded = True
self._camera = self._map.getCamera("camera1")
self._actorlayer = self._map.getLayer("item_layer")
self._groundlayer = self._map.getLayer("ground_layer")
self._player = self._actorlayer.getInstance("player")
self._frigate1 = self._actorlayer.getInstance("frigate1")
self._frigate1.actOnce("stand", self._frigate1.getFacingLocation())
self._frigate1.addActionListener(self._actionlistener)
self._frigate2 = self._actorlayer.getInstance("frigate2")
self._frigate2.actOnce("stand", self._frigate2.getFacingLocation())
self._frigate2.addActionListener(self._actionlistener)
self._camera.setLocation(self._player.getLocation())
self._camera.attach(self._player)
self._instance_renderer = fife.InstanceRenderer.getInstance(self._camera)
cellrenderer = fife.CellRenderer.getInstance(self._camera)
cellrenderer.addActiveLayer(self._actorlayer)
cellrenderer.setEnabledBlocking(True)
cellrenderer.setEnabledPathVisual(True)
cellrenderer.addPathVisual(self._player)
cellrenderer.addPathVisual(self._frigate1)
cellrenderer.addPathVisual(self._frigate2)
cellrenderer.setEnabled(True)
coordinaterenderer = fife.CoordinateRenderer.getInstance(self._camera)
coordinaterenderer.setFont(self._font)
coordinaterenderer.addActiveLayer(self._actorlayer)
#coordinaterenderer.setEnabled(True)
gridrenderer = self._camera.getRenderer('GridRenderer')
gridrenderer.activateAllLayers(self._map)
def mouseMoved(self, event):
self._instance_renderer.removeOutlined(self._player)
pt = fife.ScreenPoint(event.getX(), event.getY())
instances = self._camera.getMatchingInstances(pt, self._actorlayer)
for i in instances:
if i.getId() == "player":
self._instance_renderer.addOutlined(i, 173, 255, 47, 2, 250)
break
def setZoom(self, zoom):
self._camera.setZoom(self._camera.getZoom() + zoom)
def getLocationAt(self, screenpoint):
"""
Query the main camera for the Map location (on the actor layer)
that a screen point refers to.
@param screenpoint A fife.ScreenPoint
"""
target_mapcoord = self._camera.toMapCoordinates(screenpoint, False)
target_mapcoord.z = 0
location = fife.Location(self._actorlayer)
location.setMapCoordinates(target_mapcoord)
return location
def createRandomTarget(self):
x = random.randint(-13, 22)
y = random.randint(-28, 13)
mc = fife.ModelCoordinate(x,y)
location = fife.Location(self._actorlayer)
location.setLayerCoordinates(mc)
return location
def movePlayer(self, screenpoint):
"""
Simple function that moves the player instance to the given screenpoint.
@param screenpoint A fife.ScreenPoint
"""
self._player.move('walk', self.getLocationAt(screenpoint), 4.0)
|
saltastro/timDIMM | weather.py | Python | bsd-3-clause | 3,654 | 0.000547 | #!/usr/bin/env python
import sys
import html5lib
import urllib2
from numpy import median, array
from xml_icd import parseICD
from html5lib import treebuilders
def salt():
wx = {}
try:
tcs = parseICD("http://icd.salt/xml/salt-tcs-icd.xml")
time = t | cs['tcs xml time info']
bms = tcs['bms external conditions']
temps = bms['Te | mperatures']
wx["Temp"] = median(array(temps.values()))
wx["Temp 2m"] = temps["2m"]
wx["Temp 30m"] = temps["30m"]
# get time
wx["SAST"] = time["SAST"].split()[1]
wx["Date"] = time["SAST"].split()[0]
# set up other values of interest
wx["Air Pressure"] = bms["Air pressure"] * 10.0
wx["Dewpoint"] = bms["Dewpoint"]
wx["RH"] = bms["Rel Humidity"]
wx["Wind Speed (30m)"] = bms["Wind mag 30m"] * 3.6
wx["Wind Speed"] = bms["Wind mag 10m"] * 3.6
wx["Wind Dir (30m)"] = bms["Wind dir 30m"]
wx["Wind Dir"] = bms["Wind dir 10m"]
wx["T - DP"] = wx["Temp 2m"] - bms["Dewpoint"]
wx["Raining"] = bms["Rain detected"]
return wx
except:
return False
def wasp():
wx = {}
try:
p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
doc = p.parse(urllib2.urlopen("http://swaspgateway.suth/",
timeout=1).read())
t = doc.getElementsByTagName("table")[0]
tds = t.getElementsByTagName("td")
wx["Temp"] = float(tds[7].firstChild.nodeValue)
if tds[10].firstChild.nodeValue == "RAIN":
wx["Sky"] = "Rain"
wx["Sky Temp"] = wx["Temp"]
else:
sky, stemp = tds[10].firstChild.nodeValue.split('(')
stemp = stemp[0:-1]
wx["Sky"] = sky
wx["Sky Temp"] = stemp
wx["T - DP"] = float(tds[9].firstChild.nodeValue)
wx["RH"] = float(tds[8].firstChild.nodeValue)
tds[6].normalize()
wx["Wind Dir"] = tds[6].firstChild.nodeValue[1:]
wx["Wind Speed"] = float(tds[5].firstChild.nodeValue)
rain = tds[4].firstChild.nodeValue
if rain == "DRY":
wx["Raining"] = False
else:
wx["Raining"] = True
wx["UT"] = tds[3].firstChild.nodeValue.strip()
tds[31].normalize()
wx["Status"] = tds[31].firstChild.nodeValue.strip()
return wx
except:
return False
def grav():
wx = {}
p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
kan11 = p.parse(urllib2.urlopen("http://sg1.suth/tmp/kan11.htm",
timeout=1).read())
kan16 = p.parse(urllib2.urlopen("http://sg1.suth/tmp/kan16.htm",
timeout=1).read())
kan11_tds = kan11.getElementsByTagName("td")
kan16_tds = kan16.getElementsByTagName("td")
wx["Date"], wx["UT"] = kan11_tds[12].firstChild.nodeValue.split()
kan11_tds[14].normalize()
kan11_tds[15].normalize()
wx["Temp"] = float(kan11_tds[14].firstChild.nodeValue)
wx["RH"] = float(kan11_tds[15].firstChild.nodeValue)
kan16_tds[13].normalize()
kan16_tds[14].normalize()
wx["Wind Dir"] = int(kan16_tds[13].firstChild.nodeValue)
wx["Wind Speed"] = float(kan16_tds[14].firstChild.nodeValue) * 3.6
return wx
if __name__ == '__main__':
if len(sys.argv) == 1:
print "Usage: weather.py <salt|wasp|grav>"
else:
wx = eval("%s()" % sys.argv[1].lower())
if wx:
for k, v in sorted(wx.items()):
print "%20s : \t %s" % (k, v)
else:
print "No information received."
|
dreamibor/Algorithms-and-Data-Structures-Using-Python | practice/implementation/two_pointers/sort_colors.py | Python | gpl-3.0 | 2,266 | 0.008392 | """
Two pointers - Sort Colours / Dutch National Flag Problem (medium)
Description:
Given an array containing 0s, 1s and 2s, sort the array in-place. You should treat numbers
of the array as objects, hence, we can’t count 0s, 1s, and 2s to recreate the array.
The flag of the Netherlands consists of three colors: red, white and blue; and since our
input array also consists of three different numbers that is why it is called Dutch National
Flag problem. It's first proposed by computer scientist Edsger Wybe Dijkstra.
Example:
Input | : [2, 0, 2, 1, 1, 0]
Output: [0, 0, 1, 1, 2, 2]
Notes:
1. We shall modify the array in-place.
Time Complexity - O(N) - N is the total number of elements in the given array.
Space Complexity - O(1) - Constant space complexity.
LeetCode link: https://leetcode-cn.com/problems/sort-colors/
"""
def sort_colours(nums:list) -> list:
""" Two pointers - similar to the idea of `partition` in quick sort.
| Initially, we set two pointers called `low` and `high` which are pointing at the first
element and the last element in the array.
While iterating, we will move all 0s before low and all 2s after high, so in the end,
all 1s will be between low and high.
Since the high pointer is moving from right to left, so when we iterate the array from
left to right, if we've passed the position of high, then the iteration shall stop.
It's worthy to note that if we've found 2 and exchanged the position, we don't need to
increment i since the exchanged value to index i could be 0, 1 or 2, so we need to check
it again.
"""
low = 0
high = len(nums) - 1
i = 0
while i <= high:
if nums[i] == 0:
nums[i], nums[low] = nums[low], nums[i]
# Increment low and i
low += 1
i += 1
elif nums[i] == 2:
nums[i], nums[high] = nums[high], nums[i]
# Decrement only high, since after swap, the number at index
# i could be 0, 1, 2, so we will go through another loop.
high -= 1
else:
# numss[i] == 1
i += 1
return nums
if __name__ == "__main__":
print(sort_colours([1, 0, 2, 1, 0]))
print(sort_colours([2, 2, 0, 1, 2, 0]))
|
FlexBE/generic_flexbe_states | flexbe_utility_states/src/flexbe_utility_states/publish_twist_state.py | Python | bsd-3-clause | 850 | 0.025882 | #!/usr/bin/env python
from flexbe_core import EventState, Logger
import rospy
from flexbe_core.proxy import ProxyPublisher
from geometry_msgs.msg import Twist
"""Created on June. 21, 2017
@author: Alireza Hosseini
"""
class PublishTwistState(EventState):
"""
Publishes a velocity command from userdata.
-- t | opic string Topic to which the velocity command will be published.
># twist Twist Velocity command to be published.
<= done Velcoity command has been published.
"""
def __init__(self, topic):
"""Cons | tructor"""
super(PublishTwistState, self).__init__(outcomes=['done'],
input_keys=['twist'])
self._topic = topic
self._pub = ProxyPublisher({self._topic: Twist})
def execute(self, userdata):
return 'done'
def on_enter(self, userdata):
self._pub.publish(self._topic, userdata.twist)
|
winkidney/wei-dev | setup.py | Python | gpl-2.0 | 329 | 0 | from | setuptools import setup, find_packages
setup(
name="wei-dev",
version="0.2.0",
packages=find_packages(),
description="DevTool for weichat development and testing, "
| "GUI and CLI tool and testing util-library included.",
install_requires=(
"cmdtree",
"requests",
),
)
|
patrickleweryharris/anagram-solver | anagram_solver/__main__.py | Python | mit | 145 | 0 | # -*- | coding: utf-8 -*-
"""anagram_solver.__main__: executed when directory is called as script."""
from .anagram_solv | er import main
main()
|
yarocoder/radwatch-analysis | Isotopic_Abudance.py | Python | mit | 25,349 | 0.000039 | """Isotopic Abundances for each isotope"""
class Natural_Isotope(object):
def __init__(self, symbol, mass_number, mass, isotopic_abundance,
cross_section):
self.symbol = symbol
self.mass_number = mass_number
self.mass = mass
self.isotopic_abundance = .01 * isotopic_abundance
self.cross_section = cross_section
Hydrogen_1 = Natural_Isotope("H", 1, 1.007825, 99.9885, 0.332015)
Hydrogen_2 = Natural_Isotope("H", 2, 2.014102, 0.0115, 0.000505706)
Helium_3 = Natural_Isotope("He", 3, 3.016029, 0.000137, 5.49873e-05)
Helium_4 = Natural_Isotope("He", 4, 4.002603, 99.999893, 0)
Lithium_6 = Natural_Isotope("Li", 6, 6.015122, 7.59, 0.0385003)
Lithium_7 = Natural_Isotope("Li", 7, 7.016004, 92.41, 0.045402)
Beryllium_9 = Natural_Isotope("Be", 9, 9.012182, 100, 0.0100308)
Boron_10 = Natural_Isotope("B", 10, 10.012937, 19.9, 0.499871)
Boron_11 = Natural_Isotope("B", 11, 11.009305, 80.1, 0.00550004)
Carbon_12 = Natural_Isotope("C", 12, 12, 98.93, 0.0035)
Carbon_13 = Natural_Isotope("C", 13, 13.003355, 1.07, 0.0014)
Nitrogen_14 = Natural_Isotope("N", 14, 14.003074, 99.632, 0.0749913)
Nitrogen_15 = Natural_Isotope("N", 15, 15.000109, 0.368, 2.40099e-05)
Oxygen_16 = Natural_Isotope("O", 16, 15.994915, 99.757, 0.000189986)
Oxygen_17 = Natural_Isotope("O", 17, 16.999132, 0.038, 0.00382862)
Oxygen_18 = Natural_Isotope("O", 18, 17.9916, 0.205, 0.00016)
Fluorine_19 = Natural_Isotope("F", 19, 18.998403, 100, 0.00957834)
Neon_20 = Natural_Isotope("Ne", 20, 19.99244, 90.48, 0.040)
Neon_21 = Natural_Isotope("Ne", 21, 20.993847, 0.27, 0.7)
Neon_22 = Natural_Isotope("Ne", 22, 21.991386, 9.25, 0.05)
Sodium_23 = Natural_Isotope("Na", 23, 22.98977, 100, 0.528001)
Magnesium_24 = Natural_Isotope("Mg", 24, 23.985042, 78.99, 0.0502894)
Magnesium_25 = Natural_Isotope("Mg", 25, 24.985837, 10, 0.190374)
Magnesium_26 = Natural_Isotope("Mg", 26, 25.982593, 11.01, 0.03831)
Aluminum_27 = Natural_Isotope("Al", 27, 26.981538, 100, 0.233463)
Silicon_28 = Natural_Isotope("Si", 28, 27.976927, 92.2297, 0.169141)
Silicon_29 = Natural_Isotope("Si", 29, 28.976495, 4.6832, 0.119961)
Silicon_30 = Natural_Isotope("Si", 30, 29.97377, 3.0872, 0.107085)
Phosphorus_31 = Natural_Isotope("P", 31, 30.973762, 100, 0.169361)
Sulfur_32 = Natural_Isotope("S", 32, 31.972071, 94.93, 0.528215)
Sulfur_33 = Natural_Isotope("S", 33, 32.971458, 0.76, 0.350075)
Sulfur_34 = Natural_Isotope("S", 34, 33.967867, 4.29, 0.223618)
Sulfur_36 = Natural_Isotope("S", 36, 35.967081, 0.02, 0.150482)
Chlorine_35 = Natural_Isotope("Cl", 35, 34.968853, 75.78, 43.6122)
Chlorine_37 = Natural_Isotope("Cl", 37, 36.965903, 24.22, 0.43311)
Argon_36 = Natural_Isotope("Ar", 36, 35.967546, 0.3365, 5.04467)
Argon_38 = Natural_Isotope("Ar", 38, 37.962732, 0.0632, 0.80184)
Argon_40 = Natural_Isotope("Ar", 40, 39.962383, 99.6003, 0.660152)
Potassium_39 = Natural_Isotope("K", 39, 38.963707, 93.2581, 2.12742)
Potassium_40 = Natural_Isotope("K", 40, 39.963999, 0.0117, 30.0098)
Potassium_41 = Natural_Isotope("K", 41, 40.961826, 6.7302, 1.46113)
Calcium_40 = Natural_Isotope("Ca", 40, 39.962591, 96.941, 0.407588)
Calcium_42 = Natural_Isotope("Ca", 42, 41.958618, 0.647, 0.683094)
Calcium_43 = Natural_Isotope("Ca", 43, 42.958768, 0.135, 11.6649)
Calcium_44 = Natural_Isotope("Ca", 44, 43.955481, 2.086, 0.888633)
Calcium_46 = Natural_Isotope("Ca", 46, 45.953693, 0.004, 0.740179)
Calcium_48 = Natural_Isotope("Ca", 48, 47.952534, 0.187, 1.09293)
Scandium_45 = Natural_Isotope("Sc", 45, 44.96691, 100, 27.1628)
Titanium_46 = Natural_Isotope("Ti", 46, 45.952629, 8.25, 0.589748)
Titanium_47 = Natural_Isotope("Ti", 47, 46.951764, 7.44, 1.62638)
Titanium_48 = Natural_Isotope("Ti", 48, 47.947947, 73.72, 8.31791)
Titanium_49 = Natural_Isotope("Ti", 49, 48.947871, 5.41, 1.86282)
Titanium_50 = Natural_Isotope("Ti", 50, 49.944792, 5.18, 0.179537)
Vanadium_50 = Natural_Isotope("V", 50, 49.947163, 0.25, 44.6849)
Vanadium_51 = Natural_Isotope("V", 51, 50.943964, 99.75, 4.91912)
Chromium_50 = Natural_Isotope("Cr", 50, 49.94605, 4.345, 15.4049)
Chromium_52 = Natural_Isotope("Cr", 52, 51.940512, 83.789, 0.856093)
Chromium_53 = Natural_Isotope("Cr", 53, 52.940654, 9.501, 18.0927)
Chromium_54 = Natural_Isotope("Cr", 54, 53.93 | 8885, 2.365, 0.411198)
Manganese_55 = Natural_Isotope("Mn", 55, 54.93805, 100, 13.2784)
Iron_54 = Natural_Isotope("Fe", 54, 53.939615, 5.845, 2.25 | 193)
Iron_56 = Natural_Isotope("Fe", 56, 55.934942, 91.754, 2.58936)
Iron_57 = Natural_Isotope("Fe", 57, 56.935399, 2.119, 2.42654)
Iron_58 = Natural_Isotope("Fe", 58, 57.93328, 0.282, 1.14965)
Cobalt_59 = Natural_Isotope("Co", 59, 58.9332, 100, 37.1837)
Nickel_58 = Natural_Isotope("Ni", 58, 57.935348, 68.0769, 4.22661)
Nickel_60 = Natural_Isotope("Ni", 60, 59.930791, 26.2231, 2.40101)
Nickel_61 = Natural_Isotope("Ni", 61, 60.93106, 1.1399, 2.5094)
Nickel_62 = Natural_Isotope("Ni", 62, 61.928349, 3.6345, 14.9058)
Nickel_64 = Natural_Isotope("Ni", 64, 63.92797, 0.9256, 1.48038)
Copper_63 = Natural_Isotope("Cu", 63, 62.929601, 69.17, 4.47031)
Copper_65 = Natural_Isotope("Cu", 65, 64.927794, 30.93, 2.14927)
Zinc_64 = Natural_Isotope("Zn", 64, 63.929147, 48.63, 0.787472)
Zinc_66 = Natural_Isotope("Zn", 66, 65.926037, 27.9, 0.617964)
Zinc_67 = Natural_Isotope("Zn", 67, 66.927131, 4.1, 7.47184)
Zinc_68 = Natural_Isotope("Zn", 68, 67.924848, 18.75, 1.0655)
Zinc_70 = Natural_Isotope("Zn", 70, 69.925325, 0.62, 0.0917385)
Gallium_69 = Natural_Isotope("Ga", 69, 68.925581, 60.108, 1.73069)
Gallium_71 = Natural_Isotope("Ga", 71, 70.924705, 39.892, 4.73143)
Germanium_70 = Natural_Isotope("Ge", 70, 69.92425, 20.84, 3.05256)
Germanium_72 = Natural_Isotope("Ge", 72, 71.922076, 27.45, 0.885938)
Germanium_73 = Natural_Isotope("Ge", 73, 72.923459, 7.73, 14.705)
Germanium_74 = Natural_Isotope("Ge", 74, 73.921178, 36.28, 0.519036)
Germanium_76 = Natural_Isotope("Ge", 76, 75.921403, 7.61, 0.154659)
Arsenic_75 = Natural_Isotope("As", 75, 74.921596, 100, 4.50161)
Selenium_74 = Natural_Isotope("Se", 74, 73.922477, 0.89, 51.8151)
Selenium_76 = Natural_Isotope("Se", 76, 75.919214, 9.37, 85.0218)
Selenium_77 = Natural_Isotope("Se", 77, 76.919915, 7.63, 42.0096)
Selenium_78 = Natural_Isotope("Se", 78, 77.91731, 23.77, 0.430126)
Selenium_80 = Natural_Isotope("Se", 80, 79.91652, 49.61, 0.610202)
Selenium_82 = Natural_Isotope("Se", 81, 81.9167, 8.73, 0.0444242)
Bromine_79 = Natural_Isotope("Br", 79, 78.918338, 50.69, 11.0042)
Bromine_81 = Natural_Isotope("Br", 81, 80.916291, 49.31, 2.3651)
Krypton_78 = Natural_Isotope("Kr", 78, 77.920386, 0.35, 6.35568)
Krypton_80 = Natural_Isotope("Kr", 80, 79.916378, 2.28, 11.5046)
Krypton_82 = Natural_Isotope("Kr", 82, 81.913485, 11.58, 19.1672)
Krypton_83 = Natural_Isotope("Kr", 83, 82.914136, 11.49, 198.19)
Krypton_84 = Natural_Isotope("Kr", 84, 83.911507, 57, 0.110022)
Krypton_86 = Natural_Isotope("Kr", 86, 85.91061, 17.3, 0.000878224)
Rubidium_85 = Natural_Isotope("Rb", 85, 84.911789, 72.17, 0.493607)
Rubidium_87 = Natural_Isotope("Rb", 86, 86.909183, 27.83, 0.120037)
Strontium_84 = Natural_Isotope("Sr", 84, 83.913425, 0.56, 0.82219)
Strontium_86 = Natural_Isotope("Sr", 86, 85.909262, 9.86, 1.00556)
Strontium_87 = Natural_Isotope("Sr", 87, 86.908879, 7, 16.0085)
Strontium_88 = Natural_Isotope("Sr", 88, 87.905848, 82.58, 0.00868793)
Yttrium_89 = Natural_Isotope("Y", 89, 88.905848, 100, 1.2787)
Zirconium_90 = Natural_Isotope("Zr", 90, 89.904704, 51.45, 0.00997575)
Zirconium_91 = Natural_Isotope("Zr", 91, 90.905645, 11.22, 1.21603)
Zirconium_92 = Natural_Isotope("Zr", 92, 91.90504, 17.15, 0.229231)
Zirconium_94 = Natural_Isotope("Zr", 94, 93.906316, 17.38, 0.0498845)
Zirconium_96 = Natural_Isotope("Zr", 96, 95.908276, 2.8, 0.0228521)
Niobium_93 = Natural_Isotope("Nb", 93, 92.906378, 100, 1.15554)
Molybdenum_92 = Natural_Isotope("Mo", 92, 91.90681, 14.84, 0.0798857)
Molybdenum_94 = Natural_Isotope("Mo", 94, 93.905088, 9.25, 0.340371)
Molybdenum_95 = Natural_Isotope("Mo", 95, 94.905841, 15.92, 13.3957)
Molybdenum_96 = Natural_Isotope("Mo", 96, 95.904679, 16.68, 0.595576)
Molybdenum_97 = Natural_Isotope("Mo", 97, 96.906021, 9.55, 2.19677)
Molybdenum_98 = Natural_Isotope("Mo", 98, 97.905408, 24.13, 0.130026)
Molybdenum_100 = Natural |
jpacg/su-binary | jni/selinux/gui/mappingsPage.py | Python | gpl-2.0 | 1,873 | 0.009076 | ## mappingsPage.py - show selinux mappings
## Copyright (C) 2006 Red Hat, Inc.
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
## Author: Dan Walsh
import string
import gtk
import gtk.glade
import os
import gobject
import sys
import seobject
##
## I18N
##
PROGNAME = "policycoreutils"
# Install the gettext translation function _() into builtins, falling back
# to an identity "translation" when the message catalog cannot be set up.
try:
    import gettext
    kwargs = {}
    if sys.version_info < (3,):
        # Python 2 gettext needs unicode=True so _() returns unicode objects.
        kwargs['unicode'] = True
    gettext.install(PROGNAME,
                    localedir="/usr/share/locale",
                    codeset='utf-8',
                    **kwargs)
except:
    # NOTE(review): bare except deliberately swallows any gettext failure
    # and degrades to a no-op _() so the GUI still starts untranslated.
    try:
        import builtins
        builtins.__dict__['_'] = str
    except ImportError:
        import __builtin__
        __builtin__.__dict__['_'] = unicode
class loginsPage:
    """Populates the SELinux login-mappings tree view from seobject records."""

    def __init__(self, xml):
        """Wire the "mappingsView" widget to a sorted ListStore and print
        the current login mappings.

        xml: the gtk.glade XML tree that contains the "mappingsView" widget.
        """
        self.xml = xml
        self.view = xml.get_widget("mappingsView")
        # Three string columns: login name, SELinux user, MLS/MCS range.
        self.store = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING)
        self.store.set_sort_column_id(0, gtk.SORT_ASCENDING)
        self.view.set_model(self.store)
        # NOTE(review): loginRecords and translate are not defined in this
        # module -- presumably expected from seobject; confirm the import.
        self.login = loginRecords()
        mappings = self.login.get_all(0)  # renamed from `dict`: shadowed builtin
        for k in sorted(mappings.keys()):
            print("%-25s %-25s %-25s" % (k, mappings[k][0], translate(mappings[k][1])))
mbinette91/ConstructionLCA | webapp.py | Python | gpl-2.0 | 7,974 | 0.038375 | import os
import threading
import urlparse
import time
from SimpleHTTPServer import SimpleHTTPRequestHandler
from ModelBuilder import ModelBuilder
import pickle
import sqlite3
import json
db = None  # lazily loaded counter state, persisted in ../temp.db


def GetUniqueProjectId():
    """Return a new unique project id, persisting the counter across runs.

    The counter is a pickled dict stored at ../temp.db; the module-level
    ``db`` caches it after the first call so later calls skip the read.
    """
    global db
    filename = "../temp.db"
    if not db:
        db = {'last_project_id': 0}
        if os.path.isfile(filename):
            # Load the previously persisted counter; `with` guarantees the
            # handle is closed even if unpickling raises (original leaked it).
            with open(filename, 'r') as db_file:
                db = pickle.load(db_file)
    db['last_project_id'] += 1
    # Persist immediately so ids stay unique even if the server dies.
    # NOTE(review): text-mode 'r'/'w' only works with pickle protocol 0
    # (Python 2); binary mode would be required under Python 3.
    with open(filename, 'w') as db_file:
        pickle.dump(db, db_file)
    return db['last_project_id']
class ProductTreeBuilder:
    """Folds (guid, name, ifc_class, material) rows into a nested tree:

        [[class_name, [[material, [[guid, name], ...]], ...]], ...]

    Rows must arrive ordered by class name then material (as the SQL query
    in CustomHTTPRequestHandler guarantees); grouping is purely sequential.
    Products without a material are collected under an "Others" group that
    is emitted after the typed groups of their class.
    """

    # IFC classes that never appear in the product tree.
    IGNORED_CLASSES = ["Building", "BuildingStorey", "Space"]

    def __init__(self):
        self.data = []               # finished [class, children] entries
        self.last_class_name = None  # class currently being accumulated
        self.class_data = []         # [material, products] entries of it
        self.last_class_type = None  # material currently being accumulated
        self.class_type_data = []    # [guid, name] pairs of that material
        self.undefined_data = []     # products of the class with no material

    def add_product_row(self, row):
        """Consume one (guid, name, ifc_class, material) row."""
        class_name = row[2].replace("Ifc", "")
        if class_name in ProductTreeBuilder.IGNORED_CLASSES:
            return
        material = row[3]  # renamed from `type`: shadowed builtin
        product_data = [row[0], row[1]]
        if class_name != self.last_class_name:
            self.close_class()
            self.last_class_name = class_name
        if not material:
            self.undefined_data.append(product_data)
        else:
            if material != self.last_class_type:
                self.close_class_type()
                self.last_class_type = material
            self.class_type_data.append(product_data)

    def close_class_type(self):
        """Flush the material group under construction, if any."""
        if self.class_type_data:
            self.class_data.append([self.last_class_type, self.class_type_data])
            self.class_type_data = []
        self.last_class_type = None

    def close_undefined_type(self):
        """Flush material-less products into an "Others" group."""
        if self.undefined_data:
            self.class_data.append(["Others", self.undefined_data])
            self.undefined_data = []

    def close_class(self):
        """Flush the class under construction (materials first, then "Others")."""
        self.close_class_type()
        self.close_undefined_type()
        if self.class_data:
            self.data.append([self.last_class_name, self.class_data])
            self.class_data = []
        self.last_class_name = None

    def end(self):
        """Finish building; must be called after the last row."""
        self.close_class()

    def get_tree(self):
        """Return the accumulated tree."""
        return self.data
class CustomHTTPRequestHandler(SimpleHTTPRequestHandler):
    """HTTP handler for the ConstructionLCA web app (Python 2).

    GET  /project           -> project page (HTML template)
    GET  /project/info      -> JSON: product tree / product info / properties
    POST /project           -> upload an IFC file, start the model build
    Everything else falls through to SimpleHTTPRequestHandler (static files).
    """

    def do_GET(self):
        """Route GET requests by URL path."""
        url = urlparse.urlparse(self.path)
        params = urlparse.parse_qs(url.query)
        # NOTE(review): the second test should be `elif` -- as written, a
        # request for /project runs show_project AND then falls into the
        # else branch, sending a second (static-file) response.
        if url.path == "/project":
            self.show_project(params)
        if url.path == "/project/info":
            self.get_project_info(params)
        else: # Default
            SimpleHTTPRequestHandler.do_GET(self);

    def show_project(self, query):
        """Serve the project page, substituting the project id into the
        _project.html template; redirect to / when no id is given."""
        print "Requesting /project with", query
        if 'id' not in query:
            self.send_response(302)
            self.send_header('Location', '/')
            self.end_headers()
            return;
        with open("_project.html") as f:
            response = f.read().replace('{PROJECT_ID}', query['id'][0])
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", len(response))
        self.end_headers()
        self.wfile.write(response)

    def get_project_info(self, query):
        """Serve project JSON; `get` selects tree/info/properties views.

        Requires `id` (project id) and `get`; `properties` additionally
        reads `product_id`. Redirects to / when parameters are missing.
        """
        print "Requesting /project/info with", query
        if 'id' not in query or 'get' not in query:
            self.send_response(302)
            self.send_header('Location', '/')
            self.end_headers()
            return;
        if query['get'][0] == 'tree':
            # Ordered by class/material so ProductTreeBuilder can group
            # rows sequentially.
            conn = sqlite3.connect('../database.db3')
            conn.text_factory = str
            c = conn.cursor()
            c.execute('SELECT p.guid, p.name, p.class_name, m.name FROM products p LEFT JOIN materials m ON p.id=m.product_id WHERE project_id=? ORDER BY p.class_name, m.name', (query['id'][0],))
            builder = ProductTreeBuilder();
            for row in c.fetchall():
                builder.add_product_row(row);
            builder.end();
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(json.dumps(builder.get_tree(), encoding='latin1'))
            return;
        elif query['get'][0] == 'info':
            # Flat per-product info including the (single) joined material.
            conn = sqlite3.connect('../database.db3')
            conn.text_factory = str
            c = conn.cursor()
            c.execute('SELECT guid,p.name,description,class_name,m.name,m.thickness,m.layer_name FROM products p LEFT JOIN materials m ON p.id=m.product_id WHERE project_id=?', (query['id'][0],))
            data = []
            for row in c.fetchall():
                data.append({
                    'guid': row[0],
                    'name': row[1],
                    'description': row[2],
                    'className': row[3],
                    'material': {'name': row[4], 'thickness': row[5], 'layerName': row[6]}
                })
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(json.dumps(data, encoding='latin1'))
            return;
        elif query['get'][0] == 'properties':
            # Property sets of one product, each with its name/value pairs.
            conn = sqlite3.connect('../database.db3')
            conn.text_factory = str
            c = conn.cursor()
            c.execute('SELECT ps.id, ps.name FROM products p JOIN property_set ps ON p.id=ps.product_id WHERE project_id=? AND p.guid=?', (query['id'][0],query['product_id'][0],))
            data = []
            for row in c.fetchall():
                properties = []
                c2 = conn.cursor()
                c2.execute('SELECT p.name, p.value FROM property p WHERE property_set_id=?', (row[0],))
                for prop_row in c2.fetchall():
                    properties.append({
                        'name': prop_row[0],
                        'value': prop_row[1]
                    })
                data.append({
                    'name': row[1],
                    'properties': properties
                })
                c2.close();
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(json.dumps(data, encoding='latin1'))
            return;

    def get_tree(self, query):
        """NOTE(review): unfinished -- only validates `id` then returns;
        never sends a 200 response. Appears to be dead code."""
        print "Requesting /project/tree with", query
        if 'id' not in query:
            self.send_response(302)
            self.send_header('Location', '/')
            self.end_headers()
            return;

    def do_POST(self):
        """Route POST requests; only /project (IFC upload) is handled."""
        url = urlparse.urlparse(self.path)
        params = urlparse.parse_qs(url.query)
        if url.path == "/project":
            self.new_project(params)
        else: # Default
            SimpleHTTPRequestHandler.do_POST(self);

    def new_project(self, query):
        """Create a project: store the uploaded IFC, kick off the Unity
        model build in a background thread, then redirect to the project."""
        id = str(GetUniqueProjectId());
        print "Creating new project with id =", id
        result = self._upload_ifc(id)
        if result[0]:
            thread = threading.Thread(target = self._build_unity, args = (id,)) # Start in a new thread
            thread.start();
            self.send_response(302)
            self.send_header('Location', '/project?id='+id)
            self.end_headers()
        else: # For debugging purposes only
            print result;
            self.send_response(302)
            self.send_header('Location', '/?error='+result[1])
            self.end_headers()

    def _build_unity(self, model_id):
        # This method can take a long time and should NOT be called from the main HTTPHandler's thread.
        # NOTE(review): start_time is recorded but never used.
        start_time = time.time()
        builder = ModelBuilder(model_id);
        builder.build();

    def _upload_ifc(self, id):
        """Parse a single multipart/form-data file upload into
        ../tmp/IFC_<id>.ifc; returns (ok, message)."""
        # Inspired by https://gist.github.com/UniIsland/3346170
        boundary = self.headers.plisttext.split("=")[1]
        remainbytes = int(self.headers['content-length'])
        line = self.rfile.readline()
        remainbytes -= len(line)
        if not boundary in line:
            return (False, "Content NOT begin with boundary")
        line = self.rfile.readline()
        remainbytes -= len(line)
        fn = "../tmp/IFC_" + id + ".ifc"
        # Skip the Content-Type header and the blank separator line.
        line = self.rfile.readline()
        remainbytes -= len(line)
        line = self.rfile.readline()
        remainbytes -= len(line)
        try:
            out = open(fn, 'wb')
        except IOError:
            return (False, "Can't create file to write, do you have permission to write?")
        # Write lines one behind the read cursor so the trailing CRLF
        # before the closing boundary can be stripped from the payload.
        preline = self.rfile.readline()
        remainbytes -= len(preline)
        while remainbytes > 0:
            line = self.rfile.readline()
            remainbytes -= len(line)
            if boundary in line:
                preline = preline[0:-1]
                if preline.endswith('\r'):
                    preline = preline[0:-1]
                out.write(preline)
                out.close()
                return (True, "File '%s' upload success!" % fn)
            else:
                out.write(preline)
                preline = line
        return (False, "Unexpect Ends of data.")
if __name__ == "__main__":
    # Minimal Python 2 BaseHTTPServer bootstrap serving the webapp/ dir.
    import sys
    import BaseHTTPServer
    os.chdir('webapp/')
    HandlerClass = CustomHTTPRequestHandler
    ServerClass = BaseHTTPServer.HTTPServer
    Protocol = "HTTP/1.0"
    # Local-only: binds 127.0.0.1, port 8000.
    port = 8000
    server_address = ('127.0.0.1', port)
    HandlerClass.protocol_version = Protocol
    httpd = ServerClass(server_address, HandlerClass)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()
class Solution:
    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of *s*.

        Bottom-up DP over span lengths: is_pal[i][j] is True when
        s[i..j] is a palindrome. For equal-length candidates the
        earliest (leftmost) one found at the greatest span wins,
        matching the original implementation's tie-breaking.
        """
        n = len(s)
        is_pal = [[False] * n for _ in range(n)]
        best_start, best_end = 0, 0
        for span in range(n):
            for start in range(n - span):
                end = start + span
                if s[start] != s[end]:
                    continue
                # Spans 0 and 1 need no inner check; longer spans require
                # the interior s[start+1..end-1] to be a palindrome too.
                if span >= 2 and not is_pal[start + 1][end - 1]:
                    continue
                is_pal[start][end] = True
                if best_end - best_start < span:
                    best_start, best_end = start, end
        return s[best_start:best_end + 1]
tianyang-li/de-novo-metatranscriptome-analysis--the-uniform-model | analysis/single_align_analysis_0.py | Python | gpl-3.0 | 8,259 | 0.005933 | #!/usr/bin/env python
# Copyright (C) 2012 Tianyang Li
# tmy1018@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
from __future__ import division
import getopt
import sys
from Bio import SeqIO
from single_len_est_0 import single_est_len
from short_contig_analysis_single_0 import single_uniform_contig_pval
from align_analysis_utils_0 import SeqOverlapType, SeqInterval, Chrom
from general_util import median
class SingleContig(SeqInterval):
    """A contig assembled from single-end read alignments; spans from the
    first read's low coordinate to the last read's high coordinate.
    Python 2 code (xrange); reads must be sorted by interval."""

    def __init__(self, c_reads):
        """
        c_reads is a list that contains all the reads (SeqInterval)
        that form this contig
        """
        self.reads = c_reads
        super(SingleContig, self).__init__(c_reads[0].low,
                                           c_reads[-1].high)

    def coverage(self, read_len):
        # Mean per-base coverage; true division via the module's
        # `from __future__ import division`.
        return len(self.reads) * read_len / (self.reads[-1].high
                                             - self.reads[0].low + 1)

    def est_len(self, read_len):
        # Estimated true transcript length given observed span and read count.
        return single_est_len(self.high - self.low + 1,
                              len(self.reads), read_len)

    def uniform_pval(self, read_len, precision=0.01):
        # read_pos[i] = number of reads starting at offset i from the
        # contig start; tested against a uniform start-position model.
        read_pos = [0] * (self.reads[-1].high - self.reads[0].low + 1)
        for read in self.reads:
            read_pos[read.low - self.reads[0].low] += 1
        return single_uniform_contig_pval(read_pos, len(self.reads),
                                          read_len, precision)

    def max_coverage(self):
        # Sweep over reads sorted by start: drop reads that end before the
        # current read begins, track the running overlap count's maximum.
        cur_end = 0
        cur_cover = 1
        max_cover = 1
        for cur_read in self.reads[1:]:
            while self.reads[cur_end].high < cur_read.low:
                cur_cover -= 1
                cur_end += 1
            cur_cover += 1
            if cur_cover > max_cover:
                max_cover = cur_cover
        return max_cover

    def nuc_coverage(self):
        """
        return a list of the coverage of each nuc in sequence
        """
        # NOTE(review): the xrange upper bound excludes the final base
        # (self.reads[-1].high), so the returned list has high-low entries
        # rather than high-low+1 -- looks like an off-by-one; confirm
        # against downstream median/max consumers before changing.
        nuc_cov = []
        cur_cov = 0
        read_start = 0  # index of the next read whose start we have not passed
        read_end = 0    # index of the next read whose end we have not passed
        for cur_nuc in xrange(self.reads[0].low, self.reads[-1].high):
            while (read_start < len(self.reads)
                   and cur_nuc == self.reads[read_start].low):
                cur_cov += 1
                read_start += 1
            while (read_end < len(self.reads)
                   and cur_nuc == self.reads[read_end].high + 1):
                cur_cov -= 1
                read_end += 1
            nuc_cov.append(cur_cov)
        return nuc_cov
class SingleChrom(Chrom):
    """
    Reference chromosome strand collecting single-end read alignments.
    Keyed by embl.name (e.g. U00096), not embl.id (U00096.2).
    """

    def assemble_contigs(self, d_max):
        """Group the sorted alignments into contigs.

        Consecutive reads whose start coordinates differ by at most
        d_max (read_len - kmer + 1) join the same contig; contigs
        supported by fewer than 3 reads are discarded.
        """
        if not self.aligns:
            return
        self.aligns = sorted(self.aligns, cmp=SeqInterval.interval_cmp)
        groups = []
        current = [self.aligns[0]]
        previous = self.aligns[0]
        for cur in self.aligns[1:]:
            if cur.low - previous.low <= d_max:
                current.append(cur)
            else:
                groups.append(current)
                current = [cur]
            previous = cur
        groups.append(current)
        # Build contig objects only for sufficiently supported groups.
        self.contigs = [SingleContig(grp) for grp in groups
                        if len(grp) >= 3]

    def __init__(self, embl_rec):
        super(SingleChrom, self).__init__(embl_rec)
        self.aligns = []  # SeqInterval alignments, appended by the caller
def main(args):
embl_file = None
psl_file = None
read_len = None
kmer = None
try:
opts, args = getopt.getopt(args, '', ["embl=", "psl=",
"read-len=", "kmer="])
except getopt.GetoptError as err:
print >> sys.stderr, str(err)
sys.exit(1)
for opt, arg in opts:
if opt == "--read-len":
read_len = int(arg)
if opt == "--embl":
embl_file = arg
if opt == "--psl":
psl_file = arg
if opt == "--kmer":
kmer = int(arg)
if (not embl_file
or not read_len
or not kmer
or not psl_file):
print >> sys.stderr, "missing"
sys.exit(1)
d_max = read_len - kmer + 1
chroms = {}
for embl in SeqIO.parse(embl_file, 'embl'):
chroms[embl.name] = SingleChrom(embl)
with open(psl_file) as psl_in:
for line in psl_in:
row = line.strip().split("\t")
"""
0 matches - Number of bases that match that aren't repeats
1 misMatches - Number of bases that don't match
2 repMatches - Number of bases that match but are part of repeats
3 nCount - Number of 'N' bases
4 qNumInsert - Number of inserts in query
5 qBaseInsert - Number of bases inserted in query
6 tNumInsert - Number of inserts in target
7 tBaseInsert - Number of bases inserted in target
8 strand - '+' or '-' for query strand. For translated alignments,
second '+'or '-' is for genomic strand
9 qName - Query sequence name
10 qSize - Query sequence size
11 qStart - Alignment start position in query
12 qEnd - Alignment end position in query
13 tName - Target sequence name
14 tSize - Target sequence size
15 tStart - Alignment start position in target
16 tEnd - Alignment end position in target
17 blockCount - Number of blocks in the alignment (a block
contains no gaps)
18 blockSizes - Comma-separated list of sizes of each block
19 qStarts - Comma-separated list of starting positions of
each block in query
20 tStarts - Comma-separated list of starting positions of
each block in target
"""
if (int(row[17])
and int(row[18].split(",")[0]) == read_len):
chroms[row[13]].aligns.append(SeqInterval(int(row[15]),
int(row[16]) - 1))
for chrom in chroms.itervalues():
chrom.assemble_contigs(d_max)
for contig in chrom.contigs:
nuc_coverage = contig.nuc_coverage()
est_len = contig.est_len(read_len)
def format_coverage(n_covs):
cov_str = ""
for cur_cov in n_covs:
cov_str = "%s%d," % (cov_str, cur_cov)
cov_str = "[%s]" % cov_str
return cov_str
cov_str = format_coverage(nuc_coverage)
coverage_median = median(nuc_coverage)
coverage_max = max(nuc_coverage)
for found_iv in chrom.iv_find_features(contig):
print contig.low, contig.high - contig.low + 1,
print est_len,
print found_iv.type,
print found_iv.low, found_iv.high - found_iv.low + 1,
print coverage_median, coverage_max, cov_str,
print SeqOverlapType.overlap_type(contig, found_iv)
"""
0 contig.low
1 contig.high - contig.low + 1
2 est_len
3 found_iv.type
4 found_iv.low
5 found_iv.high - found_iv.low + 1
6 coverage median
7 coverage max
8 coverage list
9 overla |
rvianello/rdkit | rdkit/Chem/Pharm2D/LazyGenerator.py | Python | bsd-3-clause | 4,307 | 0.01068 | #
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" lazy generator of 2D pharmacophore signature data
"""
from __future__ import print_function
from rdkit.Chem.Pharm2D import SigFactory, Matcher
raise NotImplementedError('not finished yet')
# NOTE(review): the module raises at import time, so everything below is
# unreachable until the raise is removed; several references inside are
# still unresolved (see notes in __init__ / GetBit).


class Generator(object):
  """Lazy 2D-pharmacophore bit generator (unfinished).

  Important attributes:
   - mol: the molecule whose signature is being worked with
   - sigFactory: the SigFactory object with signature parameters.
       NOTE: no preprocessing is carried out for _sigFactory_;
       it *must* be pre-initialized.
  """

  def __init__(self, sigFactory, mol, dMat=None, bitCache=True):
    """ constructor

      **Arguments**

        - sigFactory: a signature factory, see class docs

        - mol: a molecule, see class docs

        - dMat: (optional) a distance matrix for the molecule. If this
          is not provided, one will be calculated

        - bitCache: (optional) if nonzero, a local cache of which bits
          have been queried will be maintained. Otherwise things must
          be recalculate each time a bit is queried.

    """
    if not isinstance(sigFactory, SigFactory.SigFactory):
      raise ValueError('bad factory')
    self.sigFactory = sigFactory
    self.mol = mol
    if dMat is None:
      useBO = sigFactory.includeBondOrder
      # NOTE(review): Chem is only imported inside __main__, not at module
      # scope -- this line would NameError as written.
      dMat = Chem.GetDistanceMatrix(mol, useBO)
    self.dMat = dMat
    if bitCache:
      self.bits = {}
    else:
      self.bits = None
    featFamilies = [fam for fam in sigFactory.featFactory.GetFeatureFamilies()
                    if fam not in sigFactory.skipFeats]
    nFeats = len(featFamilies)
    featMatches = {}
    for fam in featFamilies:
      featMatches[fam] = []
    feats = sigFactory.featFactory.GetFeaturesForMol(mol)
    for feat in feats:
      if feat.GetFamily() not in sigFactory.skipFeats:
        featMatches[feat.GetFamily()].append(feat.GetAtomIds())
    # NOTE(review): the dict built above is discarded here, and
    # `pattMatches` below is never defined -- unfinished code.
    featMatches = [None] * nFeats
    for i in range(nFeats):
      featMatches[i] = sigFactory.featFactory.GetMolFeature()
    self.pattMatches = pattMatches

  def GetBit(self, idx):
    """ returns a bool indicating whether or not the bit is set
    """
    # NOTE(review): self.sig is never assigned anywhere in this class
    # (only self.sigFactory is) -- would AttributeError at runtime.
    if idx < 0 or idx >= self.sig.GetSize():
      raise IndexError('Index %d invalid' % (idx))
    if self.bits is not None and idx in self.bits:
      return self.bits[idx]

    tmp = Matcher.GetAtomsMatchingBit(self.sig, idx, self.mol, dMat=self.dMat, justOne=1,
                                      matchingAtoms=self.pattMatches)
    if not tmp or len(tmp) == 0:
      res = 0
    else:
      res = 1
    if self.bits is not None:
      self.bits[idx] = res
    return res

  def __len__(self):
    """ allows class to support len()
    """
    return self.sig.GetSize()

  def __getitem__(self, itm):
    """ allows class to support random access.
      Calls self.GetBit()
    """
    return self.GetBit(itm)
if __name__ == '__main__':
  # Benchmark: lazy per-bit generation vs. full signature generation over
  # the first 100 NCI molecules, probing 300 random bits each.
  # NOTE(review): unreachable -- the module raises NotImplementedError at
  # import time before this block can run.
  import time

  from rdkit import RDConfig, Chem
  from rdkit.Chem.Pharm2D import Gobbi_Pharm2D, Generate
  import random

  factory = Gobbi_Pharm2D.factory
  nToDo = 100
  inD = open(RDConfig.RDDataDir + "/NCI/first_5K.smi", 'r').readlines()[:nToDo]
  mols = [None] * len(inD)
  for i in range(len(inD)):
    smi = inD[i].split('\t')[0]
    # NOTE(review): strip() result is discarded -- should be
    # smi = smi.strip(); harmless only if lines carry no whitespace.
    smi.strip()
    mols[i] = Chem.MolFromSmiles(smi)

  sig = factory.GetSignature()
  nBits = 300
  # Fixed seed so both timed runs probe the same bit positions.
  random.seed(23)
  bits = [random.randint(0, sig.GetSize() - 1) for x in range(nBits)]

  print('Using the Lazy Generator')
  t1 = time.time()
  for i in range(len(mols)):
    if not i % 10:
      print('done mol %d of %d' % (i, len(mols)))
    gen = Generator(factory, mols[i])
    for bit in bits:
      v = gen[bit]
  t2 = time.time()
  print('\tthat took %4.2f seconds' % (t2 - t1))

  print('Generating and checking signatures')
  t1 = time.time()
  for i in range(len(mols)):
    if not i % 10:
      print('done mol %d of %d' % (i, len(mols)))
    sig = Generate.Gen2DFingerprint(mols[i], factory)
    for bit in bits:
      v = sig[bit]
  t2 = time.time()
  print('\tthat took %4.2f seconds' % (t2 - t1))
|
yolanother/ubuntumobidev_ubiquity | ubiquity/frontend/kde_components/testing/partman.py | Python | gpl-3.0 | 10,910 | 0 | # -*- coding: utf-8 -*-
import os
import sys
from PyQt4 import QtGui
from ubiquity.frontend.kde_components.PartMan import PartMan
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
app.setStyle("Oxygen")
PartMan._uidir = '../../../../gui/qt'
styleFile = os.path.join(PartMan._uidir, "style.qss")
with open(styleFile, 'r') as sf:
app.setStyleSheet(sf.read())
win = PartMan(None)
win.setObjectName("widgetStack")
win.show()
cache_order = [
'/var/lib/partman/devices/=dev=sda//',
'/var/lib/partman/devices/=dev=sda//32256-8167703039',
'/var/lib/partman/devices/=dev=sda//8167735296-8587192319',
'/var/lib/partman/devices/=dev=sdb//',
'/var/lib/partman/devices/=dev=sdb//32256-5074997759',
'/var/lib/partman/devices/=dev=sdb//5075030016-5362882559',
'/var/lib/partman/devices/=dev=sdc//',
'/var/lib/partman/devices/=dev=sdc//32256-5074997759',
'/var/lib/partman/devices/=dev=sdc//5075030016-53 | 62882559',
]
| def tree_device(dev, part_id=None):
prefix = '60partition_tree__________/var/lib/partman/devices/=dev='
if part_id is None:
return prefix + dev + '//'
else:
return prefix + dev + '//' + part_id
disk_cache = {
'/var/lib/partman/devices/=dev=sda//': {
'dev': '=dev=sda',
'device': '/dev/sda',
'display': tree_device('sda'),
'label': ['msdos'],
},
'/var/lib/partman/devices/=dev=sdb//': {
'dev': '=dev=sdb',
'device': '/dev/sdb',
'display': tree_device('sdb'),
'label': ['msdos'],
},
'/var/lib/partman/devices/=dev=sdc//': {
'dev': '=dev=sdc',
'device': '/dev/sdc',
'display': tree_device('sdc'),
'label': ['msdos'],
},
}
partition_cache = {
'/var/lib/partman/devices/=dev=sda//32256-8167703039': {
'can_resize': True,
'detected_filesystem': 'ext4',
'dev': '=dev=sda',
'display': tree_device('sda', '32256-8167703039'),
'id': '32256-8167703039',
'method_choices': [
('25filesystem', 'ext4', 'Ext4 journaling file system'),
('25filesystem', 'ext3', 'Ext3 journaling file system'),
('25filesystem', 'ext2', 'Ext2 file system'),
('25filesystem', 'btrfs', 'btrfs journaling file system'),
('25filesystem', 'jfs', 'JFS journaling file system'),
('25filesystem', 'xfs', 'XFS journaling file system'),
('25filesystem', 'fat16', 'FAT16 file system'),
('25filesystem', 'fat32', 'FAT32 file system'),
('40swap', 'swap', 'swap area'),
('70dont_use', 'dontuse', 'do not use the partition'),
],
'parent': '/dev/sda',
'parted': {
'fs': 'ext4',
'id': '32256-8167703039',
'name': '',
'num': '1',
'path': '/dev/sda1',
'size': '8167670784',
'type': 'primary',
},
'resize_max_size': 8167670784,
'resize_min_size': 2758852608,
'resize_pref_size': 8167670784,
},
'/var/lib/partman/devices/=dev=sda//8167735296-8587192319': {
'can_resize': True,
'detected_filesystem': 'linux-swap',
'dev': '=dev=sda',
'display': tree_device('sda', '8167735296-8587192319'),
'id': '8167735296-8587192319',
'method': 'swap',
'method_choices': [
('25filesystem', 'ext4', 'Ext4 journaling file system'),
('25filesystem', 'ext3', 'Ext3 journaling file system'),
('25filesystem', 'ext2', 'Ext2 file system'),
('25filesystem', 'btrfs', 'btrfs journaling file system'),
('25filesystem', 'jfs', 'JFS journaling file system'),
('25filesystem', 'xfs', 'XFS journaling file system'),
('25filesystem', 'fat16', 'FAT16 file system'),
('25filesystem', 'fat32', 'FAT32 file system'),
('40swap', 'swap', 'swap area'),
('70dont_use', 'dontuse', 'do not use the partition'),
],
'parent': '/dev/sda',
'parted': {
'fs': 'linux-swap',
'id': '8167735296-8587192319',
'name': '',
'num': '5',
'path': '/dev/sda5',
'size': '419457024',
'type': 'logical',
},
'resize_max_size': 419457024,
'resize_min_size': 4096,
'resize_pref_size': 419457024,
},
'/var/lib/partman/devices/=dev=sdb//32256-5074997759': {
'can_resize': True,
'detected_filesystem': 'ext4',
'dev': '=dev=sdb',
'display': tree_device('sdb', '32256-5074997759'),
'id': '32256-5074997759',
'method_choices': [
('25filesystem', 'ext4', 'Ext4 journaling file system'),
('25filesystem', 'ext3', 'Ext3 journaling file system'),
('25filesystem', 'ext2', 'Ext2 file system'),
('25filesystem', 'btrfs', 'btrfs journaling file system'),
('25filesystem', 'jfs', 'JFS journaling file system'),
('25filesystem', 'xfs', 'XFS journaling file system'),
('25filesystem', 'fat16', 'FAT16 file system'),
('25filesystem', 'fat32', 'FAT32 file system'),
('40swap', 'swap', 'swap area'),
('70dont_use', 'dontuse', 'do not use the partition'),
],
'parent': '/dev/sdb',
'parted': {
'fs': 'ext4',
'id': '32256-5074997759',
'name': '',
'num': '1',
'path': '/dev/sdb1',
'size': '5074965504',
'type': 'primary',
},
'resize_max_size': 5074965504,
'resize_min_size': 223924224,
'resize_pref_size': 5074965504,
},
'/var/lib/partman/devices/=dev=sdb//5075030016-5362882559': {
'can_resize': True,
'detected_filesystem': 'linux-swap',
'dev': '=dev=sdb',
'display': tree_device('sdb', '5075030016-5362882559'),
'id': '5075030016-5362882559',
'method': 'swap',
'method_choices': [
('25filesystem', 'ext4', 'Ext4 journaling file system'),
('25filesystem', 'ext3', 'Ext3 journaling file system'),
('25filesystem', 'ext2', 'Ext2 file system'),
('25filesystem', 'btrfs', 'btrfs journaling file system'),
('25filesystem', 'jfs', 'JFS journaling file system'),
('25filesystem', 'xfs', 'XFS journaling file system'),
('25filesystem', 'fat16', 'FAT16 file system'),
('25filesystem', 'fat32', 'FAT32 file system'),
('40swap', 'swap', 'swap area'),
('70dont_use', 'dontuse', 'do not use the partition'),
],
'parent': '/dev/sdb',
'parted': {
'fs': 'linux-swap',
'id': '5075030016-5362882559',
'name': '',
'num': '5',
'path': '/dev/sdb5',
'size': '287852544',
'type': 'logical',
},
'resize_max_size': 287852544,
'resize_min_size': 4096,
'resize_pref_size': 287852544,
},
'/var/lib/partman/devices/=dev=sdc//32256-5074997759': {
'can_resize': True,
'detected_filesystem': 'ext4',
'dev': '=dev=sdc',
'display': tree_device('sdc', '32256-5074997759'),
'id': '32256-5074997759',
'method_choices': [
('25filesystem', 'ext4', 'Ext4 journal |
econne01/flask_blog | app/app.py | Python | mit | 1,579 | 0.002533 | """ Configuration for Flask app """
import os
import urllib
from flask import (Flask, abort, flash, Response)
from playhouse.flask_utils import FlaskDB
ADMIN_PASSWORD = 'secret'
APP_DIR = os.path.dirname(os.path.realpath(__file__))
DATABASE = 'sqliteext:///%s' % os.path.join(APP_DIR, 'blog.db')
DEBUG = False
SECRET_KEY = 'shhh, secret!' # Used by Flask to encrypt session cookie.
SITE_WIDTH = 800
app = Flask(__name__)
app.config.from_object(__name__)
flask_db = FlaskDB(app)
database = flask_db.database
from models import Entry, FTSEntry
database.create_tables([Entry, FTSEntry], safe=True)
# Setup routes
import views
app.add_url_rule('/login/', 'login', views.login, methods=['GET', 'POST'])
app.add_url_rule('/logout/', 'logout', views.logout, methods=['GET', 'POST'])
app.add_url_rule('/', 'index', views.index, methods=['GET'])
app.add_url_rule('/create', 'create', views.create, methods=['GET', 'POST'])
app.add_url_rule('/drafts | ', 'drafts', views.drafts, methods=['GET'])
app.add_url_rule('/<slug>', 'detail', views.detail, methods=['GET'])
app.add_url_rule('/<slug>/edit', 'edit', views.edit, methods=['GET', 'POST'])
@app.template_filter('clean_querystring')
def clean_querystring(request_args, *keys_to_remove, **new_values):
    """Jinja filter: rebuild the request's query string, dropping
    *keys_to_remove* and overriding/adding *new_values*.

    Used by templates (e.g. pagination links) to tweak one parameter
    while preserving the rest of the current query string.
    """
    # dict() consumes the (key, value) pairs directly; no genexp needed.
    querystring = dict(request_args.items())
    for key in keys_to_remove:
        querystring.pop(key, None)
    querystring.update(new_values)
    return urllib.urlencode(querystring)
@app.errorhandler(404)
def not_found(exc):
    """Render a minimal 404 page for unknown URLs."""
    body = '<h3>404 Error: Page Not found</h3>'
    return Response(body), 404
|
cuckoo5/soap | Soap_know/oauth/base_mixin.py | Python | gpl-3.0 | 196 | 0.005102 | # c | oding=utf-8
from tornado import httpclient
from tornado.auth import OAuth2Mixin
class BaseMixin(OAuth2Mixin):
    # Shared base for the app's OAuth2 handlers: supplies the async HTTP
    # client that tornado's OAuth2Mixin uses for token/userinfo requests.
    def get_auth_http_client(self):
        """Return the AsyncHTTPClient used for OAuth HTTP requests."""
        return httpclient.AsyncHTTPClient()
|
Maximilian-Reuter/SickRage-1 | lib/fake_useragent/fake.py | Python | gpl-3.0 | 1,434 | 0 | from __future__ import absolute_import, unicode_literals
import random
from threading import Lock
from fake_useragent import settings
from fake_useragent.utils import load, load_cached, update
class UserAgent(object):
    """Random real-world User-Agent string lookup.

    Attribute or item access (``ua.chrome``, ``ua['internet explorer']``,
    ``ua.random``) returns a randomly chosen UA string for that browser,
    or None when the browser name is unknown.
    """

    # Shared across all instances so concurrent constructions don't race
    # on the on-disk cache.
    lock = Lock()  # mutable cross-instance threading.Lock

    def __init__(self, cache=True):
        # cache: when True, UA data is read from the local cache file.
        self.cache = cache
        with self.lock:
            self.load()

    def load(self):
        """(Re)load the UA data, from the on-disk cache when enabled."""
        self.data = load_cached() if self.cache else load()

    def update(self, cache=None):
        """Refresh the UA data (re-fetching the cache when enabled), then
        reload; *cache* optionally flips caching for this instance."""
        if cache is not None:
            self.cache = cache
        if self.cache:
            update()
        self.load()

    def __getitem__(self, attr):
        return self.__getattr__(attr)

    def __getattr__(self, attr):
        # Normalize the requested name (e.g. "internet_explorer" ->
        # "internet explorer") before looking it up.
        for value, replacement in settings.REPLACEMENTS.items():
            attr = attr.replace(value, replacement)
        attr = attr.lower()
        if attr == 'random':
            # Pick a browser name from the 'randomize' table (keyed by
            # stringified indices), preserving its usage weighting.
            attr = self.data['randomize'][
                str(random.randint(0, len(self.data['randomize']) - 1))
            ]
        elif attr in settings.SHORTCUTS:
            # Collapsed `else: if` to elif; resolve aliases like "ie".
            attr = settings.SHORTCUTS[attr]
        try:
            # random.choice replaces the seq[randint(0, len-1)] pattern.
            return random.choice(self.data['browsers'][attr])
        except KeyError:
            # Unknown browser name.
            return None
|
kohnle-lernmodule/palama | exe/webui/block.py | Python | gpl-2.0 | 12,877 | 0.007067 | # ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write | to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
Block is the base class for the classes which are responsible for
rendering and processing Idevices in XHTML
"""
import sys
from exe.webui import common
from exe.webui.renderable import Renderable
from exe.engine.idevice import Idevice
import logging
log = logging.getLogger(__name__)
# ===========================================================================
class Block(Renderable):
    """
    Block is the base class for the classes which are responsible for
    rendering and processing Idevices in XHTML
    """
    nextId = 0
    # Rendering modes: editing form, authoring preview, exported view,
    # or not rendered at all.
    Edit, Preview, View, Hidden = range(4)

    def __init__(self, parent, idevice):
        """
        Initialize a new Block object wrapping *idevice*; the block's
        renderable name and id mirror the iDevice's id.
        """
        Renderable.__init__(self, parent, name=idevice.id)
        self.idevice = idevice
        self.id = idevice.id
        self.purpose = idevice.purpose
        self.tip = idevice.tip
        # Resume in edit mode when the iDevice was left mid-edit.
        if idevice.edit:
            self.mode = Block.Edit
        else:
            self.mode = Block.Preview
def process(self, request):
"""
Process the request arguments from the web server to see if any
apply to this block
"""
log.debug(u"process id="+self.id)
if u"object" in request.args and request.args[u"object"][0] == self.id:
# changing to a different node does not dirty package
if request.args[u"action"][0] != u"changeNode":
self.package.isChanged = 1
log.debug(u"package.isChanged action="+request.args[u"action"][0])
if request.args[u"action"][0] == u"done":
self.processDone(request)
elif request.args[u"action"][0] == u"edit":
self.processEdit(request)
elif request.args[u"action"][0] == u"delete":
self.processDelete(request)
elif request.args[u"action"][0] == u"move":
self.processMove(request)
elif request.args[u"action"][0] == u"movePrev":
self.processMovePrev(request)
elif request.args[u"action"][0] == u"moveNext":
self.processMoveNext(request)
elif request.args[u"action"][0] == u"promote":
self.processPromote(request)
elif request.args[u"action"][0] == u"demote":
self.processDemote(request)
elif request.args[u"action"][0] == u"cancel":
self.idevice.edit = False
else:
self.idevice.lastIdevice = False
self.processDone(request)
    def processDone(self, request):
        """
        User has finished editing this block; drop back to preview mode.
        """
        log.debug(u"processDone id="+self.id)
        self.idevice.edit = False
    def processEdit(self, request):
        """
        User has started editing this block; mark it as the most recently
        touched iDevice so rendering can scroll to it.
        """
        log.debug(u"processEdit id="+self.id)
        self.idevice.lastIdevice = True
        self.idevice.edit = True
    def processDelete(self, request):
        """
        Delete this block and the associated iDevice
        """
        log.debug(u"processDelete id="+self.id)
        self.idevice.delete()
    def processMove(self, request):
        """
        Move this iDevice to a different node; the destination node id
        arrives in the "move<blockId>" request argument.
        """
        log.debug(u"processMove id="+self.id)
        nodeId = request.args[u"move"+self.id][0]
        node = self.package.findNode(nodeId)
        if node is not None:
            self.idevice.setParentNode(node)
        else:
            # Unknown destination: log and leave the iDevice where it is.
            log.error(u"addChildNode cannot locate "+nodeId)
    def processPromote(self, request):
        """
        Promote this node up the hierarchy tree.
        NOTE(review): no-op beyond logging in this base class.
        """
        log.debug(u"processPromote id="+self.id)
    def processDemote(self, request):
        """
        Demote this node down the hierarchy tree.
        NOTE(review): no-op beyond logging in this base class.
        """
        log.debug(u"processDemote id="+self.id)
    def processMovePrev(self, request):
        """
        Move this block back to the previous position among its siblings.
        """
        log.debug(u"processMovePrev id="+self.id)
        self.idevice.movePrev()
    def processMoveNext(self, request):
        """
        Move this block forward to the next position.

        Delegates the reordering to the associated iDevice.
        """
        log.debug(u"processMoveNext id="+self.id)
        self.idevice.moveNext()
    def render(self, style):
        """
        Returns the appropriate XHTML string for whatever mode this block
        is in.

        Dispatches to renderEdit/renderView/renderPreview according to
        ``self.mode``.  Any exception raised by a renderer is caught and
        shown inline as an "IDevice broken" message instead of failing the
        whole page.  Edit and Preview modes also append their button bars.
        """
        html = u''
        # %% keeps one substitution slot open for the error text below.
        broken = '<p><span style="font-weight: bold">%s:</span> %%s</p>' % _('IDevice broken')
        try:
            if self.mode == Block.Edit:
                self.idevice.lastIdevice = True
                # Anchor so the browser can scroll to the block being edited.
                html += u'<a name="currentBlock"></a>\n'
                html += self.renderEdit(style)
            elif self.mode == Block.View:
                html += self.renderView(style)
            elif self.mode == Block.Preview:
                if self.idevice.lastIdevice:
                    html += u'<a name="currentBlock"></a>\n'
                html += self.renderPreview(style)
        except Exception, e:
            from traceback import format_tb
            # NOTE(review): sys.exc_traceback is a deprecated Python 2 API;
            # sys.exc_info()[2] would be the portable spelling.
            log.error('%s:\n%s' % (str(e), '\n'.join(format_tb(sys.exc_traceback))))
            html += broken % str(e)
        if self.mode == Block.Edit:
            html += self.renderEditButtons()
        if self.mode == Block.Preview:
            html += self.renderViewButtons()
        return html
    def renderEdit(self, style):
        """
        Returns an XHTML string with the form element for editing this block.

        Abstract: subclasses must override this.  Reaching this base
        implementation is a programming error and is logged as such.
        """
        log.error(u"renderEdit called directly")
        return u"ERROR Block.renderEdit called directly"
def renderEditButtons(self, undo=True):
"""
Returns an XHTML string for the edit buttons
"""
html = common.submitImage(u"done", self.id,
u"/images/stock-apply.png",
_(u"Done"),1)
if undo:
html += common.submitImage(u"cancel", self.id,
u"/images/stock-undo.png",
_(u"Undo Edits"),1)
else:
html += common.submitImage(u"no_cancel", self.id,
u"/images/stock-undoNOT.png",
_(u"Can NOT Undo Edits"),1)
html += common.confirmThenSubmitImage(
_(u"This will delete this iDevice."
u"\\n"
u"Do you really want to do this?"),
u"delete",
self.id, u"/images/stock-cancel.png",
_(u"Delete"), 1)
if self.idevice.isFirst():
html += common.image(u"movePrev", u"/images/stock-go-up-off.png")
else:
html += common.submitImage(u"movePrev", self.id,
u"/images/stock-go-up.png",
_(u"Move Up"),1)
if self.idevice.isLast():
html += common.image(u"moveNext", u"/images/stock-go-down-off.png")
else:
html += common.submitImage(u" |
antoinecarme/pyaf | tests/artificial/transf_Fisher/trend_MovingAverage/cycle_5/ar_/test_artificial_128_Fisher_MovingAverage_5__100.py | Python | bsd-3-clause | 266 | 0.086466 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_le | ngth = 5, transform = "Fisher", sigma | = 0.0, exog_count = 100, ar_order = 0); |
bobwalker99/Pydev | plugins/com.python.pydev.refactoring/tests/pysrcrefactoring/reflib/renamemodule6/scene_tree.py | Python | epl-1.0 | 25 | 0.04 | class | SceneTree:
| pass |
opendatakosovo/kosovolunteer | ve/views/delete_event.py | Python | gpl-2.0 | 558 | 0.003584 | from flask import Flask
from flask.views import View
from flask import Response, request
import urllib2
from ve import utils
class DeleteEvent(View):
    """Flask view that forwards an event-deletion request to the backend API."""

    def dispatch_request(self):
        """
        POST the incoming request body to ``<api>/delete/event`` and echo
        the same JSON payload back to the client.
        """
        api_base_url = utils.get_api_url()
        url = '%s/delete/event'%(api_base_url)
        data = request.data
        # NOTE(review): leftover debug output -- consider removing.
        print data
        r = urllib2.Request(url, data=data, headers={"Content-Type": "application/json"})
        # NOTE(review): the API response is discarded; the original request
        # body is returned regardless of whether the deletion succeeded.
        res = urllib2.urlopen(r)
        resp = Response(
            response=data,
            mimetype='application/json')
        return resp
| |
nerandell/vyked | vyked/utils/decorators.py | Python | mit | 356 | 0 | import warnings
from functools import wraps
def deprecated(func):
    """
    Decorator that emits a DeprecationWarning every time *func* is called,
    then invokes it unchanged.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn(
            "'{}' is deprecated".format(func.__name__),
            category=DeprecationWarning,
            stacklevel=2,
        )
        return func(*args, **kwargs)
    return wrapper
|
atyndall/cits4211 | tools/tree_generator.py | Python | mit | 14,570 | 0.02464 | # tree_generator was written with Python 2.7.4.
# The pickle files it produces should not be read with a version of
# Python less than 2.7.4, as they are not forwards compatible.
from piece_definitions import PIECES
import numpy as np
import sys
import collections
import itertools
import argparse
import multiprocessing
import time
import hashlib
from math import factorial
from rect import Rect
from tree import *
from helper import *
import pickle
WIDTH = 4 # Default width
HEIGHT = 4 # Default height
BOARD = Board(HEIGHT, WIDTH)
PIECES_FIT = (WIDTH * HEIGHT) / 4 # Number of pieces that can fit in board
NUM_PIECES = len(PIECES)
NOTIFY_INTERVAL = 10 # Number of seconds between progress notification
args = None
# The adjacent function returns a 2D-array of all blocks that are vertically adjacent
# to the given 2D-array "a".
# A piece is not hovering in midair if part of it collides with the adjacent matrix.
def adjacent(a):
    """
    Return a boolean matrix marking, per column, the single highest empty
    cell that sits directly on top of the stack (or on the floor).

    A piece is supported -- not hovering in mid-air -- if it overlaps this
    matrix.  Columns that are completely full get no support cell at all.
    """
    height, width = a.shape
    # Fix: np.bool was a deprecated alias for the builtin bool and was
    # removed in NumPy 1.24; plain bool works on every NumPy version.
    m = np.zeros((height, width), bool)
    m[-1] = True  # the floor supports anything in the bottom row
    for x in range(height):
        for y in range(width):
            if np.all(a[:, y]):
                # Column is completely full: nothing can rest in it.
                m[:, y] = False
            elif a[x, y] and x > 0:
                m[x - 1, y] = True  # cell directly above a filled cell
    # Keep only the highest support cell in each column.
    for x in range(height):
        for y in range(width):
            if m[x, y]:
                m[x + 1:, y] = False
    return m
# The overhang function returns a 2D-array of all blocks that are empty space, but
# have a piece above them.
# A piece can be successfully dropped from above into its current position if it does
# not collide with the overhang matrix.
def overhang(a):
    """
    Return a boolean matrix of all empty cells that have a filled cell
    directly above them.

    A piece can be dropped vertically into place only if it does not
    overlap this matrix.
    """
    height, width = a.shape
    # Fix: np.bool was removed in NumPy 1.24; plain bool is equivalent.
    m = np.zeros((height, width), bool)
    for y in range(width):
        for x in range(1, height):
            if a[x - 1, y] and not a[x, y]:
                m[x, y] = True
    return m
# The possible function returns a value indicating if a piece placement "p" on a given
# Tetris grid "a" would be possible (p does not occupy the same space as a).
def possible(p, a):
    """
    Report whether piece placement *p* occupies only free space on grid
    *a* (True when the two masks share no cell).
    """
    overlap = np.logical_and(p, a)
    return not bool(np.any(overlap))
# The possible function returns a value indicating if a piece placement "p" on a given
# Tetris grid "a" would be valid (p is not in mid-air, and can be dropped vertically
# into destination position).
def valid(p, a):
    """
    Report whether placement *p* is a legal play on grid *a*: the piece
    must rest on something (overlap the adjacency matrix) and must be
    reachable by a straight vertical drop (avoid every overhang cell).
    """
    supported = np.any(np.logical_and(p, adjacent(a)))
    if not supported:
        return False  # piece would hover in mid-air
    blocked = np.any(np.logical_and(p, overhang(a)))
    return not bool(blocked)
# Calculate every possible position a piece can have on a WIDTH*HEIGHT grid
def calculate_positions():
    """
    Enumerate every distinct orientation and position each tetromino can
    take on the WIDTH x HEIGHT board, then hand the list to
    calculate_possible().
    """
    print 'Computing all possible orientations and positions of given tetrominoes on %dx%d grid.' % (WIDTH, HEIGHT)
    possibilities = []
    i = 0
    for n, p in enumerate(PIECES):
        options = []
        p_width = len(p[0])
        p_height = len(p)
        # Calculate the number of rotations a piece requires, default 4 (all)
        nrot = 4
        if rall(p):
            if p_width == p_height: # Piece is square, no rotation needed
                nrot = 1
            else: # Piece is rectangular, one rotation needed
                nrot = 2
        # Add all rotations to an options list
        for r in range(nrot):
            # NOTE(review): p is rotated cumulatively (rot90 is applied to
            # the already-rotated matrix), so the matrix paired with r is
            # not rot90(original, r).  Only the surviving r values are used
            # below, and the set of orientations covered is still complete,
            # but confirm this pairing is intended before relying on it.
            p = np.rot90(p, r)
            # Remove duplicate rotations
            already = False
            for p2, r2 in options:
                if np.array_equal(p, p2):
                    already = True
            if not already:
                options.append((p, r))
        # Create all combinations
        for _, r in options:
            for h in range(HEIGHT):
                for w in range(WIDTH):
                    try:
                        i += 1
                        op = DAction(BOARD, n, r, h, w)
                        possibilities.append(op)
                    except PieceNotFitError:
                        # Placement falls outside the board: skip it.
                        pass
        print i
    lp = len(possibilities)
    print "There are %d possible orientations and positions for the given tetrominoes." % lp
    calculate_possible(possibilities)
# Simple iterator that outputs the HEIGHT and WIDTH for our multiprocessing functions
def hw_iterator():
    """
    Yield the module-level (HEIGHT, WIDTH) pair forever.

    Zipped with the work items so every multiprocessing task carries the
    board dimensions along with its payload.
    """
    while True:
        yield (HEIGHT, WIDTH)
# Check possibility
def check_possibility(data):
    """
    Multiprocessing worker: test whether a sequence of piece placements can
    coexist on one board (no two placements overlap).

    *data* is ``((height, width), cur_pieces)``; the dimensions are stored
    into the module globals so the helper functions see them in the worker
    process.  Returns *cur_pieces* when compatible, otherwise None.
    """
    global PIECES, HEIGHT, WIDTH
    hw, cur_pieces = data
    HEIGHT, WIDTH = hw
    # Fix: np.bool was removed in NumPy 1.24; plain bool is equivalent.
    board = np.zeros((HEIGHT, WIDTH), bool)
    prev_p = None
    for p in cur_pieces:
        if prev_p is not None:
            # Commit the previous placement before testing the next one.
            board = np.logical_or(prev_p.data, board)
        prev_p = p
        if not possible(p.data, board):
            return None
    # (The bounding-box bookkeeping the original carried here -- Rect
    # construction, indr, lowestc/highestc, boxcalc -- was never read and
    # has been removed.)
    return cur_pieces
# Input seconds, output H:MM:SS
def time_output(s):
    """Format a duration of *s* seconds as H:MM:SS."""
    hours = s // 3600
    minutes = s % 3600 // 60
    seconds = s % 60
    return '%.f:%02.f:%02.f' % (hours, minutes, seconds)
# We combine all existing combinations and rotations of pieces to see which
# successfully fit together.
def calculate_possible(positions):
    """
    Search all combinations of up to PIECES_FIT placements for sets that
    can coexist on the board, reporting progress periodically, then hand
    the survivors to calculate_valid().
    """
    lp = len(positions)
    search_space = 0
    iterables = []
    for i in range(PIECES_FIT):
        # n-choose-k count for k = PIECES_FIT - i pieces.
        search_space = search_space + ( factorial(lp) / ( factorial(lp-(PIECES_FIT-i)) * factorial(PIECES_FIT-i) ) )
        iterables.append(itertools.combinations(positions, PIECES_FIT-i))
    print "Calculating possible combinations of tetrominoes from all placements (%d combinations)." % search_space
    start_time = time.time()
    combinations = []
    timer = time.time()
    prev_i = 0
    pool = multiprocessing.Pool() # Use multiple processes to leaverage maximum processing power
    #for i, res in enumerate( itertools.imap(check_possibility, itertools.combinations(positions, PIECES_FIT)) ):
    # Each work item is ((HEIGHT, WIDTH), combination); the chunksize is
    # scaled with the search space (minimum 5) to cut IPC overhead.
    for i, res in enumerate( pool.imap_unordered(check_possibility, itertools.izip(hw_iterator(), itertools.chain(*iterables)), max(5, search_space/500)) ):
        if res:
            combinations.append(res)
        elapsed = time.time() - timer
        if elapsed > NOTIFY_INTERVAL and i != 0: # If x seconds have elapsed
            pps = (i-prev_i)/elapsed
            print "Searched %d/%d placements (%.1f%% complete, %.0f pieces/sec, ~%s remaining)" % (i, search_space, (i/float(search_space))*100, pps, time_output((search_space-i)/pps))
            prev_i = i
            timer = time.time()
    pool.terminate()
    lc = len(combinations)
    print "There are %d possible combinations of a maximum of %d tetrominoes within the %d positions." % (lc, PIECES_FIT, search_space)
    print "The calculation took %s." % time_output(time.time() - start_time)
    if args.out_p:
        # Checkpoint the intermediate result so a later run can resume.
        pickle.dump(combinations, open(args.out_p,'wb'))
        print "Output saved to '%s'." % args.out_p
    calculate_valid(combinations)
# Check validity
def check_validity(data):
    """
    Multiprocessing worker: test whether a placement sequence is playable
    in order (each piece supported and droppable when it is placed).

    *data* is ``((height, width), pieces)``.  Returns *pieces* when every
    placement is valid in sequence, otherwise None.
    """
    global HEIGHT, WIDTH
    hw, pieces = data
    HEIGHT, WIDTH = hw
    # Fix: np.bool was removed in NumPy 1.24; plain bool is equivalent.
    board = np.zeros((HEIGHT, WIDTH), bool)
    for p in pieces:
        if not valid(p.data, board):
            return None
        board = np.logical_or(p.data, board)
    # (The original's `pos` flag was never set to False; the guard around
    # the return was dead code and has been removed.)
    return pieces
# We permute over all possible combinations and rotations of pieces to see which
# are valid tetris plays.
def calculate_valid(possibilities):
lp = len(possibilities)
search_space = lp * factorial(PIECES_FIT)
start_time = time.time()
print "Calculating valid permutations of tetrominoes from all possible (%d permutations)." % search_space
combinations = []
timer = |
federicotdn/piter | src/util.py | Python | gpl-3.0 | 625 | 0.056 | import unicodedata
import curses
class BufferError(Exception):
    """Error raised by buffer operations.

    NOTE(review): this name shadows the builtin ``BufferError``; renaming
    it would change the module's public interface, so it is kept as-is.
    """

    def __init__(self, msg):
        # Fix: populate Exception.args (previously never initialised), so
        # repr() and pickling behave conventionally.
        super().__init__(msg)
        self._msg = msg

    def __str__(self):
        return repr(self._msg)
def interpret_arrowkey(key):
    """Translate a curses arrow-key code into an (dx, dy) step, or None
    for any other key."""
    return {
        curses.KEY_UP: (0, -1),
        curses.KEY_DOWN: (0, 1),
        curses.KEY_RIGHT: (1, 0),
        curses.KEY_LEFT: (-1, 0),
    }.get(key)
def is_printable(t):
    """Return True when *t* is a printable character, counting newline and
    tab as printable; non-strings are never printable."""
    if not isinstance(t, str):
        return False
    if t in ('\n', '\t'):
        return True
    # Cc (control) and Cn (unassigned) categories are not printable.
    return unicodedata.category(t) not in ('Cc', 'Cn')
|
schelleg/PYNQ | pynq/lib/rpi/rpi.py | Python | bsd-3-clause | 5,799 | 0.001207 | # Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON | ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from pynq.lib import PynqMicroblaze
from pynq.lib.pynqmicroblaze import add_bsp
from . import MAILBOX_OFFSE | T
from . import MAILBOX_PY2IOP_CMD_OFFSET
from . import BIN_LOCATION
from . import BSP_LOCATION
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "yunq@xilinx.com"
class Rpi(PynqMicroblaze):
    """Controller for the Raspberry Pi Microblaze instances in the system.

    Extends :class:`PynqMicroblaze` with the mailbox protocol used to
    exchange data and commands with Raspberry Pi devices.

    Attributes
    ----------
    ip_name : str
        The name of the IP corresponding to the Microblaze.
    rst_name : str
        The name of the reset pin for the Microblaze.
    mb_program : str
        The absolute path of the Microblaze program.
    state : str
        The status (IDLE, RUNNING, or STOPPED) of the Microblaze.
    reset_pin : GPIO
        The reset pin associated with the Microblaze.
    mmio : MMIO
        The MMIO instance associated with the Microblaze.
    interrupt : Event
        An asyncio.Event-like class for waiting on and clearing interrupts.

    """

    def __init__(self, mb_info, mb_program):
        """Create a new Microblaze controller.

        A relative program path is resolved against ``BIN_LOCATION``
        before being handed to the parent initializer.

        Parameters
        ----------
        mb_info : dict
            A dictionary storing Microblaze information, such as the
            IP name and the reset name.
        mb_program : str
            The Microblaze program loaded for the processor.

        Examples
        --------
        The `mb_info` is a dictionary storing Microblaze information:

        >>> mb_info = {'ip_name': 'mb_bram_ctrl_1',
        'rst_name': 'mb_reset_1',
        'intr_pin_name': 'iop1/dff_en_reset_0/q',
        'intr_ack_name': 'mb_1_intr_ack'}

        """
        program_path = mb_program
        if not os.path.isabs(program_path):
            program_path = os.path.join(BIN_LOCATION, program_path)
        super().__init__(mb_info, program_path)

    def write_mailbox(self, data_offset, data):
        """Write one or more 32-bit words into the Microblaze mailbox.

        Parameters
        ----------
        data_offset : int
            The offset for mailbox data, 0,4,... for MAILBOX 0,1,...
        data : int/list
            A list of 32b words to be written into the mailbox.

        Returns
        -------
        None

        """
        self.write(MAILBOX_OFFSET + data_offset, data)

    def read_mailbox(self, data_offset, num_words=1):
        """Read mailbox data back from the Microblaze.

        Parameters
        ----------
        data_offset : int
            The offset for mailbox data, 0,4,... for MAILBOX 0,1,...
        num_words : int
            Number of 32b words to read from the Microblaze mailbox.

        Returns
        -------
        int/list
            An int or a list of data read from the mailbox.

        """
        return self.read(MAILBOX_OFFSET + data_offset, num_words)

    def write_blocking_command(self, command):
        """Send a command and wait until the Microblaze clears it.

        Busy-waits on the command word: the loop exits once the
        Microblaze has written 0 back, signalling completion.

        Parameters
        ----------
        command : int
            The command to write to the Microblaze.

        Returns
        -------
        None

        """
        cmd_addr = MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET
        self.write(cmd_addr, command)
        while self.read(cmd_addr) != 0:
            pass

    def write_non_blocking_command(self, command):
        """Send a command and return immediately without waiting.

        Parameters
        ----------
        command : int
            The command to write to the Microblaze.

        Returns
        -------
        None

        """
        self.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, command)
# Register the Raspberry Pi board-support package with the Microblaze
# toolchain when it is installed alongside this module.
if os.path.exists(BSP_LOCATION):
    add_bsp(BSP_LOCATION)
|
Kamik423/uni_plan | plan/plan/lib64/python3.4/base64.py | Python | apache-2.0 | 20,167 | 0.002231 | #! /usr/bin/python3.4
"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodebytes', 'decodebytes',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Base85 and Ascii85 encodings
'b85encode', 'b85decode', 'a85encode', 'a85decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
bytes_types = (bytes, bytearray) # Types acceptable as binary data
def _bytes_from_decode_data(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except UnicodeEncodeError:
raise ValueError('string argument should contain only ASCII characters')
if isinstance(s, bytes_types):
return s
try:
return memoryview(s).tobytes()
except TypeError:
raise TypeError("argument should be a bytes-like object or ASCII "
"string, not %r" % s.__class__.__name__) from None
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
    """Encode a byte string using Base64.

    s is the byte string to encode.  Optional altchars must be a byte
    string of length 2 which specifies an alternative alphabet for the
    '+' and '/' characters.  This allows an application to
    e.g. generate url or filesystem safe Base64 strings.

    The encoded byte string is returned.
    """
    # binascii appends a trailing newline; strip it off.
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is None:
        return encoded
    assert len(altchars) == 2, repr(altchars)
    return encoded.translate(bytes.maketrans(b'+/', altchars))
def b64decode(s, altchars=None, validate=False):
    """Decode a Base64 encoded byte string.

    s is the byte string to decode.  Optional altchars must be a
    string of length 2 which specifies the alternative alphabet used
    instead of the '+' and '/' characters.

    The decoded string is returned.  A binascii.Error is raised if s is
    incorrectly padded.

    If validate is False (the default), non-base64-alphabet characters are
    discarded prior to the padding check.  If validate is True,
    non-base64-alphabet characters in the input result in a binascii.Error.
    """
    data = _bytes_from_decode_data(s)
    if altchars is not None:
        alt = _bytes_from_decode_data(altchars)
        assert len(alt) == 2, repr(alt)
        # Map the alternative characters back to '+/' before decoding.
        data = data.translate(bytes.maketrans(alt, b'+/'))
    if validate:
        if re.match(b'^[A-Za-z0-9+/]*={0,2}$', data) is None:
            raise binascii.Error('Non-base64 digit found')
    return binascii.a2b_base64(data)
def standard_b64encode(s):
    """Encode a byte string using the standard Base64 alphabet.

    s is the byte string to encode.  The encoded byte string is returned.
    """
    # Identical to b64encode with no alternative characters.
    return b64encode(s)
def standard_b64decode(s):
    """Decode a byte string encoded with the standard Base64 alphabet.

    s is the byte string to decode.  The decoded byte string is
    returned.  binascii.Error is raised if the input is incorrectly
    padded or if there are non-alphabet characters present in the
    input.
    """
    # Identical to b64decode with no alternative characters (and no
    # strict validation).
    return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
def urlsafe_b64encode(s):
    """Encode a byte string using a url-safe Base64 alphabet.

    s is the byte string to encode.  The encoded byte string is
    returned.  The alphabet uses '-' instead of '+' and '_' instead of
    '/'.
    """
    # Standard encode, then swap '+/' for '-_' via a translation table.
    return b64encode(s).translate(_urlsafe_encode_translation)
def urlsafe_b64decode(s):
    """Decode a byte string encoded with the url-safe Base64 alphabet.

    s is the byte string to decode.  The decoded byte string is
    returned.  binascii.Error is raised if the input is incorrectly
    padded or if there are non-alphabet characters present in the
    input.

    The alphabet uses '-' instead of '+' and '_' instead of '/'.
    """
    s = _bytes_from_decode_data(s)
    # Map the url-safe characters back to '+/' before standard decoding.
    s = s.translate(_urlsafe_decode_translation)
    return b64decode(s)
# Base32 encoding/decoding must be done in Python
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
_b32tab2 = None
_b32rev = None
def b32encode(s):
    """Encode a byte string using Base32.

    s is the byte string to encode.  The encoded byte string is returned.
    """
    global _b32tab2
    # Delay the initialization of the table to not waste memory
    # if the function is never called
    if _b32tab2 is None:
        b32tab = [bytes((i,)) for i in _b32alphabet]
        # All 1024 two-digit Base32 strings: one lookup emits the pair of
        # characters covering a 10-bit slice of a 40-bit quantum.
        _b32tab2 = [a + b for a in b32tab for b in b32tab]
        b32tab = None
    if not isinstance(s, bytes_types):
        s = memoryview(s).tobytes()
    leftover = len(s) % 5
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s = s + bytes(5 - leftover)  # Don't use += !
    encoded = bytearray()
    from_bytes = int.from_bytes
    b32tab2 = _b32tab2
    for i in range(0, len(s), 5):
        # Each 5-byte (40-bit) group becomes eight Base32 digits.
        c = from_bytes(s[i: i + 5], 'big')
        encoded += (b32tab2[c >> 30] +           # bits 1 - 10
                    b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
                    b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
                    b32tab2[c & 0x3ff]           # bits 31 - 40
                    )
    # Adjust for any leftover partial quanta
    if leftover == 1:
        encoded[-6:] = b'======'
    elif leftover == 2:
        encoded[-4:] = b'===='
    elif leftover == 3:
        encoded[-3:] = b'==='
    elif leftover == 4:
        encoded[-1:] = b'='
    return bytes(encoded)
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
letter O (oh), and for optional mapping of the digit 1 (one) to
either the letter I (eye) or letter L (el). The optional argument
map01 when not None, specifies which letter the digit 1 should be
mapped to (when map01 is not None, the digit 0 is always mapped to
the letter O). For security purposes the default is None, so that
0 and 1 are not allowed in the input.
The decoded byte string is returned. binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
global _b32rev
# Delay the initialization of the table to not waste memory
# if the function is never called
if _b32rev is None:
_b32rev = {v: k for k, v in enumerate(_b32alphabet)}
s = _bytes_from_decode_data(s)
if len(s) % 8:
raise binascii.Error('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01 is not None:
map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
l = len(s)
s = s.rstrip(b'=')
padchars = l - len(s)
# Now decode the full quanta
decoded = bytearray()
b32rev = _b32rev
for i in range(0, len(s), 8):
|
TimelyToga/wtf_is | wtf_proj/wtf_proj/views.py | Python | mit | 3,001 | 0.000666 | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from bs4 import BeautifulSoup as bs
import requests
import json
WIKI_PAGE_BASE = "https://en.wikipedia.org/wiki/"
WIKI_API_BASE = "https://en.wikipedia.org/w/api.php?format=json&action=query"
WIKI_EXTRACT = WIKI_API_BASE + "&prop=extracts&exintro=&explaintext=&titles="
WIKI_SEARCH = WIKI_API_BASE + "&list=search&meta=&titles=&utf8=1&srinfo=suggestion|totalhits&srsearch="
def home(request):
    """Render the landing page."""
    return render(request, "home.html", context={})
def about(request):
    """Render the static about page (reuses the definition template term)."""
    return render(request, "about.html", context={"term": "about_page"})
def definition(request, term):
    """
    Render the Wikipedia-based definition page for *term*, or redirect to
    the search-results page when no definition could be fetched.
    """
    context = {"term": term, "link": WIKI_PAGE_BASE + term}
    # Get definition from Wikipedia
    wiki_def = api_fs(term)
    if wiki_def:
        context["def"] = wiki_def
    else:
        # Falsy result means no page was found: fall back to a search.
        return HttpResponseRedirect("/search_results/?term=" + term)
    return render(request, "definition.html", context=context)
def search(request):
    """
    Handle the search form: redirect to the definition page for the
    submitted term, or back to the home page when the form was empty.
    """
    parameter = request.GET.get("search", "")
    if parameter:
        return HttpResponseRedirect("/is/" + parameter)
    # Bug fix: the view previously fell through and returned None on an
    # empty query, which makes Django raise "view didn't return an
    # HttpResponse"; send the user back to the home page instead.
    return HttpResponseRedirect("/")
def scrape_site_fs(term):
    """
    Scrape the Wikipedia article for *term* directly from its HTML page.

    Returns up to the first five paragraph tags of the article body, or
    None when the content container is missing.
    """
    rdata = requests.get(WIKI_PAGE_BASE + term)
    soup = bs(rdata.content)
    # Wikipedia keeps the article body inside the "mw-content-text" div.
    page_content = soup.find("div", {"id": "mw-content-text"})
    if page_content:
        return page_content.findAll("p")[:5]
    return None
def api_fs(term):
    """
    Fetch the plain-text intro extract for *term* from the Wikipedia API.

    Returns the extract string, or None on any failure so the caller
    (``definition``) can fall back to the search-results page.
    """
    rdata = requests.get(WIKI_EXTRACT + term).content
    json_data = json.loads(rdata)
    if not json_data:
        # Bug fix: this path used to return an HttpResponseRedirect, which
        # is truthy, so `definition` rendered the redirect object as the
        # definition text.  None lets the caller redirect to search.
        return None
    # Get the pages from the results
    pages = json_data["query"]["pages"]
    # Fix: dict.keys() is not indexable on Python 3; list() works on both.
    page_keys = list(pages.keys())
    if int(page_keys[0]) == -1:
        # Wikipedia reports a missing page with the pseudo page-id "-1".
        return None
    first_page = pages[page_keys[0]]
    if not first_page:
        # If no first page, perform search
        return None
    return first_page["extract"]
def search_for_results(request):
    """
    Run a Wikipedia full-text search for the ``term`` query parameter and
    render the results page (or a failure/suggestion page).
    """
    term = request.GET.get("term", "")
    j_data = json_request(WIKI_SEARCH + term)
    context = {}
    if not j_data:
        # Issue an error
        return HttpResponse("Wikipedia didn't send back JSON when you searched for " + term + " . Pls try again.")
    query = j_data["query"]
    search_info = query["searchinfo"]
    search = query["search"]
    if len(search) == 0 or int(search_info["totalhits"]) == 0:
        context["failed_search"] = True
        if "suggestion" in search_info.keys():
            # offer suggestion
            context["suggestion"] = search_info["suggestion"]
        else:
            # report search failure
            # NOTE(review): failed_search was already set just above, so
            # this assignment is redundant -- confirm before simplifying.
            context["failed_search"] = True
    else:
        # Display the search results
        context["results"] = search
    return render(request, "search_results.html", context=context)
def json_request(url):
    """GET *url* and return its response body parsed as JSON."""
    return json.loads(requests.get(url).content)
|
jainanisha90/WeVoteServer | import_export_ctcl/models.py | Python | mit | 730 | 0.00411 | # im | port_export_ctcl/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import m | odels
import wevote_functions.admin
logger = wevote_functions.admin.get_logger(__name__)
class CandidateSelection(models.Model):
    """
    Contest Office to Candidate mapping is stored in this table.

    Each row ties one CTCL candidate-selection id to the CTCL id of the
    contest office it belongs to, within a single import batch set.
    """
    # Identifier of the import batch set this mapping row came from.
    batch_set_id = models.PositiveIntegerField(verbose_name="batch set id", default=0, null=True, blank=True)
    # CTCL identifier of the candidate selection.
    candidate_selection_id = models.CharField(verbose_name="candidate selection id", default='', null=True,
                                              max_length=255)
    # CTCL identifier of the contest office the candidate is running for.
    contest_office_id = models.CharField(verbose_name="contest office ctcl id", default='', null=True, max_length=255)
|
mpharrigan/msmbuilder | MSMBuilder/metrics/rmsd.py | Python | gpl-2.0 | 5,554 | 0.002341 | from __future__ import print_function, absolute_import, division
from mdtraj.utils.six.moves import xrange
import warnings
import numpy as np
from collections import namedtuple
import mdtraj as md
from .baseclasses import AbstractDistanceMetric
class RMSD(AbstractDistanceMetric):

    """
    Compute distance between frames using the Root Mean Square Deviation
    over a specifiable set of atoms using the Theobald QCP algorithm

    References
    ----------
    .. [1] Theobald, D. L. Acta. Crystallogr., Sect. A 2005, 61, 478-480.
    """

    def __init__(self, atomindices=None, omp_parallel=True):
        """Initalize an RMSD calculator

        Parameters
        ----------
        atomindices : array_like, optional
            List of the indices of the atoms that you want to use for the RMSD
            calculation. For example, if your trajectory contains the coordinates
            of all the atoms, but you only want to compute the RMSD on the C-alpha
            atoms, then you can supply a reduced set of atom_indices. If unsupplied,
            all of the atoms will be used.
        omp_parallel : bool, optional
            Use OpenMP parallelized C code under the hood to take advantage of
            multicore architectures. If you're using another parallelization scheme
            (e.g. MPI), you might consider turning off this flag.

        Notes
        -----
        You can also control the degree of parallelism with the OMP_NUM_THREADS
        envirnoment variable
        """
        self.atomindices = atomindices
        self.omp_parallel = omp_parallel

    def __repr__(self):
        # atomindices may be None (not list()-able); fall back to printing
        # it verbatim.  Fix: narrowed from a bare `except:` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed here.
        try:
            val = 'metrics.RMSD(atom_indices=%s, omp_parallel=%s)' % (
                repr(list(self.atomindices)), self.omp_parallel)
        except Exception:
            val = 'metrics.RMSD(atom_indices=%s, omp_parallel=%s)' % (
                self.atomindices, self.omp_parallel)
        return val

    def prepare_trajectory(self, trajectory):
        """Prepare the trajectory for RMSD calculation.

        Preprocessing includes extracting the relevant atoms, centering the
        frames, and computing the G matrix.

        Parameters
        ----------
        trajectory : mdtraj.Trajectory
            Molecular dynamics trajectory

        Returns
        -------
        theodata : array_like
            A msmbuilder.metrics.TheoData object, which contains some preprocessed
            calculations for the RMSD calculation
        """
        if self.atomindices is not None:
            if trajectory.topology is not None:
                topology = trajectory.topology.copy()
            else:
                topology = None
            # Copy the coordinates so restrict_atoms does not mutate the
            # caller's trajectory.
            t = md.Trajectory(xyz=trajectory.xyz.copy(), topology=topology)
            t.restrict_atoms(self.atomindices)
        else:
            t = trajectory
        # Centering here lets the distance methods pass precentered=True.
        t.center_coordinates()
        return t

    def one_to_many(self, prepared_traj1, prepared_traj2, index1, indices2):
        """Calculate a vector of distances from one frame of the first trajectory
        to many frames of the second trajectory

        The distances calculated are from the `index1`th frame of `prepared_traj1`
        to the frames in `prepared_traj2` with indices `indices2`

        Parameters
        ----------
        prepared_traj1 : rmsd.TheoData
            First prepared trajectory
        prepared_traj2 : rmsd.TheoData
            Second prepared trajectory
        index1 : int
            index in `prepared_trajectory`
        indices2 : ndarray
            list of indices in `prepared_traj2` to calculate the distances to

        Returns
        -------
        Vector of distances of length len(indices2)

        Notes
        -----
        If the omp_parallel optional argument is True, we use shared-memory
        parallelization in C to do this faster. Using omp_parallel = False is
        advised if indices2 is a short list and you are paralellizing your
        algorithm (say via mpi) at a different
        level.
        """
        # precentered=True is safe because prepare_trajectory centered t.
        return md.rmsd(prepared_traj1, prepared_traj2, index1, parallel=self.omp_parallel, precentered=True)[indices2]

    def one_to_all(self, prepared_traj1, prepared_traj2, index1):
        """Calculate a vector of distances from one frame of the first trajectory
        to all of the frames in the second trajectory

        The distances calculated are from the `index1`th frame of `prepared_traj1`
        to the frames in `prepared_traj2`

        Parameters
        ----------
        prepared_traj1 : rmsd.TheoData
            First prepared trajectory
        prepared_traj2 : rmsd.TheoData
            Second prepared trajectory
        index1 : int
            index in `prepared_trajectory`

        Returns
        -------
        Vector of distances of length len(prepared_traj2)

        Notes
        -----
        If the omp_parallel optional argument is True, we use shared-memory
        parallelization in C to do this faster.
        """
        return md.rmsd(prepared_traj2, prepared_traj1, index1, parallel=self.omp_parallel, precentered=True)

    def _square_all_pairwise(self, prepared_traj):
        """Reference implementation of all_pairwise"""
        warnings.warn(
            'This is HORRIBLY inefficient. This operation really needs to be done directly in C')
        output = np.empty((prepared_traj.n_frames, prepared_traj.n_frames))
        for i in xrange(prepared_traj.n_frames):
            output[i] = self.one_to_all(prepared_traj, prepared_traj, i)
        return output
|
vsco/grpc | src/python/grpcio/grpc_core_dependencies.py | Python | bsd-3-clause | 33,262 | 0.000241 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/lib/profiling/basic_timers.c',
'src/core/lib/profiling/stap_timers.c',
'src/core/lib/support/alloc.c',
'src/core/lib/support/arena.c',
'src/core/lib/support/atm.c',
'src/core/lib/support/avl.c',
'src/core/lib/support/backoff.c',
'src/core/lib/support/cmdline.c',
'src/core/lib/support/cpu_iphone.c',
'src/core/lib/support/cpu_linux.c',
'src/core/lib/support/cpu_posix.c',
'src/core/lib/support/cpu_windows.c',
'src/core/lib/support/env_linux.c',
'src/core/lib/support/env_posix.c',
'src/core/lib/support/env_windows.c',
'src/core/lib/support/histogram.c',
'src/core/lib/support/host_port.c',
'src/core/lib/support/log.c',
'src/core/lib/support/log_android.c',
'src/core/lib/support/log_linux.c',
'src/core/lib/support/log_posix.c',
'src/core/lib/support/log_windows.c',
'src/core/lib/support/mpscq.c',
'src/core/lib/support/murmur_hash.c',
'src/core/lib/support/stack_lockfree.c',
'src/core/lib/support/string.c',
'src/core/lib/support/string_posix.c',
'src/core/lib/support/string_util_windows.c',
'src/core/lib/support/string_windows.c',
'src/core/lib/support/subprocess_posix.c',
'src/core/lib/support/subprocess_windows.c',
'src/core/lib/support/sync.c',
'src/core/lib/support/sync_posix.c',
'src/core/lib/support/sync_windows.c',
'src/core/lib/support/thd.c',
'src/core/lib/support/thd_posix.c',
'src/core/lib/support/thd_windows.c',
'src/core/lib/support/time.c',
'src/core/lib/support/time_posix.c',
'src/core/lib/support/time_precise.c',
'src/core/lib/support/time_windows.c',
'src/core/lib/support/tls_pthread.c',
'src/core/lib/support/tmpfile_msys.c',
'src/core/lib/support/tmpfile_posix.c',
'src/core/lib/support/tmpfile_windows.c',
'src/core/lib/support/wrap_memcpy.c',
'src/core/lib/surface/init.c',
'src/core/lib/channel/channel_args.c',
'src/core/lib/channel/channel_stack.c',
'src/core/lib/channel/channel_stack_builder.c',
'src/core/lib/channel/connected_channel.c',
'src/core/lib/channel/handshaker.c',
'src/core/lib/channel/handshaker_factory.c',
'src/core/lib/channel/handshaker_registry.c',
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c',
'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c',
'src/core/lib/iomgr/endpoint_pair_posix.c',
'src/core/lib/iomgr/endpoint_pair_uv.c',
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll1_linux.c',
'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c',
'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c',
'src/core/lib/iomgr/ev_epollex_linux.c',
'src/core/lib/iomgr/ev_epollsig_linux.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/ev_windows.c',
'src/core/lib/iomgr/exec_ctx.c',
'src/core/lib/iomgr/executor.c',
'src/core/lib/iomgr/iocp_windows.c',
'src/core/lib/iomgr/iomgr.c',
'src/core/lib/iomgr/iomgr_posix.c',
'src/core/lib/iomgr/iomgr_uv.c',
'src/core/lib/iomgr/iomgr_windows.c',
'src/core/lib/iomgr/is_epollexclusive_available.c',
'src/core/lib/iomgr/load_file.c',
'src/core/lib/iomgr/lockfree_event.c',
'src/core/lib/iomgr/network_status_tracker.c',
'src/core/lib/iomgr/polling_entity.c',
'src/core/lib/iomgr/pollset_set_uv.c',
'src/core/lib/iomgr/pollset_set_windows.c',
'src/core/lib/iomgr/pollset_uv.c',
'src/core/lib/iomgr | /pollset_windows.c',
'src/core/lib/iomgr/resolve_address_posix.c',
'src/core/lib/iomgr/resolve_address_uv.c',
'src/core/lib/iomgr/resolve_address_windows.c',
'src/core/lib/i | omgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
'src/core/lib/iomgr/socket_factory_posix.c',
'src/core/lib/iomgr/socket_mutator.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
'src/core/lib/iomgr/socket_utils_posix.c',
'src/core/lib/iomgr/socket_utils_uv.c',
'src/core/lib/iomgr/socket_utils_windows.c',
'src/core/lib/iomgr/socket_windows.c',
'src/core/lib/iomgr/tcp_client_posix.c',
'src/core/lib/iomgr/tcp_client_uv.c',
'src/core/lib/iomgr/tcp_client_windows.c',
'src/core/lib/iomgr/tcp_posix.c',
'src/core/lib/iomgr/tcp_server_posix.c',
'src/core/lib/iomgr/tcp_server_utils_posix_common.c',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c',
'src/core/lib/iomgr/tcp_server_uv.c',
'src/core/lib/iomgr/tcp_server_windows.c',
'src/core/lib/iomgr/tcp_uv.c',
'src/core/lib/iomgr/tcp_windows.c',
'src/core/lib/iomgr/time_averaged_stats.c',
'src/core/lib/iomgr/timer_generic.c',
'src/core/lib/iomgr/timer_heap.c',
'src/core/lib/iomgr/timer_manager.c',
'src/core/lib/iomgr/timer_uv.c',
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
'src/core/lib/iomgr/unix_sockets_posix_noop.c',
'src/core/lib/iomgr/wakeup_fd_cv.c',
'src/core/lib/iomgr/wakeup_fd_eventfd.c',
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_uv.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',
'src/core/lib/json/json_writer.c',
'src/core/lib/slice/b64.c',
'src/core/lib/slice/percent_encoding.c',
'src/core/lib/slice/slice.c',
'src/core/lib/slice/slice_buffer.c',
'src/core/lib/slice/slice_hash_table.c',
'src/core/lib/slice/slice_intern.c',
'src/core/lib/slice/slice_string_helpers.c',
'src/core/lib/surface/alarm.c',
'src/core/lib/surface/api_trace.c',
'src/core/lib/surface/byte_buffer.c',
'src/core/lib/surface/byte_buffer_reader.c',
'src/core/lib/surface/call.c',
'src/core/lib/surface/call_details.c',
'src/core/lib/surface/call_log_batch.c',
'src/core/lib/surface/channel.c',
'src/core/lib/surface/channel_init.c',
'src/core/lib/surface/channel_ping.c',
'src/core/lib/surface/channel_stack_type.c',
'src/core/lib/surface/completion_queue.c',
'src/core/lib/surface/completion_queue_factory.c',
'src/core/lib/surface/event_string.c',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.c',
'src/core/lib/surface/server.c',
'src/core/lib/surface/validate_metadata.c',
'src/core/lib/surface/versio |
terencezl/pydass_vasp | pydass_vasp/electronic_structure/dos.py | Python | mit | 12,658 | 0.002844 | import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .helpers import determine_tag_value, figs_assert, initiate_figs, plot_helper_settings
from ..xml_utils import parse
def get_tdos(filepath='DOSCAR', ISPIN=None, Ef=None, plot=False, xlim=None, ylim_upper=None, on_figs=None):
    """
    Get the total density of states, with consideration of spin-polarization.

    Accepts file type 'DOSCAR', or 'vasprun.xml'.

    Parameters
    ----------
    filepath: string
        filepath, default to 'DOSCAR'
        For DOSCAR-type file, can be any string containing 'DOSCAR'.
        For vasprun.xml-type file, can be any string ending with '.xml'.
    ISPIN: int
        user specified ISPIN
        If not given, for DOSCAR-type file, infer from 'OUTCAR'/'INCAR'.
    Ef: float
        user specified Fermi energy (overridden by the value parsed from the
        file in both branches below)
    plot: bool
        whether to plot the data, default to False
    xlim: list
        the range of x-axis, 2 values in a list
    ylim_upper: int/float
        the upper limit of y-axis(, of the spin-combined plot if ISPIN == 2)
    on_figs: list/int
        the current figure numbers to plot to, default to new figures

    Returns
    -------
    a dict, containing
        'data': a pandas dataframe (keyed 'data_spin_up'/'data_spin_down'
        when ISPIN == 2)
        'ax': the axes reference (when plot=True)
    """
    # get data
    if re.match(r".*\.xml", filepath):
        # vasprun.xml branch: pull NEDOS/Ef/ISPIN from the XML tree.
        root = parse(filepath)
        NEDOS = int(root.find("./parameters/separator[@name='dos']/i[@name='NEDOS']").text)
        Ef = float(root.find("./calculation/dos/i[@name='efermi']").text)
        if ISPIN:
            print("Using user specified ISPIN.")
        else:
            ISPIN = int(root.find(
                "./parameters/separator[@name='electronic']/separator[@name='electronic spin']/i[@name='ISPIN']").text)
        if ISPIN == 1:
            # columns: E, total DOS, integrated DOS
            data = np.zeros((NEDOS, 3))
            for n_step, elem in enumerate(root.findall(
                    "./calculation/dos/total/array/set/set[@comment='spin 1']/r")):
                data[n_step] = elem.text.split()
        elif ISPIN == 2:
            data1 = np.zeros((NEDOS, 3))
            for n_step, elem in enumerate(root.findall(
                    "./calculation/dos/total/array/set/set[@comment='spin 1']/r")):
                data1[n_step] = elem.text.split()
            data2 = np.zeros((NEDOS, 3))
            for n_step, elem in enumerate(root.findall(
                    "./calculation/dos/total/array/set/set[@comment='spin 2']/r")):
                data2[n_step] = elem.text.split()
    elif re.match(r".*DOSCAR.*", filepath):
        # DOSCAR branch: line 6 (index 5) holds NEDOS and Ef; the DOS table
        # follows on the next NEDOS lines.
        with open(filepath, 'r') as f:
            DOSCAR = f.readlines()
        for i in range(len(DOSCAR)):
            DOSCAR[i] = DOSCAR[i].split()
        NEDOS = int(DOSCAR[5][2])
        Ef = float(DOSCAR[5][3])
        if ISPIN:
            print("Using user specified ISPIN.")
        else:
            ISPIN = determine_tag_value('ISPIN', filepath)
        data = np.array(DOSCAR[6:6 + NEDOS], dtype=float)
        if ISPIN == 2:
            # spin-polarized DOSCAR columns: E, up, down, up_int, down_int
            data1 = data[:, [0, 1, 3]]
            data2 = data[:, [0, 2, 4]]
    # confluence and data organizing
    # NOTE(review): assumes ISPIN is 1 or 2; any other value leaves
    # return_dict undefined and raises NameError below.
    if ISPIN == 1:
        col_names = ['E', 'tot', 'tot_integrated']
        # shift energies so that Ef sits at zero
        data[:, 0] -= Ef
        return_dict = {'data': pd.DataFrame(**{'columns': col_names, 'data': data})}
    elif ISPIN == 2:
        col_names1 = ['E', 'tot_up', 'tot_integrated_up']
        col_names2 = ['E', 'tot_down', 'tot_integrated_down']
        data1[:, 0] -= Ef
        data2[:, 0] -= Ef
        return_dict = {'data_spin_up': pd.DataFrame(**{'columns': col_names1, 'data': data1}),
                       'data_spin_down': pd.DataFrame(**{'columns': col_names2, 'data': data2}),
                       }
    if plot:
        # start plotting
        figs_assert(on_figs, ISPIN, 'tdos')
        if ISPIN == 1:
            initiate_figs(on_figs)
            plt.plot(data[:, 0], data[:, 1])
            ax = plt.gca()
            plot_helper_settings((xlim, [0, ylim_upper]), 'tdos')
            return_dict.update({'ax': ax})
        elif ISPIN == 2:
            # Plot the combined TDOS
            initiate_figs(on_figs)
            plt.plot(data1[:, 0], data1[:, 1] + data2[:, 1], label='spin up + down')
            ax1 = plt.gca()
            plot_helper_settings((xlim, [0, ylim_upper]), 'tdos')
            # Plot the separated TDOS
            initiate_figs(on_figs)
            plt.plot(data1[:, 0], data1[:, 1], label='spin up')
            plt.plot(data2[:, 0], -data2[:, 1], label='spin down')
            ax2 = plt.gca()
            ylim_upper_sp = None
            ylim_lower_sp = None
            if ylim_upper:
                # split the combined y-range symmetrically for the +/- plot
                ylim_upper_sp = ylim_upper/2.
                ylim_lower_sp = -ylim_upper_sp
            plot_helper_settings((xlim, [ylim_lower_sp, ylim_upper_sp]), 'tdos')
            return_dict.update({'ax_spin_combined': ax1, 'ax_spin_separated': ax2})
    return return_dict
def get_ldos(atom, filepath='DOSCAR', ISPIN=None, LORBIT=None, Ef=None, plot=False, xlim=None, ylim_upper=None,
on_figs=None):
"""
Get the local projected density of states, with consideration of spin-polarization.
Accepts file type 'DOSCAR', or 'vasprun.xml'.
Parameters
----------
atom: int
the atom number in DOSCAR/POSCAR interested, counting from 1
filepath: string
filepath, default to 'DOSCAR'
For DOSCAR-type file, can be any string containing 'DOSCAR'.
For vasprun.xml-type file, can be any string ending with '.xml'.
ISPIN: int
user specified ISPIN
If not given, for DOSCAR-type file, infer from 'OUTCAR'/'INCAR'.
LORBIT: int
user specified LORBIT
If not given, for both DOSCAR- and vasprun.xml-types of file, infer from 'OUTCAR'/'INCAR'. Because there is an
error in vasprun.xml.
Ef: float
user specified Ef
plot: bool
whether to plot the data, default to False
xlim: list
the range of x-axis, 2 values in a list
ylim_upper: int/float
the upper limit of y-axis(, of the spin-combined plot if ISPIN == 2)
on_figs: list/int
the current figure numbers to plot to, default to new figures
Returns
-------
a dict, containing
'data': a dataframe
'ax': the axes reference
"""
# get data
if re.match(r".*\.xml", filepath):
root = parse(filepath)
NEDOS = int(root.find("./parameters/separator[@name='dos']/i[@name='NEDOS']").text)
Ef = float(root.find("./calculation/dos/i[@name='efermi']").text)
if ISPIN:
print("Using user specified ISPIN.")
else:
ISPIN = int(root.find(
"./parameters/separator[@name='electronic']/separator[@name='electronic spin']/i[@name='ISPIN']").text)
# vasprun.xml's LORBIT is not correct
if LORBIT:
print("Using user specified LORBIT.")
else:
LORBIT = determine_tag_value('LORBIT', filepath)
if ISPIN == 1:
if LORBIT == 10 or LORBIT == 0:
data = np.zeros((NEDOS, 4))
elif LORBIT == 11 or LORBIT == 1:
data = np.zeros((NEDOS, 10))
for n_step, elem in enumerate(root.findall(
"./calculation/dos/partial/array/set/set[@comment='ion " + str(
atom) + "']/set[@comment='spin 1']/r")):
data[n_step] = elem.text.split()
elif ISPIN == 2:
if LORBIT == 10 or LORBIT == 0:
data1 = np.zeros((NEDOS, 4))
data2 = np.zeros((NEDOS, 4))
elif LORBIT == 11 or LORBIT == 1:
data1 = np.zeros((NEDOS, 10))
data2 = np.zeros((NEDOS, 10))
for n_step, elem in enumerate(root.findall(
"./calculation/dos/partial/array/set/set[@comment='ion " + str(
atom) + "']/set[@comment='spin 1']/r")):
data1[n_step] = elem.text.split()
for n_step, elem in enumerate(root.findall(
"./calculation/dos/partial/array |
Chilledheart/chromium | tools/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-relimport/pkg/relative.py | Python | bsd-3-clause | 57 | 0 | from | __future__ import absolute_import
fr | om . import mod
|
ldotlopez/appkit | tests/config.py | Python | gpl-2.0 | 2,801 | 0 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Luis López <luis@cuarentaydos.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import unittest
from ldotcommons import config
class TestRecord(unittest.TestCase):
    """Unit tests for config.Record key/value storage."""

    def setUp(self):
        pass

    def test_init_with_args(self):
        a = config.Record({'foo': 1, 'bar': 'x'})
        # Key literal restored: the source had the corrupted token 'fo | o'.
        self.assertEqual(a.get('foo'), 1)
        b = config.Record()
        b.set('foo', 1)
        b.set('bar', 'x')
        self.assertEqual(a, b)

    def test_setget(self):
        s = config.Record()
        s.set('foo', 1)
        s.set('bar', 'x')
        s.set('x.y', [])
        self.assertEqual(s.get('foo'), 1)
        self.assertEqual(s.get('bar'), 'x')
        self.assertEqual(s.get('x.y'), [])

    def test_nonexistent_key(self):
        s = config.Record()
        with self.assertRaises(KeyError):
            s.get('foo')

    def test_delete(self):
        # Deleting a parent key must also remove its dotted children.
        s = config.Record()
        s.set('foo', 1)
        s.set('foo.bar', 2)
        s.delete('foo')
        with self.assertRaises(KeyError):
            s.get('foo.bar')
        with self.assertRaises(KeyError):
            s.get('foo')

    def test_eq(self):
        data = {
            'foo': 1,
            'x.y': 'z',
            'dict': {'a': 'b'}
        }
        a = config.Record(**data.copy())
        b = config.Record(**data.copy())
        self.assertEqual(a, b)

    def test_sub(self):
        # sub() returns a Record of the children under the given prefix.
        x = config.Record({
            'foo': 1,
            'bar.x': 'x',
            'bar.y': 'y',
        })
        y = config.Record({
            'x': 'x',
            'y': 'y',
        })
        self.assertEqual(x.sub('bar'), y)

    def test_children(self):
        x = config.Record({
            'foo': 1,
            'bar.x': 'x',
            'bar.y': 'y',
        })
        self.assertEqual(set(x.children('bar')), set(['x', 'y']))
class TestRecordAttr(unittest.TestCase):
    """Attribute-style access on config.RecordAttr."""

    def test_getset(self):
        record = config.RecordAttr({'foo': 1, 'bar': 'x', 'a.b': 2})
        # Top-level keys and dotted keys are both reachable as attributes.
        self.assertEqual(record.foo, 1)
        self.assertEqual(record.a.b, 2)
# Allow running this test module directly (python config.py).
if __name__ == '__main__':
    unittest.main()
|
pomarec/core | arkos/system/services.py | Python | gpl-3.0 | 12,907 | 0.00031 | """
Classes and functions for management of system-level services.
arkOS Core
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
import configparser
import os
import time
from dbus.exceptions import DBusException
from arkos import conns, signals
from arkos.utilities import shell
class ActionError(Exception):
    """An exception raised when a start/stop action can't be performed.

    :param str etype: error domain, e.g. ``"svc"`` or ``"dbus"``
    :param str emsg: human-readable error message
    """

    def __init__(self, etype, emsg):
        """Initialize the exception."""
        # Forward the message to Exception so str(exc) and logging show it
        # instead of an empty string.
        super().__init__(emsg)
        self.etype = etype
        self.emsg = emsg
class Service:
"""
A class representing a system-level service.
Services can be of type ``systemd`` or ``supervisor``.
"""
def __init__(self, name="", stype="", state="", enabled=False, cfg={}):
"""
Initialize the service.
:param str name: Service name
:param str stype: either ``systemd`` or ``supervisor``
:param str state: Running state of the service
:param bool enabled: Service starts on boot?
:param dict cfg: Config (for supervisor services)
"""
self.name = name
self.stype = stype
self.state = state
self.enabled = enabled
self.cfg = cfg
@property
def sfname(self):
"""Return service file name."""
if self.stype == "supervisor":
return "{0}.ini".format(self.name)
else:
return "{0}.service".format(self.name)
def add(self, enable=True):
"""Add a new Supervisor service."""
signals.emit("services", "pre_add", self)
title = "program:{0}".format(self.name)
c = configparser.RawConfigParser()
c.add_section(title)
for x in self.cfg:
c.set(title, x, self.cfg[x])
with open(os.path.join("/etc/supervisor.d", self.sfname), "w") as f:
c.write(f)
if enable:
| self.enable()
signals.emit("services", "post_add", self)
def start(self):
"""Start service."""
signals.emit("services", "pre_start", self)
if self.stype == "supervisor":
supervisor_ping()
try:
conns.Supervisor.startProcess(self.name)
signals.emit("services", "post_start", self)
except:
| raise ActionError(
"svc", "The service failed to start. Please check "
"`sudo arkosctl svc status {0}`".format(self.name))
else:
# Send the start command to systemd
try:
path = conns.SystemD.LoadUnit(self.sfname)
conns.SystemD.StartUnit(self.sfname, "replace")
except DBusException as e:
raise ActionError("dbus", str(e))
timeout = 0
time.sleep(1)
# Wait for the service to start, raise exception if it fails
while timeout < 10:
data = conns.SystemDConnect(
path, "org.freedesktop.DBus.Properties")
data = data.GetAll("org.freedesktop.systemd1.Unit")
if str(data["ActiveState"]) == "failed":
raise ActionError(
"svc", "The service failed to start. Please check "
"`sudo arkosctl svc status {0}`".format(self.name))
elif str(data["ActiveState"]) == "active":
self.state = "running"
signals.emit("services", "post_start", self)
break
timeout += 1
time.sleep(1)
else:
raise ActionError("svc", "The service start timed out. "
"Please check `sudo arkosctl svc status {0}`"
.format(self.sfname))
def stop(self):
"""Stop service."""
signals.emit("services", "pre_stop", self)
if self.stype == "supervisor":
supervisor_ping()
conns.Supervisor.stopProcess(self.name)
signals.emit("services", "post_stop", self)
self.state = "stopped"
else:
# Send the stop command to systemd
try:
path = conns.SystemD.LoadUnit(self.sfname)
conns.SystemD.StopUnit(self.sfname, "replace")
except DBusException as e:
raise ActionError("dbus", str(e))
timeout = 0
time.sleep(1)
# Wait for the service to stop, raise exception if it fails
while timeout < 10:
data = conns.SystemDConnect(
path, "org.freedesktop.DBus.Properties")
data = data.GetAll("org.freedesktop.systemd1.Unit")
if str(data["ActiveState"]) in ["inactive", "failed"]:
self.state = "stopped"
signals.emit("services", "post_stop", self)
break
timeout + 1
time.sleep(1)
else:
raise ActionError("svc", "The service stop timed out. "
"Please check `sudo arkosctl svc status {0}`"
.format(self.sfname))
def restart(self, real=False):
"""Restart service."""
signals.emit("services", "pre_restart", self)
if self.stype == "supervisor":
supervisor_ping()
conns.Supervisor.stopProcess(self.name, wait=True)
conns.Supervisor.startProcess(self.name)
signals.emit("services", "post_restart", self)
else:
# Send the restart command to systemd
try:
path = conns.SystemD.LoadUnit(self.sfname)
if real:
conns.SystemD.RestartUnit(self.sfname, "replace")
else:
conns.SystemD.ReloadOrRestartUnit(self.sfname, "replace")
except DBusException as e:
raise ActionError("dbus", str(e))
timeout = 0
time.sleep(1)
# Wait for the service to restart, raise exception if it fails
while timeout < 10:
data = conns.SystemDConnect(
path, "org.freedesktop.DBus.Properties")
data = data.GetAll("org.freedesktop.systemd1.Unit")
if str(data["ActiveState"]) == "failed":
raise ActionError(
"svc", "The service failed to restart. Please check "
"`sudo arkosctl svc status {0}`".format(self.name))
elif str(data["ActiveState"]) == "active":
self.state = "running"
signals.emit("services", "post_restart", self)
break
timeout + 1
time.sleep(1)
else:
raise ActionError("svc", "The service restart timed out. "
"Please check `sudo arkosctl svc status {0}`"
.format(self.sfname))
def get_log(self):
"""Get supervisor service logs."""
if self.stype == "supervisor":
supervisor_ping()
s = conns.Supervisor.tailProcessStdoutLog(self.name)
else:
s = shell("systemctl --no-ask-password status {0}"
.format(self.sfname))["stdout"]
return s
def enable(self):
"""Enable service to start on boot."""
if self.stype == "supervisor":
disfsname = "{0}.disabled".format(self.sfname)
supervisor_ping()
if os.path.exists(os.path.join("/etc/supervisor.d", disfsname)):
os.rename(os.path.join("/etc/supervisor.d", disfsname),
os.path.join("/etc/supervisor.d", self.sfname))
conns.Supervisor.restart()
else:
try:
conns.SystemD.EnableUnitFiles([self.sfname], False, True)
except DBusException as e:
raise ActionError("dbus", str(e))
self.enabled = True
def disable(self):
"""Disable service starting |
stackforge/solum | solum/objects/sqlalchemy/image.py | Python | apache-2.0 | 5,222 | 0 | # Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.orm import exc
from solum.objects import image as abstract
from solum.objects.sqlalchemy import models as sql

cfg.CONF.import_opt('operator_project_id',
                    'solum.api.handlers.language_pack_handler',
                    group='api')
# Project id under which operator-provided languagepacks are registered.
# (Restored two corrupted tokens: 'mode | ls' and 'opera | tor_project_id'.)
operator_id = cfg.CONF.api.operator_project_id

LOG = logging.getLogger(__name__)
class Image(sql.Base, abstract.Image):
    """Represent a image in sqlalchemy."""
    __tablename__ = 'image'
    __resource__ = 'images'
    __table_args__ = sql.table_args()
    # Column definitions; `artifact_type == 'language_pack'` rows are the
    # languagepacks queried by the classmethods below.
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    uuid = sa.Column(sa.String(36), nullable=False)
    name = sa.Column(sa.String(100))
    source_uri = sa.Column(sa.String(1024))
    source_format = sa.Column(sa.String(36))
    description = sa.Column(sa.String(255))
    project_id = sa.Column(sa.String(36))
    user_id = sa.Column(sa.String(36))
    tags = sa.Column(sa.Text)
    status = sa.Column(sa.String(12))
    base_image_id = sa.Column(sa.String(36))
    created_image_id = sa.Column(sa.String(36))
    image_format = sa.Column(sa.String(12))
    artifact_type = sa.Column(sa.String(36))
    external_ref = sa.Column(sa.String(1024))
    docker_image_name = sa.Column(sa.String(512))
    @classmethod
    def get_num_of_lps(cls, context):
        """Count this tenant's images in the READY state.

        Returns None when the query raises NoResultFound (the exception is
        only logged, not re-raised).
        """
        try:
            session = Image.get_session()
            oper_result = session.query(cls).filter_by(
                project_id=context.tenant, status='READY')
            cnt = oper_result.count()
            return cnt
        except exc.NoResultFound:
            LOG.debug("Exception encountered in getting count of number"
                      " of languagepacks")
    @classmethod
    def get_lp_by_name_or_uuid(cls, context, name_or_uuid,
                               include_operators_lp=False):
        """Look up a languagepack by UUID first, falling back to name.

        :raises: the not-found error from get_by_name if nothing matches.
        """
        if uuidutils.is_uuid_like(name_or_uuid):
            try:
                session = Image.get_session()
                result = session.query(cls).filter_by(
                    artifact_type='language_pack', uuid=name_or_uuid)
                if include_operators_lp is True:
                    # Also match LPs owned by the operator project.
                    result = result.filter(
                        Image.project_id.in_([operator_id, context.tenant]))
                    return result.one()
                else:
                    return sql.filter_by_project(context, result).one()
            except exc.NoResultFound:
                # A UUID-shaped string may still be a name; retry by name.
                return cls.get_by_name(context, name_or_uuid,
                                       include_operators_lp)
        else:
            return cls.get_by_name(context, name_or_uuid, include_operators_lp)
    @classmethod
    def get_by_name(cls, context, name, include_operators_lp=False):
        """Look up a languagepack by name, optionally including operator LPs."""
        try:
            session = Image.get_session()
            result = session.query(cls).filter_by(
                artifact_type='language_pack', name=name)
            if include_operators_lp is True:
                result = result.filter(
                    Image.project_id.in_([operator_id, context.tenant]))
                return result.one()
            else:
                return sql.filter_by_project(context, result).one()
        except exc.NoResultFound:
            cls._raise_not_found(name)
    @classmethod
    def get_all_languagepacks(cls, context):
        """Return all images that are languagepacks."""
        session = Image.get_session()
        result = session.query(cls)
        result = result.filter_by(artifact_type='language_pack')
        result = sql.filter_by_project(context, result)
        # Include Languagepacks that have been created by the operator, and
        # are in the 'READY' state.
        # The operator LP is identified based on the operator_project_id
        # config setting in solum.conf
        oper_result = session.query(cls)
        oper_result = oper_result.filter_by(artifact_type='language_pack')
        oper_result = oper_result.filter_by(status='READY')
        oper_result = oper_result.filter_by(project_id=operator_id)
        return result.union(oper_result).all()
class ImageList(abstract.ImageList):
    """Represent a list of images in sqlalchemy."""

    @classmethod
    def get_all(cls, context):
        """Return every image visible to this context."""
        query = sql.model_query(context, Image)
        return ImageList(query)

    @classmethod
    def get_all_languagepacks(cls, context):
        """Return every visible image whose artifact_type is 'language_pack'."""
        query = sql.model_query(context, Image)
        lp_query = query.filter_by(artifact_type='language_pack')
        return ImageList(lp_query)
|
knights-lab/shi7 | shi7/shi7_learning.py | Python | agpl-3.0 | 15,977 | 0.006009 | #!/usr/bin/env python
from __future__ import print_function, division
import multiprocessing
import os
import csv
import datetime
import logging
from datetime import datetime
import argparse
import shutil
import math
from glob import glob
import gzip
from shi7 import __version__
from shi7.shi7 import TRUE_FALSE_DICT, read_fastq, axe_adaptors_single_end, axe_adaptors_paired_end, flash_part1, \
flash_part2, split_fwd_rev, match_pairs, link_manicured_names
def make_arg_parser():
    """Build and return the argparse parser for the shi7_learning CLI."""
    cli = argparse.ArgumentParser(
        description='This is the commandline interface for shi7_learning',
        usage='shi7_learning v{version}\nshi7_learning.py -i <input> -o <output> ...'.format(version=__version__))
    cli.add_argument('-i', '--input', help='Set the directory path of the fastq directory OR oligos.txt if splitting', required=True)
    cli.add_argument('-o', '--output', help='Set the directory path of the output (default: cwd)', default=os.getcwd())
    cli.add_argument('--debug', help='Retain all intermediate files (default: Disabled)', dest='debug', action='store_true')
    # Cap the default thread count at 16 even on larger machines.
    cli.add_argument('-t', '--threads', help='Set the number of threads (default: %(default)s)',
                     default=min(multiprocessing.cpu_count(), 16))
    cli.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
    cli.set_defaults()
    return cli
def subsample_fastqs(path_fastqs, num_files=10, num_sequences=1000):
    """For each of the first `num_files` fastq paths, yield a generator of
    at most `num_sequences` records.

    Each file stays open while its yielded generator is being consumed, so
    consume the generators in order, one at a time.
    """
    for file_count, fastq_path in enumerate(path_fastqs):
        if file_count >= num_files:
            break
        with open(fastq_path) as handle:
            records = read_fastq(handle)
            yield limit_fastq(records, num_sequences=num_sequences)
def limit_fastq(fastq_gen, num_sequences=1000):
    """Yield at most `num_sequences` records from `fastq_gen`.

    Consumes exactly as many items from the underlying generator as are
    yielded; stops early (and cleanly) if the generator is exhausted.
    """
    for _ in range(num_sequences):
        try:
            record = next(fastq_gen)
        except StopIteration:
            return
        yield record
def get_seq_length_qual_scores(path_fastqs, output_path, num_files=10, num_sequences=1000):
    """Write subsampled copies of the fastqs into `output_path` and return
    (average sequence length, average per-base quality score).

    The quality average is total quality over total bases, not per record.
    """
    subsampled = subsample_fastqs(path_fastqs, num_files=num_files,
                                  num_sequences=num_sequences)
    total_bases = 0.
    total_quality = 0
    total_records = 0.
    for fastq_path, record_gen in zip(path_fastqs, subsampled):
        out_name = os.path.join(output_path, os.path.basename(fastq_path))
        with open(out_name, 'w') as outf:
            for header, sequence, quality in record_gen:
                outf.write("@%s\n%s\n+\n%s\n" % (header, sequence, quality))
                total_bases += len(sequence)
                total_quality += sum(ord(ch) for ch in quality)
                total_records += 1.
    return total_bases / total_records, total_quality / total_bases
def count_num_lines(path):
    """Return the number of lines in the text file at `path`."""
    with open(path) as handle:
        return sum(1 for _ in handle)
def get_file_size(path):
    """Return the size of the file at `path`, in bytes."""
    # os.path.getsize is defined as os.stat(path).st_size.
    return os.stat(path).st_size
def check_sequence_name(path_R1, path_R2):
    """Return True if every R1/R2 header pair differs at exactly one
    character, and that character is numerically one greater in R2
    (the conventional read-pair marker, e.g. '...1' vs '...2').

    :param str path_R1: path to the forward-read fastq
    :param str path_R2: path to the reverse-read fastq
    """
    with open(path_R1) as path_inf_R1, open(path_R2) as path_inf_R2:
        fastq_gen_R1 = read_fastq(path_inf_R1)
        fastq_gen_R2 = read_fastq(path_inf_R2)
        for gen_R1, gen_R2 in zip(fastq_gen_R1, fastq_gen_R2):
            title_R1, title_R2 = gen_R1[0], gen_R2[0]
            if len(title_R1) != len(title_R2):
                return False
            diff_idx = [i for i, (c1, c2) in enumerate(zip(title_R1, title_R2))
                        if c1 != c2]
            if len(diff_idx) != 1:
                return False
            char_R1 = title_R1[diff_idx[0]]
            char_R2 = title_R2[diff_idx[0]]
            # Previously a non-digit mismatch raised ValueError from int();
            # treat it as "headers do not pair up" instead of crashing.
            if not (char_R1.isdigit() and char_R2.isdigit()):
                return False
            if int(char_R2) - int(char_R1) != 1:
                return False
    return True
def detect_paired_end(path_fastqs):
    """Guess whether the run is paired-end from the fastq file list.

    Returns (is_paired, pair_obj) where pair_obj is the structure produced
    by match_pairs (or a 4-element list with None placeholders when pairing
    is impossible).
    """
    # str.endswith accepts a tuple of suffixes: one call replaces the
    # previous four-way `or` chain.
    fastq_exts = ('.fastq', '.fq', '.fastq.gz', '.fq.gz')
    path_fastqs = [f for f in path_fastqs if f.endswith(fastq_exts)]
    # An odd number of fastq files can never be fully paired.
    if len(path_fastqs) % 2 == 1:
        return False, [path_fastqs, None, None, None]
    pair_obj = match_pairs(path_fastqs, True)
    path_fastqs = pair_obj[0]
    # `is None` replaces the non-idiomatic `== None` comparison.
    if pair_obj[1] is None:
        return False, pair_obj
    return True, pair_obj
def get_directory_size(path):
    """Return the combined size, in bytes, of the entries directly in `path`."""
    # get_file_size(p) is just os.path.getsize(p); inlined here.
    return sum(os.path.getsize(os.path.join(path, entry))
               for entry in os.listdir(path))
def remove_directory_contents(path):
    """Delete every entry directly inside the directory `path`."""
    for entry in os.listdir(path):
        os.remove(os.path.join(path, entry))
def choose_axe_adaptors(path_subsampled_fastqs, paired_end, output_path, threads):
    """Try each known Trimmomatic adapter set on the subsampled FASTQs and
    pick the one that shrinks the output the most.

    An adapter set "wins" if trimming with it produces the smallest output
    directory; it is only accepted when it removes at least 0.5% of the
    original bytes, otherwise no trimming is done.

    Returns ``(best_adapter_name_or_None, resulting_size, file_list)``.
    """
    adapters = ['TruSeq2', 'TruSeq3', 'TruSeq3-2', 'Nextera']
    # Cap the thread count at the machine's CPUs and a hard limit of 16.
    threads = min(threads, multiprocessing.cpu_count(), 16)
    original_size = get_directory_size(os.path.dirname(path_subsampled_fastqs[0]))
    logging.info('Original size of the subsampled_fastqs = ' + str(original_size))
    best_size = original_size
    best_adap = None
    for adapter in adapters:
        if paired_end:
            axe_adaptors_paired_end(path_subsampled_fastqs, output_path, adapter, threads, shell=False)
        else:
            axe_adaptors_single_end(path_subsampled_fastqs, output_path, adapter, threads, shell=False)
        fastqs_path_size = get_directory_size(output_path)
        logging.info("Adapters: {adapter}\tFile Size: {filesize}".format(adapter=adapter, filesize=fastqs_path_size))
        if fastqs_path_size <= best_size:
            best_size = fastqs_path_size
            best_adap = adapter
    # Accept the winner only when it saved at least 0.5% of the input bytes.
    if best_size < 0.995*original_size:
        # Actually write the best files again for use in later steps
        logging.info("Best Adapters: {adapter}\tFile Size: {filesize}".format(adapter=best_adap, filesize=best_size))
        if paired_end:
            files = axe_adaptors_paired_end(path_subsampled_fastqs, output_path, best_adap, threads, shell=False)
        else:
            files = axe_adaptors_single_end(path_subsampled_fastqs, output_path, best_adap, threads, shell=False)
        return best_adap, best_size, files
    else:
        return None, original_size, path_subsampled_fastqs
def flash_stitchable_and_check_outies(adapter_output_filenames, flash_output_path, threads):
    """Run FLASH on the adapter-trimmed pairs and decide whether the reads
    are stitchable and whether "outie" orientation should be allowed.

    Returns ``(stitchable, allow_outies, path_flash_fqs)``: stitchable when
    at least 75% of the merged files retain more than 30% of the original
    reads, allow_outies when at least 75% of FLASH runs report >= 15%
    outie-oriented pairs.
    """
    flash_output_str = flash_part1(adapter_output_filenames, flash_output_path, max_overlap=700, \
        min_overlap=10, allow_outies=True, threads=threads, shell=False)
    allow_outies_count = 0
    for flash_out in flash_output_str:
        flash_str_list = flash_out.strip().split('\n')
        # The outie statistics line sits 8 lines from the end of FLASH's
        # stdout; the percentage is inside parentheses, e.g. "(12.34%)".
        outies_info = flash_str_list[-8]
        outies_percent = float(outies_info[outies_info.find('(')+1:outies_info.find('%')])
        if outies_percent >= 15:
            allow_outies_count += 1
    path_flash_fqs = flash_part2(flash_output_str, flash_output_path)
    path_R1_fastqs, _ = split_fwd_rev(adapter_output_filenames)
    matched_count = 0
    for original_fq, flash_fq in zip(path_R1_fastqs, path_flash_fqs):
        # A merged file counts as "matched" if it kept more than 30% of the
        # lines of the corresponding R1 input.
        if count_num_lines(flash_fq) > count_num_lines(original_fq)*0.3:
            matched_count = matched_count + 1
    return matched_count/len(path_flash_fqs) >= 0.75, allow_outies_count/len(flash_output_str) >= 0.75, path_flash_fqs
def flash_check_cv(flash_output_path):
    """Average the coefficient of variation and mean fragment length over
    all FLASH ``.hist`` files found in *flash_output_path*.

    Each ``.hist`` file is tab-separated with rows ``(length, count)``.
    For each file this computes the weighted mean, the sample standard
    deviation and their ratio (CV), then returns the per-file averages as
    ``(mean_cv, mean_length)``.
    """
    hist_files = [os.path.join(flash_output_path, f)
                  for f in os.listdir(flash_output_path) if f.endswith('.hist')]
    total_cv = total_mean = 0
    for hist_file in hist_files:
        with open(hist_file) as inf:
            reader = csv.reader(inf, delimiter="\t")
            # Renamed from `sum`/`cnt`/`x2f`: the original shadowed the
            # builtin sum().
            count_total = 0      # total number of fragments
            length_sum = 0       # sum of lengths, weighted by count
            length_sq_sum = 0    # sum of squared lengths, weighted by count
            for row in reader:
                length, count = int(row[0]), int(row[1])
                count_total = count_total + count
                length_sum = length_sum + length * count
                length_sq_sum = length_sq_sum + length * length * count
        mean = length_sum / count_total
        # Sample (n-1) standard deviation via the sum-of-squares identity.
        std = math.sqrt((length_sq_sum - length_sum * length_sum / count_total) / (count_total - 1))
        cv = std / mean
        total_cv = total_cv + cv
        total_mean = total_mean + mean
    total_files = len(hist_files)
    return total_cv / total_files, total_mean / total_files
def trimmer_learning(flash_output_filenames):
filter_q_sum = 0
trim_q_sum = 0
totbases = 0
tottrim = 0
num = 0
for fq_path in flash_output_filenames:
with open(fq_path) as fq_inf:
fq_gen = read_fastq(fq_inf)
for gen in fq_gen:
num = num + 1
qualities = gen[2]
totbases = totbases + len(qualities)
qual |
pybuilder/pybuilder | src/main/python/pybuilder/_vendor/virtualenv/config/ini.py | Python | apache-2.0 | 2,807 | 0.001425 | from __future__ import absolute_import, unicode_literals
import logging
import os
from ...platformdirs import user_config_dir
from ..info import PY3
from ..util import ConfigParser
from ..util.path import Path
from ..util.six import ensure_str
from .convert import convert
class IniConfig(object):
    """Read virtualenv option defaults from the user-level ``virtualenv.ini``.

    The file location defaults to the platform user-config directory and can
    be overridden with the ``VIRTUALENV_CONFIG_FILE`` environment variable.
    Parsed/converted values are cached per ``(key, as_type)`` pair.
    """

    VIRTUALENV_CONFIG_FILE_ENV_VAR = ensure_str("VIRTUALENV_CONFIG_FILE")
    # Human-readable status used by the epilog message below.
    STATE = {None: "failed to parse", True: "active", False: "missing"}

    section = "virtualenv"

    def __init__(self, env=None):
        env = os.environ if env is None else env
        config_file = env.get(self.VIRTUALENV_CONFIG_FILE_ENV_VAR, None)
        self.is_env_var = config_file is not None
        config_file = (
            Path(config_file)
            if config_file is not None
            else Path(user_config_dir(appname="virtualenv", appauthor="pypa")) / "virtualenv.ini"
        )
        self.config_file = config_file
        self._cache = {}

        exception = None
        self.has_config_file = None
        try:
            self.has_config_file = self.config_file.exists()
        except OSError as exc:
            exception = exc
        else:
            if self.has_config_file:
                self.config_file = self.config_file.resolve()
                self.config_parser = ConfigParser.ConfigParser()
                try:
                    self._load()
                    self.has_virtualenv_section = self.config_parser.has_section(self.section)
                except Exception as exc:
                    exception = exc
        # Parsing problems are reported but never raised: a broken config
        # file must not prevent virtualenv from running.
        if exception is not None:
            logging.error("failed to read config file %s because %r", config_file, exception)

    def _load(self):
        """Parse the ini file (read_file on py3, readfp on py2)."""
        with self.config_file.open("rt") as file_handler:
            reader = getattr(self.config_parser, "read_file" if PY3 else "readfp")
            reader(file_handler)

    def get(self, key, as_type):
        """Return ``(value, source)`` for *key* converted to *as_type*,
        or None when the key is absent or conversion fails."""
        cache_key = key, as_type
        if cache_key in self._cache:
            return self._cache[cache_key]
        # noinspection PyBroadException
        try:
            source = "file"
            raw_value = self.config_parser.get(self.section, key.lower())
            value = convert(raw_value, as_type, source)
            result = value, source
        except Exception:
            result = None
        self._cache[cache_key] = result
        return result

    def __bool__(self):
        # Truthy only when the file exists AND has a [virtualenv] section.
        return bool(self.has_config_file) and bool(self.has_virtualenv_section)

    @property
    def epilog(self):
        """Help-text epilog describing the config file's status."""
        msg = "{}config file {} {} (change{} via env var {})"
        return msg.format(
            "\n",
            self.config_file,
            self.STATE[self.has_config_file],
            "d" if self.is_env_var else "",
            self.VIRTUALENV_CONFIG_FILE_ENV_VAR,
        )
|
fmuzf/python_hk_glazer | hk_glazer/test/test.py | Python | mit | 1,705 | 0.022287 | from nose.tools import with_setup
import os
import hk_glazer as js2deg
import subprocess
import json
class TestClass:
    """Integration tests for hk_glazer: library use and CLI behaviour."""

    @classmethod
    def setup_class(cls):
        # Test fixtures live in a `data` directory next to this file.
        cls.here = os.path.dirname(__file__)
        cls.data = cls.here + '/data'

    def test_1(self):
        '''Test 1: Check that json_to_degree works when imported'''
        with open(self.data + "/json_test_in.json") as config_file:
            config_dict = json.load(config_file)
        gen_str = js2deg.dict_to_dat(config_dict)
        with open(self.data + "/json_test_out.txt") as verif_file:
            test_str = verif_file.read()
        assert(test_str == gen_str)
        pass

    def test_2(self):
        '''Test 2: Check command line execution when saving to file'''
        cmd = os.path.abspath(self.here + '/../../bin/hk_glazer')
        print(cmd)
        # -s allows silent overwrite; output is compared against the fixture.
        subprocess.check_call([cmd, "js2degree", self.data + "/json_test_in.json", "-o=test2.txt", "-s"])
        with open("test2.txt") as file:
            gen_str = file.read()
        with open(self.data + "/json_test_out.txt") as file:
            test_str = file.read()
        assert(test_str == gen_str)
        os.remove("test2.txt")
        pass

    def test_3(self):
        '''Test 3: Command line execution when outfile already exists'''
        cmd = os.path.abspath(self.here + '/../../bin/hk_glazer')
        subprocess.check_call([cmd, "js2degree", self.data + "/json_test_in.json", "-o=test3.txt", "-s"])
        try:
            # Second run without -s must refuse to overwrite and exit non-zero.
            subprocess.check_call([cmd,"js2degree", self.data + "/json_test_in.json", "-o=test3.txt"])
        except Exception as e:
            #print(type(e))
            assert(type(e) == subprocess.CalledProcessError)
            pass
        else:
            assert(False)
        finally:
            os.remove("test3.txt")
|
edickie/ciftify | tests/test_ciftify_recon_all.py | Python | mit | 15,712 | 0.005474 | #!/usr/bin/env python3
import unittest
import logging
import importlib
import copy
import os
from docopt import docopt
from unittest.mock import patch
import pytest
import ciftify.utils
logging.disable(logging.CRITICAL)
ciftify_recon_all = importlib.import_module('ciftify.bin.ciftify_recon_all')
class ConvertFreesurferSurface(unittest.TestCase):
    """Unit tests for ciftify_recon_all.convert_freesurfer_surface: each test
    mocks out `run` and inspects the wb_command invocations it would make."""

    meshes = ciftify_recon_all.define_meshes('/somewhere/hcp/subject_1',
            "164", ["32"], '/tmp/temp_dir', False)

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_secondary_type_option_adds_to_set_structure_command(self, mock_run):
        secondary_type = 'GRAY_WHITE'
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
                surface_secondary_type=secondary_type)

        assert mock_run.call_count >= 1
        arg_list = mock_run.call_args_list

        set_structure_present = False
        for item in arg_list:
            args = item[0][0]
            if '-set-structure' in args:
                set_structure_present = True
                assert '-surface-secondary-type' in args
                assert secondary_type in args
        # If this fails the wb_command -set-structure call is not being made
        # at all. Is expected at least once regardless of secondary-type option
        assert set_structure_present

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_secondary_type_not_set_if_option_not_used(self, mock_run):
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'])

        assert mock_run.call_count >= 1
        arg_list = mock_run.call_args_list

        set_structure_present = False
        for item in arg_list:
            args = item[0][0]
            if '-set-structure' in args:
                set_structure_present = True
                assert '-surface-secondary-type' not in args
        # If this fails the wb_command -set-structure call is not being made
        # at all. Is expected at least once regardless of secondary-type option
        assert set_structure_present

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_wbcommand_surface_apply_affine_called_when_cras_option_set(self,
            mock_run):
        cras_file = '/somewhere/cras.mat'
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
                cras_mat=cras_file)

        assert mock_run.call_count >= 1
        arg_list = mock_run.call_args_list

        surface_apply_calls = 0
        for item in arg_list:
            args = item[0][0]
            if '-surface-apply-affine' in args and cras_file in args:
                surface_apply_calls += 1
        # The wb_command -surface-apply-affine command should be run once for
        # each hemisphere
        assert surface_apply_calls == 2

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_no_wbcommand_added_when_cras_option_not_set(self, mock_run):
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'])

        assert mock_run.call_count >= 1
        arg_list = mock_run.call_args_list

        surface_apply_calls = 0
        for item in arg_list:
            args = item[0][0]
            if '-surface-apply-affine' in args:
                surface_apply_calls += 1

        assert surface_apply_calls == 0

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_add_to_spec_option_adds_wbcommand_call(self, mock_run):
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
                add_to_spec=True)

        assert mock_run.call_count >= 1
        arg_list = mock_run.call_args_list

        spec_added_calls = 0
        for item in arg_list:
            args = item[0][0]
            if '-add-to-spec-file' in args:
                spec_added_calls += 1
        # Should add one call for each hemisphere
        assert spec_added_calls == 2

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_add_to_spec_option_not_present_when_option_not_set(self, mock_run):
        ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
                '/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
                add_to_spec=False)

        assert mock_run.call_count >= 1
        arg_list = mock_run.call_args_list

        spec_added_calls = 0
        for item in arg_list:
            args = item[0][0]
            if '-add-to-spec-file' in args:
                spec_added_calls += 1

        assert spec_added_calls == 0
class CreateRegSphere(unittest.TestCase):
    """Tests for ciftify_recon_all.create_reg_sphere across registration modes."""

    @patch('ciftify.bin.ciftify_recon_all.run_MSMSulc_registration')
    @patch('ciftify.bin.ciftify_recon_all.run_fs_reg_LR')
    def test_reg_sphere_is_not_set_to_none_for_any_mode(self, mock_fs_reg,
            mock_msm_reg):
        """
        Should fail if MSMSulc registration is implemented without supplying a
        value for reg_sphere
        """
        # settings stub, to allow tests to be written.
        class Settings(object):
            def __init__(self, name):
                self.high_res = 999
                self.reg_name = name
                self.ciftify_data_dir = '/somedir/'
                self.msm_config = None

        # Test reg_sphere set when in FS mode
        settings = Settings('FS')
        meshes = {'AtlasSpaceNative' : ''}
        subject_id = 'some_id'

        reg_sphere = ciftify_recon_all.create_reg_sphere(settings, subject_id, meshes)
        assert reg_sphere is not None

        # Test reg_sphere set when in MSMSulc mode
        settings = Settings('MSMSulc')
        reg_sphere = ciftify_recon_all.create_reg_sphere(settings, subject_id, meshes)
        assert reg_sphere is not None
class CopyAtlasRoiFromTemplate(unittest.TestCase):
    """Tests for ciftify_recon_all.copy_atlas_roi_from_template."""

    @patch('ciftify.bin.ciftify_recon_all.link_to_template_file')
    def test_does_nothing_when_roi_src_does_not_exist(self, mock_link):
        class Settings(object):
            def __init__(self):
                self.subject = self.Subject()
                self.ciftify_data_dir = '/someotherpath/ciftify/data'
                self.work_dir = '/somepath/hcp'

            class Subject(object):
                def __init__(self):
                    # NOTE(review): assigns a local, not `self.id` — looks
                    # like a stub typo; left as-is since the test passes.
                    id = 'some_id'

        settings = Settings()
        mesh_settings = {'meshname' : 'some_mesh'}

        ciftify_recon_all.copy_atlas_roi_from_template(settings, mesh_settings)

        assert mock_link.call_count == 0
class DilateAndMaskMetric(unittest.TestCase):
    """Tests for ciftify_recon_all.dilate_and_mask_metric."""

    @patch('ciftify.bin.ciftify_recon_all.run')
    def test_does_nothing_when_dscalars_map_doesnt_mask_medial_wall(self,
            mock_run):
        # Stubs to allow testing
        dscalars = {'some_map' : {'mask_medialwall' : False}}
        mesh = {'tmpdir' : '/tmp/temp_dir',
                'meshname' : 'some_mesh'}

        ciftify_recon_all.dilate_and_mask_metric('some_id', mesh, dscalars)

        assert mock_run.call_count == 0
@patch('os.makedirs')
@patch('ciftify.config.find_fsl')
class TestSettings(unittest.TestCase):
arguments = docopt(ciftify_recon_all.__doc__,
'--hcp-data-dir /somepath/pipelines/hcp --fs-subjects-dir /somepath/pipelines/freesurfer --surf-reg FS STUDY_SITE_ID_01')
subworkdir = '/somepath/pipelines/hcp/STUDY_SITE_ID_01'
yaml_config = {'high_res' : "164",
'low_res' : ["32"],
'grayord_res' : [2],
'dscalars' : {},
'registration' : {'src_dir' : 'T1w',
'dest_dir' : 'MNINonLinear',
'xfms_dir' : 'MNINonLinear/xfms'},
'FSL_fnirt' : {'2mm' : {'FNIRTConfig' : 'etc/flirtsch/T1_2_MNI152_2mm.cnf'}}}
def set_mock_env(self, mock_ciftify, mock_fsl, mock_makedirs):
# This is to |
"""Rewrite a CSV, storing the file name (last URL path component of column 1)
into the column given on the command line.

Usage: extractFileName.py <in.csv> <target-column-index> <out.csv>
"""
import unicodecsv as csv
import sys

# Parse the target column index once, not per row as before.
tcolumn = int(sys.argv[2])

# `with` guarantees both handles close even on error (the output file was
# previously opened without a context manager and leaked on exceptions).
with open(sys.argv[1], 'rb') as csvfile, open(sys.argv[3], 'wb') as csvoutfile:
    cityreader = csv.reader(csvfile, delimiter=',')
    citywriter = csv.writer(csvoutfile, delimiter=',')

    # Copy the heading row through unchanged.
    headings = next(cityreader, None)
    citywriter.writerow(headings)

    for inrow in cityreader:
        # File name = last path component of the URL in column 1.
        filename = inrow[1].split('/')[-1]
        if (len(inrow) - 1) < tcolumn:
            inrow.append(filename)
        else:
            inrow[tcolumn] = filename
        citywriter.writerow(inrow)
wulczer/flvlib | lib/flvlib/scripts/cut_flv.py | Python | mit | 7,154 | 0 | import sys
import logging
from optparse import OptionParser
from flvlib import __versionstr__
from flvlib.constants import TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT
from flvlib.constants import FRAME_TYPE_KEYFRAME
from flvlib.constants import H264_PACKET_TYPE_SEQUENCE_HEADER
from flvlib.constants import H264_PACKET_TYPE_NALU
from flvlib.astypes import MalformedFLV, FLVObject
from flvlib.tags import FLV, EndOfFile, AudioTag, VideoTag, ScriptTag
log = logging.getLogger('flvlib.cut-flv')
class CuttingAudioTag(AudioTag):
    """AudioTag that records the file offset of the first media tag seen."""

    def parse(self):
        parent = self.parent_flv
        AudioTag.parse(self)
        # Remember where real media content starts so the cutter can copy
        # everything before it (header + metadata) verbatim.
        if not parent.first_media_tag_offset:
            parent.first_media_tag_offset = self.offset
class CuttingVideoTag(VideoTag):
    """VideoTag that flags video presence and tracks the first media tag."""

    def parse(self):
        parent = self.parent_flv
        VideoTag.parse(self)
        parent.no_video = False
        # H.264 sequence headers are decoder configuration, not media
        # content, so they do not count as the first media tag.
        if (not parent.first_media_tag_offset and
            self.h264_packet_type != H264_PACKET_TYPE_SEQUENCE_HEADER):
            parent.first_media_tag_offset = self.offset
# Maps FLV tag type constants onto the cutting-aware tag classes above;
# script tags need no special handling and use the stock class.
tag_to_class = {
    TAG_TYPE_AUDIO: CuttingAudioTag,
    TAG_TYPE_VIDEO: CuttingVideoTag,
    TAG_TYPE_SCRIPT: ScriptTag
}
class CuttingFLV(FLV):
    """FLV parser specialised for cutting: tracks keyframes, whether any
    video was seen, and the offset of the first real media tag."""

    def __init__(self, f):
        FLV.__init__(self, f)
        self.metadata = None
        self.keyframes = FLVObject()
        self.keyframes.filepositions = []
        self.keyframes.times = []
        # Flipped to False by CuttingVideoTag.parse() on the first video tag.
        self.no_video = True
        self.audio_tag_number = 0
        self.first_media_tag_offset = None

    def tag_type_to_class(self, tag_type):
        """Return the cutting-aware tag class for *tag_type*."""
        try:
            return tag_to_class[tag_type]
        except KeyError:
            raise MalformedFLV("Invalid tag type: %d", tag_type)
def cut_file(inpath, outpath, start_time, end_time):
    """Cut the FLV at *inpath* to the [start_time, end_time] timestamp range
    and write the result to *outpath*.

    The original header and metadata are copied verbatim; the media stream
    restarts at the first keyframe after *start_time* so the output remains
    decodable.  Returns True on success, False on any error.
    """
    log.debug("Cutting file `%s' into file `%s'", inpath, outpath)

    try:
        f = open(inpath, 'rb')
    except IOError, (errno, strerror):
        log.error("Failed to open `%s': %s", inpath, strerror)
        return False

    try:
        fo = open(outpath, 'wb')
    except IOError, (errno, strerror):
        log.error("Failed to open `%s': %s", outpath, strerror)
        return False

    # Normalise the time window: 0 means "from the start", -1 "to the end".
    if start_time is None:
        start_time = 0
    else:
        start_time = int(start_time)
    if end_time is None:
        end_time = -1
    else:
        end_time = int(end_time)

    flv = CuttingFLV(f)
    tag_iterator = flv.iter_tags()
    last_tag = None
    tag_after_last_tag = None
    first_keyframe_after_start = None

    try:
        while True:
            tag = tag_iterator.next()
            # some buggy software, like gstreamer's flvmux, puts a metadata tag
            # at the end of the file with timestamp 0, and we don't want to
            # base our duration computation on that
            if tag.timestamp != 0 and (
                tag.timestamp <= end_time or end_time == -1):
                last_tag = tag
            elif tag_after_last_tag is None and tag.timestamp != 0:
                tag_after_last_tag = tag
            if not first_keyframe_after_start and tag.timestamp > start_time:
                if isinstance(tag, VideoTag):
                    # Only a real keyframe NALU is a valid restart point.
                    if (tag.frame_type == FRAME_TYPE_KEYFRAME and
                        tag.h264_packet_type == H264_PACKET_TYPE_NALU):
                        first_keyframe_after_start = tag
                elif flv.no_video:
                    # Audio-only file: any tag past start_time will do.
                    first_keyframe_after_start = tag
    except MalformedFLV, e:
        message = e[0] % e[1:]
        log.error("The file `%s' is not a valid FLV file: %s", inpath, message)
        return False
    except EndOfFile:
        log.error("Unexpected end of file on file `%s'", inpath)
        return False
    except StopIteration:
        pass

    if not flv.first_media_tag_offset:
        log.error("The file `%s' does not have any media content", inpath)
        return False

    if not last_tag:
        log.error("The file `%s' does not have any content with a "
                  "non-zero timestamp", inpath)
        return False

    if not first_keyframe_after_start:
        log.error("The file `%s' has no keyframes greater than start time %d",
                  inpath, start_time)
        return False

    log.debug("Creating the output file")
    log.debug("First tag to output %s", first_keyframe_after_start)
    log.debug("Last tag to output %s", last_tag)
    log.debug("Tag after last tag %s", tag_after_last_tag)

    # Copy the header/metadata prefix, then the media range byte-for-byte.
    f.seek(0)
    log.debug("copying up to %d bytes", flv.first_media_tag_offset)
    fo.write(f.read(flv.first_media_tag_offset))
    log.debug("seeking to %d bytes", first_keyframe_after_start.offset)
    if tag_after_last_tag:
        end_offset = tag_after_last_tag.offset
    else:
        f.seek(0, 2)
        end_offset = f.tell()
    log.debug("end offset %d", end_offset)
    f.seek(first_keyframe_after_start.offset)
    copy_bytes = end_offset - first_keyframe_after_start.offset
    log.debug("copying %d bytes", copy_bytes)
    fo.write(f.read(copy_bytes))

    f.close()
    fo.close()

    return True
def process_options():
    """Parse and validate command-line options; returns (options, args)."""
    usage = "%prog file outfile"
    description = ("Cut out part of a FLV file. Start and end times are "
                   "timestamps that will be compared to the timestamps "
                   "of tags from inside the file. Tags from outside of the "
                   "start/end range will be discarded, taking care to always "
                   "start the new file with a keyframe. "
                   "The script accepts one input and one output file path.")
    version = "%%prog flvlib %s" % __versionstr__
    parser = OptionParser(usage=usage, description=description,
                          version=version)
    parser.add_option("-s", "--start-time", help="start time to cut from")
    parser.add_option("-e", "--end-time", help="end time to cut to")
    parser.add_option("-v", "--verbose", action="count",
                      default=0, dest="verbosity",
                      help="be more verbose, each -v increases verbosity")
    options, args = parser.parse_args(sys.argv)
    if len(args) < 2:
        parser.error("You have to provide an input and output file path")
    if not options.start_time and not options.end_time:
        parser.error("You need to provide at least "
                     "one of start time or end time ")
    # Clamp verbosity to the number of defined log levels (ERROR..DEBUG).
    if options.verbosity > 3:
        options.verbosity = 3
    log.setLevel({0: logging.ERROR, 1: logging.WARNING,
                  2: logging.INFO, 3: logging.DEBUG}[options.verbosity])
    return options, args
def cut_files():
    """Parse the command line and cut the input file; returns True on success."""
    options, args = process_options()
    return cut_file(args[1], args[2], options.start_time, options.end_time)
def main():
    """Script entry point: run the cut and translate the outcome into an
    exit status (0 success, 1 failure, 2 I/O error, 130 on Ctrl-C)."""
    try:
        outcome = cut_files()
    except KeyboardInterrupt:
        # give the right exit status, 128 + signal number
        # signal.SIGINT = 2
        sys.exit(128 + 2)
    except EnvironmentError, (errno, strerror):
        try:
            print >> sys.stderr, strerror
        except StandardError:
            pass
        sys.exit(2)

    if outcome:
        sys.exit(0)
    else:
        sys.exit(1)
|
pwyliu/packagecloud-poll | packagecloudpoll/poll.py | Python | mit | 4,074 | 0.003191 | """packagecloud-poll
Packagecloud-poll repeatedly polls the packagecloud API, looking for a specific package filename to appear. It is
intended to be used in continuous integration/continuous deployment scenarios where we want to block until we are sure
a package has been indexed and is avaiable before continuing.
All arguments are mandatory except for --timeout, --poll_interval, --page_interval and --log-level.
Increased productivity gains high favour from the machine god.
Usage:
    packagecloud-poll --user user --repo repo_name --type type --distro distro --distro_version distro_version --arch arch --pkg_name pkg_name --filename filename [--timeout timeout] [--poll_interval interval] [--page_interval interval] [--log-level log_level]
packagecloud-poll --help
packagecloud-poll --version
Options:
--user <user> Packagecloud user.
--repo <repo_name> Packagecloud repository.
--type <type> Package type. (i.e. rpm or deb)
--distro <distro> Package distro. (i.e. ubuntu)
--distro_version <distro_version> Package distro version. (i.e. precise)
--arch <arch> Package arch. (i.e. amd64 or x86_64)
--pkg_name <pkg_name> Name of the package to poll for.
--filename <filename> File name of the package to poll for. (i.e mystuff_v5.3_precise_amd64.deb)
--timeout <timeout> Time in seconds to poll for [default: 600].
--poll_interval <interval> Polling interval in seconds [default: 30].
--page_interval <interval> API pagination interval. Adjust if you are worried about hitting the packagecloud API too fast. [default: 1].
--log-level <log_level> Set output log level. One of DEBUG, INFO, WARN, ERROR or CRITICAL [default: INFO].
--help Show this screen.
--version Show version.
"""
import logging
import sys
from datetime import datetime
from docopt import docopt, DocoptExit
import config
import pkgcloudapi
import utils
from ._version import __version__
def main():
    """Entry point: parse arguments, configure logging, poll packagecloud
    until the requested package filename appears or the timeout expires.

    Exit codes: 0 found, 1 bad arguments/log level or not found (via
    utils.abort), 130 on Ctrl-C.
    """
    # Load args
    try:
        args = docopt(__doc__, version=__version__)
    except DocoptExit as ex:
        # Sadly, docopt doesn't return exactly which argument was invalid.
        sys.stderr.write('[{}] ERROR: Invalid argument.\n\n{}'.format(datetime.now(), ex))
        sys.exit(1)

    # Set up logger
    logger = logging.getLogger(__package__)
    log_level = getattr(logging, args['--log-level'].upper(), None)

    # if log_level is DEBUG, set it for everything so Requests will log what
    # it's doing too. Else just configure logger the normal way.
    if log_level == logging.DEBUG:
        logging.basicConfig(
            level=log_level,
            format='[%(asctime)s] %(levelname)s: %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p'
        )
    else:
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            fmt='[%(asctime)s] %(levelname)s: %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p'
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        try:
            logger.setLevel(log_level)
        except TypeError:
            # getattr above returned None for an unknown level name.
            sys.stderr.write('[{}] ERROR: Invalid log level specified: {}'.format(datetime.now(), args['--log-level']))
            sys.exit(1)

    try:
        # validate command line args and check environment variables
        args = config.validate_args(args)
        env = config.load_env()

        # engage
        logger.info("ENGAGE")
        logger.debug("Using arguments: \n{}".format(args))

        if pkgcloudapi.look_for_package(env, args):
            logger.info("Success. Found filename {}.".format(args['--filename']))
        else:
            utils.abort("Filename {} was not found during polling period.".format(args['--filename']))

    except KeyboardInterrupt:
        logger.info("\nOkay, bye.\n")
        sys.exit(130)

    # Done
    sys.exit(0)
|
eusoubrasileiro/fatiando | setup.py | Python | bsd-3-clause | 3,101 | 0 | """
Build extension modules, package and install Fatiando.
"""
import sys
import os
from distutils.core import setup
from distutils.extension import Extension
import numpy
import versioneer
# Versioneer configuration: version numbers are derived from git tags.
versioneer.VCS = 'git'
versioneer.versionfile_source = 'fatiando/_version.py'
versioneer.versionfile_build = 'fatiando/_version.py'
versioneer.tag_prefix = 'v'
versioneer.parentdir_prefix = '.'

# Package metadata passed to setup() below.
NAME = 'fatiando'
FULLNAME = 'Fatiando a Terra'
DESCRIPTION = "Modeling and inversion for geophysics"
VERSION = versioneer.get_version()
CMDCLASS = versioneer.get_cmdclass()
with open("README.rst") as f:
    LONG_DESCRIPTION = ''.join(f.readlines())
PACKAGES = ['fatiando',
            'fatiando.gravmag',
            'fatiando.seismic',
            'fatiando.geothermal',
            'fatiando.vis',
            'fatiando.inversion']
AUTHOR = "Leonardo Uieda"
AUTHOR_EMAIL = 'leouieda@gmail.com'
LICENSE = "BSD License"
URL = "http://www.fatiando.org"
PLATFORMS = "Any"
SCRIPTS = []
CLASSIFIERS = ["Intended Audience :: End Users/Desktop",
               "Intended Audience :: Science/Research",
               "Intended Audience :: Developers",
               "Intended Audience :: Education",
               "Topic :: Scientific/Engineering",
               "Topic :: Software Development :: Libraries",
               "Environment :: Console",
               "Programming Language :: Python :: 2.7",
               "Programming Language :: Cython",
               "License :: OSI Approved :: BSD License",
               "Development Status :: 3 - Alpha",
               "Natural Language :: English"]
KEYWORDS = 'geophysics modeling inversion gravimetry seismic magnetometry'

# The running setup.py with --cython, then set things up to generate the Cython
# .c files. If not, then compile the pre-converted C files.
USE_CYTHON = True if '--cython' in sys.argv else False
ext = '.pyx' if USE_CYTHON else '.c'
libs = []
if os.name == 'posix':
    # Link libm explicitly on POSIX systems.
    libs.append('m')
# Each entry: dotted module path parts + extra Extension keyword arguments.
C_EXT = [[['fatiando', 'seismic', '_ttime2d'], {}],
         [['fatiando', 'seismic', '_wavefd'], {}],
         [['fatiando', 'gravmag', '_polyprism'], {}],
         [['fatiando', 'gravmag', '_sphere'], {}],
         [['fatiando', 'gravmag', '_prism'], {}],
         ]
extensions = []
for e, extra_args in C_EXT:
    extensions.append(
        Extension('.'.join(e), [os.path.join(*e) + ext],
                  libraries=libs,
                  include_dirs=[numpy.get_include()],
                  **extra_args))
if USE_CYTHON:
    # Drop our private flag before distutils sees the argument list.
    sys.argv.remove('--cython')
    from Cython.Build import cythonize
    extensions = cythonize(extensions)

if __name__ == '__main__':
    setup(name=NAME,
          fullname=FULLNAME,
          description=DESCRIPTION,
          long_description=LONG_DESCRIPTION,
          version=VERSION,
          author=AUTHOR,
          author_email=AUTHOR_EMAIL,
          license=LICENSE,
          url=URL,
          platforms=PLATFORMS,
          scripts=SCRIPTS,
          packages=PACKAGES,
          ext_modules=extensions,
          classifiers=CLASSIFIERS,
          keywords=KEYWORDS,
          cmdclass=CMDCLASS)
|
UMD-AMAV/TerpCopter | terpcopter_libraries/mavlink/share/pyshared/pymavlink/mavextra.py | Python | gpl-3.0 | 4,562 | 0.00811 | #!/usr/bin/env python
'''
useful extra functions for use by mavlink clients
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
from math import *
def kmh(mps):
    """Convert a speed from metres/second to kilometres/hour."""
    return mps * 3.6
def altitude(press_abs, ground_press=955.0, ground_temp=30):
    """Barometric altitude (metres) from absolute pressure, relative to the
    given ground pressure and ground temperature (Celsius)."""
    pressure_ratio = ground_press / press_abs
    return log(pressure_ratio) * (ground_temp + 273.15) * 29271.267 * 0.001
def mag_heading(RAW_IMU, ATTITUDE, declination=0, SENSOR_OFFSETS=None, ofs=None):
    """Tilt-compensated compass heading (degrees, 0..360) from the raw
    magnetometer, optionally re-applying replacement offsets *ofs*."""
    mx = RAW_IMU.xmag
    my = RAW_IMU.ymag
    mz = RAW_IMU.zmag
    if SENSOR_OFFSETS is not None and ofs is not None:
        # Swap the logged offsets for the caller-supplied ones.
        mx += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
        my += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
        mz += ofs[2] - SENSOR_OFFSETS.mag_ofs_z

    # Project the field vector onto the horizontal plane using roll/pitch.
    tilt_x = mx*cos(ATTITUDE.pitch) + my*sin(ATTITUDE.roll)*sin(ATTITUDE.pitch) + mz*cos(ATTITUDE.roll)*sin(ATTITUDE.pitch)
    tilt_y = my*cos(ATTITUDE.roll) - mz*sin(ATTITUDE.roll)

    heading = degrees(atan2(-tilt_y, tilt_x)) + declination
    if heading < 0:
        heading += 360
    return heading
def mag_field(RAW_IMU, SENSOR_OFFSETS=None, ofs=None):
    """Magnetic field strength (vector magnitude) from the raw magnetometer,
    optionally re-applying replacement offsets *ofs*."""
    mx = RAW_IMU.xmag
    my = RAW_IMU.ymag
    mz = RAW_IMU.zmag
    if SENSOR_OFFSETS is not None and ofs is not None:
        mx += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
        my += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
        mz += ofs[2] - SENSOR_OFFSETS.mag_ofs_z
    return sqrt(mx**2 + my**2 + mz**2)
def angle_diff(angle1, angle2):
    """Signed difference between two angles, wrapped into (-180, 180]."""
    diff = angle1 - angle2
    # Only one correction can apply: a value > 180 can't also end up < -180.
    if diff > 180:
        diff -= 360
    elif diff < -180:
        diff += 360
    return diff
# Filter state, keyed by caller-chosen name; shared across calls.
lowpass_data = {}

def lowpass(var, key, factor):
    """Simple one-pole lowpass filter; state is kept per *key*.

    The first sample for a key initialises the filter; afterwards the stored
    value decays toward *var* with weight (1 - factor).
    """
    if key not in lowpass_data:
        lowpass_data[key] = var
    else:
        lowpass_data[key] = factor*lowpass_data[key] + (1.0 - factor)*var
    return lowpass_data[key]
# Previous sample per key, shared with delta_angle() below.
last_delta = {}

def delta(var, key):
    """Change in *var* since the previous call with the same *key*
    (0 on the first call)."""
    dv = var - last_delta[key] if key in last_delta else 0
    last_delta[key] = var
    return dv
def delta_angle(var, key):
    """Change in an angle since the previous call with the same *key*,
    wrapped into (-180, 180]; 0 on the first call."""
    global last_delta
    if key in last_delta:
        dv = var - last_delta[key]
    else:
        dv = 0
    last_delta[key] = var
    # Wrap the raw difference across the +/-180 degree boundary.
    if dv > 180:
        dv -= 360
    if dv < -180:
        dv += 360
    return dv
def roll_estimate(RAW_IMU, smooth=0.7):
    """Estimate roll (degrees) from smoothed accelerometer readings."""
    ax = lowpass(RAW_IMU.xacc, 'rx', smooth)
    ay = lowpass(RAW_IMU.yacc, 'ry', smooth)
    az = lowpass(RAW_IMU.zacc, 'rz', smooth)
    return degrees(-asin(ay/sqrt(ax**2+ay**2+az**2)))
def pitch_estimate(RAW_IMU, smooth=0.7):
    """Estimate pitch (degrees) from smoothed accelerometer readings."""
    ax = lowpass(RAW_IMU.xacc, 'rx', smooth)
    ay = lowpass(RAW_IMU.yacc, 'ry', smooth)
    az = lowpass(RAW_IMU.zacc, 'rz', smooth)
    return degrees(asin(ax/sqrt(ax**2+ay**2+az**2)))
def gravity(RAW_IMU, SENSOR_OFFSETS=None, ofs=None, smooth=0.7):
    '''estimate gravity magnitude from the raw accelerometer (lowpassed);
    the *0.01 factor suggests raw values are in cm/s^2 -- TODO confirm'''
    rx = RAW_IMU.xacc
    ry = RAW_IMU.yacc
    # NOTE(review): the +45 looks like a hard-coded z-axis bias correction
    # for a particular board -- confirm before reusing elsewhere.
    rz = RAW_IMU.zacc+45
    if SENSOR_OFFSETS is not None and ofs is not None:
        rx += ofs[0] - SENSOR_OFFSETS.accel_cal_x
        ry += ofs[1] - SENSOR_OFFSETS.accel_cal_y
        rz += ofs[2] - SENSOR_OFFSETS.accel_cal_z
    return lowpass(sqrt(rx**2+ry**2+rz**2)*0.01,'_gravity',smooth)
def pitch_sim(SIMSTATE, GPS_RAW):
    '''estimate pitch from SIMSTATE accels'''
    # Remove the forward-acceleration component estimated from GPS speed.
    xacc = SIMSTATE.xacc - lowpass(delta(GPS_RAW.v,"v")*6.6, "v", 0.9)
    zacc = SIMSTATE.zacc
    # Centripetal correction from pitch rate and speed.
    zacc += SIMSTATE.ygyro * GPS_RAW.v;
    # Clamp the asin argument; NOTE(review): `-0` evaluates to plain 0 --
    # possibly +/-90 was intended here. Left as-is.
    if xacc/zacc >= 1:
        return 0
    if xacc/zacc <= -1:
        return -0
    return degrees(-asin(xacc/zacc))
def distance_two(GPS_RAW1, GPS_RAW2):
    """Great-circle (haversine) distance in metres between two GPS
    messages whose .lat/.lon are in degrees."""
    lat1, lon1 = radians(GPS_RAW1.lat), radians(GPS_RAW1.lon)
    lat2, lon2 = radians(GPS_RAW2.lat), radians(GPS_RAW2.lon)
    half_dlat = 0.5 * (lat2 - lat1)
    half_dlon = 0.5 * (lon2 - lon1)
    a = sin(half_dlat)**2 + sin(half_dlon)**2 * cos(lat1) * cos(lat2)
    central_angle = 2.0 * atan2(sqrt(a), sqrt(1.0 - a))
    # Mean Earth radius of 6371 km.
    return 6371 * 1000 * central_angle
# First good GPS fix, captured lazily by distance_home().
first_fix = None

def distance_home(GPS_RAW):
    '''distance from first fix point'''
    global first_fix
    # `is None` instead of `== None`: mavlink message objects may define
    # __eq__, making equality comparison against None unreliable.
    if first_fix is None or first_fix.fix_type < 2:
        # No usable fix recorded yet (fix_type < 2 means no 2D/3D lock):
        # remember this message and report zero distance.
        first_fix = GPS_RAW
        return 0
    return distance_two(GPS_RAW, first_fix)
|
rembish/mls | setup.py | Python | bsd-2-clause | 1,212 | 0 | #!/usr/bin/env python
from os.path import dirname, abspath, join
from setuptools import setup

here = abspath(dirname(__file__))
# Read the long description up front; `with` closes the handle promptly
# (it was previously left open for the lifetime of the process).
with open(join(here, "README.rst")) as readme:
    long_description = "".join(readme.readlines())

setup(
    name="mls",
    version="1.2.2",
    py_modules=["mls"],
    url="https://github.com/rembish/mls",
    license="BSD",
    author="Aleksey Rembish",
    author_email="alex@rembish.org",
    description="MultiLingualString",
    long_description=long_description,
    test_suite="tests",
    install_requires=["six"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Internationalization",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities"
    ]
)
|
andras-tim/tchart | tests/test_decorators_paper.py | Python | gpl-3.0 | 1,537 | 0.000651 | # -*- coding: UTF-8 -*-
# pylint: disable=misplaced-comparison-constant,redefined-outer-name,no-self-use
import pytest
from tchart.decorators import PaperDecorator
# Each parametrized case pairs the raw chart lines with the expected output
# after PaperDecorator wraps them in its curled-paper ASCII-art frame.
@pytest.mark.parametrize('lines,expected_lines', (
    (
        [
            u'0',
        ],
        [
            u' .---. ',
            u' / . \\ ',
            u' |\\_/| |',
            u' | | /|',
            u' .-------- |',
            u' / .-. 0 / ',
            u'| |--\' ',
            u'\\ | ',
            u' \\ / ',
            u' `---\' ',
        ],
    ),
    (
        [
            u'orange kako banana',
            u' kiwi ',
            u'mango',
            u'pulp | ',
        ],
        [
            u' .---. ',
            u' / . \\ ',
            u' |\\_/| |',
            u' | | /|',
            u' .-------------------------\' |',
            u' / .-. orange kako banana |',
            u'| / \\ kiwi |',
| u'| |\\_. | mango |',
            u'|\\| | /| pulp / ',
            u'| |-------------------\' ',
            u'\\ | ',
            u' \\ / ',
            u' `---\' ',
        ],
    ),
))
def test_decorate(lines, expected_lines):
    """decorate() must frame the input lines exactly as the fixture shows."""
    decorator = PaperDecorator()
    assert decorator.decorate(lines=lines) == expected_lines
|
ucsd-ccbb/Oncolist | src/restLayer/app/SearchDrugsTab.py | Python | mit | 15,395 | 0.006236 | __author__ = 'aarongary'
from app import PubMed
from app import elastic_search_uri
from collections import Counter
from elasticsearch import Elasticsearch
es = Elasticsearch([elastic_search_uri],send_get_body_as='POST',timeout=300)
#============================
#============================
# DRUG SEARCH
#============================
#============================
def get_drug_network_search_mapped(queryTerms):
    """Build the fixed drug-network search descriptor for *queryTerms*,
    run the mapped search and return its result wrapped in a one-item list."""
    search_spec = {
        'searchGroupTitle': 'Cluster Network',
        'searchTab': 'DRUG',
        'network': 'drug_network',
        'matchField': 'x_node_list.name',
        'matchCoreNode': 'node_name',
        'cancerType': 'BRCA',
        'queryTerms': queryTerms,
    }
    # Alternative strategy kept for reference:
    # drug_network_search_gene_centric(search_spec)
    return [drug_network_search_mapped(search_spec)]
def drug_network_search_mapped(network_info, disease=None):
    """Query the Elasticsearch 'drugs' index for drugs whose target-gene
    node lists overlap the comma-separated query terms in *network_info*.

    Returns a dict with the scored hits ('items'), the drugs grouped by
    their first overlapping gene ('grouped_items') and per-gene overlap
    counts ('overlap_counts') for the front end.

    NOTE(review): *disease* is unused; the mutable default ``[]`` was
    replaced with ``None`` (behaviourally equivalent for callers).
    """
    gene_network_data = {
        'searchGroupTitle': network_info['searchGroupTitle'],
        'clusterNodeName': "",
        'searchTab': network_info['searchTab'],
        'items': [],
        'geneSuperList': [],
        'geneScoreRangeMax': '100',
        'geneScoreRangeMin': '5',
        'geneScoreRangeStep': '0.1',
        'document_ids': [],
        'inferred_drugs': [],
        'overlap_counts': []
    }

    unsorted_items = []
    gene_super_list = []
    overlap_counts_array = []

    queryTermArray = network_info['queryTerms'].split(',')

    # Normalized PubMed citation counts per query term, used below for ranking.
    sorted_query_list = PubMed.get_gene_pubmed_counts_normalized(network_info['queryTerms'], 1)

    gene_network_data['geneSuperList'] = get_geneSuperList(queryTermArray, sorted_query_list)

    network_info['queryTerms'] = network_info['queryTerms'].replace(",", "*")

    # One "should" clause per query term; a drug matches if any term appears
    # in its node_list.
    should_match = []
    for queryTerm in queryTermArray:
        # NOTE(review): the boost value is computed but never applied to the
        # query; the call is kept in case get_boost_value has side effects.
        get_boost_value(sorted_query_list['results'], queryTerm)
        should_match.append({"match": {"node_list.name": queryTerm}})

    search_body = {
        'sort': [
            '_score'
        ],
        'query': {
            'bool': {
                'should': should_match
            }
        },
        'size': 50
    }

    result = es.search(
        index='drugs',
        doc_type='drugs_drugbank',
        body=search_body
    )

    print("Got %d Hits:" % result['hits']['total'])

    #==================================
    # PROCESS EACH SEARCH RESULT
    #==================================
    hitCount = 0
    hitMax = 0
    hitMin = 0

    if result['hits']['total'] < 1:
        print('no results')

    for hit in result['hits']['hits']:
        # Track the best (first) and the latest score seen.
        if hitCount == 0:
            hitMax = hit['_score']
        else:
            hitMin = hit['_score']

        # Genes targeted by this drug.
        geneNeighborhoodArray = []
        for geneNodeHit in hit["_source"]["node_list"]:
            geneNeighborhoodArray.append(geneNodeHit['name'])

        # Intersection of the drug's target genes with the query terms.
        x = [set(geneNeighborhoodArray), set(queryTermArray)]
        y = set.intersection(*x)

        # Full node records (name + weight) for each overlapping gene.
        emphasizeInfoArrayWithWeights = []
        for genehit in y:
            node_list_items = hit["_source"]["node_list"]
            match = next(item for item in node_list_items if item["name"] == genehit)
            emphasizeInfoArrayWithWeights.append(match)

        for gene_network_matched in y:
            gene_super_list.append(gene_network_matched)
            # BUGFIX: reset the flag for every gene; previously it stayed True
            # after the first overlap, so new genes were never appended.
            overlap_found = False
            for match_this_overlap in overlap_counts_array:
                if gene_network_matched == match_this_overlap['gene']:
                    match_this_overlap['count'] += 1
                    overlap_found = True
                    break
            if not overlap_found:
                overlap_counts_array.append(
                    {
                        'gene': gene_network_matched,
                        'count': 1
                    }
                )

        searchResultSummaryString = 'drugbank-' + str(hit["_source"]["degree"])
        #searchResultSummaryString = hit["_source"]["source"] + '-' + str(hit["_source"]["total_degree"])

        gene_network_data_items = {
            'searchResultTitle': hit["_source"]["node_name"],  #DrugBank.get_drugbank_synonym(hit["_source"]["node_name"]),
            'diseaseType': '',
            'clusterName': hit["_source"]["drugbank_id"],
            'searchResultSummary': searchResultSummaryString,
            'searchResultScoreRank': hit["_score"],
            'luceneScore': hit["_score"],
            'searchResultScoreRankTitle': 'pubmed references ',
            'filterValue': '0.0000000029',
            'emphasizeInfoArray': list(y),
            'emphasizeInfoArrayWithWeights': emphasizeInfoArrayWithWeights,
            'top5': hitCount < 5,
            'hitOrder': hitCount,
            'pubmedCount': 0,
            'hit_id': hit['_id']
        }

        gene_network_data['document_ids'].append(hit['_id'])
        unsorted_items.append(gene_network_data_items)
        hitCount += 1
        print(hitCount)

    # Attach PubMed counts / normalized ranks to each hit; hits with no
    # matching PubMed entry get zeros.
    for network_data_item in unsorted_items:
        foundHit = False
        for sortedID in sorted_query_list['results']:
            if sortedID['id'] == network_data_item['clusterName']:
                network_data_item['pubmedCount'] = sortedID['count']
                network_data_item['searchResultScoreRank'] = sortedID['normalizedValue']
                gene_network_data['items'].append(network_data_item)
                foundHit = True
        if not foundHit:
            network_data_item['pubmedCount'] = 0
            network_data_item['searchResultScoreRank'] = 0
            gene_network_data['items'].append(network_data_item)

    # NOTE(review): this loop is currently dead -- the append is disabled.
    counter_gene_list = Counter(gene_super_list)
    for key, value in counter_gene_list.items():
        kv_item = {'queryTerm': key,
                   'boostValue': value}
        #gene_network_data['geneSuperList'].append(kv_item)

    #===============================
    # GROUP DRUGS BY TARGETED GENE
    #===============================
    drug_gene_grouping = []
    for drug_hit in gene_network_data['items']:
        match_found = False
        # Append to an existing gene bucket when one already exists.
        for gene_loop_item in drug_gene_grouping:
            if len(drug_hit['emphasizeInfoArray']) > 0:
                if gene_loop_item['gene_name'] == drug_hit['emphasizeInfoArray'][0]:
                    gene_loop_item['searchResultTitle'].append({'drug_name': drug_hit['searchResultTitle'],
                                                                'drugbank_id': drug_hit['clusterName']})
                    match_found = True

        # Otherwise start a new bucket, keyed by the first overlapping gene
        # (or 'unknown' when the drug overlapped no query term).
        if not match_found:
            if len(drug_hit['emphasizeInfoArray']) > 0:
                drug_gene_grouping.append(
                    {
                        'gene_name': drug_hit['emphasizeInfoArray'][0],
                        'searchResultTitle': [{'drug_name': drug_hit['searchResultTitle'],
                                               'drugbank_id': drug_hit['clusterName']}]
                    }
                )
            else:
                drug_gene_grouping.append(
                    {
                        'gene_name': 'unknown',
                        'searchResultTitle': [{'drug_name': drug_hit['searchResultTitle'],
                                               'drugbank_id': drug_hit['clusterName']}]
                    }
                )

    for drug_gene_no_count_item in drug_gene_grouping:
        drug_gene_no_count_item['gene_count'] = len(drug_gene_no_count_item['searchResultTitle'])

    gene_network_data['grouped_items'] = drug_gene_grouping
    gene_network_data['overlap_counts'] = overlap_counts_array

    return gene_network_data
def drug_network_search_gene_centric(network_info, disease=[]):
gene_network_data = {
'searchGroupTitle': network_info['searchGroupTitle'],
'clusterNodeName': "",
'searchTab': network_info |
pdehaye/theming-edx-platform | lms/djangoapps/bulk_email/tasks.py | Python | agpl-3.0 | 10,209 | 0.003624 | """
This module contains celery task functions for handling the sending of bulk email
to a course.
"""
import math
import re
import time
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError
from django.conf import settings
from django.contrib.auth.models im | port User, Group
from django.core.mail import EmailMultiAlternatives, get_connection
from django.http import Http404
from celery import task, current_task
from celery.utils.log import get_task_logger
from django.core.urlresolvers import reverse
from statsd import statsd
from bulk_email.models import (
CourseEmail, Optout, CourseEmailTemplate,
SE | ND_TO_MYSELF, SEND_TO_STAFF, SEND_TO_ALL,
)
from courseware.access import _course_staff_group_name, _course_instructor_group_name
from courseware.courses import get_course_by_id, course_image_url
log = get_task_logger(__name__)
@task(default_retry_delay=10, max_retries=5)  # pylint: disable=E1102
def delegate_email_batches(email_id, user_id):
    """
    Delegates emails by querying for the list of recipients who should
    get the mail, chopping up into batches of settings.EMAILS_PER_TASK size,
    and queueing up worker jobs.

    Returns the number of batches (workers) kicked off.
    """
    try:
        email_obj = CourseEmail.objects.get(id=email_id)
    except CourseEmail.DoesNotExist as exc:
        # The retry behavior here is necessary because of a race condition between the commit of the transaction
        # that creates this CourseEmail row and the celery pipeline that starts this task.
        # We might possibly want to move the blocking into the view function rather than have it in this task.
        log.warning("Failed to get CourseEmail with id %s, retry %d", email_id, current_task.request.retries)
        raise delegate_email_batches.retry(arg=[email_id, user_id], exc=exc)

    to_option = email_obj.to_option
    course_id = email_obj.course_id

    try:
        course = get_course_by_id(course_id, depth=1)
    except Http404 as exc:
        log.exception("get_course_by_id failed: %s", exc.args[0])
        raise Exception("get_course_by_id failed: " + exc.args[0])

    # Absolute URLs for the course root and course image, interpolated into
    # the email templates by the downstream course_email task.
    course_url = 'https://{}{}'.format(
        settings.SITE_NAME,
        reverse('course_root', kwargs={'course_id': course_id})
    )
    image_url = 'https://{}{}'.format(settings.SITE_NAME, course_image_url(course))

    # Build the recipient queryset according to the selected audience.
    if to_option == SEND_TO_MYSELF:
        recipient_qset = User.objects.filter(id=user_id)
    elif to_option == SEND_TO_ALL or to_option == SEND_TO_STAFF:
        # Staff and instructors are included for both SEND_TO_STAFF and SEND_TO_ALL.
        staff_grpname = _course_staff_group_name(course.location)
        staff_group, _ = Group.objects.get_or_create(name=staff_grpname)
        staff_qset = staff_group.user_set.all()
        instructor_grpname = _course_instructor_group_name(course.location)
        instructor_group, _ = Group.objects.get_or_create(name=instructor_grpname)
        instructor_qset = instructor_group.user_set.all()
        recipient_qset = staff_qset | instructor_qset

        if to_option == SEND_TO_ALL:
            # Add actively enrolled students; distinct() removes any overlap
            # between enrolled users and staff/instructors.
            enrollment_qset = User.objects.filter(courseenrollment__course_id=course_id,
                                                  courseenrollment__is_active=True)
            recipient_qset = recipient_qset | enrollment_qset
        recipient_qset = recipient_qset.distinct()
    else:
        log.error("Unexpected bulk email TO_OPTION found: %s", to_option)
        raise Exception("Unexpected bulk email TO_OPTION found: {0}".format(to_option))

    recipient_qset = recipient_qset.order_by('pk')
    total_num_emails = recipient_qset.count()
    # Number of DB pages needed to walk the full recipient list.
    num_queries = int(math.ceil(float(total_num_emails) / float(settings.EMAILS_PER_QUERY)))
    # NOTE(review): if the recipient queryset is empty this indexing raises
    # IndexError -- presumably at least the sender is always a recipient; confirm.
    last_pk = recipient_qset[0].pk - 1
    num_workers = 0
    for _ in range(num_queries):
        # Keyset pagination: filter on pk > last seen pk instead of OFFSET so
        # each page is a cheap index-range scan.
        recipient_sublist = list(recipient_qset.order_by('pk').filter(pk__gt=last_pk)
                                 .values('profile__name', 'email', 'pk')[:settings.EMAILS_PER_QUERY])
        last_pk = recipient_sublist[-1]['pk']
        num_emails_this_query = len(recipient_sublist)
        # Split this page into roughly-equal chunks of at most EMAILS_PER_TASK.
        num_tasks_this_query = int(math.ceil(float(num_emails_this_query) / float(settings.EMAILS_PER_TASK)))
        chunk = int(math.ceil(float(num_emails_this_query) / float(num_tasks_this_query)))

        for i in range(num_tasks_this_query):
            to_list = recipient_sublist[i * chunk:i * chunk + chunk]
            # Fan out one celery worker per chunk.
            course_email.delay(
                email_id,
                to_list,
                course.display_name,
                course_url,
                image_url,
                False
            )
        num_workers += num_tasks_this_query
    return num_workers
@task(default_retry_delay=15, max_retries=5) # pylint: disable=E1102
def course_email(email_id, to_list, course_title, course_url, image_url, throttle=False):
"""
Takes a primary id for a CourseEmail object and a 'to_list' of recipient objects--keys are
'profile__name', 'email' (address), and 'pk' (in the user table).
course_title, course_url, and image_url are to memoize course properties and save lookups.
Sends to all addresses contained in to_list. Emails are sent multi-part, in both plain
text and html.
"""
try:
msg = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist:
log.exception("Could not find email id:{} to send.".format(email_id))
raise
# exclude optouts
optouts = (Optout.objects.filter(course_id=msg.course_id,
user__in=[i['pk'] for i in to_list])
.values_list('user__email', flat=True))
optouts = set(optouts)
num_optout = len(optouts)
to_list = [recipient for recipient in to_list if recipient['email'] not in optouts]
subject = "[" + course_title + "] " + msg.subject
course_title_no_quotes = re.sub(r'"', '', course_title)
from_addr = '"{0}" Course Staff <{1}>'.format(course_title_no_quotes, settings.DEFAULT_BULK_FROM_EMAIL)
course_email_template = CourseEmailTemplate.get_template()
try:
connection = get_connection()
connection.open()
num_sent = 0
num_error = 0
# Define context values to use in all course emails:
email_context = {
'name': '',
'email': '',
'course_title': course_title,
'course_url': course_url,
'course_image_url': image_url,
'account_settings_url': 'https://{}{}'.format(settings.SITE_NAME, reverse('dashboard')),
'platform_name': settings.PLATFORM_NAME,
}
while to_list:
# Update context with user-specific values:
email = to_list[-1]['email']
email_context['email'] = email
email_context['name'] = to_list[-1]['profile__name']
# Construct message content using templates and context:
plaintext_msg = course_email_template.render_plaintext(msg.text_message, email_context)
html_msg = course_email_template.render_htmltext(msg.html_message, email_context)
# Create email:
email_msg = EmailMultiAlternatives(
subject,
plaintext_msg,
from_addr,
[email],
connection=connection
)
email_msg.attach_alternative(html_msg, 'text/html')
# Throttle if we tried a few times and got the rate limiter
if throttle or current_task.request.retries > 0:
time.sleep(0.2)
try:
connection.send_messages([email_msg])
statsd.increment('course_email.sent', tags=[_statsd_tag(course_title)])
log.info('Email with id %s sent to %s', email_id, email)
num_sent += 1
except SMTPDataError as exc:
# According to SMTP spec, we'll retry error codes in the 4xx range. 5xx range indicates hard failure
if exc.smtp_code >= 400 and exc.smtp_code < 500:
# This will cause the outer handler to catch the exception and retry the entire task
raise exc
|
Fantomas42/django-blog-zinnia | zinnia/admin/category.py | Python | bsd-3-clause | 1,209 | 0 | """CategoryAdmin for Zinnia"""
from django.contrib import admin
from django.urls import NoReverseMatch
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from zinnia.admin.forms import CategoryAdminForm
class CategoryAdmin(admin.ModelAdmin):
    """
    Admin for Category model.
    """
    form = CategoryAdminForm
    fields = ('title', 'parent', 'description', 'slug')
    list_display = ('title', 'slug', 'get_tree_path', 'description')
    sortable_by = ('title', 'slug')
    prepopulated_fields = {'slug': ('title', )}
    search_fields = ('title', 'description')
    list_filter = ('parent',)

    def __init__(self, model, admin_site):
        # Expose the admin site on the form class so its widgets can build
        # admin-relative URLs (e.g. related-object lookups).
        self.form.admin_site = admin_site
        super(CategoryAdmin, self).__init__(model, admin_site)

    def get_tree_path(self, category):
        """
        Return the category's tree path in HTML.
        """
        try:
            return format_html(
                '<a href="{}" target="blank">/{}/</a>',
                category.get_absolute_url(), category.tree_path)
        except NoReverseMatch:
            # No URL pattern registered for categories: degrade to plain text.
            return '/%s/' % category.tree_path
    get_tree_path.short_description = _('tree path')
|
sacharya/nova | nova/weights.py | Python | apache-2.0 | 4,485 | 0.000223 | # Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Pluggable Weighing support
"""
import abc
from nova import loadables
def normalize(weight_list, minval=None, maxval=None):
    """Normalize the values in a list between 0 and 1.0.

    The normalization is made regarding the lower and upper values present in
    weight_list. If the minval and/or maxval parameters are set, these values
    will be used instead of the minimum and maximum from the list.

    If all the values are equal, they are normalized to 0.
    """
    if not weight_list:
        return ()

    # Fall back to the observed extremes when explicit bounds are not given.
    lo = min(weight_list) if minval is None else minval
    hi = max(weight_list) if maxval is None else maxval
    lo = float(lo)
    hi = float(hi)

    if lo == hi:
        # Degenerate range: everything maps to 0.
        return [0] * len(weight_list)

    span = hi - lo
    return ((value - lo) / span for value in weight_list)
class WeighedObject(object):
    """Container pairing an object with its accumulated weight."""

    def __init__(self, obj, weight):
        # The wrapped object and the weight assigned to it by the weighers.
        self.obj = obj
        self.weight = weight

    def __repr__(self):
        return "<WeighedObject '{0}': {1}>".format(self.obj, self.weight)
class BaseWeigher(object):
    """Base class for pluggable weighers.

    The attributes maxval and minval can be specified to set up the maximum
    and minimum values for the weighed objects. These values will then be
    taken into account in the normalization step, instead of taking the values
    from the calculated weights.
    """
    __metaclass__ = abc.ABCMeta

    # Bounds used during normalization; None means "derive from the weights".
    minval = None
    maxval = None

    def weight_multiplier(self):
        """How weighted this weigher should be.

        Override this method in a subclass, so that the returned value is
        read from a configuration option to permit operators specify a
        multiplier for the weigher.
        """
        return 1.0

    @abc.abstractmethod
    def _weigh_object(self, obj, weight_properties):
        """Weigh an specific object."""

    def weigh_objects(self, weighed_obj_list, weight_properties):
        """Weigh multiple objects.

        Override in a subclass if you need access to all objects in order
        to calculate weights. Do not modify the weight of an object here,
        just return a list of weights.
        """
        # Calculate the weights
        weights = []
        for obj in weighed_obj_list:
            weight = self._weigh_object(obj.obj, weight_properties)

            # Record the min and max values if they are None. If they are
            # anything but None we assume that the weigher has set them.
            if self.minval is None:
                self.minval = weight
            if self.maxval is None:
                self.maxval = weight

            if weight < self.minval:
                self.minval = weight
            elif weight > self.maxval:
                self.maxval = weight

            weights.append(weight)

        return weights
class BaseWeightHandler(loadables.BaseLoader):
    # Class used to wrap each candidate object; subclasses may override.
    object_class = WeighedObject

    def get_weighed_objects(self, weigher_classes, obj_list,
                            weighing_properties):
        """Return a sorted (descending), normalized list of WeighedObjects."""
        if not obj_list:
            return []

        weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
        for weigher_cls in weigher_classes:
            weigher = weigher_cls()
            weights = weigher.weigh_objects(weighed_objs, weighing_properties)

            # Normalize the weights so each weigher contributes on a 0..1
            # scale before its multiplier is applied.
            weights = normalize(weights,
                                minval=weigher.minval,
                                maxval=weigher.maxval)

            # Accumulate each weigher's (scaled) contribution per object.
            for i, weight in enumerate(weights):
                obj = weighed_objs[i]
                obj.weight += weigher.weight_multiplier() * weight

        return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.