repo_name (string, 5-100) | path (string, 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k) | middle (string, 3-512) | suffix (string, 0-8.17k)

mrpau/kolibri | kolibri/core/discovery/utils/network/search.py | Python | mit | 6,523 | 0.00138
import json
import logging
import socket
from contextlib import closing
from django.core.exceptions import ValidationError
from django.db import connection
from zeroconf import get_all_addresses
from zeroconf import NonUniqueNameException
from zeroconf import ServiceInfo
from zeroconf import USE_IP_OF_OUTGOING_INTERFACE
from zeroconf import Zeroconf
from kolibri.core.discovery.models import DynamicNetworkLocation
from kolibri.core.public.utils import get_device_info
logger = logging.getLogger(__name__)
SERVICE_TYPE = "Kolibri._sub._http._tcp.local."
LOCAL_DOMAIN = "kolibri.local"
ZEROCONF_STATE = {"zeroconf": None, "listener": None, "service": None}
def _id_from_name(name):
assert name.endswith(SERVICE_TYPE), (
"Invalid service name; must end with '%s'" % SERVICE_TYPE
)
return name.replace(SERVICE_TYPE, "").strip(".")
def _is_port_open(host, port, timeout=1):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(timeout)
return sock.connect_ex((host, port)) == 0
class KolibriZeroconfService(object):
info = None
def __init__(self, id, port=8080, data={}):
self.id = id
self.port = port
self.data = {key: json.dumps(val) for (key, val) in data.items()}
def register(self):
if not ZEROCONF_STATE["zeroconf"]:
initialize_zeroconf_listener()
if self.info is not None:
logger.error("Service is already registered!")
return
i = 1
id = self.id
while not self.info:
# attempt to create an mDNS service and register it on the network
try:
info = ServiceInfo(
SERVICE_TYPE,
name=".".join([id, SERVICE_TYPE]),
server=".".join([id, LOCAL_DOMAIN, ""]),
address=USE_IP_OF_OUTGOING_INTERFACE,
port=self.port,
properties=self.data,
)
ZEROCONF_STATE["zeroconf"].register_service(info, ttl=60)
self.info = info
except NonUniqueNameException:
# if there's a name conflict, append incrementing integer until no conflict
i += 1
id = "%s-%d" % (self.id, i)
if i > 100:
raise NonUniqueNameException()
self.id = id
return self
def unregister(self):
if self.info is None:
logging.error("Service is not registered!")
return
ZEROCONF_STATE["zeroconf"].unregister_service(self.info)
self.info = None
def cleanup(self, *args, **kwargs):
if self.info and ZEROCONF_STATE["zeroconf"]:
self.unregister()
class KolibriZeroconfListener(object):
instances = {}
def add_service(self, zeroconf, type, name):
timeout = 5000
info = zeroconf.get_service_info(type, name, timeout=timeout)
if info is None:
logger.warn(
"Zeroconf network service information could not be retrieved within {} seconds".format(
str(timeout / 1000.0)
)
)
return
id = _id_from_name(name)
ip = socket.inet_ntoa(info.address)
base_url = "http://{ip}:{port}/".format(ip=ip, port=info.port)
zeroconf_service = ZEROCONF_STATE.get("service")
is_self = zeroconf_service and zeroconf_service.id == id
instance = {
"id": id,
"ip": ip,
"local": ip in get_all_addresses(),
"port": info.port,
"host": info.server.strip("."),
"base_url": base_url,
"self": is_self,
}
device_info = {
bytes.decode(key): json.loads(val) for (key, val) in info.properties.items()
}
instance.update(device_info)
self.instances[id] = instance
if not is_self:
try:
DynamicNetworkLocation.objects.update_or_create(
dict(base_url=base_url, **device_info), id=id
)
logger.info(
"Kolibri instance '%s' joined zeroconf network; service info: %s"
% (id, self.instances[id])
)
except ValidationError:
import traceback
logger.warn(
"""
A new Kolibri instance '%s' was seen on the zeroconf network,
but we had trouble getting the information we needed about it.
Service info:
%s
The following exception was raised:
%s
"""
% (id, self.instances[id], traceback.format_exc(limit=1))
)
finally:
connection.close()
def remove_service(self, zeroconf, type, name):
id = _id_from_name(name)
logger.info("Kolibri instance '%s' has left the zeroconf network." % (id,))
try:
if id in self.instances:
del self.instances[id]
except KeyError:
pass
DynamicNetworkLocation.objects.filter(pk=id).delete()
connection.close()
def register_zeroconf_service(port):
device_info = get_device_info()
DynamicNetworkLocation.objects.all().delete()
connection.close()
id = device_info.get("instance_id")
if ZEROCONF_STATE["service"] is not None:
unregister_zeroconf_service()
logger.info("Registering ourselves to zeroconf network with id '%s'..." % id)
data = device_info
ZEROCONF_STATE["service"] = KolibriZeroconfService(id=id, port=port, data=data)
ZEROCONF_STATE["service"].register()
def unregister_zeroconf_service():
if ZEROCONF_STATE["service"] is not None:
ZEROCONF_STATE["service"].cleanup()
ZEROCONF_STATE["service"] = None
if ZEROCONF_STATE["zeroconf"] is not None:
ZEROCONF_STATE["zeroconf"].close()
def initialize_zeroconf_listener():
ZEROCONF_STATE["zeroconf"] = Zeroconf()
ZEROCONF_STATE["listener"] = KolibriZerocon
|
fListener()
ZEROCONF_STATE["zeroconf"].add_service_listener(
SERVICE_TYPE, ZEROCONF_STATE["listener"]
)
def get_peer_instances():
try:
return ZEROCONF_STATE["listener"].instances.values()
except AttributeError:
return []

klahnakoski/jx-sqlite | vendor/jx_python/expressions/between_op.py | Python | mpl-2.0 | 438 | 0
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import BetweenOp as BetweenOp_
class BetweenOp(BetweenOp_):
pass

smithk86/flask-sse | flask_sse/server_sent_event.py | Python | mit | 1,387 | 0.000721
import json
from copy import copy
from collections import OrderedDict
# SSE "protocol" is described here: http://mzl.la/UPFyxY
class ServerSentEvent(object):
def __init__(self, data=None, event=None, retry=None, id=None):
if data is None and event is None:
raise ValueError('data and event cannot both be None')
self.data = data
self.event = event
self.retry = retry
self.id = id
def __copy__(self):
return ServerSentEvent(
data=copy(self.data),
event=self.event,
retry=self.retry,
id=self.id
)
def format(self):
items = OrderedDict(self)
if items['data'] is None:
items['data'] = '-'
elif isinstance(items['data'], str):
pass
else:
items['data'] = json.dumps(items['data'])
return items
def __iter__(self):
if self.retry:
yield 'retry', self.retry
yield 'data', self.data
if self.event:
yield 'event', self.event
if self.id:
yield 'id', self.id
def __str__(self):
return '{}\n\n'.format('\n'.join(
['{}: {}'.format(k, v) for k, v in self.format().items()]
))
def __repr__(self):
return '<ServerSentEvent event="{}">'.format(self.event if self.event else '')

RainbowAcademy/ScriptingLectures | 2015/ContourLine/DisplaceFromImage.py | Python | gpl-2.0 | 4,181 | 0.028223
__author__ = 'a.paoletti'
import maya.cmds as cmd
import os
import sys
sys.path.append("C://Users//a.paoletti//Desktop//MY//CORSOSCRIPTING - DISPLACE_GEOTIFF//gdalwin32-1.6//bin")
import colorsys
def getTexture():
"""
:rtype : String
:return : Name of the texture applied to the lambert's color channel
"""
sel = cmd.ls(sl=True)
print '--------- Selection is: ' + sel[0] + ' ---------'
selMesh = cmd.listRelatives(sel, s=True)
print '----- Shape: ' + selMesh[0]
selSG = cmd.listConnections(selMesh[0], t='shadingEngine')
print '----- Shading group: ' + selSG[0]
selMat = cmd.listConnections(selSG[0], t='lambert')
print '----- Material: ' + selMat[0]
selTexture = cmd.listConnections(selMat[0]+'.color')
print '--------- The texture is: ' + selTexture[0] + ' ---------'
return selTexture[0]
def testColorAtPoint():
# to test this function, select the entire mesh in object mode
txtName = getTexture()
colors = cmd.colorAtPoint(txtName, o='RGB', su=16, sv=16, mu=0.0, mv=0.0, xu=0.5, xv=0.5)
print colors
def clamp(my_value, min_value, max_value):
return max(min(my_value, max_value), min_value)
def colorToElevation(r, g, b):
"""
Given an RGB triple, returns the elevation value, interpreting the image
as a physical relief map
:param r: red component between 0 and 1
:param g: green component between 0 and 1
:param b: blue component between 0 and 1
:return: Float representing the elevation of the point
"""
hsvColor = colorsys.rgb_to_hsv(r, g, b)
h = hsvColor[0]
s = hsvColor[1]
v = hsvColor[2]
base = 5
elevation = 0
# print "H--- " + str(h) + "S--- " + str(s) + "V--- " + str(v)
# if v > 0.5:
tmp = clamp((0.23-h), 0, 1) # 0 blue, 1 red
elevation = pow(base, tmp+1) - base
return elevation
def testGeoSampler():
sel = cmd.ls(sl=True)
if len(sel) == 0:
raise Exception("Selezionare il piano!")
print '--------- Selection is: ' + sel[0] + ' ---------'
cmd.selectMode(component=True)
cmd.select(sel[0]+'.vtx[:]')
cmd.polyGeoSampler(cs=False, cdo=False, dg=False, ac=True, bf=False)
vtxNumber = len(cmd.getAttr(sel[0]+'.vtx[:]'))
# cmd.softSelect(sse=1, ssd=1)
for i in range(0, vtxNumber):
v = sel[0]+'.vtx[%d]' % i
cmd.select(v, r=True)
vColor = cmd.polyColorPerVertex(query=True, r=True, g=True, b=True)
r = vColor[0]
g = vColor[1]
b = vColor[2]
h = colorToElevation(r, g, b)
cmd.move(h, y=True, r=True)
cmd.softSelect(sse=0)
cmd.selectMode(object=True)
def readGeoTiff(filepath):
try:
from osgeo import gdal
except:
raise Exception("Cannot find gdal modules")
# enable GDAL exceptions
gdal.UseExceptions()
ds = gdal.Open(filepath)
band = ds.GetRasterBand(1)
elevation = band.ReadAsArray()
print elevation.shape
print elevation
def readGeoTiff2(filepath):
import gdal
import gdalconst
# coordinates to get pixel values for
xValues = [122588.008]
yValues = [484475.146]
# set directory
os.chdir(r'D:\\temp\\AHN2_060')
# register all of the drivers
gdal.AllRegister()
# open the image
ds = gdal.Open(filepath, gdalconst.GA_ReadOnly)
if ds is None:
print 'Could not open image'
sys.exit(1)
# get image size
rows = ds.RasterYSize
cols = ds.RasterXSize
bands = ds.RasterCount
# get georeference info
transform = ds.GetGeoTransform()
xOrigin = transform[0]
yOrigin = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
# loop through the coordinates
for xValue, yValue in zip(xValues, yValues):
# get x,y
x = xValue
y = yValue
# compute pixel offset
xOffset = int((x - xOrigin) / pixelWidth)
yOffset = int((y - yOrigin) / pixelHeight)
# create a string to print out
s = "%s %s %s %s " % (x, y, xOffset, yOffset)
# loop through the bands
for i in xrange(1, bands):
band = ds.GetRasterBand(i) # 1-based index
# read data and add the value to the string
data = band.ReadAsArray(xOffset, yOffset, 1, 1)
value = data[0, 0]
s = "%s%s " % (s, value)
# print out the data string
print s
# figure out how long the script took to run

eldruz/tournament_registration | tournament_registration/capitalism/models.py | Python | bsd-3-clause | 3,092 | 0.000323
from satchless.item import InsufficientStock, StockedItem
from datetime import date
from django.utils.text import slugify
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.db import models
from django_prices.models import PriceField
from django.core.exceptions import ValidationError
from registration.models import Tournament
class Product(models.Model, StockedItem):
"""An abstract class that embodies everything that can be sold."""
price = PriceField('Price',
currency='EUR',
max_digits=5,
decimal_places=2,
blank=False,
default=0.0)
stock = models.PositiveSmallIntegerField('Product Stock',
blank=False,
default=0)
date_added = models.DateField('Date added')
last_modified = models.DateTimeField('Last modified')
slug = models.SlugField('Product slug', max_length=256)
def get_price_per_item(self):
return self.price
class Meta:
abstract = True
class TournamentProductUtilitiesManager(models.Manager):
def create_tournament_product(self, tournament, price=0.0, stock=0):
tourney_product = TournamentProduct(tournament=tournament,
price=price,
stock=stock,
date_added=date.today(),
last_modified=timezone.now())
tourney_product.save()
return tourney_product
def update_tournament_product(self, product_id, **kwargs):
additional_attributes = {'price', 'stock', 'tournament'}
tourney_product = TournamentProduct.objects.get(pk=product_id)
for attribute, value in kwargs.items():
assert attribute in additional_attributes
setattr(tourney_product, attribute, value)
tourney_product.save()
return tourney_product
def delete_tournament_product(self, product_id):
tourney_product = TournamentProduct.objects.get(pk=product_id)
tourney_product.delete()
class TournamentProduct(Product):
tournament = models.OneToOneField(Tournament)
objects = models.Manager()
utilities = TournamentProductUtilitiesManager()
def get_stock(self):
return self.stock
def save(self, *args, **kwargs):
" Override the save method to check the stock and set the slug "
if self.stock > self.tournament.get_available_spots():
msg = 'Stock of a TournamentProduct cannot be greater than the \
tournament available spots'
raise ValidationError(msg)
self.slug = slugify(unicode(self.tournament.slug))
super(TournamentProduct, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('tournament_product_detail', kwargs={'slug': self.slug})
def __unicode__(self):
return self.tournament.title

openstack/vitrage-dashboard | vitrage_dashboard/alarms/panel.py | Python | apache-2.0 | 733 | 0
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class AlarmsVitrage(horizon.Panel):
name = _("Alarms")
slug = "vitragealarms"

NERC-CEH/jules-jasmin | majic/joj/lib/wms_capability_cache.py | Python | gpl-2.0 | 2,164 | 0.00878
"""
Manages a Beaker cache of WMS capabilities documents.
@author: rwilkinson
"""
import logging
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from joj.lib.wmc_util import GetWebMapCapabilities
log = logging.getLogger(__name__)
class WmsCapabilityCache():
""" Manages a Beaker cache of WMS capabilities documents.
"""
def __init__(self, config):
"""Creates a cache using the supplied configuration parameters or defaults.
"""
self.enableCache = (config.get('wmscapabilitycache.enable', 'True').lower() == 'true')
if self.enableCache:
cache_opts = {
'cache.expire': config.get('wmscapabilitycache.expire', None),
'cache.type': config.get('wmscapabilitycache.type', 'file'),
'cache.data_dir': config.get('wmscapabilitycache.data_dir', '/tmp/ecomaps/wmscapabilitycache/data'),
'cache.lock_dir': config.get('wmscapabilitycache.lock_dir', None)
}
cacheMgr = CacheManager(**parse_cache_config_options(cache_opts))
self.cache = cacheMgr.get_cache('getWmsCapabilities')
log.info("WMS capability c
|
aching %s" % ("enabled" if self.enableCache else "disabled"))
def getWmsCapabilities(self, wmsurl, forceRefresh):
"""Gets the WMS capabilities for an endpoint URL from the cache or WMS server if not found in the cache.
"""
if self.enableCache:
def __doGet():
"""Makes request for capabilities.
"""
log.debug("WMS capabilities not found in cache for %s" % search_param)
return GetWebMapCapabilities(search_param)
search_param = wmsurl
if forceRefresh:
self.cache.remove_value(key = search_param)
log.debug("Looking for WMS capabilities in cache for %s" % search_param)
return self.cache.get(key = search_param, createfunc = __doGet)
else:
log.debug("Fetching WMS capabilities for %s (caching disabled)" % wmsurl)
return GetWebMapCapabilities(wmsurl)

cragwen/hello-world | py/interpy/4_MapFilterReduce.py | Python | unlicense | 542 | 0.012915
items = [1, 2, 3, 4, 5]
squared = []
for i in items:
squared.append(i**2)
print(squared)
squared = []
squared = list(map(lambda x: x**2, items))
print(squared)
def multiply(x):
return (x*x)
def add(x):
return (x+x)
funcs = [multiply, add]
for i in range(5):
value = map(lambda x:x(i), funcs)
print(list(value))
number_list = range(-5, 5)
less_than_zero = filter(lambda x: x < 0, number_list)
print(list(less_than_zero))
from functools import reduce
product = reduce( (lambda x, y: x * y), [1, 2, 3, 4])
print(product)

traverseda/python-client | test/test_concurrency.py | Python | apache-2.0 | 341 | 0
from nose.tools import with_setup, eq_ as eq
from common import vim, cleanup
from threading import Timer
@with_setup(setup=cleanup)
def test_interrupt_from_another_thread():
session = vim.session
timer = Timer(0.5, lambda: session.threadsafe_call(lambda: session.stop()))
timer.start()
eq(vim.session.next_message(), None)

nwjs/chromium.src | mojo/public/tools/mojom/mojom_parser.py | Python | bsd-3-clause | 19,670 | 0.00788
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses mojom IDL files.
This script parses one or more input mojom files and produces corresponding
module files fully describing the definitions contained within each mojom. The
module data is pickled and can be easily consumed by other tools to, e.g.,
generate usable language bindings.
"""
import argparse
import builtins
import codecs
import errno
import json
import logging
import multiprocessing
import os
import os.path
import sys
import traceback
from collections import defaultdict
from mojom.generate import module
from mojom.generate import translate
from mojom.parse import parser
from mojom.parse import conditional_features
# Disable this for easier debugging.
# In Python 2, subprocesses just hang when exceptions are thrown :(.
_ENABLE_MULTIPROCESSING = sys.version_info[0] > 2
if sys.version_info < (3, 4):
_MULTIPROCESSING_USES_FORK = sys.platform.startswith('linux')
else:
# https://docs.python.org/3/library/multiprocessing.html#:~:text=bpo-33725
if __name__ == '__main__' and sys.platform == 'darwin':
multiprocessing.set_start_method('fork')
_MULTIPROCESSING_USES_FORK = multiprocessing.get_start_method() == 'fork'
def _ResolveRelativeImportPath(path, roots):
"""Attempts to resolve a relative import path against a set of possible roots.
Args:
path: The relative import path to resolve.
roots: A list of absolute paths which will be checked in descending length
order for a match against path.
Returns:
A normalized absolute path combining one of the roots with the input path if
and only if such a file exists.
Raises:
ValueError: The path could not be resolved against any of the given roots.
"""
for root in reversed(sorted(roots, key=len)):
abs_path = os.path.join(root, path)
if os.path.isfile(abs_path):
return os.path.normcase(os.path.normpath(abs_path))
raise ValueError('"%s" does not exist in any of %s' % (path, roots))
def _RebaseAbsolutePath(path, roots):
"""Rewrites an absolute file path as relative to an absolute directory path in
roots.
Args:
path: The absolute path of an existing file.
roots: A list of absolute directory paths. The given path argument must fall
within one of these directories.
Returns:
A path equivalent to the input path, but relative to one of the provided
roots. If the input path falls within multiple roots, the longest root is
chosen (and thus the shortest relative path is returned).
Paths returned by this method always use forward slashes as a separator to
mirror mojom import syntax.
Raises:
ValueError if the given path does not fall within any of the listed roots.
"""
assert os.path.isabs(path)
assert os.path.isfile(path)
assert all(map(os.path.isabs, roots))
sorted_roots = list(reversed(sorted(roots, key=len)))
def try_rebase_path(path, root):
head, rebased_path = os.path.split(path)
while head != root:
head, tail = os.path.split(head)
if not tail:
return None
rebased_path = os.path.join(tail, rebased_path)
return rebased_path
for root in sorted_roots:
relative_path = try_rebase_path(path, root)
if relative_path:
# TODO(crbug.com/953884): Use pathlib for this kind of thing once we're
# fully migrated to Python 3.
return relative_path.replace('\\', '/')
raise ValueError('%s does not fall within any of %s' % (path, sorted_roots))
def _GetModuleFilename(mojom_filename):
return mojom_filename + '-module'
def _EnsureInputLoaded(mojom_abspath, module_path, abs_paths, asts,
dependencies, loaded_modules, module_metadata):
"""Recursively ensures that a module and its dependencies are loaded.
Args:
mojom_abspath: An absolute file path pointing to a mojom file to load.
module_path: The relative path used to identify mojom_abspath.
abs_paths: A mapping from module paths to absolute file paths for all
inputs given to this execution of the script.
asts: A map from each input mojom's absolute path to its parsed AST.
dependencies: A mapping of which input mojoms depend on each other, indexed
by absolute file path.
loaded_modules: A mapping of all modules loaded so far, including non-input
modules that were pulled in as transitive dependencies of the inputs.
module_metadata: Metadata to be attached to every module loaded by this
helper.
Returns:
None
On return, loaded_modules will be populated with the loaded input mojom's
Module as well as the Modules of all of its transitive dependencies."""
if mojom_abspath in loaded_modules:
# Already done.
return
for dep_abspath, dep_path in sorted(dependencies[mojom_abspath]):
if dep_abspath not in loaded_modules:
_EnsureInputLoaded(dep_abspath, dep_path, abs_paths, asts, dependencies,
loaded_modules, module_metadata)
imports = {}
for imp in asts[mojom_abspath].import_list:
path = imp.import_filename
imports[path] = loaded_modules[abs_paths[path]]
loaded_modules[mojom_abspath] = translate.OrderedModule(
asts[mojom_abspath], module_path, imports)
loaded_modules[mojom_abspath].metadata = dict(module_metadata)
def _CollectAllowedImportsFromBuildMetadata(build_metadata_filename):
allowed_imports = set()
processed_deps = set()
def collect(metadata_filename):
processed_deps.add(metadata_filename)
with open(metadata_filename) as f:
metadata = json.load(f)
allowed_imports.update(
map(os.path.normcase, map(os.path.normpath, metadata['sources'])))
for dep_metadata in metadata['deps']:
if dep_metadata not in processed_deps:
collect(dep_metadata)
collect(build_metadata_filename)
return allowed_imports
# multiprocessing helper.
def _ParseAstHelper(mojom_abspath, enabled_features):
with codecs.open(mojom_abspath, encoding='utf-8') as f:
ast = parser.Parse(f.read(), mojom_abspath)
conditional_features.RemoveDisabledDefinitions(ast, enabled_features)
return mojom_abspath, ast
# multiprocessing helper.
def _SerializeHelper(mojom_abspath, mojom_path):
module_path = os.path.join(_SerializeHelper.output_root_path,
_GetModuleFilename(mojom_path))
module_dir = os.path.dirname(module_path)
if not os.path.exists(module_dir):
try:
# Python 2 doesn't support exist_ok on makedirs(), so we just ignore
# that failure if it happens. It's possible during build due to races
# among build steps with module outputs in the same directory.
os.makedirs(module_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(module_path, 'wb') as f:
_SerializeHelper.loaded_modules[mojom_abspath].Dump(f)
class _ExceptionWrapper:
def __init__(self):
# Do not capture exception object to ensure pickling works.
self.formatted_trace = traceback.format_exc()
class _FuncWrapper:
"""Marshals exceptions and spreads args."""
def __init__(self, func):
self._func = func
def __call__(self, args):
# multiprocessing does not gracefully handle exceptions.
# https://crbug.com/1219044
try:
return self._func(*args)
except: # pylint: disable=bare-except
return _ExceptionWrapper()
def _Shard(target_func, arg_list, processes=None):
arg_list = list(arg_list)
if processes is None:
processes = multiprocessing.cpu_count()
# Seems optimal to have each process perform at least 2 tasks.
processes = min(processes, len(arg_list) // 2)
if sys.platform == 'win32':
# TODO(crbug.com/1190269) - we can't use more than 56
# cores on Windows or Python3 may hang.
processes = min(processes, 56)
# Don't spin up processes unless there is enough work to merit doing so.
if not _ENABLE_MULTIPROCESSING or processes < 2:
for arg_tuple in arg_list:
yield target_func(*arg_tuple)
return
pool = multiproces

pu239ppy/authentic2 | authentic2/decorators.py | Python | agpl-3.0 | 764 | 0.003927
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from functools import wraps
TRANSIENT_USER_TYPES = []
def is_transient_user(user):
return isinstance(user, tuple(TRANSIENT_USER_TYPES))
def prevent_access_to_transient_users(view_func):
def _wrapped_view(request, *args, **kwargs):
'''Test if the user is transient'''
for user_type in TRANSIENT_USER_TYPES:
if is_transient_user(request.user):
return HttpResponseRedirect('/')
return view_func(request, *args, **kwargs)
return login_required(wraps(view_func)(_wrapped_view))
def to_list(func):
@wraps(func)
def f(*args, **kwargs):
return list(func(*args, **kwargs))
return f

sserrot/champion_relationships | venv/Lib/site-packages/pip/_internal/index/collector.py | Python | mit | 22,838 | 0
"""
The main purpose of this module is to expose LinkCollector.collect_links().
"""
import cgi
import functools
import itertools
import logging
import mimetypes
import os
import re
from collections import OrderedDict
from pip._vendor import html5lib, requests
from pip._vendor.distlib.compat import unescape
from pip._vendor.requests.exceptions import RetryError, SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.exceptions import NetworkConnectionError
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.utils import raise_for_status
from pip._internal.utils.filetypes import ARCHIVE_EXTENSIONS
from pip._internal.utils.misc import pairwise, redact_auth_from_url
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url, url_to_path
from pip._internal.vcs import is_url, vcs
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import (
Callable, Iterable, List, MutableMapping, Optional,
Protocol, Sequence, Tuple, TypeVar, Union,
)
import xml.etree.ElementTree
from pip._vendor.requests import Response
from pip._internal.network.session import PipSession
HTMLElement = xml.etree.ElementTree.Element
ResponseHeaders = MutableMapping[str, str]
# Used in the @lru_cache polyfill.
F = TypeVar('F')
class LruCache(Protocol):
def __call__(self, maxsize=None):
# type: (Optional[int]) -> Callable[[F], F]
raise NotImplementedError
logger = logging.getLogger(__name__)
# Fallback to noop_lru_cache in Python 2
# TODO: this can be removed when python 2 support is dropped!
def noop_lru_cache(maxsize=None):
# type: (Optional[int]) -> Callable[[F], F]
def _wrapper(f):
# type: (F) -> F
return f
return _wrapper
_lru_cache = getattr(functools, "
|
lru_cache", noop_lru_cache) # type: LruCache
def _match_vcs_scheme(url):
# type: (str) -> Optional[str]
"""Look for VCS schemes in the URL.
Returns the matched VCS scheme, or None if there's no match.
"""
for scheme in vcs.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
return scheme
return None
def _is_url_like_archive(url):
# type: (str) -> bool
"""Return whether the URL looks like an archive.
"""
filename = Link(url).filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
return True
return False
class _NotHTML(Exception):
def __init__(self, content_type, request_desc):
# type: (str, str) -> None
super(_NotHTML, self).__init__(content_type, request_desc)
self.content_type = content_type
self.request_desc = request_desc
def _ensure_html_header(response):
# type: (Response) -> None
"""Check the Content-Type header to ensure the response contains HTML.
Raises `_NotHTML` if the content type is not text/html.
"""
content_type = response.headers.get("Content-Type", "")
if not content_type.lower().startswith("text/html"):
raise _NotHTML(content_type, response.request.method)
class _NotHTTP(Exception):
pass
def _ensure_html_response(url, session):
# type: (str, PipSession) -> None
"""Send a HEAD request to the URL, and ensure the response contains HTML.
Raises `_NotHTTP` if the URL is not available for a HEAD request, or
`_NotHTML` if the content type is not text/html.
"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in {'http', 'https'}:
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
raise_for_status(resp)
_ensure_html_header(resp)
def _get_html_response(url, session):
# type: (str, PipSession) -> Response
"""Access an HTML page with GET, and return the response.
This consists of three parts:
1. If the URL looks suspiciously like an archive, send a HEAD first to
check the Content-Type is HTML, to avoid downloading a large file.
Raise `_NotHTTP` if the content type cannot be determined, or
`_NotHTML` if it is not HTML.
2. Actually perform the request. Raise HTTP exceptions on network failures.
3. Check the Content-Type header to make sure we got HTML, and raise
`_NotHTML` otherwise.
"""
if _is_url_like_archive(url):
_ensure_html_response(url, session=session)
logger.debug('Getting page %s', redact_auth_from_url(url))
resp = session.get(
url,
headers={
"Accept": "text/html",
# We don't want to blindly return cached data for
# /simple/, because authors generally expecting that
# twine upload && pip install will function, but if
# they've done a pip install in the last ~10 minutes
# it won't. Thus by setting this to zero we will not
# blindly use any cached data, however the benefit of
# using max-age=0 instead of no-cache, is that we will
# still support conditional requests, so we will still
# minimize traffic sent in cases where the page hasn't
# changed at all, we will just always incur the round
# trip for the conditional GET now instead of only
# once per 10 minutes.
# For more information, please see pypa/pip#5670.
"Cache-Control": "max-age=0",
},
)
raise_for_status(resp)
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
_ensure_html_header(resp)
return resp
def _get_encoding_from_headers(headers):
# type: (ResponseHeaders) -> Optional[str]
"""Determine if we have any encoding information in our headers.
"""
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
return params['charset']
return None
def _determine_base_url(document, page_url):
# type: (HTMLElement, str) -> str
"""Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
"""
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
def _clean_url_path_part(part):
# type: (str) -> str
"""
Clean a "part" of a URL path (i.e. after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
return urllib_parse.quote(urllib_parse.unquote(part))
def _clean_file_url_path(part):
# type: (str) -> str
"""
Clean the first part of a URL path that corresponds to a local
filesystem path (i.e. the first part after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
# Also, on Windows the path part might contain a drive letter which
# should not be quoted. On Linux where drive letters do not
# exist, the colon should be quoted. We rely on urllib.request
# to do the right thing here.
return urllib_request.pathname2url(urllib_request.url2pathname(part))
# percent-encoded: /
_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)
def _clean_url_path(path, is_lo

svera/clouddump | tools.py | Python | gpl-2.0 | 1,226 | 0.006525
import json
import sys
import logging
import logging.handlers
def load_config():
'''Loads application configuration from a JSON file'''
try:
json_data = open('config.json')
config = json.load(json_data)
json_data.close()
return config
except Exception:
print """There was an error loading config.json.
Make sure that the file exists and it's a valid JSON file."""
sys.exit(1)
def init_logger(file_name='clouddump.log'):
'''
Initializes the logging file and module
parameters
----------
file_name: A string with the name of the file to write the logs in
'''
logger = logging.getLogger('clouddump')
log_file_handler = logging.handlers.RotatingFileHandler(
file_name, maxBytes = 10**9)
log_format = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
log_file_handler.setFormatter(log_format)
logger.addHandler(log_file_handler)
logger.setLevel(logging.DEBUG)
if len(sys.argv) > 1:
if sys.argv[1] == '-v' or sys.argv[1] == '--verbose':
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(console)

rero/reroils-app | tests/api/patron_transactions/test_patron_transactions_permissions.py | Python | gpl-2.0 | 7,427 | 0
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2020 RERO
# Copyright (C) 2020 UCLouvain
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import mock
from flask import url_for
from invenio_accounts.testutils import login_user_via_session
from utils import get_json
from rero_ils.modules.patron_transactions.permissions import \
PatronTransactionPermission
def test_pttr_permissions_api(client, patron_martigny,
system_librarian_martigny,
librarian_martigny,
patron_transaction_overdue_martigny,
patron_transaction_overdue_saxon,
patron_transaction_overdue_sion):
"""Test patron transactions permissions api."""
pttr_permissions_url = url_for(
'api_blueprint.permissions',
route_name='patron_transactions'
)
pttr_martigny_permission_url = url_for(
'api_blueprint.permissions',
route_name='patron_transactions',
record_pid=patron_transaction_overdue_martigny.pid
)
pttr_saxon_permission_url = url_for(
'api_blueprint.permissions',
route_name='patron_transactions',
record_pid=patron_transaction_overdue_saxon.pid
)
pttr_sion_permission_url = url_for(
'api_blueprint.permissions',
route_name='patron_transactions',
record_pid=patron_transaction_overdue_sion.pid
)
# Not logged
res = client.get(pttr_permissions_url)
assert res.status_code == 401
# Logged as patron
login_user_via_session(client, patron_martigny.user)
res = client.get(pttr_permissions_url)
assert res.status_code == 403
# Logged as librarian
# * lib can 'list' and 'read' pttr of its own organisation
# * lib can 'create', 'update', 'delete' only for its library
# * lib can't 'read' acq_account of others organisation.
# * lib can't 'create', 'update', 'delete' acq_account for other org/lib
login_user_via_session(client, librarian_martigny.user)
res = client.get(pttr_martigny_permission_url)
assert res.status_code == 200
data = get_json(res)
assert data['read']['can']
assert data['list']['can']
assert data['create']['can']
assert data['update']['can']
# 'delete' should be true but return false because an event is linked
# assert data['delete']['can']
res = client.get(pttr_saxon_permission_url)
assert res.status_code == 200
data = get_json(res)
assert data['read']['can']
assert data['list']['can']
assert data['update']['can']
# 'delete' should be true but return false because an event is linked
# assert not data['delete']['can']
res = client.get(pttr_sion_permission_url)
assert res.status_code == 200
data = get_json(res)
assert not data['read']['can']
assert data['list']['can']
assert not data['update']['can']
assert not data['delete']['can']
# Logged as system librarian
# * sys_lib can do everything about pttr of its own organisation
# * sys_lib can't do anything about pttr of other organisation
login_user_via_session(client, system_librarian_martigny.user)
res = client.get(pttr_saxon_permission_url)
assert res.status_code == 200
data = get_json(res)
assert data['read']['can']
assert data['list']['can']
assert data['create']['can']
assert data['update']['can']
# 'delete' should be true but return false because an event is linked
# assert data['delete']['can']
res = client.get(pttr_sion_permission_url)
assert res.status_code == 200
data = get_json(res)
assert not data['read']['can']
assert not data['update']['can']
assert not data['delete']['can']
def test_pttr_permissions(patron_martigny,
librarian_martigny,
system_librarian_martigny,
org_martigny, patron_transaction_overdue_saxon,
patron_transaction_overdue_sion,
patron_transaction_overdue_martigny):
"""Test patron transaction permissions class."""
# Anonymous user
assert not PatronTransactionPermission.list(None, {})
assert not PatronTransactionPermission.read(None, {})
assert not PatronTransactionPermission.create(None, {})
assert not PatronTransactionPermission.update(None, {})
assert not PatronTransactionPermission.delete(None, {})
# As Patron
pttr_m = patron_transaction_overdue_martigny
pttr_sa = patron_transaction_overdue_saxon
pttr_si = patron_transaction_overdue_sion
with mock.patch(
'rero_ils.modules.patron_transactions.permissions.current_patrons',
[patron_martigny]
):
assert PatronTransactionPermission.list(None, pttr_m)
assert PatronTransactionPermission.read(None, pttr_m)
assert not PatronTransactionPermission.create(None, pttr_m)
assert not PatronTransactionPermission.update(None, pttr_m)
assert not PatronTransactionPermission.delete(None, pttr_m)
# As Librarian
with mock.patch(
'rero_ils.modules.patron_transactions.permissions.current_librarian',
librarian_martigny
):
assert PatronTransactionPermission.list(None, pttr_m)
assert PatronTransactionPermission.read(None, pttr_m)
assert PatronTransactionPermission.create(None, pttr_m)
assert PatronTransactionPermission.update(None, pttr_m)
assert PatronTransactionPermission.delete(None, pttr_m)
assert PatronTransactionPermission.read(None, pttr_sa)
assert PatronTransactionPermission.create(None, pttr_sa)
assert PatronTransactionPermission.update(None, pttr_sa)
assert PatronTransactionPermission.delete(None, pttr_sa)
assert not PatronTransactionPermission.read(None, pttr_si)
assert not PatronTransactionPermission.create(None, pttr_si)
assert not PatronTransactionPermission.update(None, pttr_si)
assert not PatronTransactionPermission.delete(None, pttr_si)
# As System-librarian
with mock.patch(
'rero_ils.modules.patron_transactions.permissions.current_librarian',
system_librarian_martigny
):
assert PatronTransactionPermission.list(None, pttr_sa)
assert PatronTransactionPermission.read(None, pttr_sa)
assert PatronTransactionPermission.create(None, pttr_sa)
assert PatronTransactionPermission.update(None, pttr_sa)
assert PatronTransactionPermission.delete(None, pttr_sa)
assert not PatronTransactionPermission.read(None, pttr_si)
assert not PatronTransactionPermission.create(None, pttr_si)
assert not PatronTransactionPermission.update(None, pttr_si)
assert not PatronTransactionPermission.delete(None, pttr_si)

electrolinux/pootle | pootle/core/search/broker.py | Python | gpl-3.0 | 2,440 | 0.002049
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from . import SearchBackend
import importlib
import logging
class SearchBroker(SearchBackend):
def __init__(self, config_name=None):
super(SearchBroker, self).__init__(config_name)
self._servers = {}
if self._settings is None:
return
for server in self._settings:
if config_name is None or server in config_name:
try:
_module = '.'.join(self._settings[server]['ENGINE'].split('.')[:-1])
_search_class = self._settings[server]['ENGINE'].split('.')[-1]
except KeyError:
logging.warning("Search engine '%s' is missing the required "
"'ENGINE' setting" % server)
break
try:
module = importlib.import_module(_module)
try:
self._servers[server] = getattr(module, _search_class)(server)
except AttributeError:
logging.warning("Search backend '%s'. No search class "
"'%s' defined." % (server, _search_class))
except ImportError:
logging.warning("Search backend '%s'. Cannot import '%s'" %
(server, _module))
def search(self, unit):
if not self._servers:
return []
results = []
counter = {}
for server in self._servers:
for result in self._servers[server].search(unit):
translation_pair = result['source'] + result['target']
if translation_pair not in counter:
counter[translation_pair] = result['count']
results.append(result)
else:
counter[translation_pair] += result['count']
for item in results:
item['count'] = counter[item['source']+item['target']]
return results
def update(self, language, obj):
for server in self._servers:
self._servers[server].update(language, obj)

maaaaz/androwarn | warn/search/manifest/manifest.py | Python | lgpl-3.0 | 4,316 | 0.01089
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of Androwarn.
#
# Copyright (C) 2012, 2019, Thomas Debize <tdebize at mail.com>
# All rights reserved.
#
# Androwarn is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androwarn is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androwarn. If not, see <http://www.gnu.org/licenses/>.
# Global imports
import logging
import codecs
import pprint
# Logger
log = logging.getLogger('log')
def grab_main_activity(apk) :
"""
@param apk : an APK instance
@rtype : the name of the main activity
"""
return apk.get_main_activity()
def grab_activities(apk) :
"""
@param apk : an APK instance
@rtype : the android:name attribute of all activities
"""
return apk.get_activities()
def grab_services(apk) :
"""
@param apk : an APK instance
@rtype : the android:name attribute of all services
"""
return apk.get_services()
def grab_receivers(apk) :
"""
@param apk : an APK instance
@rtype : the android:name attribute of all receivers
"""
return apk.get_receivers()
def grab_providers(apk) :
"""
@param apk : an APK instance
@rtype : the android:name attribute of all providers
"""
return apk.get_providers()
def grab_permissions(apk) :
"""
@param apk : an APK instance
@rtype : a list of permissions
"""
'''
result = ["Asked: %s" % "\n".join(sorted(apk.get_permissions())),
"Implied: %s" % apk.get_uses_implied_permission_list(),
"Declared: %s" % apk.get_declared_permissions()]
'''
result = ["Asked: %s" % pprint.pformat(sorted(apk.get_permissions())),
"Implied: %s" % pprint.pformat(sorted(apk.get_uses_implied_permission_list())),
"Declared: %s" % pprint.pformat(sorted(apk.get_declared_permissions()))]
return result
def grab_features(apk) :
"""
@param apk : an APK instance
@rtype : a list of features
"""
return list(apk.get_features())
def grab_libraries(apk) :
"""
@param apk : an APK instance
@rtype : the libraries' names
"""
return list(apk.get_libraries())
def grab_file_list(apk) :
"""
@param apk : an APK instance
:rtype : the file list inside the APK
"""
return apk.get_files()
def grab_certificate_information(apk) :
"""
@param apk : an APK instance
@rtype : a certificate object by giving the name in the apk file
"""
cert_info = []
cert_info.append("APK is signed: %s\n" % apk.is_signed())
for index,cert in enumerate(apk.get_certificates()):
cert_info.append("Certificate #%s" % index)
cert_info_issuer = ["Issuer:", cert.issuer.human_friendly]
cert_info_subject = ["Subject:", cert.subject.human_friendly]
cert_info.extend(cert_info_issuer)
cert_info.extend(cert_info_subject)
cert_info.append("Serial number: %s" % cert.serial_number)
cert_info.append("Hash algorithm: %s" % cert.hash_algo)
cert_info.append("Signature algorithm: %s" % cert.signature_algo)
cert_info.append("SHA-1 thumbprint: %s" % codecs.encode(cert.sha1, 'hex').decode())
cert_info.append("SHA-256 thumbprint: %s" % codecs.encode(cert.sha256, 'hex').decode())
cert_info.append("")
return cert_info
def grab_sdk_versions(apk) :
result = ["Declared target SDK: %s" % apk.get_target_sdk_version(),
"Effective target SDK: %s" % apk.get_effective_target_sdk_version(),
"Min SDK: %s" % apk.get_min_sdk_version(),
"Max SDK: %s" % apk.get_max_sdk_version()]
return result

idekerlab/py2cytoscape | py2cytoscape/data/session_client.py | Python | mit | 839 | 0.001192
import requests
import warnings
warnings.warn('\n\n\n**** data.session_client will be deprecated in the next py2cytoscape release. ****\n\n\n')
class SessionClient(object):
def __init__(self, url):
self.__url = url + 'session'
def delete(self):
requests.delete(self.__url)
def save(self, file_name=None):
if file_name is None:
raise ValueError('Session file name is required.')
post_url = self.__url
params = {'file': file_name}
res = requests.post(post_url, params=params)
return res
def open(self, file_name=None):
if file_name is None:
raise ValueError('Session file name is required.')
get_url = self.__url
params = {'file': file_name}
res = requests.get(get_url, params=params)
return res

renegelinas/mi-instrument | mi/dataset/driver/flord_g/ctdbp_p/dcl/test/test_flord_g_ctdbp_p_dcl_recovered_driver.py | Python | bsd-2-clause | 893 | 0.003359
import os
import unittest
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.ctdbp_p.dcl.resource import RESOURCE_PATH
from mi.dataset.driver.flord_g.ctdbp_p.dcl.flord_g_ctdbp_p_dcl_recovered_driver import parse
__author__ = 'jeff roy'
log = get_logger()
class DriverTest(unittest.TestCase):
def test_one(self):
source_file_path = os.path.join(RESOURCE_PATH, 'ctdbp01_20150804_061734.DAT')
particle_data_handler = ParticleDataHandler()
particle_data_handler = parse(None, source_file_path, particle_data_handler)
log.debug("SAMPLES: %s", particle_data_handler._samples)
log.debug("FAILURE: %s", particle_data_handler._failure)
self.assertEquals(particle_data_handler._failure, False)
if __name__ == '__main__':
test = DriverTest('test_one')
test.test_one()

justasabc/kubernetes-ubuntu | smartfootball/okooo/okooo_setting.py | Python | apache-2.0 | 796 | 0.023869
from setting import MATCH_TYPE_JC,MATCH_TYPE_M14
#url_m14_fmt = "http://www.okooo.com/livecenter/zucai/?mf=ToTo&date=15077"
#url_jc_fmt = "http://www.okooo.com/livecenter/jingcai/?date=2015-05-26"
url_jc_fmt = "http://www.okooo.com/livecenter/jingcai/?date={0}"
url_m14_fmt = "http://www.okooo.com/livecenter/zucai/?mf=ToTo&date={0}"
#url_jc_odds_change = "http://www.okooo.com/soccer/match/736957/odds/change/2/"
url_jc_odds_change_fmt = "http://www.okooo.com/soccer/match/{0}/odds/change/{1}/"
def get_url_jc(dt):
dt_str = dt.strftime("%Y-%m-%d")
return url_jc_fmt.format(dt_str)
def get_url_m14(sid):
return url_m14_fmt.format(sid)
def get_url_odds_change(okooo_id,bookmaker_id=2):
return url_jc_odds_change_fmt.format(okooo_id,bookmaker_id)
OKOOO_BOOKMAKER_DATA = {
"jingcai":2,
}

harisibrahimkv/wye | wye/workshops/mixins.py | Python | mit | 6,362 | 0.000629
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from wye.base.constants import WorkshopStatus, FeedbackType
from wye.base.emailer import send_mail
from wye.organisations.models import Organisation
from wye.profiles.models import Profile
from wye.regions.models import RegionalLead
from .models import Workshop, WorkshopFeedBack
class WorkshopAccessMixin(object):
def dispatch(self, request, *args, **kwargs):
user = request.user
pk = self.kwargs.get(self.pk_url_kwarg, None)
workshop = Workshop.objects.get(id=pk)
is_admin = Profile.is_admin(user)
is_lead = (Profile.is_regional_lead(user) and
RegionalLead.is_regional_lead(user, workshop.location))
is_organiser = (Profile.is_organiser(user) and
user in workshop.requester.user.all())
if not (is_admin or is_lead or is_organiser):
return HttpResponseForbidden("Not sufficent permission")
return super(WorkshopAccessMixin, self).dispatch(request, *args, **kwargs)
class WorkshopFeedBackMixin(object):
"""
Restrict access to feedback url if
- Workshop is not completed
- If the user accessing the url is not presenter or
organiser
"""
def dispatch(self, request, *args, **kwargs):
pk = self.kwargs.get('pk')
workshop = Workshop.objects.get(id=pk)
user = self.request.user
if workshop.status != WorkshopStatus.COMPLETED:
raise Http404
if not (workshop.is_presenter(user) or workshop.is_organiser(user)):
raise PermissionDenied
return super(WorkshopFeedBackMixin, self).dispatch(request, *args, **kwargs)
class WorkshopRestrictMixin(object):
"""
Mixin to restrict
- For organisation to add workshop if no feedback is shared.
- For presenter to takeup workshop if no feedback is shared
"""
allow_presenter = False
def dispatch(self, request, *args, **kwargs):
self.user = request.user
self.feedback_required = []
# check if user is tutor
if Profile.is_presenter(self.user) and self.allow_presenter:
self.validate_presenter_feedback()
elif (Profile.is_organiser(self.user) and
Organisation.list_user_organisations(self.user).exists()):
# if user is from organisation
self.validate_organisation_feedback()
elif (Profile.is_regional_lead(self.user) or
Profile.is_admin(self.user)):
pass # don't restrict lead and admin
else:
msg = """
To request workshop you need to create organisation.\n\n
Please use organisation tab above to create your organisation"""
# return json for ajax request
return render(request, 'error.html', {'message': msg})
if self.feedback_required:
return self.return_response(request)
return super(WorkshopRestrictMixin, self).dispatch(request, *args, **kwargs)
def validate_presenter_feedback(self):
workshops = Workshop.objects.filter(
presenter=self.user, status=WorkshopStatus.COMPLETED)
for workshop in workshops:
feedback = WorkshopFeedBack.objects.filter(
workshop=workshop, feedback_type=FeedbackType.PRESENTER
).count()
if feedback == 0:
self.feedback_required.append(workshop)
def validate_organisation_feedback(self):
workshops = Workshop.objects.filter(
requester__user=self.user, status=WorkshopStatus.COMPLETED)
for workshop in workshops:
feedback = WorkshopFeedBack.objects.filter(
workshop=workshop, feedback_type=FeedbackType.ORGANISATION
).count()
if feedback == 0:
self.feedback_required.append(workshop)
def return_response(self, request):
msg = "Please complete the feeback for %s" % (
", ".join(map(str, self.feedback_required)))
# return json for ajax request
if request.is_ajax():
return JsonResponse({"status": False, "msg": msg})
messages.error(request, msg)
return HttpResponseRedirect(reverse('workshops:workshop_list'))
class WorkshopEmailMixin(object):
def send_mail_to_presenter(self, user, context):
"""
Send email to presenter.
@param user: Is user object
@param context: Is dict of data required by email template.
"""
# Send email to presenter
return send_mail([user.email], context, self.email_dir)
def send_mail_to_group(self, context, exclude_emails=None):
"""
Send email to org/group users.
@param context: Is dict of data required by email template.
@exclude_emails: Is list of email to be excluded from email update.
"""
if exclude_emails is None:
exclude_emails = []
# Collage POC and admin email
poc_admin_user = Profile.get_user_with_type(
user_type=['Collage POC', 'admin']
).values_list('email', flat=True)
# Org user email
org_user_emails = self.object.requester.user.filter(
is_active=True
).values_list('email', flat=True)
# all presenter if any
all_presenter_email = self.object.presenter.values_list(
'email', flat=True
)
# List of tutor who have shown interest in that location
region_interested_member = Profile.objects.filter(
interested_locations=self.object.requester.location,
usertype__slug='tutor'
).values_list('user__email', flat=True)
all_email = []
all_email.extend(org_user_emails)
all_email.extend(all_presenter_email)
all_email.extend(poc_admin_user)
all_email.extend(region_interested_member)
all_email = set(all_email)
all_email = list(all_email.difference(exclude_emails))
send_mail(all_email, context, self.email_dir)

dimagi/commcare-hq | corehq/form_processor/migrations/0026_caseforms_to_casetransaction.py | Python | bsd-3-clause | 1,422 | 0.00211
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('form_processor', '0025_caseforms_server_date'),
]
operations = [
migrations.CreateModel(
name='CaseTransaction',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('form_uuid', models.CharField(max_length=255)),
('server_date', models.DateTimeField()),
('type', models.PositiveSmallIntegerField(choices=[(0, 'form'), (1, 'rebuild')])),
('case', models.ForeignKey(related_query_name='xform', related_name='xform_set', db_column='case_uuid', to_field='case_uuid', to='form_processor.CommCareCaseSQL', db_index=False, on_delete=models.CASCADE)),
],
options={
'ordering': ['server_date'],
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='caseforms',
unique_together=None,
),
migrations.RemoveField(
model_name='caseforms',
name='case',
),
migrations.DeleteModel(
name='CaseForms',
),
migrations.AlterUniqueTogether(
name='casetransaction',
unique_together=set([('case', 'form_uuid')]),
),
]

vmdowney/oclc-udev | oclc_udev.py | Python | mit | 7,800 | 0.010128
# This script only works for OCLC UDEV reports created after December 31, 2015
import csv
import datetime
import re
import requests
import sys
import time
from lxml import html
user_date = raw_input('Enter report month and year (mm/yyyy) or year only (yyyy): ')
if len(user_date) == 7: # For running a report for a single month
try:
input_date = datetime.datetime.strptime(user_date, '%m/%Y')
input_month = input_date.strftime('%b')
input_year = input_date.strftime('%Y')
except ValueError:
print 'Report month and year %s is not in mm/yyyy format. Please run again.' % (user_date)
sys.exit(1)
elif len(user_date) == 4: # For running a report for an entire year
input_date = datetime.datetime.strptime(user_date, '%Y')
input_year = input_date.strftime('%Y')
else:
print 'Report month and year %s is not in mm/yyyy or yyyy format. Please run again.' % (user_date)
sys.exit(1)
print '%s is running ...' % (sys.argv[0])
url = 'http://lms01.harvard.edu/oclc-project/udev/'
r = requests.get(url)
doc = html.fromstring(r.text)
index_links = doc.xpath('//a/@href') #Get all links
report_links = []
report_log = []
for index_link in index_links:
if len(user_date)==7 and (input_month in index_link and input_year in index_link) or len(user_date)==4 and input_year in index_link: #Find links on index page that match the user's input date
index_link = url + index_link
r = requests.get(index_link)
doc = html.fromstring(r.text)
page_links = doc.xpath('//a/@href') #Get report links on each dated report page
for page_link in page_links:
page_link = index_link + '/' + page_link
report_links.append(page_link)
oclc_symbol = ['HHG'] #List of OCLC symbols to match in records; separate codes with commas; put '' in list to retrieve all OCLC symbols
output_data = []
for report_link in report_links: #Process each report
report_date = report_link[report_link.find('/d')+2:report_link.find('/d')+9]
report_date = '20%s-%s-%s' % (report_date[:2], report_date[2:4], report_date[4:6])
r = requests.get(report_link)
content = r.text
count_949 = 0 # Count the number of 949 fields and log for troubleshooting
for symbol in oclc_symbol:
count_949 = count_949 + len(content.split('=l'+ symbol))-1 # Count the number of 949 fields and log for troubleshooting
report_log.append([report_date, count_949])
if count_949 > 0: # Only process reports that have records with relevant holdings
content = content[:content.find('\n \n PROCESSING SUMMARY STATISTICS')] # Remove report footer
content = content.replace('\n \n ERROR SEVERITY','\n ERROR SEVERITY') # Remove double line break before 'ERROR SEVERITY' note
content = content.replace('\n \n LDR','\n LDR') # Remove double line break before 'LDR'
content = content.replace('\n \n RECORD','\n RECORD') # Remove double line break before 'RECORD REJECTED' note
records = content.split('\n \n')
for record in records:
if any(symbol in record for symbol in ['=l'+ symbol for symbol in oclc_symbol]): # Only process records with holdings for selected OCLC symbols
lines = []
lines = record.split('\n')
record_data = []
title = []
last_tag = ''
for line in lines:
errors = {}
tag = line[8:11]
line_no = line[18:22]
if line.startswith(' ERROR:'):
errors['Error'] = line[8:]
errors['Report Date'] = report_date
errors['Report Filename'] = report_link
errors['Error Line'] = ''
if re.findall(r'\d{3}\sFIELD', line):
errors['Error Field'] = re.findall(r'\d{3}\sFIELD', line)[-1].split(' ')[0]
elif re.findall(r'\sFIELD\s\d{3}', line):
errors['Error Field'] = re.findall(r'\sFIELD\s\d{3}', line)[-1].split(' ')[-1]
else:
errors['Error Field'] = ''
if re.findall(r'POSITION[S]?\s[\d-]+', line):
errors['Error Position'] = re.findall(r'POSITION[S]?\s[\d-]+', line)[0].split(' ')[1]
else:
errors['Error Position'] = ''
record_data.append(errors)
elif line.startswith(' ERROR SEVERITY') or line.startswith(' RECORD REJECTED'):
for data in record_data:
data['OCLC Status'] = line.strip()
elif tag == '001': # 001 field within line start ("|") and line end ("+|") indicators
for data in record_data:
data['Bib No'] = line[27:-2].split('-')[1]
data['Hol No'] = aleph_hol = line[27:-2].split('-')[0]
elif tag == '949':
for data in record_data:
data['Library'] = line[line.index('=l')+2:-2] # 949 field within subfield l and line end indicator ("+|")
else:
if tag == ' ':
tag = last_tag
for data in record_data:
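# Attach the offending field's text to each logged error: always for 008/006 fields,
# otherwise when no position was reported or when the reported position falls inside this line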
if tag == data['Error Field']:
if tag == '008' or tag == '006':
data['Error Line'] = line[27:].rstrip('|').rstrip('+')
elif data['Error Position'] == '':
data['Error Line'] = line[27:].rstrip('|').rstrip('+')
else:
if int(line_no) <= int(data['Error Position']) <= int(line_no) + len(line[27: ].rstrip('|')):
data['Error Line'] = line[27:].rstrip('|').rstrip('+')
if tag == '245':
title.append(line[27:].rstrip('|').rstrip('+'))
elif tag == ' ' and last_tag == '245':
title += [line[27:].rstrip('|').rstrip('+')]
if line[8:11] != ' ':
last_tag = line[8:11]
if title:
title = ''.join(title) # join parts of the title
title = ' '.join(re.split(r'=[a-z0-9]', title)) # remove subfield markers and tags
for data in record_data:
data['Title'] = title  # .strip() temporarily commented out -- strip causes an error if 245 is missing
output_data += record_data
print 'Report for OCLC Symbol(s):', ', '.join(oclc_symbol)
report_log.sort()
for log in report_log:
print 'Report Date:', log[0], 'Record Count:', log[1]
if len(user_date) == 7:
f = 'OCLC_UDEV_%s_%s.csv' % (input_year, input_date.strftime('%m')) #Changes format of month in input date for filename
elif len(user_date) == 4:
f = 'OCLC_UDEV_%s.csv' % (input_year) #For an annual report
with open(f,'wb') as output:
fieldnames = ['Report Date', 'Library', 'Bib No', 'Hol No', 'Title', 'Error', 'Error Field', 'Error Position', 'Error Line', 'OCLC Status', 'Report Filename']
writer = csv.DictWriter(output, delimiter=',', fieldnames=fieldnames)
writer.writeheader()
for data in output_data:
writer.writerow(data)
|
jkandasa/integration_tests
|
cfme/tests/openstack/cloud/test_volumes.py
|
Python
|
gpl-2.0
| 1,818
| 0.00055
|
"""Tests for Openstack cloud volumes"""
import fauxfactory
import pytest
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
pytestmark = [
pytest.mark.usefixtures("setup_provider_modscope"),
pytest.mark.provider([OpenStackProvider], scope='module')
]
VOLUME_SIZE = 1
@pytest.yield_fixture(scope='function')
def volume(appliance, provider):
collection = appliance.collections.volumes
storage_manager = '{} Cinder Manager'.format(provider.name)
volume = collection.create(name=fauxfactory.gen_alpha(),
storage_manager=storage_manager,
tenant=provider.data['provisioning']['cloud_tenant'],
size=VOLUME_SIZE,
provider=provider)
yield volume
try:
if volume.exists:
volume.delete(wait=False)
except Exception:
logger.warning('Exception during volume deletion - skipping..')
@pytest.mark.meta(blockers=[BZ(1502609, forced_streams=["5.9"])])
def test_create_volume(volume, provider):
assert volume.exists
assert volume.size == '{} GB'.format(VOLUME_SIZE)
assert volume.tenant == provider.data['provisioning']['cloud_tenant']
@pytest.mark.meta(blockers=[BZ(1502609, forced_streams=["5.9"])])
def test_edit_volume(volume, appliance):
new_name = fauxfactory.gen_alpha()
volume.edit(new_name)
view = navigate_to(appliance.collections.volumes, 'All')
assert view.entities.get_entity(name=new_name, surf_pages=True)
@pytest.mark.meta(blockers=[BZ(1502609, forced_streams=["5.9"])])
def test_delete_volume(volume):
volume.delete()
assert not volume.exists
|
arviz-devs/arviz
|
arviz/tests/helpers.py
|
Python
|
apache-2.0
| 21,624
| 0.00148
|
# pylint: disable=redefined-outer-name, comparison-with-callable
"""Test helper functions."""
import gzip
import importlib
import logging
import os
import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import cloudpickle
import numpy as np
import pytest
from _pytest.outcomes import Skipped
from packaging.version import Version
from ..data import InferenceData, from_dict
_log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def eight_schools_params():
"""Share setup for eight schools."""
return {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
@pytest.fixture(scope="module")
def draws():
"""Share default draw count."""
return 500
@pytest.fixture(scope="module")
def chains():
"""Share default chain count."""
return 2
def create_model(seed=10):
"""Create model with fake data."""
np.random.seed(seed)
nchains = 4
ndraws = 500
data = {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
posterior = {
"mu": np.random.randn(nchains, ndraws),
"tau": abs(np.random.randn(nchains, ndraws)),
"eta": np.random.randn(nchains, ndraws, data["J"]),
"theta": np.random.randn(nchains, ndraws, data["J"]),
}
posterior_predictive = {"y": np.random.randn(nchains, ndraws, len(data["y"]))}
sample_stats = {
"energy": np.random.randn(nchains, ndraws),
"diverging": np.random.randn(nchains, ndraws) > 0.90,
"max_depth": np.random.randn(nchains, ndraws) > 0.90,
}
log_likelihood = {
"y": np.random.randn(nchains, ndraws, data["J"]),
}
prior = {
"mu": np.random.randn(nchains, ndraws) / 2,
"tau": abs(np.random.randn(nchains, ndraws)) / 2,
"eta": np.random.randn(nchains, ndraws, data["J"]) / 2,
"theta": np.random.randn(nchains, ndraws, data["J"]) / 2,
}
prior_predictive = {"y": np.random.randn(nchains, ndraws, len(data["y"])) / 2}
sample_stats_prior = {
"energy": np.random.randn(nchains, ndraws),
"diverging": (np.random.randn(nchains, ndraws) > 0.95).astype(int),
}
model = from_dict(
posterior=posterior,
posterior_predictive=posterior_predictive,
sample_stats=sample_stats,
log_likelihood=log_likelihood,
prior=prior,
prior_predictive=prior_predictive,
sample_stats_prior=sample_stats_prior,
observed_data={"y": data["y"]},
dims={
"y": ["obs_dim"],
"log_likelihood": ["obs_dim"],
"theta": ["school"],
"eta": ["school"],
},
coords={"obs_dim": range(data["J"])},
)
return model
def create_multidimensional_model(seed=10):
"""Create model with fake data."""
np.random.seed(seed)
nchains = 4
ndraws = 500
ndim1 = 5
ndim2 = 7
data = {
"y": np.random.normal(size=(ndim1, ndim2)),
"sigma": np.random.normal(size=(ndim1, ndim2)),
}
posterior = {
"mu": np.random.randn(nchains, ndraws),
"tau": abs(np.random.randn(nchains, ndraws)),
"eta": np.random.randn(nchains, ndraws, ndim1, ndim2),
"theta": np.random.randn(nchains, ndraws, ndim1, ndim2),
}
posterior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2)}
sample_stats = {
"energy": np.random.randn(nchains, ndraws),
"diverging": np.random.randn(nchains, ndraws) > 0.90,
}
log_likelihood = {
"y": np.random.randn(nchains, ndraws, ndim1, ndim2),
}
prior = {
"mu": np.random.randn(nchains, ndraws) / 2,
"tau": abs(np.random.randn(nchains, ndraws)) / 2,
"eta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
"theta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
}
prior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2}
sample_stats_prior = {
"energy": np.random.randn(nchains, ndraws),
"diverging": (np.random.randn(nchains, ndraws) > 0.95).astype(int),
}
model = from_dict(
posterior=posterior,
posterior_predictive=posterior_predictive,
sample_stats=sample_stats,
log_likelihood=log_likelihood,
prior=prior,
prior_predictive=prior_predictive,
sample_stats_prior=sample_stats_prior,
observed_data={"y": data["y"]},
dims={"y": ["dim1", "dim2"], "log_likelihood": ["dim1", "dim2"]},
coords={"dim1": range(ndim1), "dim2": range(ndim2)},
)
return model
def create_data_random(groups=None, seed=10):
"""Create InferenceData object using random data."""
if groups is None:
groups = ["posterior", "sample_stats", "observed_data", "posterior_predictive"]
rng = np.random.default_rng(seed)
data = rng.normal(size=(4, 500, 8))
idata_dict = dict(
posterior={"a": data[..., 0], "b": data},
sample_stats={"a": data[..., 0], "b": data},
observed_data={"b": data[0, 0, :]},
posterior_predictive={"a": data[..., 0], "b": data},
prior={"a": data[..., 0], "b": data},
prior_predictive={"a": data[..., 0], "b": data},
warmup_posterior={"a": data[..., 0], "b": data},
warmup_posterior_predictive={"a": data[..., 0], "b": data},
warmup_prior={"a": data[..., 0], "b": data},
)
idata = from_dict(
**{group: ary for group, ary in idata_dict.items() if group in groups}, save_warmup=True
)
return idata
@pytest.fixture()
def data_random():
"""Fixture containing InferenceData object using random data."""
idata = create_data_random()
return idata
@pytest.fixture(scope="module")
def models():
"""Fixture containing 2 mock inference data instances for testing."""
# blank line to keep black and pydocstyle happy
class Models:
model_1 = create_model(seed=10)
model_2 = create_model(seed=11)
return Models()
@pytest.fixture(scope="module")
def multidim_models():
"""Fixture containing 2 mock inference data instances with multidimensional data for testing."""
# blank line to keep black and pydocstyle happy
class Models:
model_1 = create_multidimensional_model(seed=10)
model_2 = create_multidimensional_model(seed=11)
return Models()
def check_multiple_attrs(
test_dict: Dict[str, List[str]], parent: InferenceData
) -> List[Union[str, Tuple[str, str]]]:
"""Perform multiple hasattr checks on InferenceData objects.
It is designed to first check whether the parent object contains a given dataset,
and then (if present) check the attributes of that dataset.
Given the output of the function, all mismatches between expectation and reality can
be retrieved: a single string indicates a group mismatch and a tuple of strings
``(group, var)`` indicates a mismatch in the variable ``var`` of ``group``.
Parameters
----------
test_dict: dict of {str : list of str}
Its structure should be `{dataset1_name: [var1, var2], dataset2_name: [var]}`.
A ``~`` at the beginning of a dataset or variable name indicates that the name must be
asserted to NOT be present.
parent: InferenceData
InferenceData object on which to check the attributes.
Returns
-------
list
List containing the failed checks. It will contain either the dataset_name or a
tuple (dataset_name, var) for all non present attributes.
Examples
--------
The output below indicates that the ``posterior`` group was expected but not found, and
that variables ``a`` and ``b`` were expected in the ``prior`` group but not found:
["posterior", ("prior", "a"), ("prior", "b")]
Another example could be the following:
[("p
|
sburnett/seattle
|
repy/tests/ut_repytests_testremovefilefnf.py
|
Python
|
mit
| 88
| 0.022727
|
#pragma error
#pragma repy
removefile("this.file.does.not.exist") # should fail (FNF
|
)
|
awsok/SaltAdmin
|
view/index.py
|
Python
|
gpl-2.0
| 10,160
| 0.012983
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from main import *
import time
import random
import urllib2
import json
#import os
def genToken(L):
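    # Build a random token of length L from lowercase letters, uppercase letters and digits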
CharLib = map(chr,range(97,123)+range(65,91)+range(48,58))
Str = []
for i in range(L):
Str += random.sample(CharLib,1)
return ''.join(Str)
# Key is md5 for string "xiaok"
key = 'db884468559f4c432bf1c1775f3dc9da'
# Encrypt the UID
def encryptUID(id):
return key + str(id)
# 解密SID
def decryptUID(uStr):
return int(uStr.split('a')[1])
# Get a cookie value by name
def getCookie(name):
ck = web.cookies()
if ck.get(name):
return ck.get(name)
else:
return None
# Create a session
def genSession(SID,Username,ShowName,LastIP,LastLocation,LastDate,Token,Lstat,kpl):
LoginDate = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
Expiry = 86400
session = web.config._session
session.isLogin = True
session.SID = SID
session.Username = Username
session.ShowName = ShowName
session.LastLocation = LastLocation
# Whether the login returned normally
if Lstat == 'ok':
session.Lstat = '正常'
elif Lstat == 'other':
session.Lstat = '您的上次登录在别的电脑或者别的浏览器'
else:
session.Lstat = '未知'
# Get client information
#print 'HTTP_ENV: '
#print web.ctx.environ # source address
#print 'HTTP_REFERER: '
#print web.ctx.env.get('HTTP_REFERER', 'http://google.com')
#LoginHost = web.ctx.ip # either method can obtain the client IP
LoginHost = web.ctx.environ['REMOTE_ADDR']
Agent = web.ctx.environ['HTTP_USER_AGENT']
# Test lookup
#LoginHost = '119.122.181.82'
# Determine the location of this login
Location = 'Localhost'
ip = LoginHost.split('.')
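# Crude local-network check: concatenate the first two octets and compare against
# 172.16.*, 192.168.* and 127.0.*, or treat any 10.* address as local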
if ip[0]+ip[1] in ['17216','192168','1270'] or ip[0] == '10':
Location = '本地局域网'
else:
# Resolve the location via the public network here
url = "http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json&ip=" + LoginHost
response = urllib2.urlopen(url)
rt = json.load(response)
if rt['ret'] == 1 :
Location = rt['province'] + rt['city'] + ' [' + rt['isp'] + ']'
else:
Location = 'unkown'
# Write the login log to the database
if not Token:
# Token tells whether this login was verified by username/password input or by an existing token
Token = genToken(32)
if kpl == 'no':
Expiry = 0 # Do not remember the login: make the token stored in the database expire at login time
#db.query('''update users set loginhost="%s",logindate="%s" where id="%s"''' % (LoginHost,LoginDate,SID))
db.query('''insert into login_logs (uid,ip,location,agent,token,expiry) values ("%s","%s","%s","%s","%s",NOW() + INTERVAL %d SECOND)''' % (SID,LoginHost,Location,Agent,Token,Expiry))
db.query('''update users set loginfo=(select id from login_logs where uid="%s" and ip="%s" and token="%s" and status="yes" order by id desc limit 1) where id="%s"''' % (SID,LoginHost,Token,SID))
# Write the token to the session (stored server-side)
session.Token = Token
# Write the uid and token to cookies (stored client-side)
#web.setcookie('Username', Username, Expiry)
# Store the uid disguised as Username in the cookies
web.setcookie('Username', encryptUID(SID), Expiry)
web.setcookie('Token', Token, Expiry)
# Write the last login date and IP to the session
if LastDate:
# Format the date with year/month/day for frontend display; if None, this is the user's first login
session.LastDate = time.strftime('%Y年%m月%d日 %H:%M:%S',time.strptime(str(LastDate),'%Y-%m-%d %H:%M:%S'))
else:
session.LastDate = '第一次登录'
session.LastIP = LastIP
# Design notes on writing the current login date and IP to the database:
# 1. On a successful login, fetch the previous login time and IP from the database, write them to the session, then immediately update the database with this login's IP and time
# 2. Alternatively, write this login's time and IP only to the session, leave the database record untouched, and persist the session-stored info only when the user logs out normally
# 3. The two methods record opposite data. Method 2 is not used because if the user does not exit normally the database would never record this login's info, so...
# By Luxiaok 2014-04-07 22:49:00
# Login succeeded; the DB operations here really ought to have exception handling
# return True
class Login:
def GET(self,*args):
# The URL goes through several regex matches, so redundant parameters must be handled; it is still unclear why the url regex passes an extra argument to GET
# The extra argument is simply the matched url suffix
#print "Self =",self
#print "Args =",args
uid = getCookie('Username')
token = getCookie('Token')
sid = getCookie('xk_session')
HTTP_REFERER = getCookie('HTTP_REFERER')
#print 'Login referer from cookie: ',HTTP_REFERER
if uid and token:
#print 'uid =',uid
#print 'token =',token
#print 'sid =',sid
uid = decryptUID(uid)
try:
g = db.query('''
select U.id,U.username,U.nickname,U.loginfo,L.id as LID,L.ip,L.date from login_logs as L
left join users as U on L.uid=U.id
where U.id="%s" and L.token="%s" and L.status="yes" and L.expiry>now() and U.status="yes"''' % (uid,token))
except Exception,e:
print "MySQL Error: ",Exception,":",e
return "Database Error"
if g:
d = g[0]
Username = d.username
Lstat = 'ok' # abnormal-login feedback flag
if not d.nickname:
ShowName = d.username
else:
ShowName = d.nickname
if d.loginfo != d.LID:
g2 = db.query('''select L.ip,L.date from users as U left join login_logs as L on U.loginfo=L.id where U.id="%s"''' % uid)
d = g2[0]
# A notice about login from another browser or location could also be returned here
Lstat = "other" #上次登录在别的浏览器或者异地、异机
LastIP = d.ip
LastDate = d.date
genSession(uid,Username,ShowName,LastIP,LastDate,token,Lstat,kpl='yes')
if HTTP_REFERER:
web.setcookie('HTTP_REFERER', '88888888', -1000)
return web.seeother(HTTP_REFERER)
else:
return web.seeother("/dashboard")
else:
# If the token status stored in the database is "no" (the user logged out normally and the session is invalid), clear the local cookies
web.setcookie('Username', '88888888', -1)
web.setcookie('Token', '88888888', -1)
if getLogin():
#SID = getLogin()['SID']
ShowName = getLogin()['ShowName']
#print "ShowName: " + ShowName
return render.dashboard(ShowName=ShowName)
else:
#return web.seeother("/login")
return render.login()
def POST(self,*args):
getPost = web.input()
#kpl = getPost.kpl # whether to remember the login
try:
getSQL = db.query('''select u.id,u.username,u.password,u.nickname,u.status,L.ip,L.location,L.date from users as u left join login_logs as L on u.loginfo=L.id where username="%s" and password=md5("%s")''' % (getPost.username,getPost.password))
except:
# Server (database) error
return "false"
if getSQL:
# Fetch the login data
getData = getSQL[0]
SID = getData['id']
Username = getData['username']
Status = getData['status']
ShowName = getData['nickname']
LastDate = getData['date']
LastIP = getData['ip']
LastLocation = getData['location']
if not ShowName:
ShowName = Username
if Status == 'yes':
# Login requirements met: write the login data to the session and create the session
genSession(SID,Username,ShowName,LastIP,LastLocation,LastDate,False,Lstat='ok',kpl=getPost.kpl)
#HTTP_REFERER = getCookie('HTTP_REFERER')
#if HTTP_REFERER:
# web.setcookie('HTTP_REFERER', '88888888', -1000)
# return web.seeother(HTTP_REFERER)
#else:
# web.setcookie('HTTP_REFERER', '88888888', -1000)
# return web.seeother("/dashboard")
return "true"
else:
# The user is disabled
return "disable"
else:
# Incorrect username or password
return "error"
class Logout:
def GET(self):
uid = getCookie('Username')
token = getCookie('Token')
sidName = getCookie('xk_session')
if uid and token and sidName:
uid = decryptUID(uid)
#sfile = 'session/' + sidName
# Delete the session file; the kill method seems to remove the sessionID file
#try:
# os.remove(sfile)
#except Exception,e:
# print "Session File Error: ",Exception,":",e
# Set the cookie status to no
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/wearables/ithorian/shared_ith_bandolier_s08.py
|
Python
|
mit
| 472
| 0.04661
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/ithorian/shared_ith_bandolier_s08.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","ith_bandolier_s08")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
l33tdaima/l33tdaima
|
p838m/push_dominoes.py
|
Python
|
mit
| 1,778
| 0.001125
|
class Solution:
def pushDominoesSim(self, dominoes: str) -> str:
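        # Brute-force simulation: pad with '.' sentinels, then repeatedly sweep left-to-right to
        # propagate 'R' pushes and right-to-left to propagate 'L' pushes, one cell per sweep,
        # until the state stops changing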
prev, curr, N = None, ["."] + list(dominoes) + ["."], len(dominoes)
while prev != curr:
prev = curr[:]
i = 1
while i <= N:
if curr[i] == "." and prev[i - 1] == "R" and prev[i + 1] != "L":
curr[i], i = "R", i + 1
i += 1
i = N
while i >= 1:
if curr[i] == "." and prev[i + 1] == "L" and prev[i - 1] != "R":
curr[i], i = "L", i - 1
i -= 1
return "".join(curr[1:-1])
def pushDominoes(self, dominoes: str) -> str:
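        # Two-pass solution: left-to-right record each cell's distance to the nearest active 'R'
        # (distr), right-to-left its distance to the nearest active 'L' (distl), using 'L'/'R'
        # sentinels at the ends; each '.' then falls toward the closer force and stays upright on ties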
d, n = list("L" + dominoes + "R"), len(dominoes)
distr, posr = [n] * (n + 2), -float("Inf")
for i in range(1, n + 1):
if d[i] == "R":
posr = i
elif d[i] == "L":
posr = -float("Inf")
else:
distr[i] = min(distr[i], i - posr)
distl, posl = [n] * (n + 2), float("Inf")
for i in range(1, n + 1)[::-1]:
if d[i] == "L":
posl = i
elif d[i] == "R":
posl = float("Inf")
else:
distl[i] = min(distl[i], posl - i)
if distl[i] < distr[i]:
d[i] = "L"
elif distr[i] < distl[i]:
d[i] = "R"
return "".join(d[1:-1])
# TESTS
for dominoes, expected in [
("RR.L", "RR.L"),
(".L.R...LR..L..", "LL.RR.LLRRLL.."),
("....", "...."),
("R...", "RRRR"),
("....L", "LLLLL"),
]:
sol = Solution()
actual = sol.pushDominoes(dominoes)
print("Final domino state of", dominoes, "->", actual)
assert actual == expected
|
annikaliebgott/ImFEATbox
|
features_python/ImFEATbox/GlobalFeatures/Intensity/_SVDF.py
|
Python
|
apache-2.0
| 4,317
| 0.008571
|
import numpy as np
from scipy.misc import imrotate
from ImFEATbox.__helperCommands import conv2float
from scipy.stats import skew, kurtosis
def SVDF(I, returnShape=False):
"""
Input: - I: A 2D image
Output: - Out: A (1x780) vector containing 780 metrics calculated
from singular value decomposition
"""
# ************************************************************************
# Implemented for MRI feature extraction by the Department of Diagnostic
# and Interventional Radiology, University Hospital of Tuebingen, Germany
# and the Institute of Signal Processing and System Theory University of
# Stuttgart, Germany. Last modified: November 2016
#
# This implementation is part of ImFEATbox, a toolbox for image feature
# extraction and analysis. Available online at:
# https://github.com/annikaliebgott/ImFEATbox
#
# Contact: annika.liebgott@iss.uni-stuttgart.de
# ************************************************************************
if returnShape:
return (780,1)
## Calculate Singular Value Decomposition of the image
# convert image to float
I = conv2float(I)
I = I.T
# initialize feature variables
dia_elements = np.zeros((np.min(np.shape(I)),3))
eig_U = np.zeros((np.shape(I)[0],3))
eig_V = np.zeros((np.shape(I)[1],3))
det_U = np.zeros(3)
det_V = np.zeros(3)
trace_U = np.zeros(3)
trace_V = np.zeros(3)
rank_U = np.zeros(3)
rank_V = np.zeros(3)
median_eig_U = np.zeros(3)
median_eig_V = np.zeros(3)
max_eig_U = np.zeros(3)
max_eig_V = np.zeros(3)
mean_U = np.zeros(3)
mean_V = np.zeros(3)
mean_S = np.zeros(3)
std_U = np.zeros(3)
std_V = np.zeros(3)
std_S = np.zeros(3)
skewness_U = np.zeros(3)
skewness_V = np.zeros(3)
kurtosis_U = np.zeros(3)
kurtosis_V = np.zeros(3)
# Calculate the measures for 3 different orientations
for z in range(0,3):
if z == 1:
# rotate image by 90 degree
I = imrotate(I, 90, interp='bilinear')
elif z == 2:
# rotate by -180 degrees (a net -90 relative to the original, since the image was rotated +90 for z == 1)
I = imrotate(I, -180, interp='bilinear')
# calculate singular value decomposition with diagonal matrix S and
# unitary matrices U and V
[U,S,V] = np.linalg.svd(I)
#U, V = U.T, V.T
## feature extraction
# calculate diagonal elements of matrix S
#for i in range(0, np.count_nonzero(S)):
dia_elements[:,z] = S[:]
# eigen values of U and V
eig_U[:,z] = np.linalg.eig(U)[0]
eig_V[:,z] = np.linalg.eig(V)[0]
# determinant of U and V
det_U[z] = np.linalg.det(U)
det_V[z] = np.linalg.det(V)
# trace of U and V
trace_U[z] = np.trace(U)
trace_V[z] = np.trace(V)
# rank of U and V
rank_U[z] = np.linalg.matrix_rank(U)
rank_V[z] = np.linalg.matrix_rank(V)
# skewness of U and V
skewness_U[z] = skew(np.ndarray.flatten(U))
skewness_V[z] = skew(np.ndarray.flatten(V))
# kurtosis of U and V
kurtosis_U[z] = kurtosis(np.ndarray.flatten(U), fisher=False, bias=False)
kurtosis_V[z] = kurtosis(np.ndarray.flatten(V), fisher=False, bias=False)
# mean of U, V and S
mean_U[z] = np.mean(U)
mean_V[z] = np.mean(V)
mean_S[z] = np.mean(S)
# standard deviation of U, V and S
std_U[z] = np.std(U, ddof=1)
std_V[z] = np.std(V, ddof=1)
std_S[z] = np.std(S, ddof=1)
# median of eigen values of U and V
median_eig_U[z] = np.median(eig_U[:,z])
median_eig_V[z] = np.median(eig_V[:,z])
# maximum of eigen values of U and V
max_eig_U[z] = np.max(eig_U[:,z])
max_eig_V[z] = np.max(eig_V[:,z])
## return feature vector
#np.prod(np.shape(eig_U[:100,:]))
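# Feature count check: 40*3 diagonal elements + 100*3 eigenvalues of U + 100*3 eigenvalues of V
# + 20 scalar measures * 3 orientations = 120 + 300 + 300 + 60 = 780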
Out = np.hstack([np.ndarray.flatten(dia_elements[:40,:]),
np.ndarray.flatten(eig_U[:100,:]),
np.ndarray.flatten(eig_V[:100,:]),
det_U, det_V, trace_U, trace_V, rank_U, rank_V, skewness_U, skewness_V,
kurtosis_U, kurtosis_V, mean_U, mean_V, mean_S, std_U, std_V, std_S,
median_eig_U, median_eig_V, max_eig_U, max_eig_V])
return Out
|
sdwfrost/piggy
|
extract_CDR3.py
|
Python
|
mit
| 749
| 0.048064
|
import sys
import re
from Bio import Seq,SeqIO
iname=sys.argv[1]
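# The regex below is intended to match a CDR3-like nucleotide region: two conserved leading
# codons, a Cys codon (TG[TC]), 5-32 further codons, and a trailing TGGG[GCT][GCT] motif
# (description inferred from the pattern itself)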
cdr3p=re.compile("(TT[TC]|TA[CT])(TT[CT]|TA[TC]|CA[TC]|GT[AGCT]|TGG)(TG[TC])(([GA][AGCT])|TC)[AGCT]([ACGT]{3}){5,32}TGGG[GCT][GCT]")
# Utility functions
def get_records(filename):
records=[]
for record in SeqIO.parse(filename,"fasta"):
records.append(record)
return records
records=get_records(iname)
numrecords=len(records)
results=[]
for i in range(numrecords):
r=records[i]
strseq=str(r.seq)
m=cdr3p.search(strseq)
if m!=None:
mspan=m.span()
result=strseq[mspan[0]:mspan[1]]
else:
result=""
results.append(result)
for i in range(numrecords):
r=records[i]
des=r.description
res=results[i]
if res!="":
print ">"+des+"\n"+res[9:-6]
| |
gratefulfrog/lib
|
python/pymol/colorramping.py
|
Python
|
gpl-2.0
| 13,994
| 0.011862
|
import math
class ColorPoint:
"""
Simple color-storage class; stores way-points on a color ramp
"""
def __init__(self,idx,col,colType):
# index, X-coordinate, on a palette
self.idx = idx
# color; usually an RGBA quad
self.color = col
# One of ColorTypes members
self.colorType = colType
def __str__(self):
return "(Index=%d; Color=(%0.3f,%0.3f,%0.3f,%0.3f); ColorType=%d)" % (self.idx, self.color[0], self.color[1], self.color[2], self.color[3], self.colorType)
class ColorTypes:
"""
Simple enumerated type for internal color formats
"""
RGBAi = 0
RGBAf = 1
HEX6 = 2
HEX8 = 3
class ColorRamp:
"""
Model for a simple color ramp
See __main__ below for usage.
"""
# we assume linear ramps for now
LINEAR = 0
GAUSSIAN = 1
EXPONENTIAL = 2
CIRCLE_RADIUS = 3
def __init__(self, nColors, *args, **kwargs):
# size of this ramp
self.nColors = nColors
# the list of RGBA float values
self.ramp = []
# ordered array of color indices
self.keys = {}
# ready to use; boolean; we need at least two
# color points to define a ramp
self.ready = False
#
if 'handle' in kwargs:
self.handle = kwargs['handle']
if 'name' in kwargs:
self.name = kwargs['name']
# list of unique ids for objects on the map canvas
self.canvas_ids = {}
def __str__(self):
"""
instances created with ColorRamp(nColors=XYZ, name="foo") will return "foo";
otherwise a long, debug-friendly description is returned.
"""
if getattr(self,'name',None)!=None:
return self.name
else:
s = "Object Name: Nameless\n"
s+= "Ready to use: " + str(self.ready) + "\n"
s+= "Keys: " + str(self.keys.keys()) + "\n"
for k in self.keys:
s += "Color[%d] = %s\n" % (k,self.keys[k])
s += "ColorRamp with %d colors follows...\n" % self.nColors
if self.ready:
s += str(self.getRamp()) + "\n"
else:
s += "[]\n"
return s
def addColor(self, idx, col, colType=ColorTypes.RGBAf, colScale=1.0):
"""
adds color, 'col', to ramp at index 'idx'. If 'idx' exists, this
function overwrites the value
"""
# check user input: color location
# if beyond ends of ramp, make end of ramp
if idx<0:
idx=0
elif idx>self.nColors-1:
idx=self.nColors-1
# check user input: color format
if type(col) != ().__class__ or len(col)!=4:
print "Error: Colors must be spefied as a RGBA tuple with four values."
print "Error: %s was given instead." % str(col)
return
# check user input: color type format
if colType not in (ColorTypes.RGBAi, ColorTypes.RGBAf):
print "Error: Color type specification must be either, "
print "Error: ColorRamp.RGBAi or ColorRamp.RGBAf"
return
userCol = None
# convert color type if needed
if colType==ColorTypes.RGBAf:
userCol = col
elif colType==ColorTypes.RGBAi:
userCol = map(lambda c: float(c)/float(colScale), col)
# create a ColorPoint and insert it
self.keys[idx] = ColorPoint(idx, userCol, colType)
# is this ramp yet good to use?
self.updateReady()
# what else do we need to do to modify the model?
def checkPoint(self, pt, startX, X):
"""
Checks if there is a point between startX and X.
"""
ret_x = startX
if startX < X:
for x in range(int(startX)+1, int(X)+1):
if x in self.keys:
break
ret_x = x
elif startX > X:
for x in range(int(startX)-1, int(X)-1, -1):
if x in self.keys:
br
|
eak
ret_x = x
return ret_x
def getPoint(self, pt):
"""
Returns the true index (horizontal position) of a given point.
"""
if pt in self.canvas_ids:
return self.canvas_ids[pt]
return None
def getRampList(self):
"""
Returns a list of floats representing the color ramp.
"""
ramp_list = []
for x in range(0,360):
if x in self.keys:
col = list(self.keys[x].color)
ramp_list.append(float(x))
ramp_list.append(float(col[0]))
ramp_list.append(float(col[1]))
ramp_list.append(float(col[2]))
ramp_list.append(float(col[3]))
return ramp_list
def movePoint(self,pt,X,alpha):
if pt not in self.canvas_ids:
# print "Error: Could not move pt(%d)." % pt
return
idx = self.canvas_ids[pt]
if idx in self.keys:
col = list(self.keys[idx].color)
else:
# print "Logic error no such index in self.keys"
return
col[3] = alpha
# prevent extreme points from being replaced
if X <= 0:
return
if X >= 359:
return
self.removeColor(idx)
# prevent extreme points from moving horizontally
if idx == 0:
X = 0
if idx == 359:
X = 359
self.addColor(X, tuple(col))
def removePoint(self,pt):
if pt not in self.canvas_ids:
# print "Error: Could not remove pt(%d)." % pt
return
idx = self.canvas_ids[pt]
if idx <= 0 or idx >= 359:
return
self.removeColor(idx)
def removeColor(self, idx):
# check user input
if idx not in self.keys: return
if idx<0 or idx>self.nColors-1: return
# remove the point
del self.keys[idx]
# is this ramp still good to use?
self.updateReady()
def updateReady(self):
# are we ready to use?
self.ready = (0 in self.keys and self.nColors-1 in self.keys)
def updateRamp(self):
# if idx is specified then it was either added or removed
# so adjust the ramp about that point
if not self.ready:
# no use in updating a ramp w/o proper colors
print "Msg: This color ramp is not yet ready to use. Please add"
print "Msg: at least two colors at the ramp's extreme points 0 and %d" % (self.nColors-1)
return
# OPTIMIZATION TODO:
# if idx!=None and idx no in self.keys, then the point
# was removed, just update around those pts
# if idx!=None and does exists in self.keys, then they
# just added this point, so update the pts around it
self.ramp = []
keyList = self.keys.keys()
keyList.sort()
keyList.reverse()
lowerId = keyList.pop()
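# Walk consecutive pairs of color keys and linearly interpolate RGBA values between each pair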
while len(keyList)>0:
upperId = keyList.pop()
# number of colors in between
span = int(abs(upperId-lowerId))
# get the actual colors
lowerCol, upperCol = self.keys[lowerId].color, self.keys[upperId].color
for x in range(span):
# linear mixing components
cUpper = float(x) / float(span)
cLower = 1.0 - cUpper
self.ramp.append((cLower * lowerCol[0] + cUpper * upperCol[0],
cLower * lowerCol[1] + cUpper * upperCol[1],
cLower * lowerCol[2] + cUpper * upperCol[2],
cLower * lowerCol[3] + cUpper * upperCol[3]))
lowerId = upperId
# fix the off-by one error
self.ramp.append(upperCol)
assert len(self.ramp)==self.nColors, "ColorRamp Logic Error: This ramp supports %d colors ONLY, but %d were found in the ramp." % (self.nColors, len(self.ramp))
|
gwct/core
|
python/generators/muscle_gen.py
|
Python
|
gpl-3.0
| 5,889
| 0.014943
|
#!/usr/bin/python
############################################################
# Generates commands for the muscle alignment program
############################################################
import sys, os, core, argparse
############################################################
# Options
parser = argparse.ArgumentParser(description="MUSCLE command generator");
parser.add_argument("-i", dest="input", help="Directory of input FASTA files.", default=False);
parser.add_argument("-o", dest="output", help="Desired output directory for aligned files. Job name (-n) will be appended to output directory name.", default=False);
parser.add_argument("-n", dest="name", help="A short name for all files associated with this job.", default=False);
parser.add_argument("-p", dest="path", help="The path to MUSCLE. Default: muscle", default="muscle");
parser.add_argument("--overwrite", dest="overwrite", help="If the output directory already exists and you wish to overwrite it, set this option.", action="store_true", default=False);
parser.add_argument("--outname", dest="outname", help="Use the end of the output directory path as the job name.", action="store_true", default=False);
# IO options
parser.add_argument("-part", dest="part", help="SLURM partition option.", default=False);
parser.add_argument("-tasks", dest="tasks", help="SLURM --ntasks option.", type=int, default=1);
parser.add_argument("-cpus", dest="cpus", help="SLURM --cpus-per-task option.", type=int, default=1);
parser.add_argument("-mem", dest="mem", help="SLURM --mem option.", type=int, default=0);
# SLURM options
args = parser.parse_args();
if not args.input or not os.path.isdir(args.input):
sys.exit( " * Error 1: An input directory must be defined with -i.");
args.input = os.path.abspath(args.input);
if not args.name:
name = core.getRandStr();
else:
name = args.name;
if not args.output:
sys.exit( " * Error 2: An output directory must be defined with -o.");
args.output = os.path.abspath(args.output);
# if args.outname:
# name = os.path.basename(args.output);
# else:
# args.output = args.output + "-" + name + "/";
if os.path.isdir(args.output) and not args.overwrite:
sys.exit( " * Error 3: Output directory (-o) already exists! Explicity specify --overwrite to overwrite it.");
# IO option error checking
if not args.part:
sys.exit( " * Error 4: -part must be defined as a valid node partition on your clutser.");
if args.tasks < 1:
sys.exit( " * Error 5: -tasks must be a positive integer.");
if args.cpus < 1:
sys.exit( " * Error 6: -cpus must be a positive integer.");
if args.mem < 0:
sys.exit( " * Error 7: -mem must be a non-negative integer.");
# SLURM option error checking
pad = 26
cwd = os.getcwd();
# Job vars
output_file = os.path.join(cwd, "jobs", "muscle_cmds_" + name + ".sh");
submit_file = os.path.join(cwd, "submit", "muscle_submit_" + name + ".sh");
logdir = os.path.join(args.output, "logs");
# Job files
##########################
# Reporting run-time info for records.
with open(output_file, "w") as outfile:
core.runTime("#!/bin/bash\n# MUSCLE command generator", outfile);
core.PWS("# IO OPTIONS", outfile);
core.PWS(core.spacedOut("# Input directory:", pad) + args.input, outfile);
if args.outname:
core.PWS(core.spacedOut("# --outname:", pad) + "Using end of output directory path as job name.", outfile);
if not args.name:
core.PWS("# -n not specified --> Generating random string for job name", outfile);
core.PWS(core.spacedOut("# Job name:", pad) + name, outfile);
core.PWS(core.spacedOut("# Output directory:", pad) + args.output, outfile);
if args.overwrite:
core.PWS(core.spacedOut("# --overwrite set:", pad) + "Overwriting previous files in output directory.", outfile);
if not os.path.isdir(args.output):
core.PWS("# Creating output directory.", outfile);
os.system("mkdir " + args.output);
core.PWS(core.spacedOut("# Logfile directory:", pad) + logdir, outfile);
if not os.path.isdir(logdir):
core.PWS("# Creating logfile directory.", outfile);
os.system("mkdir " + logdir);
core.PWS(core.spacedOut("# Job file:", pad) + output_file, outfile);
core.PWS("# ----------", outfile);
core.PWS("# SLURM OPTIONS", outfile);
core.PWS(core.spacedOut("# Submit file:", pad) + submit_file, outfile);
core.PWS(core.spacedOut("# SLURM partition:", pad) + args.part, outfile);
core.PWS(core.spacedOut("# SLURM ntasks:", pad) + str(args.tasks), outfile);
core.PWS(core.spacedOut("# SLURM cpus-per-task:", pad) + str(args.cpus), outfile);
core.PWS(core.spacedOut("# SLURM mem:", pad) + str(args.mem), outfile);
core.PWS("# ----------", outfile);
core.PWS("# BEGIN CMDS", outfile);
##########################
# Generating the commands in the job file.
for f in os.listdir(args.input):
base_input = os.path.splitext(f)[0];
cur_infile = os.path.join(args.input, f);
cur_outfile = os.path.join(args.output, base_input + "-muscle.fa");
cur_logfile = os.path.join(logdir, base_input + "-muscle.log");
muscle_cmd = args.path + " -in '" + cur_infile + "' -out '" + cur_outfile +"' > " + cur_logfile + " 2>&1";
outfile.write(muscle_cmd + "\n");
##########################
# Generating the submit script.
with open(submit_file, "w") as sfile:
submit = '''#!/bin/bash
#SBATCH --job-name={name}
#SBATCH --output={name}-%j.out
#SBATCH --mail-type=ALL
#SBATCH --mail-user=gregg.thomas@umontana.edu
#SBATCH --partition={partition}
#SBATCH --nodes=1
#SBATCH --ntasks={tasks}
#SBATCH --cpus-per-task={cpus}
#SBATCH --mem={mem}
parallel -j {tasks} < {output_file}'''
sfile.write(submit.format(name=name, partition=args.part, tasks=args.tasks, cpus=args.cpus, mem=args.mem, output_file=output_file));
##########################
|
batxes/4Cin
|
SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/mtx1_models/SHH_WT_models22582.py
|
Python
|
gpl-3.0
| 17,582
| 0.025082
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((7681.9, -659.304, 7494.63), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((6830.55, 913.984, 7439.66), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((5962.18, 1202.57, 5768.53), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((6075.58, -1067.94, 6332.77), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((5661.76, -1480.54, 4946.79), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((5263.15, 301.948, 3262.64), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in mark
|
er_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((5657.6, 1350.01, 1989.44), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((5635.18, 436.967, 2062), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((6297.35, 2999.42, 1516.02), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((7688.67, 3286.28, 623.621), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((7528.29, 5130.63, 896.956), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((7978.73, 5062.12, 2257.63), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((8274.3, 5716.61, 3629.56), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((8927.85, 4349.38, 3977.65), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((10585.2, 4795.17, 5477.35), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((10431.7, 5633.75, 8439.06), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((8498.18, 5622.95, 8718.03), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((8579.44, 6803.65, 8035.93), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((8586.24, 6534.96, 6389.88), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((9478.99, 7035.42, 5315.88), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((7648.01, 6080.27, 3952.17), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((8670.79, 6900.96, 5612.24), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((7482.22, 7092.88, 6016.52), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((7640.8, 7712.46, 6897.92), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((7937.6, 6728.67, 7833.34), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((8784.55, 6464.83, 9140.4), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((8561.12, 6559.58, 7596.64), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((8055.79, 5370.63, 5806.84), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((7590.58, 6693.72, 5322), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((7350.34, 6844.11, 4092.02), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((6751.74, 6797.72, 4646.23), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((7496.03, 5847.74, 3396.54), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"
|
devasia1000/anti_adblock
|
examples/har_extractor.py
|
Python
|
mit
| 10,062
| 0.004373
|
"""
This inline script utilizes harparser.HAR from https://github.com/JustusW/harparser
to generate a HAR log object.
"""
try:
from harparser import HAR
from pytz import UTC
except ImportError as e:
import sys
print >> sys.stderr, "\r\nMissing dependencies: please run `pip install mitmproxy[examples]`.\r\n"
raise
from datetime import datetime, timedelta, tzinfo
class _HARLog(HAR.log):
# The attributes need to be registered here for them to actually be available later via self. This is
# due to HAREncodable linking __getattr__ to __getitem__. Anything that is set only in __init__ will
# just be added as key/value pair to self.__classes__.
__page_list__ = []
__page_count__ = 0
__page_ref__ = {}
def __init__(self, page_list):
self.__page_list__ = page_list
self.__page_count__ = 0
self.__page_ref__ = {}
HAR.log.__init__(self, {"version": "1.2",
"creator": {"name": "MITMPROXY HARExtractor",
"version": "0.1",
"comment": ""},
"pages": [],
"entries": []})
def reset(self):
self.__init__(self.__page_list__)
def add(self, obj):
if isinstance(obj, HAR.pages):
self['pages'].append(obj)
if isinstance(obj, HAR.entries):
self['entries'].append(obj)
def create_page_id(self):
self.__page_count__ += 1
return "autopage_%s" % str(self.__page_count__)
def set_page_ref(self, page, ref):
self.__page_ref__[page] = ref
def get_page_ref(self, page):
return self.__page_ref__.get(page, None)
def get_page_list(self):
return self.__page_list__
def start(context, argv):
"""
On start we create a HARLog instance. You will have to adapt this to suit your actual needs
of HAR generation, as it will probably be necessary to cluster logs by IPs or reset them
from time to time.
"""
context.dump_file = None
if len(argv) > 1:
context.dump_file = argv[1]
else:
raise ValueError('Usage: -s "har_extractor.py filename" '
'(- will output to stdout, filenames ending with .zhar will result in compressed har)')
context.HARLog = _HARLog(['https://github.com'])
context.seen_server = set()
def response(context, flow):
"""
Called when a server response has been received. At the time of this message both
a request and a response are present and completely done.
"""
# Values are converted from float seconds to int milliseconds later.
ssl_time = -.001
connect_time = -.001
if flow.server_conn not in context.seen_server:
# Calculate the connect_time for this server_conn. Afterwards add it to seen list, in
# order to avoid the connect_time being present in entries that use an existing connection.
connect_time = flow.server_conn.timestamp_tcp_setup - flow.server_conn.timestamp_start
context.seen_server.add(flow.server_conn)
if flow.server_conn.timestamp_ssl_setup is not None:
# Get the ssl_time for this server_conn as the difference between the start of the successful
# tcp setup and the successful ssl setup. If no ssl setup has been made it is left as -1 since
# it doesn't apply to this connection.
ssl_time = flow.server_conn.timestamp_ssl_setup - flow.server_conn.timestamp_tcp_setup
# Calculate the raw timings from the different timestamps present in the request and response object.
# For lack of a way to measure it, DNS timings cannot be calculated. The same goes for HAR blocked:
# MITMProxy will open a server connection as soon as it receives the host and port from the client
# connection. So the time spent waiting is actually spent waiting between request.timestamp_end and
# response.timestamp_start thus it correlates to HAR wait instead.
timings_raw = {'send': flow.request.timestamp_end - flow.request.timestamp_start,
'wait': flow.response.timestamp_start - flow.request.timestamp_end,
'receive': flow.response.timestamp_end - flow.response.timestamp_start,
'connect': connect_time,
'ssl': ssl_time}
# HAR timings are integers in ms, so we have to re-encode the raw timings to that format.
timings = dict([(key, int(1000 * value)) for key, value in timings_raw.iteritems()])
# The full_time is the sum of all timings. Timings set to -1 will be ignored as per spec.
full_time = 0
for item in timings.values():
if item > -1:
full_time += item
started_date_time = datetime.fromtimestamp(flow.request.timestamp_start, tz=UTC).isoformat()
request_query_string = [{"name": k, "value": v} for k, v in flow.request.get_query()]
request_http_version = ".".join([str(v) for v in flow.request.httpversion])
# Cookies are shaped as tuples by MITMProxy.
request_cookies = [{"name": k.strip(), "value": v[0]} for k, v in (flow.request.get_cookies() or {}).iteritems()]
request_headers = [{"name": k, "value": v} for k, v in flow.request.headers]
request_headers_size = len(str(flow.request.headers))
request_body_size = len(flow.request.content)
response_http_version = ".".join([str(v) for v in flow.response.httpversion])
# Cookies are shaped as tuples by MITMProxy.
response_cookies = [{"name": k.strip(), "value": v[0]} for k, v in (flow.response.get_cookies() or {}).iteritems()]
response_headers = [{"name": k, "value": v} for k, v in flow.response.headers]
response_headers_size = len(str(flow.response.headers))
response_body_size = len(flow.response.content)
response_body_decoded_size = len(flow.response.get_decoded_content())
response_body_compression = response_body_decoded_size - response_body_size
response_mime_type = flow.response.headers.get_first('Content-Type', '')
response_redirect_url = flow.response.headers.get_first('Location', '')
entry = HAR.entries({"startedDateTime": started_date_time,
"time": full_time,
"request": {"method": flow.request.method,
"url": flow.request.url,
"httpVersion": request_http_version,
"cookies": request_cookies,
"headers": request_headers,
"queryString": request_query_string,
"headersSize": request_headers_size,
"bodySize": request_body_size, },
"response": {"status": flow.response.code,
"statusText": flow.response.msg,
"httpVersion": response_http_version,
"cookies": response_cookies,
"headers": response_headers,
"content": {"size": response_body_size,
"compression": response_body_compression,
"mimeType": response_mime_type},
"redirectURL": response_redirect_url,
"headersSize": response_headers_size,
"bodySize": response_body_size, },
"cache": {},
"timings": timings, })
# If the current url is in the page list of context.HARLog or does not have a referrer we add it as a new
# pages object.
if flow.request.url in context.HARLog.get_page_list() or flow.request.headers.get('Referer', None) is None:
page_id = context.HARLog.create_page_id()
context.HARLog.add(HAR.pages({"startedDateTime": entry['startedDateTime'],
|
rlindner81/pyload
|
module/plugins/crypter/CrockoComFolder.py
|
Python
|
gpl-3.0
| 875
| 0.002286
|
# -*- coding: utf-8 -*-
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class CrockoComFolder(SimpleCrypter):
__name__ = "CrockoComFolder"
__type__ = "crypter"
__version__ = "0.06"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?crocko\.com/f/.+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "D
|
efault;Yes;No",
"Create folder for each package", "Default"),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Crocko.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
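# LINK_PATTERN appears to capture each file's download URL from the last cell of a row in the
# folder listing (inferred from the regex)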
LINK_PATTERN = r'<td class="last"><a href="(.+?)">download</a>'
|
Agent007/deepchem
|
deepchem/rl/envs/tictactoe.py
|
Python
|
mit
| 3,149
| 0.013973
|
import numpy as np
import copy
import random
import deepchem
class TicTacToeEnvironment(deepchem.rl.Environment):
"""
Play tictactoe against a randomly acting opponent
"""
X = np.array([1.0, 0.0])
O = np.array([0.0, 1.0])
EMPTY = np.array([0.0, 0.0])
ILLEGAL_MOVE_PENALTY = -3.0
LOSS_PENALTY = -3.0
NOT_LOSS = 0.1
DRAW_REWARD = 5.0
WIN_REWARD = 10.0
def __init__(self):
super(TicTacToeEnvironment, self).__init__([(3, 3, 2)], 9)
self.reset()
def reset(self):
self._terminated = False
self._state = [np.zeros(shape=(3, 3, 2), dtype=np.float32)]
# Randomize who goes first
if random.randint(0, 1) == 1:
move = self.get_O_move()
self._state[0][move[0]][move[1]] = TicTacToeEnvironment.O
def step(self, action):
self._state = copy.deepcopy(self._state)
row = action // 3
col = action % 3
# Illegal move -- the square is not empty
if not np.all(self._state[0][row][col] == TicTacToeEnvironment.EMPTY):
self._terminated = True
return TicTacToeEnvironment.ILLEGAL_MOVE_PENALTY
# Move X
self._state[0][row][col] = TicTacToeEnvironment.X
# Did X Win
if self.check_winner(TicTacToeEnvironment.X):
self._terminated = True
return TicTacToeEnvironment.WIN_REWARD
if self.game_over():
self._terminated = True
return TicTacToeEnvironment.DRAW_REWARD
move = self.get_O_move()
self._state[0][move[0]][move[1]] = TicTacToeEnvironment.O
# Did O Win
if self.check_winner(TicTacToeEnvironment.O):
self._terminated = True
return TicTacToeEnvironment.LOSS_PENALTY
if self.game_over():
self._terminated = True
return TicTacToeEnvironment.DRAW_REWARD
return TicTacToeEnvironment.NOT_LOSS
def get_O_move(self):
empty_squares = []
for row in range(3):
for col in range(3):
if np.all(self._state[0][row][col] == TicTacToeEnvironment.EMPTY):
empty_squares.append((row, col))
return random.choice(empty_squares)
def check_winner(self, player):
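    # A line is won when the element-wise sum of its three one-hot cells equals player * 3,
    # i.e. all three cells belong to `player`; rows, columns and both diagonals are checked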
for i in range(3):
row = np.sum(self._state[0][i][:], axis=0)
if np.all(row == player * 3):
return True
col = np.sum(self._state[0][:, i], axis=0)  # [:][i] would select row i again; [:, i] selects column i
if np.all(col == player * 3):
return True
diag1 = self._state[0][0][0] + self._state[0][1][1] + self._state[0][2][2]
if np.all(diag1 == player * 3):
return True
diag2 = self._state[0][0][2] + self._state[0][1][1] + self._state[0][2][0]
if np.all(diag2 == player * 3):
return True
return False
def game_over(self):
for i in range(3):
for j in range(3):
if np.all(self._state[0][i][j] == TicTacToeEnvironment.EMPTY):
return False
return True
def display(self):
state = self._state[0]
s = ""
for row in range(3):
for col in range(3):
if np.all(state[row][col] == TicTacToeEnvironment.EMPTY):
s += "_"
if np.all(state[row][col] == TicTacToeEnvironment.X):
s += "X"
if np.all(state[row][col] == TicTacToeEnvironment.O):
s += "O"
s += "\n"
return s
|
isyippee/nova
|
nova/objects/fields.py
|
Python
|
apache-2.0
| 19,847
| 0.00005
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from distutils import versionpredicate
import netaddr
from oslo_utils import strutils
from oslo_versionedobjects import fields
import six
# TODO(berrange) Temporary import for Arch class
from nova.compute import arch
# TODO(berrange) Temporary import for CPU* classes
from nova.compute import cpumodel
# TODO(berrange) Temporary import for HVType class
from nova.compute import hv_type
# TODO(berrange) Temporary import for VMMode class
from nova.compute import vm_mode
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
# Import field errors from oslo.versionedobjects
KeyTypeError = fields.KeyTypeError
ElementTypeError = fields.ElementTypeError
# Import fields from oslo.versionedobjects
BooleanField = fields.BooleanField
UnspecifiedDefault = fields.UnspecifiedDefault
IntegerField = fields.IntegerField
UUIDField = fields.UUIDField
FloatField = fields.FloatField
StringField = fields.StringField
EnumField = fields.EnumField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
DictOfNullableStringsField = fields.DictOfNullableStringsField
DictOfIntegersField = fields.DictOfIntegersField
ListOfStringsField = fields.ListOfStringsField
SetOfIntegersField = fields.SetOfIntegersField
ListOfSetsOfIntegersField = fields.ListOfSetsOfIntegersField
ListOfDictOfNullableStringsField = fields.ListOfDictOfNullableStringsField
DictProxyField = fields.DictProxyField
ObjectField = fields.ObjectField
ListOfObjectsField = fields.ListOfObjectsField
# NOTE(danms): These are things we need to import for some of our
# own implementations below, our tests, or other transitional
# bits of code. These should be removable after we finish our
# conversion
Enum = fields.Enum
Field = fields.Field
FieldType = fields.FieldType
Set = fields.Set
Dict = fields.Dict
List = fields.List
Object = fields.Object
class Architecture(Enum):
# TODO(berrange): move all constants out of 'nova.compute.arch'
# into fields on this class
def __init__(self, **kwargs):
super(Architecture, self).__init__(
valid_values=arch.ALL, **kwargs)
def coerce(self, obj, attr, value):
try:
value = arch.canonicalize(value)
except exception.InvalidArchitectureName:
msg = _("Architecture name '%s' is not valid") % value
raise ValueError(msg)
return super(Architecture, self).coerce(obj, attr, value)
class BlockDeviceDestinationType(Enum):
"""Represents possible destination_type values for a BlockDeviceMapping."""
LOCAL = 'local'
VOLUME = 'volume'
ALL = (LOCAL, VOLUME)
def __init__(self):
super(BlockDeviceDestinationType, self).__init__(
valid_values=BlockDeviceDestinationType.ALL)
class BlockDeviceSourceType(Enum):
"""Represents the possible source_type values for a BlockDeviceMapping."""
BLANK = 'blank'
IMAGE = 'image'
SNAPSHOT = 'snapshot'
VOLUME = 'volume'
ALL = (BLANK, IMAGE, SNAPSHOT, VOLUME)
def __init__(self):
super(BlockDeviceSourceType, self).__init__(
valid_values=BlockDeviceSourceType.ALL)
class BlockDeviceType(Enum):
"""Represents possible device_type values for a BlockDeviceMapping."""
CDROM = 'cdrom'
DISK = 'disk'
FLOPPY = 'floppy'
FS = 'fs'
LUN = 'lun'
ALL = (CDROM, DISK, FLOPPY, FS, LUN)
def __init__(self):
super(BlockDeviceType, self).__init__(
valid_values=BlockDeviceType.ALL)
class CPUAllocationPolicy(Enum):
DEDICATED = "dedicated"
SHARED = "shared"
ALL = (DEDICATED, SHARED)
def __init__(self):
super(CPUAllocationPolicy, self).__init__(
valid_values=CPUAllocationPolicy.ALL)
class CPUMode(Enum):
# TODO(berrange): move all constants out of 'nova.compute.cpumodel'
# into fields on this class
def __init__(self, **kwargs):
super(CPUMode, self).__init__(
valid_values=cpumodel.ALL_CPUMODES, **kwargs)
class CPUMatch(Enum):
# TODO(berrange): move all constants out of 'nova.compute.cpumodel'
# into fields on this class
def __init__(self, **kwargs):
super(CPUMatch, self).__init__(
valid_values=cpumodel.ALL_MATCHES, **kwargs)
class CPUFeaturePolicy(Enum):
# TODO(berrange): move all constants out of 'nova.compute.cpumodel'
# into fields on this class
def __init__(self, **kwargs):
super(CPUFeaturePolicy, self).__init__(
valid_values=cpumodel.ALL_POLICIES, **kwargs)
class DiskBus(Enum):
FDC = "fdc"
IDE = "ide"
SATA = "sata"
SCSI = "scsi"
USB = "usb"
VIRTIO = "virtio"
XEN = "xen"
LXC = "lxc"
UML = "uml"
ALL = (FDC, IDE, SATA, SCSI, USB, VIRTIO, XEN, LXC, UML)
def __init__(self):
super(DiskBus, self).__init__(
valid_values=DiskBus.ALL)
class HVType(Enum):
# TODO(berrange): move all constants out of 'nova.compute.hv_type'
# into fields on this class
def __init__(self):
super(HVType, self).__init__(
valid_values=hv_type.ALL)
def coerce(self, obj, attr, value):
try:
value = hv_type.canonicalize(value)
except exception.InvalidHypervisorVirtType:
msg = _("Hypervisor virt type '%s' is not valid") % value
raise ValueError(msg)
return super(HVType, self).coerce(obj, attr, value)
class OSType(Enum):
LINUX = "linux"
WINDOWS = "windows"
ALL = (LINUX, WINDOWS)
def __init__(self):
super(OSType, self).__init__(
valid_values=OSType.ALL)
def coerce(self, obj, attr, value):
# Some code/docs use upper case or initial caps
# so canonicalize to all lower case
value = value.lower()
return super(OSType, self).coerce(obj, attr, value)
class RNGModel(Enum):
VIRTIO = "virtio"
ALL = (VIRTIO,)
def __init__(self):
super(RNGModel, self).__init__(
valid_values=RNGModel.ALL)
class SCSIModel(Enum):
BUSLOGIC = "buslogic"
IBMVSCSI = "ibmvscsi"
LSILOGIC = "lsilogic"
LSISAS1068 = "lsisas1068"
LSISAS1078 = "lsisas1078"
VIRTIO_SCSI = "virtio-scsi"
VMPVSCSI = "vmpvscsi"
ALL = (BUSLOGIC, IBMVSCSI, LSILOGIC, LSISAS1068,
LSISAS1078, VIRTIO_SCSI, VMPVSCSI)
def __init__(self):
super(SCSIModel, self).__init__(
valid_values=SCSIModel.ALL)
def coerce(self, obj, attr, value):
# Some compat for strings we'd see in the legacy
# vmware_adaptertype image property
value = value.lower()
if value == "lsilogicsas":
value = SCSIModel.LSISAS1068
elif value == "paravirtual":
value = SCSIModel.VMPVSCSI
return super(SCSIModel, self).coerce(obj, attr, value)
class VideoModel(Enum):
CIRRUS = "cirrus"
QXL = "qxl"
VGA = "vga"
VMVGA = "vmvga"
XEN = "xen"
ALL = (CIRRUS, QXL, VGA, VMVGA, XEN)
def __init__(self):
super(VideoModel, self).__init__(
valid_values=VideoModel.ALL)
class VIFModel(Enum):
LEGACY_VALUES = {"virtuale1000":
network_model.VIF_MODEL_E1000,
"virtuale1000e":
network_model.VIF_MODEL_E1000E,
"virtualpcnet32":
network_model.VIF_MODEL_PCNET,
"virtualsriovethernetcard":
network_model.VIF_MODEL_SRIOV,
|
shakamunyi/sahara
|
sahara/tests/unit/plugins/vanilla/hadoop2/test_validation.py
|
Python
|
apache-2.0
| 4,497
| 0
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from sahara.plugins import exceptions as ex
from sahara.plugins.vanilla import plugin as p
from sahara.tests.unit import base
from sahara.tests.unit import testutils as tu
class ValidationTest(base.SaharaTestCase):
def setUp(self):
super(ValidationTest, self).setUp()
self.pl = p.VanillaProvider()
def test_validate(self):
self.ng = []
self.ng.append(tu.make_ng_dict("nn", "f1", ["namenode"], 0))
self.ng.append(tu.make_ng_dict("sn", "f1", ["secondarynamenode"], 0))
self.ng.append(tu.make_ng_dict("jt", "f1", ["resourcemanager"], 0))
self.ng.append(tu.make_ng_dict("tt", "f1", ["nodemanager"], 0))
self.ng.append(tu.make_ng_dict("dn", "f1", ["datanode"], 0))
self.ng.append(tu.make_ng_dict("hs", "f1", ["historyserver"], 0))
self.ng.append(tu.make_ng_dict("oo", "f1", ["oozie"], 0))
self._validate_case(1, 1, 1, 10, 10, 0, 0)
self._validate_case(1, 0, 1, 1, 4, 0, 0)
self._validate_case(1, 1, 1, 0, 3, 0, 0)
self._validate_case(1, 0, 1, 0, 3, 0, 0)
self._validate_case(1, 1, 0, 0, 3, 0, 0)
self._validate_case(1, 0, 1, 1, 3, 1, 1)
self._validate_case(1, 1, 1, 1, 3, 1, 0)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(0, 0, 1, 10, 3, 0, 0)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(2, 0, 1, 10, 3, 0, 0)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(1, 2, 1, 1, 3, 1, 1)
with testtools.ExpectedException(ex.RequiredServiceMissingException):
self._validate_case(1, 0, 0, 10, 3, 0, 0)
        with testtools.ExpectedException(ex.InvalidComponentCountException):
            self._validate_case(1, 0, 2, 10, 3, 0, 0)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(1, 0, 1, 1, 3, 2, 1)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(1, 0, 1, 1, 3, 1, 2)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(1, 1, 1, 0, 2, 0, 0)
with testtools.ExpectedException(ex.RequiredServiceMissingException):
self._validate_case(1, 0, 1, 1, 3, 0, 1)
with testtools.ExpectedException(ex.RequiredServiceMissingException):
self._validate_case(1, 0, 1, 0, 3, 1, 1)
with testtools.ExpectedException(ex.RequiredServiceMissingException):
self._validate_case(1, 0, 1, 1, 0, 1, 1)
cl = self._create_cluster(
1, 1, 1, 0, 3, 0, 0,
cluster_configs={'HDFS': {'dfs.replication': 4}})
with testtools.ExpectedException(ex.InvalidComponentCountException):
self.pl.validate(cl)
self.ng.append(tu.make_ng_dict("hi", "f1", ["hiveserver"], 0))
self.ng.append(tu.make_ng_dict("sh", "f1",
["spark history server"], 0))
self._validate_case(1, 1, 0, 0, 3, 0, 0, 1, 0)
self._validate_case(1, 1, 0, 0, 3, 0, 0, 0, 1)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(1, 1, 0, 0, 3, 0, 0, 2, 0)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(1, 1, 0, 0, 3, 0, 0, 0, 2)
def _create_cluster(self, *args, **kwargs):
lst = []
for i in range(0, len(args)):
self.ng[i]['count'] = args[i]
lst.append(self.ng[i])
return tu.create_cluster("cluster1", "tenant1", "vanilla",
"2.7.1", lst, **kwargs)
def _validate_case(self, *args):
cl = self._create_cluster(*args)
self.pl.validate(cl)
|
hasadna/OpenTrain
|
webserver/opentrain/timetable/migrations/0006_auto__add_field_tttrip_date.py
|
Python
|
bsd-3-clause
| 2,370
| 0.006329
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TtTrip.date'
db.add_column(u'timetable_tttrip', 'date',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TtTrip.date'
db.delete_column(u'timetable_tttrip', 'date')
models = {
u'timetable.ttstop': {
'Meta': {'object_name': 'TtStop'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stop_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'stop_lat': ('django.db.models.fields.FloatField', [], {}),
'stop_lon': ('django.db.models.fields.FloatField', [], {}),
'stop_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'stop_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'timetable.ttstoptime': {
'Meta': {'object_name': 'TtStopTime'},
'exp_arrival': ('django.db.models.fields.DateTimeField', [], {}),
'exp_departure': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtStop']"}),
'stop_sequence': ('django.db.models.fields.IntegerField', [], {}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtTrip']"})
},
u'timetable.tttrip': {
'Meta': {'object_name': 'TtTrip'},
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shape_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'trip_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['timetable']
|
ooblog/yonmoji_ge
|
LTsv/LTsv_gui.py
|
Python
|
mit
| 127,236
| 0.032237
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division,print_function,absolute_import,unicode_literals
import sys
import os
import subprocess
import codecs
import ctypes
import struct
import uuid
import datetime
import math
from LTsv_file import *
from LTsv_printf import *
LTsv_Tkinter=True
try:
import tkinter as Tk
import tkinter.scrolledtext as Tk_sc
import tkinter.filedialog as Tk_fd
# import messagebox as Tk_mb
except:
LTsv_Tkinter=False
#if LTsv_Tkinter == False:
# #http://shinobar.server-on.net/puppy/opt/tcl_tk-8.5.7-1-p4.sfs
# if os.path.exists("/usr/lib/python3.4"):
# sys.path.append("/usr/lib/python3.4")
# try:
# import tkinter as Tk
# import tkinter.scrolledtext as Tk_sc
# import tkinter.filedialog as Tk_fd
## import messagebox as Tk_mb
# LTsv_Tkinter=True
# except:
# LTsv_Tkinter=False
LTsv_libgtk,LTsv_libgdk,LTsv_libobj=None,None,None
LTsv_user32,LTsv_shell32,LTsv_kernel32,LTsv_gdi32=None,None,None,None
LTsv_GUI_ERROR,LTsv_GUI_GTK2,LTsv_GUI_Tkinter,LTsv_GUI_WinAPI="","GTK2","Tkinter","WinAPI"
LTsv_GUI,LTsv_Notify=LTsv_GUI_ERROR,LTsv_GUI_ERROR
#LTsv_CALLBACLTYPE=ctypes.CFUNCTYPE(ctypes.c_void_p,ctypes.POINTER(ctypes.c_ulong))
#LTsv_CALLBACLTYPE=ctypes.CFUNCTYPE(ctypes.c_bool,ctypes.c_void_p)
#LTsv_CALLBACLTYPE=ctypes.CFUNCTYPE(ctypes.c_void_p,ctypes.c_int)
LTsv_CALLBACLTYPE=ctypes.CFUNCTYPE(ctypes.c_void_p,ctypes.c_void_p)
LTsv_widgetLTSV=LTsv_newfile("LTsv_gui",LTsv_default=None)
LTsv_widgetOBJ={}; LTsv_widgetOBJcount=0
LTsv_timerOBJ={}; LTsv_timer_cbk={}
LTsv_canvas_motion_X,LTsv_canvas_motion_Y,LTsv_canvas_motion_Z=0,0,""
canvas_EMLenter,canvas_EMLmotion,canvas_EMLleave={},{},{}
canvas_CBKenter,canvas_CBKmotion,canvas_CBKleave,canvas_CBKtimeout,canvas_CBKafter,LTsv_canvasCBKpagename={},{},{},{},{},{}
LTsv_pictureOBJ,LTsv_pictureW,LTsv_pictureH={},{},{}
LTsv_iconOBJ={}; LTsv_iconOBJnotify=[]
LTsv_popupmenuOBJ={}
LTsv_default_iconuri=""
def LTsv_guiCDLLver(LTsv_libname,LTsv_libvermin,LTsv_libvermax):
LTsv_min,LTsv_max=(LTsv_libvermin,LTsv_libvermax) if LTsv_libvermin <= LTsv_libvermax else (LTsv_libvermax,LTsv_libvermin)
if LTsv_min == LTsv_max:
LTsv_max+=1
LTsv_CDLL=None
for LTsv_libver in range(LTsv_min,LTsv_max):
LTsv_CDLL=ctypes.CDLL(LTsv_libname.replace('?',str(LTsv_libver)))
if LTsv_CDLL != None:
break
return LTsv_CDLL
def LTsv_guiinit(LTsv_guistyle=LTsv_GUI_GTK2,LTsv_libvermin=0,LTsv_libvermax=0):
global LTsv_GUI,LTsv_Notify,LTsv_default_iconuri
global LTsv_libgtk,LTsv_libgdk,LTsv_libobj,LTsv_user32,LTsv_shell32,LTsv_kernel32,LTsv_gdi32
LTsv_GUI=LTsv_guistyle
if LTsv_GUI == LTsv_GUI_GTK2:
LTsv_Notify=LTsv_GUI_GTK2; LTsv_default_iconuri="/usr/share/pixmaps/python.xpm"
if sys.platform.startswith("linux"): #"/usr/lib/libgtk-x11-2.0.so.0"
LTsv_libgtk=LTsv_guiCDLLver("libgtk-x11-2.0.so.?",LTsv_libvermin,LTsv_libvermax)
LTsv_libgtk.gtk_range_get_value.restype=ctypes.c_double
LTsv_libgdk=LTsv_guiCDLLver("libgdk-x11-2.0.so.?",LTsv_libvermin,LTsv_libvermax)
LTsv_libobj=LTsv_guiCDLLver("libgobject-2.0.so.?",LTsv_libvermin,LTsv_libvermax)
LTsv_libobj.g_timeout_add.restype=ctypes.c_uint
# if sys.platform.startswith("cygwin"):
# LTsv_libgtk=LTsv_guiCDLLver("cyggtk-x11-2.0-?.dll",0,10)
# LTsv_libgdk=LTsv_guiCDLLver("cyggdk-x11-2.0-?.dll",0,10)
# LTsv_libobj=LTsv_guiCDLLver("cyggobject-2.0-?.dll",0,10)
# if sys.platform.startswith("darwin"):
# LTsv_libgtk=ctypes.CDLL("/opt/local/lib/libgtk-x11-2.0.0.dylib")#"/Library/Frameworks/Gtk.framework/Libraries/libgtk-quartz-2.0.0.dylib"
# LTsv_libgdk=ctypes.CDLL("/opt/local/lib/libgdk-x11-2.0.0.dylib")#"/Library/Frameworks/Gtk.framework/Libraries/libgdk-quartz-2.0.0.dylib"
# LTsv_libobj=ctypes.CDLL("/opt/local/lib/libgobject-2.0.0.dylib")#"/Library/Frameworks/Glib.framework/Libraries/libgobject-2.0.0.dylib"
if LTsv_libgtk == None or LTsv_libgdk == None or LTsv_libobj == None:
# if sys.platform.startswith("win"):
# LTsv_GUI=LTsv_GUI_WinAPI
LTsv_GUI=LTsv_GUI_Tkinter
else:
LTsv_libgtk.gtk_init(0,0)
if LTsv_GUI == LTsv_GUI_WinAPI or LTsv_GUI == LTsv_GUI_Tkinter:
if sys.platform.startswith("win"):
LTsv_Notify=LTsv_GUI_WinAPI; LTsv_default_iconuri=sys.executable
LTsv_shell32=ctypes.windll.shell32
LTsv_user32=ctypes.windll.user32
LTsv_kernel32=ctypes.windll.kernel32
LTsv_gdi32=ctypes.windll.gdi32
elif sys.platform.startswith("linux"):
pass
else:
LTsv_GUI,LTsv_Notify=LTsv_GUI_ERROR,LTsv_GUI_ERROR; LTsv_default_iconuri=""
if not LTsv_GUI in [LTsv_GUI_ERROR,LTsv_GUI_GTK2,LTsv_GUI_Tkinter,LTsv_GUI_WinAPI]: LTsv_GUI=LTsv_GUI_ERROR
return LTsv_GUI
def LTsv_global_GUI(): return LTsv_GUI
def LTsv_global_Notify(): return LTsv_Notify
def LTsv_global_GTK2(): return LTsv_GUI_GTK2
def LTsv_global_Tkinter(): return LTsv_GUI_Tkinter
def LTsv_global_WinAPI(): return LTsv_GUI_WinAPI
def LTsv_global_libgtk(): return LTsv_libgtk
def LTsv_global_libgdk(): return LTsv_libgdk
def LTsv_global_libobj(): return LTsv_libobj
def LTsv_global_canvasmotionX(): return LTsv_canvas_motion_X
def LTsv_global_canvasmotionY(): return LTsv_canvas_motion_Y
def LTsv_global_canvasmotionZ(): return LTsv_canvas_motion_Z
def LTsv_global_canvascolor(): return LTsv_canvascolor
def LTsv_global_canvasbgcolor(): return LTsv_canvasbgcolor
#def LTsv_global_widgetgetltsv(): return LTsv_widgetLTSV
def LTsv_global_widgetltsv(new_LTSV=None):
global LTsv_widgetLTSV
LTsv_widgetLTSV=LTsv_widgetLTSV if new_LTSV == None else new_LTSV
return LTsv_widgetLTSV
def LTsv_global_widgetgetpage(LTsv_widgetPAGENAME): return LTsv_getpage(LTsv_widgetLTSV,LTsv_widgetPAGENAME)
def LTsv_global_widgetOBJ(LTsv_objid): return LTsv_widgetOBJ[LTsv_objid]
def LTsv_global_pictureOBJ(LTsv_objid): return LTsv_pictureOBJ[LTsv_objid]
def LTsv_global_pictureW(LTsv_objid): return LTsv_pictureW[LTsv_objid]
def LTsv_global_pictureH(LTsv_objid): return LTsv_pictureH[LTsv_objid]
def LTsv_global_iconOBJ(LTsv_objid): return LTsv_iconOBJ[LTsv_objid]
def LTsv_global_popupmenuOBJ(LTsv_objid): return LTsv_popupmenuOBJ[LTsv_objid]
def LTsv_widget_newUUID(LTsv_widgetID=None):
global LTsv_widget_oldID
    if LTsv_widgetID == False:
        LTsv_uuid=LTsv_widget_oldID
else:
LTsv_uuid=uuid.uuid4().hex+'+'+str(time.time())
LTsv_widget_oldID=LTsv_uuid
return LTsv_uuid
LTsv_widget_oldID=LTsv_widget_newUUID()
def LTsv_widget_newobj(LTsv_widgetPAGE,LTsv_widgetoption,widget_obj):
global LTsv_widgetOBJ,LTsv_widgetOBJcount
    LTsv_widgetPAGE=LTsv_pushlinerest(LTsv_widgetPAGE,LTsv_widgetoption,str(LTsv_widgetOBJcount))
LTsv_widgetOBJ[str(LTsv_widgetOBJcount)]=widget_obj; LTsv_widgetOBJcount+=1
return LTsv_widgetPAGE
def LTsv_widget_getobj(LTsv_widgetPAGE,LTsv_widgetoption):
LTsv_widgetOBJcount=LTsv_readlinerest(LTsv_widgetPAGE,LTsv_widgetoption)
if LTsv_widgetOBJcount in LTsv_widgetOBJ:
return LTsv_widgetOBJ[LTsv_widgetOBJcount]
else:
return None
def LTsv_widgetPAGEXYWH(LTsv_widgetPAGE,widget_o=None,widget_k=None,widget_t=None,widget_u=None,widget_s=None,widget_e=None,widget_a=None,widget_v=None,widget_b=None, \
widget_p=None,widget_m=None,widget_g=None,widget_f=None,widget_x=None,widget_y=None,widget_w=None,widget_h=None,widget_c=None, \
event_z=None,event_k=None,event_y=None,event_b=None,event_p=None,event_r=None,event_e=None,event_m=None,event_l=None,event_a=None,
|
Xaroth/plex-export
|
docs/conf.py
|
Python
|
mit
| 9,091
| 0.00517
|
# -*- coding: utf-8 -*-
#
# destiny_account documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 15 00:23:55 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from datetime import datetime
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ['SETUP_NORUN'] = '1'
import setup as setup_info # noqa
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
html_theme = 'default'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = setup_info.VERBOSE_NAME
copyright = u'%d, %s' % (datetime.now().year, setup_info.AUTHOR_NAME)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = setup_info.VERSION
# The full version, including alpha/beta/rc tags.
release = setup_info.VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s documentation" % (setup_info.VERBOSE_NAME, setup_info.VERSION)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % setup_info.NAME
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
#
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
#
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', '%s.tex' % setup_info.NAME, u'%s Documentation' % setup_info.VERBOSE_NAME,
setup_info.AUTHOR_NAME, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', setup_info.NAME, u'%s Documentation' % setup_info.VERBOSE_NAME,
[setup_info.AUTHOR_NAME], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options f
|
entwanne/NAGM
|
games/test_game/attacks.py
|
Python
|
bsd-3-clause
| 1,048
| 0.009579
|
from nagm.engine.attack import Attack
from .types import *
from .defs import precision, stat, heal, offensive, faux_chage_effect
prec = precision(prec=0.9)
mimi_queue = Attack(name='Mimi-queue', type=normal, effects=(prec, stat(stat='dfse', value=-1),))
charge = Attack(name='Charge', type=normal, effects=(prec, offensive(force=10),))
griffe = Attack(name='Griffe', type=normal, effects=(prec, offensive(force=10),))
fouet_lianes = Attack(name='Fouet lianes', type=plante, effects=(prec, offensive(force=20),))
flameche = Attack(name='Flamèche', type=feu, effects=(prec, offensive(force=20),))
pistolet_a_o = Attack(name='Pistolet à o', type=eau, effects=(prec, offensive(force=20),))
eclair = Attack(name='Éclair', type=electrik, effects=(prec, offensive(force=20),))
soin = Attack(name='Soin', type=normal, effects=(prec, heal(heal=50),), reflexive=True)
abime = Attack(name='Abîme', type=normal, effects=(precision(prec=0.1), offensive(force=1000),))
faux_chage = Attack(name='Faux-chage', type=normal, effects=(prec, faux_chage_effect,))
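A hypothetical addition in the same style, not present in the original file: it reuses the precision and offensive helpers exactly as above to define one more attack that trades accuracy for raw power.
hydro_canon = Attack(name='Hydro-canon', type=eau,
                     effects=(precision(prec=0.5), offensive(force=60),))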
|
chrisws/scummvm
|
devtools/tasmrecover/tasm/parser.py
|
Python
|
gpl-2.0
| 7,312
| 0.040618
|
# ScummVM - Graphic Adventure Engine
#
# ScummVM is the legal property of its developers, whose names
# are too numerous to list here. Please refer to the COPYRIGHT
# file distributed with this source distribution.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import os, re
from proc import proc
import lex
import op
class parser:
def __init__(self):
self.strip_path = 0
self.__globals = {}
self.__offsets = {}
self.__stack = []
self.proc = None
self.proc_list = []
self.binary_data = []
self.symbols = []
self.link_later = []
def visible(self):
for i in self.__stack:
if not i or i == 0:
return False
return True
def push_if(self, text):
value = self.eval(text)
#print "if %s -> %s" %(text, value)
self.__stack.append(value)
def push_else(self):
#print "else"
self.__stack[-1] = not self.__stack[-1]
def pop_if(self):
#print "endif"
return self.__stack.pop()
def set_global(self, name, value):
if len(name) == 0:
raise Exception("empty name is not allowed")
name = name.lower()
#print "adding global %s -> %s" %(name, value)
if self.__globals.has_key(name):
raise Exception("global %s was already defined", name)
self.__globals[name] = value
def get_global(self, name):
name = name.lower()
g = self.__globals[name]
g.used = True
return g
def get_globals(self):
return self.__globals
def has_global(self, name):
name = name.lower()
return self.__globals.has_key(name)
def set_offset(self, name, value):
if len(name) == 0:
raise Exception("empty name is not allowed")
name = name.lower()
#print "adding global %s -> %s" %(name, value)
if self.__offsets.has_key(name):
raise Exception("global %s was already defined", name)
self.__offsets[name] = value
def get_offset(self, name):
name = name.lower()
return self.__offsets[name]
def include(self, basedir, fname):
path = fname.split('\\')[self.strip_path:]
path = os.path.join(basedir, os.path.pathsep.join(path))
#print "including %s" %(path)
self.parse(path)
def eval(self, stmt):
try:
return self.parse_int(stmt)
except:
pass
value = self.__globals[stmt.lower()].value
return int(value)
def expr_callback(self, match):
name = match.group(1).lower()
g = self.get_global(name)
if isinstance(g, op.const):
return g.value
else:
return "0x%04x" %g.offset
def eval_expr(self, expr):
n = 1
while n > 0:
expr, n = re.subn(r'\b([a-zA-Z_]+[a-zA-Z0-9_]*)', self.expr_callback, expr)
return eval(expr)
def expand_globals(self, text):
return text
def fix_dollar(self, v):
print("$ = %d" %len(self.binary_data))
return re.sub(r'\$', "%d" %len(self.binary_data), v)
def parse_int(self, v):
if re.match(r'[01]+b$', v):
v = int(v[:-1], 2)
if re.match(r'[\+-]?[0-9a-f]+h$', v):
v = int(v[:-1], 16)
return int(v)
def compact_data(self, width, data):
#print "COMPACTING %d %s" %(width, data)
r = []
base = 0x100 if width == 1 else 0x10000
for v in data:
if v[0] == '"':
if v[-1] != '"':
raise Exception("invalid string %s" %v)
if width == 2:
raise Exception("string with data width more than 1") #we could allow it :)
for i in xrange(1, len(v) - 1):
r.append(ord(v[i]))
continue
m = re.match(r'(\w+)\s+dup\s+\((\s*\S+\s*)\)', v)
if m is not None:
#we should parse that
n = self.parse_int(m.group(1))
if m.group(2) != '?':
value = self.parse_int(m.group(2))
else:
value = 0
for i in xrange(0, n):
v = value
for b in xrange(0, width):
r.append(v & 0xff);
v >>= 8
continue
try:
v = self.parse_int(v)
if v < 0:
v += base
except:
#global name
print "global/expr: %s" %v
try:
g = self.get_global(v)
v = g.offset
except:
print "unknown address %s" %(v)
self.link_later.append((len(self.binary_data) + len(r), v))
v = 0
for b in xrange(0, width):
r.append(v & 0xff);
v >>= 8
#print r
return r
def parse(self, fname):
# print "opening file %s..." %(fname, basedir)
fd = open(fname, 'rb')
for line in fd:
line = line.strip()
if len(line) == 0 or line[0] == ';' or line[0] == chr(0x1a):
continue
#print line
m = re.match('(\w+)\s*?:', line)
if m is not None:
line = line[len(m.group(0)):].strip()
if self.visible():
name = m.group(1)
if self.proc is not None:
self.proc.add_label(name)
print "offset %s -> %d" %(name, len(self.binary_data))
self.set_offset(name, (len(self.binary_data), self.proc, len(self.proc.stmts) if self.proc is not None else 0))
#print line
cmd = line.split()
if len(cmd) == 0:
continue
cmd0 = str(cmd[0])
if cmd0 == 'if':
self.push_if(cmd[1])
continue
elif cmd0 == 'else':
self.push_else()
continue
elif cmd0 == 'endif':
self.pop_if()
continue
if not self.visible():
continue
if cmd0 == 'db' or cmd0 == 'dw' or cmd0 == 'dd':
arg = line[len(cmd0):].strip()
print "%d:1: %s" %(len(self.binary_data), arg) #fixme: COPYPASTE
binary_width = {'b': 1, 'w': 2, 'd': 4}[cmd0[1]]
self.binary_data += self.compact_data(binary_width, lex.parse_args(arg))
continue
elif cmd0 == 'include':
self.include(os.path.dirname(fname), cmd[1])
continue
elif cmd0 == 'endp':
self.proc = None
continue
elif cmd0 == 'assume':
print "skipping: %s" %line
continue
elif cmd0 == 'rep':
self.proc.add(cmd0)
self.proc.add(" ".join(cmd[1:]))
continue
if len(cmd) >= 3:
cmd1 = cmd[1]
if cmd1 == 'equ':
v = cmd[2]
self.set_global(cmd0, op.const(self.fix_dollar(v)))
elif cmd1 == 'db' or cmd1 == 'dw' or cmd1 == 'dd':
binary_width = {'b': 1, 'w': 2, 'd': 4}[cmd1[1]]
offset = len(self.binary_data)
arg = line[len(cmd0):].strip()
arg = arg[len(cmd1):].strip()
print "%d: %s" %(offset, arg)
self.binary_data += self.compact_data(binary_width, lex.parse_args(arg))
self.set_global(cmd0.lower(), op.var(binary_width, offset))
continue
elif cmd1 == 'proc':
name = cmd0.lower()
self.proc = proc(name)
print "procedure %s, #%d" %(name, len(self.proc_list))
self.proc_list.append(name)
self.set_global(name, self.proc)
continue
if (self.proc):
self.proc.add(line)
else:
#print line
pass
fd.close()
        return self
def link(self):
        for addr, expr in self.link_later:
v = self.eval_expr(expr)
print "link: patching %04x -> %04x" %(addr, v)
while v != 0:
self.binary_data[addr] = v & 0xff
addr += 1
v >>= 8
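A minimal usage sketch, not part of the original file: it assumes this module's proc/lex/op imports resolve and that 'dreamweb.asm' stands in for a real TASM source path.
if __name__ == '__main__':
    p = parser()
    p.parse('dreamweb.asm')   # placeholder input file
    p.link()                  # patch the collected link-time references
    print("parsed %d procedures, %d data bytes" % (len(p.proc_list), len(p.binary_data)))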
|
DeltaEpsilon-HackFMI2/FMICalendar-REST
|
schedule/models.py
|
Python
|
mit
| 4,666
| 0.00349
|
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
class Place(models.Model):
"""
Holder object for basic info about the rooms
in the university.
"""
room_place = models.CharField(max_length=255)
floor = models.IntegerField()
def __unicode__(self):
return self.room_place
class HierarchyUnit(models.Model):
PROGRAM = 'PR'
YEAR = 'YR'
GROUP = 'GR'
TYPES = (
(PROGRAM, u"Специалност"),
(YEAR, u"Курс"),
(GROUP, u"Група"),
)
type_value = models.CharField(max_length=255, choices=TYPES)
value = models.CharField(max_length=255)
parent = models.ForeignKey("schedule.HierarchyUnit", null=True, blank=True, default=None)
def get_all_info_for_parents(self):
if self.type_value == 'PR':
return self.value
if self.type_value == 'YR':
return ', '.join([self.parent.value, self.value+u' курс'])
else:
return ', '.join([self.parent.parent.value, self.parent.value+u' курс', self.value+u' група'])
def get_all_childs(self):
return HierarchyUnit.objects.filter(parent=self)
def __unicode__(self):
return self.get_all_info_for_parents()
class Block(models.Model):
"""
Group representing a set of optional subjects.
Example: Core of Computer Science.
"""
name = models.CharField(max_length=255)
def __unicode__(self):
return self.name
class Subject(models.Model):
"""
Representation of all subjects.
Example: Calculus 1.
"""
MANDATORY = 'MN'
OPTIONAL = 'OP'
TYPES = (
(MANDATORY, u"Задължителен"),
(OPTIONAL, u"Избираем"),
)
type_value = models.CharField(max_length=255, choices=TYPES)
name = models.CharField(max_length=255)
block = models.ForeignKey(Block, null=True, blank=True, default=None)
year = models.ForeignKey(HierarchyUnit, null=True, blank=True, default=None, limit_choices_to={'type_value': HierarchyUnit.YEAR})
def get_year_value(self):
return ', '.join([self.year.parent.value, self.year.value+u' курс'])
def __unicode__(self):
return self.name
class Department(models.Model):
"""
Group representing a set of lecturers
grouped by field of teaching.
"""
name = models.CharField(max_length=255)
def __unicode__(self):
return self.name
class Teacher(models.Model):
name = models.CharField(max_length=255)
title = models.CharField(max_length=255)
email = models.CharField(max_length=255)
full_name = models.CharField(max_length=255)
position = models.CharField(max_length=255)
subjects = models.ManyToManyField(Subject, null=True, blank=True, default=None)
department = models.ForeignKey(Department, null=True, blank=True, default=None)
def __unicode__(self):
return self.name
class Event(models.Model):
WEEKLY = 'WKL'
TYPES = (
(WEEKLY, u'Седмично'),
)
type_value = models.CharField(max_length=255, null=True, blank=True, default=None)
inserted = models.DateField(default=datetime.now())
name = models.CharField(max_length=255)
place = models.ForeignKey(Place, blank=True, default=None, null=True)
date_start = models.DateTimeField()
date_end = models.DateTimeField(default=datetime.now())
repeatable = models.BooleanField()
duratation = models.IntegerField()
subject = models.ForeignKey(Subject, blank=True, default=None, null=True)
teacher = models.ForeignKey(Teacher, blank=True, default=None, null=True)
def __unicode__(self):
return self.name
class Student(models.Model):
PROGRAM = (('BK', 'Бакалавър'),('MG', 'Магистър'))
name = models.CharField(max_length=255)
program = models.CharField(max_length=255,choices=PROGRAM, blank=True, default=PROGRAM[0][0])
fac_number = models.CharField(max_length=255)
email = models.CharField(max_length=255)
    group = models.ForeignKey(HierarchyUnit, limit_choices_to={'type_value': HierarchyUnit.GROUP}, blank=True, default=None, null=True)
events = models.ManyToManyField(Event, blank=True, default=None, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
from_user = models.ForeignKey(Student, blank=True, default=None, null=True)
event = models.ForeignKey(Event, blank=True, default=None, null=True)
start_date = models.DateField()
end_date = models.DateField()
    dtstamp = models.DateField(default=datetime.now())
desc = models.TextField()
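An illustration of how get_all_info_for_parents composes the hierarchy label; this is not in the original file, the objects are unsaved, the values are invented, and it assumes a configured Django settings module.
program = HierarchyUnit(type_value=HierarchyUnit.PROGRAM, value=u"Информатика")
year = HierarchyUnit(type_value=HierarchyUnit.YEAR, value=u"2", parent=program)
group = HierarchyUnit(type_value=HierarchyUnit.GROUP, value=u"3", parent=year)
print(group.get_all_info_for_parents())  # Информатика, 2 курс, 3 група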
|
commtrack/commtrack-core
|
apps/xformmanager/forms.py
|
Python
|
bsd-3-clause
| 1,428
| 0.009804
|
from django import forms
from models import FormDataGroup
import re
# On this page, users can upload an xsd file from their laptop
# Then they get redirected to a page where they can download the xsd
class RegisterXForm(forms.Form):
file = forms.FileField()
form_display_name= forms.CharField(max_length=128, label=u'Form Display Name')
class SubmitDataForm(forms.Form):
file = forms.FileField()
class FormDataGroupForm(forms.ModelForm):
"""Form for basic form group data"""
    display_name = forms.CharField(widget=forms.TextInput(attrs={'size':'80'}))
view_name = forms.CharField(widget=forms.TextInput(attrs={'size':'40'}))
def clean_view_name(self):
view_name = self.cleaned_data["view_name"]
if not re.match(r"^\w+$", view_name):
            raise forms.ValidationError("View name can only contain numbers, letters, and underscores!")
# check that the view name is unique... if it was changed.
if self.instance.id:
if FormDataGroup.objects.get(id=self.instance.id).view_name != view_name and \
FormDataGroup.objects.filter(view_name=view_name).count() > 0:
raise forms.ValidationError("Sorry, view name %s is already in use! Please pick a new one." % view_name)
return self.cleaned_data["view_name"]
class Meta:
model = FormDataGroup
fields = ("display_name", "view_name")
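A quick check of the view_name rule enforced above, not from the original file: only letters, digits and underscores pass the ^\w+$ match.
import re

for candidate in ("monthly_report", "form-data", "group 1"):
    print(candidate, bool(re.match(r"^\w+$", candidate)))
# monthly_report True, form-data False, group 1 False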
|
saltastro/pysalt
|
proptools/ImageDisplay.py
|
Python
|
bsd-3-clause
| 3,112
| 0.008355
|
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
############################################################################
#!/usr/bin/env python
"""
ImageDisplay--Class for displaying and interacting with ds9
Author Version Date
-----------------------------------------------
S M Crawford (SAAO) 0.1 19 Jun 2011
"""
import os
import pyds9 as ds9
class ImageDisplay:
def __init__(self, target='ImageDisplay:*'):
self.ds9 = ds9.ds9()
def display(self, filename, pa=None):
cmd='file %s' % filename
self.ds9.set(cmd)
self.ds9.set('zscale')
self.ds9.set('match frames wcs')
# print pa
if pa:
            self.ds9.set('rotate to %f' % pa)
else:
self.ds9.set('rotate to %f' % 0)
def regions(self, rgnstr):
cmd = 'regions %s'
def rssregion(self, ra, dec):
"""Plot the FOV for RSS"""
# cmd='color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=0 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\nfk5\ncircle(%s, %s, 4\')' % (ra, dec)
# cmd='fk5\ncircle(%s, %s, 4\') # color=yellow background dashlist=8 3 width=1 font="helvetica 10 normal roman" select=0 highlite=1 dash=0 fixed=0 edit=0 move=0 delete=0 include=1 source=1\n' % (ra, dec)
# ds9.set(cmd)
self.ds9.set('regions', 'fk5; circle(%f,%f,4\') # color=yellow background dashlist=8 3 width=3 font="helvetica 10 normal roman" select=0 highlite=1 dash=0 fixed=0 edit=0 move=0 delete=0 include=1 source=1'%(ra, dec))
def rotate(self, angle):
"""Rotate the image"""
self.ds9.set('rotate to %f' % angle)
def regionfromfile(self, regfile, d=None, rformat='ds9'):
cmd='regions %s -format %s' % (regfile, rformat)
self.ds9.set(cmd)
def deleteregions(self):
"""Delete all regions in the frame"""
cmd='regions delete all'
self.ds9.set(cmd)
def getregions(self):
"""Return a list of regions"""
rgnstr=self.ds9.get('regions -system fk5')
i = 0
newslits = {}
#print rgnstr
for l in rgnstr.split('\n'):
tags = ''
# work out how to use tags and just deal with "slit" tags
if l.startswith('box'):
#first look for tags
l = l[4:].split('#')
if len(l) > 1:
tags = l[-1]
l = l[0][:-2].split(',')
newslits[i] = [l, tags]
i += 1
elif l.startswith('circle'):
l = l[7:].split('#')
#print l
if len(l) > 1:
tags=l
l = l[0][:-2].split(',')
newslits[i] = [l, tags]
i += 1
return newslits
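A usage sketch, not in the original file: it needs pyds9 and a running ds9 instance, and 'image.fits' plus the coordinates are placeholders.
disp = ImageDisplay()
disp.display('image.fits', pa=30.0)   # load the frame, zscale, match WCS, rotate to the position angle
disp.rssregion(150.25, -27.5)         # overlay the 4' RSS field of view at the given RA/Dec
print(disp.getregions())              # dict of box/circle regions currently drawn in the frame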
|
privacyidea/privacyidea
|
privacyidea/lib/challenge.py
|
Python
|
agpl-3.0
| 5,837
| 0.000857
|
# -*- coding: utf-8 -*-
# privacyIDEA is a fork of LinOTP
#
# 2014-12-07 Cornelius Kölbel <cornelius@privacyidea.org>
#
# Copyright (C) 2014 Cornelius Kölbel
# License: AGPLv3
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a helper module for the challenges database table.
It is used by the lib.tokenclass
The method is tested in test_lib_challenges
"""
import logging
import six
from .log import log_with
from ..models import Challenge
from privacyidea.lib.error import ParameterError
log = logging.getLogger(__name__)
@log_with(log)
def get_challenges(serial=None, transaction_id=None, challenge=None):
"""
This returns a list of database challenge objects.
:param serial: challenges for this very serial number
:param transaction_id: challenges with this very transaction id
:param challenge: The challenge to be found
:return: list of objects
"""
sql_query = Challenge.query
if serial is not None:
# filter for serial
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None:
# filter for transaction id
        sql_query = sql_query.filter(Challenge.transaction_id ==
transaction_id)
if challenge is not None:
# filter for this challenge
sql_query = sql_query.filter(Challenge.challenge == challenge)
challenges = sql_query.all()
    return challenges
@log_with(log)
def get_challenges_paginate(serial=None, transaction_id=None,
sortby=Challenge.timestamp,
sortdir="asc", psize=15, page=1):
"""
This function is used to retrieve a challenge list, that can be displayed in
the Web UI. It supports pagination.
Each retrieved page will also contain a "next" and a "prev", indicating
the next or previous page. If either does not exist, it is None.
:param serial: The serial of the token
:param transaction_id: The transaction_id of the challenge
:param sortby: Sort by a Challenge DB field. The default is
Challenge.timestamp.
:type sortby: A Challenge column or a string.
:param sortdir: Can be "asc" (default) or "desc"
:type sortdir: basestring
:param psize: The size of the page
:type psize: int
:param page: The number of the page to view. Starts with 1 ;-)
:type page: int
:return: dict with challenges, prev, next and count
:rtype: dict
"""
sql_query = _create_challenge_query(serial=serial,
transaction_id=transaction_id)
if isinstance(sortby, six.string_types):
# convert the string to a Challenge column
cols = Challenge.__table__.columns
sortby = cols.get(sortby)
if sortdir == "desc":
sql_query = sql_query.order_by(sortby.desc())
else:
sql_query = sql_query.order_by(sortby.asc())
pagination = sql_query.paginate(page, per_page=psize,
error_out=False)
challenges = pagination.items
prev = None
if pagination.has_prev:
prev = page-1
next = None
if pagination.has_next:
next = page + 1
challenge_list = []
for challenge in challenges:
challenge_dict = challenge.get()
challenge_list.append(challenge_dict)
ret = {"challenges": challenge_list,
"prev": prev,
"next": next,
"current": page,
"count": pagination.total}
return ret
def _create_challenge_query(serial=None, transaction_id=None):
"""
This function create the sql query for fetching transaction_ids. It is
used by get_challenge_paginate.
:return: An SQLAlchemy sql query
"""
sql_query = Challenge.query
if serial is not None and serial.strip("*"):
# filter for serial
if "*" in serial:
# match with "like"
sql_query = sql_query.filter(Challenge.serial.like(serial.replace(
"*", "%")))
else:
# exact match
sql_query = sql_query.filter(Challenge.serial == serial)
if transaction_id is not None and transaction_id.strip("*"):
# filter for serial
if "*" in transaction_id:
# match with "like"
sql_query = sql_query.filter(Challenge.transaction_id.like(
transaction_id.replace(
"*", "%")))
else:
# exact match
sql_query = sql_query.filter(Challenge.transaction_id == transaction_id)
return sql_query
def extract_answered_challenges(challenges):
"""
Given a list of challenge objects, extract and return a list of *answered* challenge.
A challenge is answered if it is not expired yet *and* if its ``otp_valid`` attribute
is set to True.
:param challenges: a list of challenge objects
:return: a list of answered challenge objects
"""
answered_challenges = []
for challenge in challenges:
# check if we are still in time.
if challenge.is_valid():
_, status = challenge.get_otp_status()
if status is True:
answered_challenges.append(challenge)
return answered_challenges
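A usage sketch, not part of the original module: it assumes a privacyIDEA application and database context are already set up, and the serial is a placeholder.
result = get_challenges_paginate(serial="TOTP0001", sortdir="desc", psize=10, page=1)
print("total:", result["count"], "current page:", result["current"], "next:", result["next"])
for challenge_dict in result["challenges"]:
    print(challenge_dict)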
|
drestuart/delvelib
|
src/world/WorldMapClass.py
|
Python
|
lgpl-3.0
| 6,833
| 0.009074
|
'''
Created on Feb 26, 2014
@author: dstuart
'''
import LevelClass as L
import Util as U
class Region(object):
def __init__(self, **kwargs):
self.mapTiles = set()
self.name = None
self.worldMap = None
# TODO:
# worldMapId = Column(Integer, ForeignKey("levels.id"))
def addTile(self, tile):
self.mapTiles.add(tile)
tile.setRegion(self)
def replaceTile(self, oldtile, newtile):
assert oldtile.getXY() == newtile.getXY()
self.mapTiles.remove(oldtile)
self.addTile(newtile)
def getTileType(self):
return self.tileType
class WorldMap(L.MapBase):
    def __init__(self, **kwargs):
        super(WorldMap, self).__init__(**kwargs)
self.name = None
        self.mapTiles = set()
self.regions = set()
self.num_regions = kwargs['num_regions']
self.creatures = set()
# Initialize self.hasTile
self.hasTile = []
for dummyx in range(self.width):
newCol = []
for dummyy in range(self.height):
newCol.append(False)
self.hasTile.append(newCol)
def load(self):
pass
def getMapTiles(self):
return self.mapTiles
def addTile(self, tile):
self.mapTiles.add(tile)
self.hasTile[tile.getX()][tile.getY()] = True
if tile.getLevel() is not self:
tile.setLevel(self)
def replaceTile(self, newtile):
oldtile = self.getTile(newtile.getX(), newtile.getY())
assert oldtile.getXY() == newtile.getXY()
reg = oldtile.getRegion()
if reg:
reg.replaceTile(oldtile, newtile)
oldnumtiles = len(self.mapTiles)
self.mapTiles.remove(oldtile)
oldtile.remove()
self.addTile(newtile)
self.tileArray[newtile.getX()][newtile.getY()] = newtile
newnumtiles = len(self.mapTiles)
assert newnumtiles == oldnumtiles
def buildTileArray(self):
self.tileArray = []
# Initialize
for dummyx in range(self.width):
newCol = []
for dummyy in range(self.height):
newCol.append(None)
self.tileArray.append(newCol)
# Fill in
for tile in self.mapTiles:
assert tile is not None
self.tileArray[tile.x][tile.y] = tile
def getTile(self, x, y):
if not self.__dict__.get('tileArray'):
# print "self.tileArray not initialized!"
self.buildTileArray()
if x >= 0 and x < self.width and y >= 0 and y < self.height:
return self.tileArray[x][y]
return None
def getRegions(self):
return self.regions
def addRegion(self, reg):
self.regions.add(reg)
def distance(self, tilea, tileb):
return self.coordinateDistance(tilea.getX(), tileb.getX(), tilea.getY(), tileb.getY())
def coordinateDistance(self, xa, xb, ya, yb):
return U.ChebyshevDistance(xa, xb, ya, yb)
def getTilesInRadius(self, radius, centerX, centerY, tileClass=None):
assert radius >= 0 and radius == int(radius)
tiles = []
for rad in range(0, radius + 1):
tiles += self.getTilesAtRadius(rad, centerX, centerY, tileClass)
return tiles
def getTilesInRange(self, rmin, rmax, centerX, centerY, tileClass=None):
assert rmin <= rmax and rmin > 0
tiles = []
for rad in range(rmin, rmax + 1):
tiles += self.getTilesAtRadius(rad, centerX, centerY, tileClass)
return tiles
def getNearestTile(self, fromTile, tileClass):
import Game as G
random = G.getRandom()
centerX, centerY = fromTile.getXY()
radius = 1
while True:
matches = self.getTilesAtRadius(radius, centerX, centerY, tileClass)
if not matches:
radius += 1
continue
return random.choice(matches)
def getTilesAtRadius(self, radius, centerX, centerY, tileClass=None):
assert radius >= 0 and radius == int(radius)
centerTile = self.getTile(centerX, centerY)
tiles = []
if radius == 0:
return [centerTile]
x1 = max(0, centerX - radius)
y1 = max(0, centerY - radius)
x2 = min(centerX + radius, self.width)
y2 = min(centerY + radius, self.height)
for x in range(x1, x2 + 1):
tile1 = self.getTile(x, y1)
tile2 = self.getTile(x, y2)
if tile1 and (tileClass is None or isinstance(tile1, tileClass)): tiles.append(tile1)
if tile2 and (tileClass is None or isinstance(tile2, tileClass)): tiles.append(tile2)
for y in range(y1 + 1, y2):
tile1 = self.getTile(x1, y)
tile2 = self.getTile(x2, y)
if tile1 and (tileClass is None or isinstance(tile1, tileClass)): tiles.append(tile1)
if tile2 and (tileClass is None or isinstance(tile2, tileClass)): tiles.append(tile2)
return tiles
def getTilesToDraw(self, dummyx, dummyy, cameradims, visibility = True):
retArray = []
camx, camy, camwidth, camheight = cameradims
for tile in self.mapTiles:
if tile:
x = tile.x
y = tile.y
# Is the tile in the camera's range?
if (x < camx or x >= camx + camwidth or y < camy or y >= camy + camheight):
continue
symbol = tile.getSymbol()
color = tile.getColor()
background = tile.getBackgroundColor()
# Good lord, what made me think this was a good idea?
# symbol = symbol.encode('ascii', 'ignore')
retArray.append((x, y, symbol, color, background))
return retArray
def getAdjacentTiles(self, fromTile):
# return self.getTilesAtRadius(1, fromTile.getX(), fromTile.getY())
tiles = []
x, y = fromTile.getXY()
for i in (-1, 0, 1):
for j in (-1, 0, 1):
if not (i == 0 and j == 0):
tile = self.getTile(x + i, y + j)
if tile: tiles.append(tile)
return tiles
def handleRemovedCreature(self, tile, creature):
pass
def handleAddedCreature(self, tile, creature):
pass
def buildMap(self):
raise NotImplementedError("buildMap() not implemented, use a subclass")
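A side note, not from the original file: getTilesAtRadius walks the square ring at a given Chebyshev distance (radius 1 is the 8 surrounding tiles). The sketch below shows the standard Chebyshev metric, which Util.ChebyshevDistance is assumed to implement.
def chebyshev(xa, xb, ya, yb):
    # distance is the larger of the horizontal and vertical offsets
    return max(abs(xa - xb), abs(ya - yb))

print(chebyshev(5, 6, 5, 7))  # 2: one column east and two rows away sits on the radius-2 ring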
|
jwlawson/tensorflow
|
tensorflow/contrib/rnn/python/ops/lstm_ops.py
|
Python
|
apache-2.0
| 24,941
| 0.005052
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
LayerRNNCell = rnn_cell_impl._LayerRNNCell # pylint: disable=invalid-name,protected-access
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, ci, f, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
Value to clip the 'cs' value to. Disable by setting to negative value.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
return gen_lstm_ops.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
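# Reference sketch of the documented equations (an illustrative assumption,
# not part of the original module; the real computation runs inside the fused
# kernel loaded from _lstm_ops.so). Peephole weights and cell clipping are
# omitted for brevity.
def _lstm_block_cell_reference_numpy(x, cs_prev, h_prev, w, b, forget_bias=1.0):
  """NumPy mirror of the `_lstm_block_cell` equations documented above."""
  import numpy as np  # local import; numpy is not otherwise needed here

  def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

  xh = np.concatenate([x, h_prev], axis=1)
  i, ci, f, o = np.split(np.dot(xh, w) + b, 4, axis=1)
  i, f, o = sigmoid(i), sigmoid(f + forget_bias), sigmoid(o)
  cs = np.tanh(ci) * i + cs_prev * f
  co = np.tanh(cs)
  h = co * o
  return cs, h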
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
batch_size = x[0].get_shape().with_rank(2)[0].value
cell_size4 = b.get_shape().with_rank(1)[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
cell_size = cell_size4 / 4
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtypes.float32, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wcf = wci
wco = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wcf=wcf,
wco=wco,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip if cell_clip is not None else -1,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
# pyl
|
github/codeql
|
python/ql/test/3/library-tests/PointsTo/inheritance/test.py
|
Python
|
mit
| 496
| 0.012097
|
class Base(object):
def meth(self):
pass
class Derived1(Base):
def meth(self):
return super().meth()
class Derived2(Derived1):
def meth(self):
return super().meth()
class Derived3(Derived1):
pass
class Derived4(Derived3, Derived2):
def meth(self):
return super().meth()
class Derived5(Derived1):
def meth(self):
return super().meth()
class Derived6(Derived5, Derived2):
def meth(self):
return super().meth()
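# For reference (an added note, not part of the original test; values follow
# Python's C3 linearization, which super() uses to pick the next class):
#
#   >>> [c.__name__ for c in Derived4.__mro__]
#   ['Derived4', 'Derived3', 'Derived2', 'Derived1', 'Base', 'object']
#   >>> [c.__name__ for c in Derived6.__mro__]
#   ['Derived6', 'Derived5', 'Derived2', 'Derived1', 'Base', 'object']
#
# so Derived4().meth() chains through every intermediate class before
# reaching Base.meth.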
|
ujvl/ray-ng
|
python/ray/tests/test_memory_scheduling.py
|
Python
|
apache-2.0
| 4,709
| 0
|
import numpy as np
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
MB = 1024 * 1024
@ray.remote(memory=100 * MB)
class Actor(object):
def __init__(self):
pass
def ping(self):
return "ok"
@ray.remote(object_store_memory=100 * MB)
class Actor2(object):
def __init__(self):
pass
def ping(self):
return "ok"
def train_oom(config, reporter):
ray.put(np.zeros(200 * 1024 * 1024))
reporter(result=123)
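# Quick arithmetic behind the limits exercised below (an added note, not part
# of the original tests): two 100 MB actors exactly fill the 200 MB budget
# used in testMemoryRequest, so a third actor cannot be scheduled, and the
# 200 MB array from train_oom can never fit under a 150 MB object store
# allowance.
assert (200 * MB) // (100 * MB) == 2  # only two Actors fit at once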
class TestMemoryScheduling(unittest.TestCase):
def testMemoryRequest(self):
try:
ray.init(num_cpus=1, memory=200 * MB)
# fits first 2
a = Actor.remote()
b = Actor.remote()
ok, _ = ray.wait(
[a.ping.remote(), b.ping.remote()],
timeout=60.0,
num_returns=2)
self.assertEqual(len(ok), 2)
# does not fit
c = Actor.remote()
ok, _ = ray.wait([c.ping.remote()], timeout=5.0)
            self.assertEqual(len(ok), 0)
finally:
ray.shutdown()
def testObjectStoreMemoryRequest(self):
try:
ray.init(num_cpus=1, object_store_memory=300 * MB)
# fits first 2 (70% allowed)
a = Actor2.remote()
            b = Actor2.remote()
ok, _ = ray.wait(
[a.ping.remote(), b.ping.remote()],
timeout=60.0,
num_returns=2)
self.assertEqual(len(ok), 2)
# does not fit
c = Actor2.remote()
ok, _ = ray.wait([c.ping.remote()], timeout=5.0)
self.assertEqual(len(ok), 0)
finally:
ray.shutdown()
def testTuneDriverHeapLimit(self):
try:
_register_all()
result = tune.run(
"PG",
stop={"timesteps_total": 10000},
config={
"env": "CartPole-v0",
"memory": 100 * 1024 * 1024, # too little
},
raise_on_failed_trial=False)
self.assertEqual(result.trials[0].status, "ERROR")
self.assertTrue(
"RayOutOfMemoryError: Heap memory usage for ray_PG_" in
result.trials[0].error_msg)
finally:
ray.shutdown()
def testTuneDriverStoreLimit(self):
try:
_register_all()
self.assertRaisesRegexp(
ray.tune.error.TuneError,
".*Insufficient cluster resources.*",
lambda: tune.run(
"PG",
stop={"timesteps_total": 10000},
config={
"env": "CartPole-v0",
# too large
"object_store_memory": 10000 * 1024 * 1024,
}))
finally:
ray.shutdown()
def testTuneWorkerHeapLimit(self):
try:
_register_all()
result = tune.run(
"PG",
stop={"timesteps_total": 10000},
config={
"env": "CartPole-v0",
"num_workers": 1,
"memory_per_worker": 100 * 1024 * 1024, # too little
},
raise_on_failed_trial=False)
self.assertEqual(result.trials[0].status, "ERROR")
self.assertTrue(
"RayOutOfMemoryError: Heap memory usage for ray_Rollout" in
result.trials[0].error_msg)
finally:
ray.shutdown()
def testTuneWorkerStoreLimit(self):
try:
_register_all()
self.assertRaisesRegexp(
ray.tune.error.TuneError,
".*Insufficient cluster resources.*",
lambda:
tune.run("PG", stop={"timesteps_total": 0}, config={
"env": "CartPole-v0",
"num_workers": 1,
# too large
"object_store_memory_per_worker": 10000 * 1024 * 1024,
}))
finally:
ray.shutdown()
def testTuneObjectLimitApplied(self):
try:
result = tune.run(
train_oom,
resources_per_trial={"object_store_memory": 150 * 1024 * 1024},
raise_on_failed_trial=False)
            self.assertEqual(result.trials[0].status, "ERROR")
self.assertTrue("ObjectStoreFullError: Failed to put" in
result.trials[0].error_msg)
finally:
ray.shutdown()
if __name__ == "__main__":
unittest.main(verbosity=2)
|
StackVista/sts-agent-integrations-core
|
sqlserver/check.py
|
Python
|
bsd-3-clause
| 27,601
| 0.002935
|
'''
Check the performance counters from SQL Server
See http://blogs.msdn.com/b/psssql/archive/2013/09/23/interpreting-the-counter-values-from-sys-dm-os-performance-counters.aspx
for information on how to report the metrics available in the sys.dm_os_performance_counters table
'''
# stdlib
import traceback
from contextlib import contextmanager
# 3rd party
import adodbapi
try:
import pyodbc
except ImportError:
pyodbc = None
from config import _is_affirmative
# project
from checks import AgentCheck
EVENT_TYPE = SOURCE_TYPE_NAME = 'sql server'
ALL_INSTANCES = 'ALL'
VALID_METRIC_TYPES = ('gauge', 'rate', 'histogram')
# Constant for SQLServer cntr_type
PERF_LARGE_RAW_BASE = 1073939712
PERF_RAW_LARGE_FRACTION = 537003264
PERF_AVERAGE_BULK = 1073874176
PERF_COUNTER_BULK_COUNT = 272696576
PERF_COUNTER_LARGE_RAWCOUNT = 65792
# Queries
COUNTER_TYPE_QUERY = '''select distinct cntr_type
from sys.dm_os_performance_counters
where counter_name = ?;'''
BASE_NAME_QUERY = '''select distinct counter_name
from sys.dm_os_performance_counters
where (counter_name=? or counter_name=?
or counter_name=?) and cntr_type=%s;''' % PERF_LARGE_RAW_BASE
INSTANCES_QUERY = '''select instance_name
from sys.dm_os_performance_counters
where counter_name=? and instance_name!='_Total';'''
VALUE_AND_BASE_QUERY = '''select cntr_value
from sys.dm_os_performance_counters
where (counter_name=? or counter_name=?)
and instance_name=?
order by cntr_type;'''
DATABASE_EXISTS_QUERY = 'select name from sys.databases;'
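# Illustrative helper (an added sketch, not used by the check below):
# ratio-style counters such as PERF_RAW_LARGE_FRACTION and PERF_AVERAGE_BULK
# are only meaningful when divided by their companion PERF_LARGE_RAW_BASE
# row, which is what VALUE_AND_BASE_QUERY retrieves.
def _fraction_from_value_and_base(value, base):
    """Return value / base, guarding against a zero or missing base row."""
    if not base:
        return 0.0
    return float(value) / base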
class SQLConnectionError(Exception):
"""
Exception raised for SQL instance connection issues
"""
pass
class SQLServer(AgentCheck):
SERVICE_CHECK_NAME = 'sqlserver.can_connect'
# FIXME: 6.x, set default to 5s (like every check)
DEFAULT_COMMAND_TIMEOUT = 30
DEFAULT_DATABASE = 'master'
DEFAULT_DRIVER = 'SQL Server'
DEFAULT_DB_KEY = 'database'
PROC_GUARD_DB_KEY = 'proc_only_if_database'
METRICS = [
('sqlserver.buffer.cache_hit_ratio', 'Buffer cache hit ratio', ''), # RAW_LARGE_FRACTION
('sqlserver.buffer.page_life_expectancy', 'Page life expectancy', ''), # LARGE_RAWCOUNT
('sqlserver.stats.batch_requests', 'Batch Requests/sec', ''), # BULK_COUNT
('sqlserver.stats.sql_compilations', 'SQL Compilations/sec', ''), # BULK_COUNT
('sqlserver.stats.sql_recompilations', 'SQL Re-Compilations/sec', ''), # BULK_COUNT
('sqlserver.stats.connections', 'User Connections', ''), # LARGE_RAWCOUNT
('sqlserver.stats.lock_waits', 'Lock Waits/sec', '_Total'), # BULK_COUNT
('sqlserver.access.page_splits', 'Page Splits/sec', ''), # BULK_COUNT
('sqlserver.stats.procs_blocked', 'Processes blocked', ''), # LARGE_RAWCOUNT
('sqlserver.buffer.checkpoint_pages', 'Checkpoint pages/sec', '') # BULK_COUNT
]
valid_connectors = ['adodbapi']
if pyodbc is not None:
valid_connectors.append('odbc')
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Cache connections
self.connections = {}
self.failed_connections = {}
self.instances_metrics = {}
self.existing_databases = None
self.do_check = {}
self.proc_type_mapping = {
'gauge': self.gauge,
'rate' : self.rate,
'histogram': self.histogram
}
self.connector = init_config.get('connector', 'adodbapi')
if not self.connector.lower() in self.valid_connectors:
self.log.error("Invalid database connector %s, defaulting to adodbapi" % self.connector)
self.connector = 'adodbapi'
# Pre-process the list of metrics to collect
self.custom_metrics = init_config.get('custom_metrics', [])
for instance in instances:
try:
instance_key = self._conn_key(instance, self.DEFAULT_DB_KEY)
self.do_check[instance_key] = True
# check to see if the database exists before we try any connections to it
with self.open_managed_db_connections(instance, None, db_name=self.DEFAULT_DATABASE):
db_exists, context = self._check_db_exists(instance)
if db_exists:
if instance.get('stored_procedure') is None:
with self.open_managed_db_connections(instance, self.DEFAULT_DB_KEY):
self._make_metric_list_to_collect(instance, self.custom_metrics)
else:
# How much do we care that the DB doesn't exist?
ignore = _is_affirmative(instance.get("ignore_missing_database", False))
if ignore is not None and ignore:
                            # not much: we expect it, leave checks disabled
self.do_check[instance_key] = False
self.log.warning("Database %s does not exist. Disabling checks for this instance." % (context))
else:
# yes we do. Keep trying
self.log.error("Database %s does not exist. Fix issue and restart agent" % (context))
            except SQLConnectionError:
self.log.exception("Skipping SQL Server instance")
continue
def _check_db_exists(self, instance):
"""
Check if the database we're targeting actually exists
If not then we won't do any checks
This allows the same config to be installed on many servers but fail gracefully
"""
dsn, host, username, password, database, driver = self._get_access_info(instance, self.DEFAULT_DB_KEY)
context = "%s - %s" % (host, database)
if self.existing_databases is None:
cursor = self.get_cursor(instance, None, self.DEFAULT_DATABASE)
try:
self.existing_databases = {}
cursor.execute(DATABASE_EXISTS_QUERY)
for row in cursor:
self.existing_databases[row.name] = True
except Exception, e:
self.log.error("Failed to check if database %s exists: %s" % (database, e))
return False, context
finally:
self.close_cursor(cursor)
return database in self.existing_databases, context
def _make_metric_list_to_collect(self, instance, custom_metrics):
"""
Store the list of metrics to collect by instance_key.
Will also create and cache cursors to query the db.
"""
metrics_to_collect = []
for name, counter_name, instance_name in self.METRICS:
try:
sql_type, base_name = self.get_sql_type(instance, counter_name)
metrics_to_collect.append(self.typed_metric(instance,
name,
counter_name,
base_name,
None,
sql_type,
instance_name,
None))
except SQLConnectionError:
raise
except Exception:
self.log.warning("Can't load the metric %s, ignoring", name, exc_info=True)
continue
# Load any custom metrics from conf.d/sqlserver.yaml
for row in custom_metrics:
user_type = row.get('type')
if user_type is not None and user_type not in VALID_METRIC_TYPES:
self.log.error('%s has an invalid metric type: %s', row['name'], user_type)
sql_type =
|
drnextgis/QGIS
|
python/plugins/processing/algs/grass7/ext/r_li_padcv.py
|
Python
|
gpl-2.0
| 1,324
| 0
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_padcv.py
-------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import absolute_import
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .r_li import checkMovingWindow, configFile
def checkParameterValuesBeforeExecuting(alg):
return checkMovingWindow(alg)
def processCommand(alg):
configFile(alg)
|
danclaudiupop/django-test-html-form
|
setup.py
|
Python
|
bsd-3-clause
| 546
| 0
|
from setuptools import setup, find_packages
setup(
name='django-test-html-form',
version='0.1',
description="Make your Django HTML form tests more explicit and concise.",
long_description=open('README.rst').read(),
keywords='django test assert',
author='Dan Claudiu Pop',
author_email='dancladiupop@gmail.com',
url='https://github.com/danclaudiupop/assertHtmlForm',
license='BSD License',
packages=find_packages(),
include_package_data=True,
install_requires=[
'beautifulsoup4',
],
)
|
mirumee/django-messages
|
django_messages/tests.py
|
Python
|
bsd-3-clause
| 2,362
| 0.008044
|
import datetime
from django.test import TestCase
from django.contrib.auth.models import User
from django_messages.models import Message
class SendTestCase(TestCase):
def setUp(self):
self.user1 = User.objects.create_user('user1', 'user1@example.com', '123456')
self.user2 = User.objects.create_user('user2', 'user2@example.com', '123456')
self.msg1 = Message(sender=self.user1, recipient=self.user2, subject='Subject Text', body='Body Text')
self.msg1.save()
def testBasic(self):
self.assertEquals(self.msg1.sender, self.user1)
self.assertEquals(self.msg1.recipient, self.user2)
self.assertEquals(self.msg1.subject, 'Subject Text')
self.assertEquals(self.msg1.body, 'Body Text')
self.assertEquals(self.user1.sent_messages.count(), 1)
self.assertEquals(self.user1.received_messages.count(), 0)
self.assertEquals(self.user2.received_messages.count(), 1)
self.assertEquals(self.user2.sent_messages.count(), 0)
class DeleteTestCase(TestCase):
def setUp(self):
self.user1 = User.objects.create_user('user3', 'user3@example.com', '123456')
self.user2 = User.objects.create_user('user4', 'user4@example.com', '123456')
self.msg1 = Message(sender=self.user1, recipient=self.user2, subject='Subject Text 1', body='Body Text 1')
        self.msg2 = Message(sender=self.user1, recipient=self.user2, subject='Subject Text 2', body='Body Text 2')
self.msg1.sender_deleted_at = datetime.datetime.now()
self.msg2.recipient_deleted_at = datetime.datetime.now()
        self.msg1.save()
self.msg2.save()
def testBasic(self):
self.assertEquals(Message.objects.outbox_for(self.user1).count(), 1)
self.assertEquals(Message.objects.outbox_for(self.user1)[0].subject, 'Subject Text 2')
self.assertEquals(Message.objects.inbox_for(self.user2).count(),1)
self.assertEquals(Message.objects.inbox_for(self.user2)[0].subject, 'Subject Text 1')
#undelete
self.msg1.sender_deleted_at = None
self.msg2.recipient_deleted_at = None
self.msg1.save()
self.msg2.save()
self.assertEquals(Message.objects.outbox_for(self.user1).count(), 2)
self.assertEquals(Message.objects.inbox_for(self.user2).count(),2)
|
shankari/folium
|
folium/folium.py
|
Python
|
mit
| 14,914
| 0
|
# -*- coding: utf-8 -*-
"""
Folium
-------
Make beautiful, interactive maps with Python and Leaflet.js
"""
from __future__ import absolute_import
from branca.colormap import StepColormap
from branca.utilities import color_brewer
from .map import LegacyMap, FitBounds
from .features import GeoJson, TopoJson
class Map(LegacyMap):
"""Create a Map with Folium and Leaflet.js
Generate a base map of given width and height with either default
tilesets or a custom tileset URL. The following tilesets are built-in
to Folium. Pass any of the following to the "tiles" keyword:
- "OpenStreetMap"
- "Mapbox Bright" (Limited levels of zoom for free tiles)
- "Mapbox Control Room" (Limited levels of zoom for free tiles)
- "Stamen" (Terrain, Toner, and Watercolor)
- "Cloudmade" (Must pass API key)
- "Mapbox" (Must pass API key)
- "CartoDB" (positron and dark_matter)
You can pass a custom tileset to Folium by passing a Leaflet-style
URL to the tiles parameter:
http://{s}.yourtiles.com/{z}/{x}/{y}.png
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Map (Northing, Easting).
width: pixel int or percentage string (default: '100%')
Width of the map.
height: pixel int or percentage string (default: '100%')
Height of the map.
tiles: str, default 'OpenStreetMap'
Map tileset to use. Can choose from a list of built-in tiles,
pass a custom URL or pass `None` to create a map without tiles.
API_key: str, default None
API key for Cloudmade or Mapbox tiles.
max_zoom: int, default 18
Maximum zoom depth for the map.
zoom_start: int, default 10
Initial zoom level for the map.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
detect_retina: bool, default False
If true and user is on a retina display, it will request four
tiles of half the specified size and a bigger zoom level in place
of one to utilize the high resolution.
crs : str, default 'EPSG3857'
Defines coordinate reference systems for projecting geographical points
into pixel (screen) coordinates and back.
You can use Leaflet's values :
* EPSG3857 : The most common CRS for online maps, used by almost all
free and commercial tile providers. Uses Spherical Mercator projection.
            Set by default in Map's crs option.
* EPSG4326 : A common CRS among GIS enthusiasts.
Uses simple Equirectangular projection.
* EPSG3395 : Rarely used by some commercial tile providers.
Uses Elliptical Mercator projection.
* Simple : A simple CRS that maps longitude and latitude into
x and y directly. May be used for maps of flat surfaces
(e.g. game maps). Note that the y axis should still be inverted
(going from bottom to top).
control_scale : bool, default False
Whether to add a control scale on the map.
prefer_canvas : bool, default False
Forces Leaflet to use the Canvas back-end (if available) for
        vector layers instead of SVG. This can increase performance
considerably in some cases (e.g. many thousands of circle
markers on the map).
    no_touch : bool, default False
Forces Leaflet to not use touch events even if it detects them.
disable_3d : bool, default False
Forces Leaflet to not use hardware-accelerated CSS 3D
transforms for positioning (which may cause glitches in some
rare environments) even if they're supported.
Returns
-------
Folium LegacyMap Object
Examples
--------
>>> map = folium.LegacyMap(location=[45.523, -122.675],
... width=750, height=500)
>>> map = folium.LegacyMap(location=[45.523, -122.675],
tiles='Mapbox Control Room')
>>> map = folium.LegacyMap(location=(45.523, -122.675), max_zoom=20,
tiles='Cloudmade', API_key='YourKey')
>>> map = folium.LegacyMap(location=[45.523, -122.675], zoom_start=2,
tiles=('http://{s}.tiles.mapbox.com/v3/'
'mapbox.control-room/{z}/{x}/{y}.png'),
attr='Mapbox attribution')
"""
def fit_bounds(self, bounds, padding_top_left=None,
padding_bottom_right=None, padding=None, max_zoom=None):
"""Fit the map to contain a bounding box with the
maximum zoom level possible.
Parameters
----------
bounds: list of (latitude, longitude) points
Bounding box specified as two points [southwest, northeast]
padding_top_left: (x, y) point, default None
Padding in the top left corner. Useful if some elements in
the corner, such as controls, might obscure objects you're zooming
to.
padding_bottom_right: (x, y) point, default None
Padding in the bottom right corner.
padding: (x, y) point, default None
Equivalent to setting both top left and bottom right padding to
the same value.
max_zoom: int, default None
Maximum zoom to be used.
Examples
--------
>>> map.fit_bounds([[52.193636, -2.221575], [52.636878, -1.139759]])
"""
self.add_child(FitBounds(bounds,
padding_top_left=padding_top_left,
padding_bottom_right=padding_bottom_right,
padding=padding,
max_zoom=max_zoom,
)
)
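    # Example usage combining the constructor documented above with
    # fit_bounds (an added illustrative sketch; coordinates and file name are
    # arbitrary assumptions):
    #
    #   m = Map(location=[45.523, -122.675], zoom_start=12)
    #   m.fit_bounds([[45.50, -122.75], [45.55, -122.60]])
    #   m.save('portland.html')  # save() comes from the branca Element base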
def choropleth(self, geo_path=None, geo_str=None, data_out='data.json',
data=None, columns=None, key_on=None, threshold_scale=None,
fill_color='blue', fill_opacity=0.6, line_color='black',
line_weight=1, line_opacity=1, legend_name="",
topojson=None, reset=False, smooth_factor=None,
highlight=None):
"""
Apply a GeoJSON overlay to the map.
Plot a GeoJSON overlay on the base map. There is no requirement
to bind data (passing just a GeoJSON plots a single-color overlay),
but there is a data binding option to map your columnar data to
different feature objects with a color scale.
If data is passed as a Pandas DataFrame, the "columns" and "key-on"
keywords must be included, the first to indicate which DataFrame
columns to use, the second to indicate the layer in the GeoJSON
on which to key the data. The 'columns' keyword does not need to be
passed for a Pandas series.
Colors are generated from color brewer (http://colorbrewer2.org/)
sequential palettes on a D3 threshold scale. The scale defaults to the
following quantiles: [0, 0.5, 0.75, 0.85, 0.9]. A custom scale can be
passed to `threshold_scale` of length <=6, in order to match the
color brewer range.
TopoJSONs can be passed as "geo_path", but the "topojson" keyword must
also be passed with the reference to the topojson objects to convert.
See the topojson.feature method in the TopoJSON API reference:
https://github.com/mbostock/topojson/wiki/API-Reference
Parameters
----------
geo_path: string, default None
URL or File path to your GeoJSON data
geo_str: string, default None
String of GeoJSON, alternative to geo_path
data_out: string, default 'data.json'
Path to write Pandas DataFrame/Series to JSON if binding data
data: Pandas DataFrame or Series, default None
Data to bind to the GeoJSON.
columns: dict or tuple, default None
If the data is a Pandas DataFrame, the columns of data to be bound.
Must pass column 1 as the key, and column 2 the values.
key_o
|
Anfauglith/iop-hd
|
test/functional/zapwallettxes.py
|
Python
|
mit
| 3,234
| 0.002474
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.
- start two iopd nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 with zapwallettxes and persistmempool, and verify that both
the confirmed and the unconfirmed transactions are still available.
- restart node 0 with just zapwallettxes and verify that the confirmed
transactions are still available, but that the unconfirmed transaction has
been zapped.
"""
from test_framework.test_framework import IoPTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
wait_until,
)
class ZapWalletTXesTest (IoPTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(100)
self.sync_all()
# This transaction will be confirmed
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
self.nodes[0].generate(1)
self.sync_all()
# This transaction will not be confirmed
txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
# Confirmed and unconfirmed transactions are now in the wallet.
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
self.stop_node(0)
self.start_node(0)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed
# transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
self.stop_node(0)
self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
        # Stop node0 and restart with zapwallettxes, but not persistmempool.
# The unconfirmed transaction is zapped and is no longer in the wallet.
self.stop_node(0)
self.start_node(0, ["-zapwallettxes=2"])
        # tx1 is still available because it was confirmed
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
# This will raise an exception because the unconfirmed transaction has been zapped
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
if __name__ == '__main__':
ZapWalletTXesTest().main()
|
ffunenga/virtuallinks
|
tests/core/core.py
|
Python
|
mit
| 375
| 0
|
import sys
import os
import shutil
def import_package(name):
_filepath = os.path.abspath(__file__)
path = backup = os.path.dirname(_filepath)
while os.path.basename(path) != name:
        path = os.path.join(path, '..')
path = os.path.abspath(path)
if path != backup:
sys.path.insert(0, path)
module = __import__(name)
return module
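# Example (an added note; assumes this file lives somewhere under a directory
# named after the package, which is what the loop above relies on):
#
#   virtuallinks = import_package('virtuallinks')
#   print(virtuallinks.__file__)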
|
yifeng-li/DECRES
|
rbm.py
|
Python
|
bsd-3-clause
| 22,163
| 0.006723
|
"""
A module of restricted Boltzmann machine (RBM) modified
from the Deep Learning Tutorials (www.deeplearning.net/tutorial/).
Copyright (c) 2008-2013, Theano Development Team All rights reserved.
Modified by Yifeng Li
CMMT, UBC, Vancouver
Sep 23, 2014
Contact: yifeng.li.cn@gmail.com
"""
from __future__ import division
import time
import math
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import classification as cl
class RBM(object):
"""Restricted Boltzmann Machine (RBM) """
def __init__(self, input=None, n_visible=784, n_hidden=500, \
W=None, hbias=None, vbias=None, numpy_rng=None,
theano_rng=None):
"""
RBM constructor. Defines the parameters of the model along with
basic operations for inferring hidden from visible (and vice-versa),
as well as for performing CD updates.
:param input: None for standalone RBMs or symbolic variable if RBM is
part of a larger graph.
:param n_visible: number of visible units
:param n_hidden: number of hidden units
:param W: None for standalone RBMs or symbolic variable pointing to a
shared weight matrix in case RBM is part of a DBN network; in a DBN,
the weights are shared between RBMs and layers of a MLP
:param hbias: None for standalone RBMs or symbolic variable pointing
to a shared hidden units bias vector in case RBM is part of a
different network
:param vbias: None for standalone RBMs or a symbolic variable
pointing to a shared visible units bias
"""
self.n_visible = n_visible
self.n_hidden = n_hidden
if numpy_rng is None:
# create a number generator
numpy_rng = numpy.random.RandomState(1234)
if theano_rng is None:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
if W is None:
            # W is initialized with `initial_W`, which is uniformly
            # sampled from -4*sqrt(6./(n_visible+n_hidden)) to
            # 4*sqrt(6./(n_hidden+n_visible)); the output of uniform is
            # converted using asarray to dtype theano.config.floatX so
            # that the code is runnable on GPU
initial_W = numpy.asarray(numpy_rng.uniform(
low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
size=(n_visible, n_hidden)),
dtype=theano.config.floatX)
# theano shared variables for weights and biases
W = theano.shared(value=initial_W, name='W', borrow=True)
if hbias is None:
# create shared variable for hidden units bias
hbias = theano.shared(value=numpy.zeros(n_hidden,
dtype=theano.config.floatX),
name='hbias', borrow=True)
if vbias is None:
# create shared variable for visible units bias
vbias = theano.shared(value=numpy.zeros(n_visible,
dtype=theano.config.floatX),
name='vbias', borrow=True)
# initialize input layer for standalone RBM or layer0 of DBN
self.input = input
if not input:
self.input = T.matrix('input')
self.W = W
self.hbias = hbias
self.vbias = vbias
self.theano_rng = theano_rng
# **** WARNING: It is not a good idea to put things in this list
# other than shared variables created in this function.
self.params = [self.W, self.hbias, self.vbias]
def free_energy(self, v_sample):
''' Function to compute the free energy '''
wx_b = T.dot(v_sample, self.W) + self.hbias
vbias_term = T.dot(v_sample, self.vbias)
hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
return -hidden_term - vbias_term
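    # In formula form (added note, matching the code above): for a visible
    # configuration v,
    #   F(v) = - vbias . v - sum_j log(1 + exp(hbias_j + (v W)_j))
    # so a lower free energy corresponds to a higher unnormalised probability
    # of v under the model.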
def propup(self, vis):
'''This function propagates the visible units activation upwards to
the hidden units
Note that we return also the pre-sigmoid activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_h_given_v(self, v0_sample):
''' This function infers state of hidden units given visible units '''
# compute the activation of the hidden units given a sample of
# the visibles
pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
# get a sample of the hiddens given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
h1_sample = self.theano_rng.binomial(size=h1_mean.shape,
n=1, p=h1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_h1, h1_mean, h1_sample]
def propdown(self, hid):
'''This function propagates the hidden units activation downwards to
the visible units
Note that we return also the pre_sigmoid_activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(hid, self.W.T) + self.vbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_v_given_h(self, h0_sample):
''' This function infers state of visible units given hidden units '''
# compute the activation of the visible given the hidden sample
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
# get a sample of the visible given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
n=1, p=v1_mean,
                                             dtype=theano.config.floatX)
return [pre_sigmoid_v1, v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample):
''' This function implements one step of Gibbs sampling,
starting from the hidden state'''
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [pre_sigmoid_v1, v1_mean, v1_sample,
pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample):
''' This function implements one step of Gibbs sampling,
starting from the visible state'''
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return [pre_sigmoid_h1, h1_mean, h1_sample,
pre_sigmoid_v1, v1_mean, v1_sample]
def get_cost_updates(self, lr=0.1, persistent=None, k=1):
"""This functions implements one step of CD-k or PCD-k
:param lr: learning rate used to train the RBM
:param persistent: None for CD. For PCD, shared variable
containing old state of Gibbs chain. This must be a shared
variable of size (batch size, number of hidden units).
:param k: number of Gibbs steps to do in CD-k/PCD-k
Returns a proxy for the cost and the updates dictionar
|
w1ll1am23/home-assistant
|
tests/components/zwave_js/common.py
|
Python
|
apache-2.0
| 1,508
| 0.002653
|
"""Provide common test tools for Z-Wave JS."""
AIR_TEMPERATURE_SENSOR = "sensor.multisensor_6_air_temperature"
HUMIDITY_SENSOR = "sensor.multisensor_6_humidity"
ENERGY_SENSOR = "sensor.smart_plug_with_two_usb_ports_value_electric_consumed_2"
POWER_SENSOR = "sensor.smart_plug_with_two_usb_ports_value_electric_consumed"
SWITCH_ENTITY = "switch.smart_plug_with_two_usb_ports"
LOW_BATTERY_BINARY_SENSOR = "binary_sensor.multisensor_6_low_battery_level"
ENABLED_LEGACY_BINARY_SENSOR = "binary_sensor.z_wave_door_window_sensor_any"
DISABLED_LEGACY_BINARY_SENSOR = "binary_sensor.multisensor_6_any"
NOTIFICATION_MOTION_BINARY_SENSOR = (
"binary_sensor.multisensor_6_home_security_motion_detection"
)
NOTIFICATION_MOTION_SENSOR = "sensor.multisensor_6_home_security_motion_sensor_status"
PROPERTY_DOOR_STATUS_BINARY_SENSOR = (
"binary_sensor.aug
|
ust_smart_lock_pro_3rd_gen_the_current_status_of_the_door"
)
CLIMATE_RADIO_THERMOSTAT_ENTITY = "climate.z_wave_thermostat"
CLIMATE_DANFOSS_LC13_ENTITY = "climate.living_connect_z_thermostat"
CLIMATE_EUROTRONICS_SPIRIT_Z_ENTITY = "climate.thermostatic_valve"
CLIMATE_FLOOR_THERMOSTAT_ENTITY = "climate.floor_thermostat"
CLIMATE_MAIN_HEAT_ACTIONNER = "climate.main_heat_actionner"
BULB_6_MULTI_COLOR_LIGHT_ENTITY = "light.bulb_6_multi_color"
EATON_RF9640_ENTITY = "light.allloaddimmer"
AEON_SMART_SWITCH_LIGHT_ENTITY = "light.smart_switch_6"
ID_LOCK_CONFIG_PARAMETER_SENSOR = (
"sensor.z_wave_module_for_id_lock_150_and_101_config_parameter_door_lock_mode"
)
|
dmnfarrell/peat
|
PEATSA/Tools/HIVTools/CombinationConverter.py
|
Python
|
mit
| 4,305
| 0.029268
|
#! /usr/bin/env python
import sys
import PEAT_SA.Core as Core
import Protool
import itertools
def getPathSequence(combinations):
path = []
currentSet = set(combinations[0].split(','))
path.append(combinations[0])
for i in range(1, len(combinations)):
newSet = set(combinations[i].split(','))
newElement = newSet.difference(currentSet)
path.append(list(newElement)[0])
currentSet = newSet
return path
def getTypePath(combinations, typeMap):
path = getPathSequence(combinations)
    print 'Mutation accumulation pattern ', path
types = []
for el in path:
try:
types.append(typeMap[el])
except KeyError:
types.append('N/A')
print types
return types
def codesToCombinations(mutationCodes):
'''Converts a mutation code, which involves both chains, to a list of non-chain specific codes
e.g. A84V+B84V+A32F+B32F => 84V, 32F'''
holder = []
for code in mutationCodes:
mutationSet = Core.Data.MutationSet(code)
holder.append(list(set([code[1:] for code in mutationSet.reducedMutationCodes()])))
return holder
def observedPathsForCombination(combination, observedCombinations):
print '\nSearching for observed paths to combination %s' % combination
numberOfMutations = len(combination)
pathways = itertools.permutations(combination, numberOfMutations)
checked = 0
found = 0
observedPathways = []
for pathway in pathways:
#print 'Putative pathway %s' % list(pathway)
parts = []
for j in range(1,numberOfMutations + 1):
observed = False
sub = pathway[:j]
#print '\tChecking subpath %s is observed' % list(sub)
subPerms = itertools.permutations(sub)
#Check if this sub combination
for subPerm in subPerms:
subPerm = ','.join(subPerm)
if observedCombinations.count(subPerm) == 1:
#print '\tObserved Sub %s!' % subPerm
parts.append(subPerm)
observed = True
break
if observed is False:
break
if observed:
found = found + 1
observedPathways.append(parts)
checked = checked + 1
print '%d putative pathways. %d found\n' % (checked, found)
return observedPathways
def vitalityProfileForPath(path, vitalities, fold):
print 'Vitalities :',
values = []
for combination in path:
print vitalities[combination],
values.append(vitalities[combination])
print '\n',
    print 'Fold :',
    folds = []
for combination in path:
print fold[combination],
folds.append(fold[combination])
print '\n'
return values, folds
#Read in types
typeData = Core.Matrix.matrixFromCSVFile(sys.argv[2])
typeIndex = typeData.indexOfColumnWithHeader('Type')
#Get all entries for specified drug
drugName = sys.argv[4]
trimMatrix = Core.Matrix.PEATSAMatrix(rows=[[0]*9], headers=typeData.columnHeaders())
drugNameIndex = typeData.indexOfColumnWithHeader('Drug Name')
for row in typeData:
if row[drugNameIndex] == drugName:
trimMatrix.addRow(row)
#Read in combinations
combinationData = Core.Matrix.matrixFromCSVFile(sys.argv[1])
mutationCodes = combinationData.column(0)
combinations = codesToCombinations(mutationCodes)
print combinations
vitalities = combinationData.columnWithHeader(drugName+'Vitality')
fold = combinationData.columnWithHeader(drugName+'Fold')
pdb = Protool.structureIO()
pdb.readpdb(sys.argv[3])
types = []
combinationStrings = [','.join(combo) for combo in combinations]
#Skip WT
mutations = trimMatrix.columnWithHeader('Mutations')[1:]
mutations = [(el[:-2] + el[-1]) for el in mutations]
typeMap = dict(zip(mutations, trimMatrix.column(typeIndex)))
filteredPaths = []
for combination in combinations:
paths = observedPathsForCombination(combination, combinationStrings)
for path in paths:
accumulationPattern = getPathSequence(path)
if accumulationPattern[-1][:2] == '46' and len(accumulationPattern) > 1:
print 'Found paths ending with mutation to 46'
filteredPaths.append(path)
results = []
for path in filteredPaths:
#typePath = getTypePath(path, typeMap)
profile, foldres = vitalityProfileForPath(path, dict(zip(combinationStrings, vitalities)), dict(zip(combinationStrings, fold)))
if profile[-2:].count('') == 0:
mutation = path[-2]
entry = [mutation, profile[-1] - profile[-2], foldres[-1]/foldres[-2]]
if results.count(entry) == 0:
results.append(entry)
else:
print 'Skipping - Missing data\n'
for m in results:
print m
|
quantumlib/OpenFermion-FQE
|
src/fqe/fqe_decorators.py
|
Python
|
apache-2.0
| 14,347
| 0.000767
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities and decorators for converting external types into the fqe
intrinsics
"""
#there are two places where access to protected members improves code quality
#pylint: disable=protected-access
from typing import Any, Dict, Tuple, Union, Optional, List
from functools import wraps
from itertools import permutations
import copy
import numpy
from openfermion import FermionOperator
from openfermion.utils import is_hermitian
from openfermion import normal_ordered
from fqe.hamiltonians import hamiltonian
from fqe.hamiltonians import general_hamiltonian
from fqe.hamiltonians import diagonal_hamiltonian
from fqe.hamiltonians import diagonal_coulomb
from fqe.hamiltonians import gso_hamiltonian
from fqe.hamiltonians import restricted_hamiltonian
from fqe.hamiltonians import sparse_hamiltonian
from fqe.hamiltonians import sso_hamiltonian
from fqe.openfermion_utils import largest_operator_index
from fqe.util import validate_tuple, reverse_bubble_list
from fqe.fqe_ops import fqe_ops_utils
def build_hamiltonian(ops: Union[FermionOperator, hamiltonian.Hamiltonian],
norb: int = 0,
conserve_number: bool = True,
e_0: complex = 0. + 0.j) -> 'hamiltonian.Hamiltonian':
"""Build a Hamiltonian object for FQE.
Args:
ops (FermionOperator, hamiltonian.Hamiltonian): input operator as \
FermionOperator. If a Hamiltonian is passed as an argument, \
this function returns as is.
norb (int): the number of orbitals in the system
conserve_number (bool): whether the operator conserves the number
e_0 (complex): the scalar part of the operator
Returns:
(hamiltonian.Hamiltonian): General Hamiltonian that is created from ops
"""
if isinstance(ops, hamiltonian.Hamiltonian):
return ops
if isinstance(ops, tuple):
validate_tuple(ops)
if norb != 0 and ops[0].shape[0] == norb:
return restricted_hamiltonian.RestrictedHamiltonian(ops, e_0=e_0)
else:
return general_hamiltonian.General(ops, e_0=e_0)
if not isinstance(ops, FermionOperator):
raise TypeError('Expected FermionOperator' \
' but received {}.'.format(type(ops)))
assert is_hermitian(ops)
out: Any
if len(ops.terms) <= 2:
out = sparse_hamiltonian.SparseHamiltonian(ops, e_0=e_0)
else:
if not conserve_number:
ops = transform_to_spin_broken(ops)
ops = normal_ordered(ops)
ops_rank, e_0 = split_openfermion_tensor(ops) # type: ignore
if norb == 0:
for term in ops_rank.values():
ablk, bblk = largest_operator_index(term)
norb = max(norb, ablk // 2 + 1, bblk // 2 + 1)
else:
norb = norb
ops_mat = {}
maxrank = 0
for rank, term in ops_rank.items():
index = rank // 2 - 1
ops_mat[index] = fermionops_tomatrix(term, norb)
maxrank = max(index, maxrank)
if len(ops_mat) == 1 and (0 in ops_mat):
out = process_rank2_matrix(ops_mat[0], norb=norb, e_0=e_0)
elif len(ops_mat) == 1 and \
             (1 in ops_mat) and \
             check_diagonal_coulomb(ops_mat[1]):
out = diagonal_coulomb.DiagonalCoulomb(ops_mat[1], e_0=e_0)
else:
dtypes = [xx.dtype for xx in ops_mat.values()]
dtypes = numpy.unique(dtypes)
assert len(dtypes) == 1
for i in range(maxrank + 1):
if i not in ops_mat:
mat_dim = tuple([2 * norb for _ in range((i + 1) * 2)])
ops_mat[i] = numpy.zeros(mat_dim, dtype=dtypes[0])
ops_mat2 = []
for i in range(maxrank + 1):
ops_mat2.append(ops_mat[i])
out = general_hamiltonian.General(tuple(ops_mat2), e_0=e_0)
out._conserve_number = conserve_number
return out
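# Illustrative usage (an added sketch, not part of the original module):
#
#   from openfermion import FermionOperator
#   hopping = FermionOperator('0^ 2', 0.5) + FermionOperator('2^ 0', 0.5)
#   hamil = build_hamiltonian(hopping, norb=2)
#
# A hermitian FermionOperator is converted into one of the specialised
# Hamiltonian classes imported above (a SparseHamiltonian for an operator
# this small).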
def transform_to_spin_broken(ops: 'FermionOperator') -> 'FermionOperator':
"""Convert a FermionOperator string from number broken to spin broken
operators.
Args:
ops (FermionOperator): input FermionOperator
Returns:
(FermionOperator): transformed FermionOperator to spin broken indexing
"""
newstr = FermionOperator()
for term in ops.terms:
opstr = ''
for element in term:
if element[0] % 2:
if element[1]:
opstr += str(element[0]) + ' '
else:
opstr += str(element[0]) + '^ '
else:
if element[1]:
opstr += str(element[0]) + '^ '
else:
opstr += str(element[0]) + ' '
newstr += FermionOperator(opstr, ops.terms[term])
return newstr
def split_openfermion_tensor(ops: 'FermionOperator'
) -> Tuple[Dict[int, 'FermionOperator'], complex]:
"""Given a string of openfermion operators, split them according to their
rank.
Args:
ops (FermionOperator): a string of OpenFermion Fermion Operators
Returns:
split dict[int] = FermionOperator: a list of Fermion Operators sorted
according to their rank.
"""
e_0 = 0. + 0.j
split: Dict[int, 'FermionOperator'] = {}
for term in ops:
rank = term.many_body_order()
if rank % 2:
raise ValueError('Odd rank term not accepted')
if rank == 0:
e_0 += term.terms[()]
else:
if rank not in split:
split[rank] = term
else:
split[rank] += term
return split, e_0
def fermionops_tomatrix(ops: 'FermionOperator', norb: int) -> numpy.ndarray:
"""Convert FermionOperators to a matrix.
Args:
ops (FermionOperator): input FermionOperator from OpenFermion
norb (int): the number of orbitals in the system
Returns:
(numpy.ndarray): resulting matrix
"""
ablk, bblk = largest_operator_index(ops)
if norb <= ablk // 2:
raise ValueError('Highest alpha index exceeds the norb of orbitals')
if norb <= bblk // 2:
raise ValueError('Highest beta index exceeds the norb of orbitals')
rank = ops.many_body_order()
if rank % 2:
raise ValueError('Odd rank operator not supported')
tensor_dim = [norb * 2 for _ in range(rank)]
index_mask = [0 for _ in range(rank)]
index_dict_dagger = [[0, 0] for _ in range(rank // 2)]
index_dict_nondagger = [[0, 0] for _ in range(rank // 2)]
tensor = numpy.zeros(tensor_dim, dtype=numpy.complex128)
for term in ops.terms:
for i in range(rank):
index = term[i][0]
if i < rank // 2:
if not term[i][1]:
raise ValueError('Found annihilation operator where' \
'creation is expected')
elif term[i][1]:
raise ValueError('Found creation operator where ' \
'annihilation is expected')
spin = index % 2
if spin == 1:
ind = (index - 1) // 2 + norb
else:
ind = index // 2
if i < rank // 2:
index_dict_dagger[i][0] = spin
index_dict_dagger[i][1] = ind
else:
index_dict_nondagger[i - rank // 2][0] = spin
index_dict_nondagger[i - rank // 2][1] = ind
parity = rever
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-containerservice/azure/mgmt/containerservice/models/container_service_diagnostics_profile.py
|
Python
|
mit
| 1,170
| 0.000855
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ContainerServiceDiagnosticsProfile(Model):
"""Profile for diagnostics on the container service cluster.
:param vm_diagnostics: Profile for diagnostics on the container service
VMs.
:type vm_diagnostics:
~azure.mgmt.containerservice.models.ContainerServiceVMDiagnostics
"""
_validation = {
'vm_diagnostics': {'required': True},
}
_attribute_map = {
'vm_diagnostics': {'key': 'vmDiagnostics', 'type': 'ContainerServiceVMDiagnostics'},
}
    def __init__(self, vm_diagnostics):
        super(ContainerServiceDiagnosticsProfile, self).__init__()
self.vm_diagnostics = vm_diagnostics
|
omi/stl-api-gateway
|
omi_api/client.py
|
Python
|
mit
| 10,377
| 0.001253
|
# Copyright 2017 ContextLabs B.V.
import time
import hashlib
import urllib
import requests
import sawtooth_signing as signing
from base64 import b64decode
from random import randint
from sawtooth_omi.protobuf.work_pb2 import Work
from sawtooth_omi.protobuf.recording_pb2 import Recording
from sawtooth_omi.protobuf.identity_pb2 import IndividualIdentity
from sawtooth_omi.protobuf.identity_pb2 import OrganizationalIdentity
from sawtooth_omi.protobuf.txn_payload_pb2 import OMITransactionPayload
from sawtooth_omi.handler import FAMILY_NAME, OMI_ADDRESS_PREFIX, make_omi_address, _get_address_infix
from sawtooth_omi.handler import WORK, RECORDING, INDIVIDUAL, ORGANIZATION
from sawtooth_sdk.protobuf.batch_pb2 import Batch
from sawtooth_sdk.protobuf.batch_pb2 import BatchHeader
from sawtooth_sdk.protobuf.batch_pb2 import BatchList
from sawtooth_sdk.protobuf.transaction_pb2 import Transaction
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
TAG_MAP = {
OrganizationalIdentity: ORGANIZATION,
Recording: RECORDING,
Work: WORK,
IndividualIdentity: INDIVIDUAL,
}
def get_object_address(name, tag):
return make_omi_address(name, tag)
def get_type_prefix(tag):
return OMI_ADDRESS_PREFIX + _get_address_infix(tag)
class Cursor:
def __init__(self, endpoint, message_type, count=100):
self.endpoint = endpoint
qs = urllib.parse.parse_qs(urllib.parse.urlparse(endpoint).query)
if 'count' not in qs:
sep = '&' if qs else '?'
self._next = "%s%scount=%d" % (self.endpoint, sep, count)
else:
self._next = self.endpoint
self.message_type = message_type
self.data = []
def _get_page(self, url):
r = requests.get(url)
r.raise_for_status()
result = r.json()
paging = result['paging']
if 'next' in paging:
self._next = paging['next']
else:
self._next = None
self.data.extend(result['data'])
def _xform(self, item):
# item['address']
# item['data']
return self.message_type.FromString(b64decode(item['data']))
def __iter__(self):
return self
def __next__(self):
if not self.data and self._next:
self._get_page(self._next)
if self.data:
return self._xform(self.data.pop(0))
raise StopIteration()
def submit_omi_transaction(base_url, private_key, action, message_type, natural_key_field, omi_obj, additional_inputs=None):
obj = message_type(**omi_obj)
if additional_inputs is None:
additional_inputs = []
public_key_hex = signing.generate_pubkey(private_key)
address = get_object_address(omi_obj[natural_key_field], TAG_MAP[message_type])
data = obj.SerializeToString()
payload = OMITransactionPayload(
action=action,
data=data,
)
payload_bytes = payload.SerializeToString()
payload_sha512 = hashlib.sha512(payload_bytes).hexdigest()
txn_header = TransactionHeader(
batcher_pubkey=public_key_hex,
family_name=FAMILY_NAME,
family_version='1.0',
inputs=[address] + additional_inputs,
outputs=[address],
nonce=str(randint(0, 1000000000)),
payload_encoding='application/protobuf',
payload_sha512=payload_sha512,
signer_pubkey=public_key_hex,
)
txn_header_bytes = txn_header.SerializeToString()
key_handler = signing.secp256k1_signer._decode_privkey(private_key)
# ecdsa_sign automatically generates a SHA-256 hash
txn_signature = key_handler.ecdsa_sign(txn_header_bytes)
txn_signature_bytes = key_handler.ecdsa_serialize_compact(txn_signature)
txn_signature_hex = txn_signature_bytes.hex()
# print([txn_signature_hex])
txn = Transaction(
header=txn_header_bytes,
header_signature=txn_signature_hex,
payload=payload_bytes,
)
batch_header = BatchHeader(
signer_pubkey=public_key_hex,
        transaction_ids=[txn.header_signature],
)
batch_header_bytes = batch_header.SerializeToString()
batch_signature = key_handler.ecdsa_sign(batch_header_bytes)
batch_signature_bytes = key_handler.ecdsa_serialize_compact(batch_signature)
batch_signature_hex = batch_signature_bytes.hex()
batch = Batch(
header=batch_header_bytes,
header_signature=batch_signature_hex,
transactions=[txn],
)
batch_list = BatchList(batches=[batch])
batch_bytes = batch_list.SerializeToString()
batch_id = batch_signature_hex
url = "%s/batches" % base_url
headers = {
'Content-Type': 'application/octet-stream',
}
r = requests.post(url, data=batch_bytes, headers=headers)
r.raise_for_status()
link = r.json()['link']
return BatchStatus(batch_id, link)
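# Illustrative call (an added sketch; the endpoint, field names and key
# variables are hypothetical, shown only to make the parameters concrete):
#
#   status = submit_omi_transaction(
#       base_url='http://localhost:8008',
#       private_key=private_key_hex,
#       action='SetWork',
#       message_type=Work,
#       natural_key_field='title',
#       omi_obj={'title': 'Example Work', 'registering_pubkey': pubkey_hex},
#   )
#   status.wait_for_committed()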
class BatchStatus:
"""
Provides a function to query for the current status of a submitted transaction.
That is, whether or not the transaction has been committed to the block chain.
"""
def __init__(self, batch_id, status_url):
self.batch_id = batch_id
self.status_url = status_url
def check(self, timeout=5):
"""
Returns the batch status from a transaction submission. The status is one
of ['PENDING', 'COMMITTED', 'INVALID', 'UNKNOWN'].
"""
r = requests.get("%s&wait=%s" % (self.status_url, timeout))
r.raise_for_status()
return r.json()['data'][self.batch_id]
def wait_for_committed(self, timeout=30, check_timeout=5):
start_time = time.time()
while True:
current_time = time.time()
status = self.check(timeout=check_timeout)
if status == "PENDING":
return status
if start_time + timeout >= current_time:
return status
return status
class OMIClient:
def __init__(self, sawtooth_rest_url, private_key, cursor_count=100):
self.sawtooth_rest_url = sawtooth_rest_url
self.private_key = private_key
self.public_key = signing.generate_pubkey(private_key)
self.cursor_count = cursor_count
def _cursor(self, message_type):
type_prefix = get_type_prefix(TAG_MAP[message_type])
url = "%s/state?address=%s" % (self.sawtooth_rest_url, type_prefix)
return Cursor(
url,
message_type,
count=self.cursor_count
)
def _state_entry(self, message_type, name):
address = get_object_address(name, TAG_MAP[message_type])
url = "%s/state/%s" % (self.sawtooth_rest_url, address)
r = requests.get(url)
r.raise_for_status()
data = r.json()['data']
return message_type.FromString(b64decode(data))
def set_individual(self, individual):
omi_obj = dict(individual)
omi_obj['pubkey'] = self.public_key
return submit_omi_transaction(
base_url=self.sawtooth_rest_url,
private_key=self.private_key,
action='SetIndividualIdentity',
message_type=IndividualIdentity,
natural_key_field='name',
omi_obj=omi_obj,
)
def get_individual(self, name):
return self._state_entry(IndividualIdentity, name)
def get_individuals(self):
return self._cursor(IndividualIdentity)
def set_organization(self, organization):
omi_obj = dict(organization)
omi_obj['pubkey'] = self.public_key
return submit_omi_transaction(
base_url=self.sawtooth_rest_url,
private_key=self.private_key,
action='SetOrganizationalIdentity',
message_type=OrganizationalIdentity,
natural_key_field='name',
omi_obj=omi_obj,
)
def get_organization(self, name):
return self._state_entry(OrganizationalIdentity, name)
def get_organizations(self):
return self._cursor(OrganizationalIdentity)
def set_recording(self, recording):
omi_obj = dict(recording)
omi_obj['registering_pubkey'] = self.public_key
label_name = omi_obj.get('label_name', N
|
rCorvidae/OrionPI
|
src/tests/Devices/Containers/__init__.py
|
Python
|
mit
| 188
| 0.010638
|
from .TestContainersDeviceAndManager import TestContainerDeviceDataFlow
from .TestContainersReceivingSerialDataAndObserverPattern import TestContainersReceivingSerialDataAndObserverPattern
|
DarkFenX/Phobos
|
util/__init__.py
|
Python
|
gpl-3.0
| 1,042
| 0.003839
|
#===============================================================================
# Copyright (C) 2014-2019 Anton Vorobyov
#
# This file is part of Phobos.
#
# Phobos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Phobos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Phobos. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
from .cached_property import cachedproperty
from .eve_normalize import EveNormalizer
from .resource_browser import ResourceBrowser
from .translator import Translator
|
crodjer/paster
|
setup.py
|
Python
|
gpl-3.0
| 2,026
| 0.001974
|
#!/usr/bin/env python
# Copyright (C) 2011 Rohan Jain
# Copyright (C) 2011 Alexis Le-Quoc
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from sys import version
from os.path import expanduser
import paster
if version < '2.2.3':
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
setup(name='paster',
version=paster.version,
description='A generic pastebin posting tool',
author='Rohan Jain',
author_email='crodjer@gmail.com',
long_description=open('README.md').read(),
url='https://github.com/crodjer/paster',
packages = ['paster'],
data_files=[(expanduser('~'), ['paster.cfg']),],
license="GPLv3",
platforms=["all"],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development',
'Programming Language :: Python',
'Natural Language :: English',
],
scripts=['pstr'],
)
|
aruneli/rancher-test
|
ui-selenium-tests/locators/RackspaceLocators.py
|
Python
|
apache-2.0
| 1,212
| 0.006601
|
__author__ = 'Arunkumar Eli'
__email__ = "elrarun@gmail.com"
from selenium.webdriver.common.by import By
class DigitalOceanLocators(object):
ACCESS_KEY_INPUT = (By.ID, 'accessKey')
SECRET_KEY_INPUT = (By.ID, 'secretKey')
NEXT_BTN = (By.CSS_SELECTOR, "button.btn.btn-primary")
AVAILABILITY_ZONE = (By.XPATH, "//section[3]/div/div/span")
ZONE_SELECT = (By.ID, "selectedZone")
VPC_RADIO_BTN = (By.XPATH, "//div[3]/div[2]/div/label")
SUBNET_RADIO_BTN = (By.XPATH, "//div[2]/label")
SECURITY_GROUP = (By.XPATH, "///section[5]/div/div/span")
INSTANCE = (By.XPATH, "//section[7]/div/div/span")
ACCOUNT_ACCESS = (By.XPATH, "//section/div/div/span")
STD_RADIO_BTN=(By.XPATH,"//section[5]/div[1]/div[2]/div[2]/div[1]/label/input")
    CUSTOM_RADIO_BTN=(By.XPATH,"//section[5]/div[1]/div[2]/div[2]/div[2]/label/input")
SET_INSTANCE_OPTION_BTN = (By.XPATH, "//div[2]/button")
SLIDE_BAR_CLICK_3 = (By.XPATH, "//div[2]/div[3]/div")
HOST_NAME_INPUT = (By.ID, "prefix")
HOST_DESC_INPUT = (By.ID, "description")
HOST_INSTANCE_TYPE_SELECT = (By.ID, "instanceType")
HOST_MEM_SIZE_INPUT = (By.ID, "rootSize")
    HOST_CREATE_BTN = (By.XPATH, "//div[2]/button")
|
CongLi/avocado-vt
|
scripts/scan_results.py
|
Python
|
gpl-2.0
| 4,423
| 0
|
#!/usr/bin/env python
"""
Script to fetch test status info from the sqlite database. Before using this
script, avocado must be launched with the '--journal' option.
"""
import os
import sys
import sqlite3
import argparse
from avocado.core import data_dir
from dateutil import parser as dateparser
def colour_result(result):
"""Colour result in the test status info"""
colours_map = {"PASS": "\033[92mPASS\033[00m",
"ERROR": "\033[93mERROR\033[00m",
"FAIL": "\033[91mFAIL\033[00m"}
return colours_map.get(result) or result
def summarise_records(records):
"""Summarise test records and print it in cyan"""
num_row = len(records[0])
rows = tuple([("row%s" % x) for x in xrange(num_row)])
records_summary = {}
for rows in records:
records_summary[rows[1]] = records_summary.get(rows[1], 0) + 1
records_summary[rows[4]] = records_summary.get(rows[4], 0) + 1
res = ", ".join("%s=%r" % (
key, val) for (key, val) in records_summary.iteritems())
print "\033[96mSummary: \n" + res + "\033[00m"
def get_total_seconds(td):
""" Alias for get total_seconds in python2.6 """
if hasattr(td, 'total_seconds'):
return td.total_seconds()
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
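# Worked check (editor's illustration): for a timedelta of 1 day, 2 seconds and
# 500000 microseconds the fallback evaluates to
# (500000 + (2 + 1 * 24 * 3600) * 1e6) / 1e6 = 86402.5, which matches
# datetime.timedelta.total_seconds() on newer Python versions.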
def fetch_data(db_file=".journal.sqlite"):
""" Fetch tests status info from journal database"""
records = []
con = sqlite3.connect(db_file)
try:
cur = con.cursor()
|
cur.execute("select tag, time, action, status from test_journal")
while True:
            # The first record contains the start info and the second contains the end
            # info; merge them into one record.
data = cur.fetchmany(2)
if not data:
break
tag = data[0][0]
result = "N/A"
status = "Running"
            end_time = None
end_str = None
elapsed = None
start_time = dateparser.parse(data[0][1])
start_str = start_time.strftime("%Y-%m-%d %X")
if len(data) > 1:
status = "Finshed"
result = data[1][3]
end_time = dateparser.parse(data[1][1])
time_delta = end_time - start_time
elapsed = get_total_seconds(time_delta)
end_str = end_time.strftime("%Y-%m-%d %X")
record = (tag, status, start_str, end_str, result, elapsed)
records.append(record)
finally:
con.close()
return records
def print_data(records, skip_timestamp=False):
""" Print formated tests status info"""
if not records:
return
if not skip_timestamp:
print "%-40s %-15s %-15s %-15s %-10s %-10s" % (
"CaseName", "Status", "StartTime",
"EndTime", "Result", "TimeElapsed")
else:
print "%-40s %-15s %-10s" % ("CaseName", "Status", "Result")
for row in records:
if not skip_timestamp:
print "%s %s %s %s %s %s" % (
row[0], row[1], row[2], row[3], colour_result(row[4]), row[5])
else:
print "%s %s %s" % (row[0], row[1], colour_result(row[4]))
summarise_records(records)
if __name__ == "__main__":
default_results_dir = os.path.join(data_dir.get_logs_dir(), 'latest')
parser = argparse.ArgumentParser(description="Avocado journal dump tool")
parser.add_argument(
'-d',
'--test-results-dir',
action='store',
default=default_results_dir,
dest='results_dir',
help="avocado test results dir, Default: %s" %
default_results_dir)
parser.add_argument(
'-s',
'--skip-timestamp',
action='store_true',
default=False,
dest='skip_timestamp',
help="skip timestamp output (leaving status and result enabled)")
parser.add_argument(
'-v',
'--version',
action='version',
version='%(prog)s 1.0')
arguments = parser.parse_args()
db_file = os.path.join(arguments.results_dir, '.journal.sqlite')
if not os.path.isfile(db_file):
print "`.journal.sqlite` DB not found in results directory, "
print "Please start avocado with option '--journal'."
parser.print_help()
sys.exit(1)
data = fetch_data(db_file)
print_data(data, arguments.skip_timestamp)
|
mpreisler/scap-security-guide-debian
|
scap-security-guide-0.1.21/shared/modules/xccdf2csv_stig_module.py
|
Python
|
gpl-2.0
| 1,883
| 0.002124
|
#!/usr/bin/python
import sys
import csv
import lxml.etree as ET
# This script creates a CSV file from an XCCDF file formatted in the
# structure of a STIG. This should enable its ingestion into VMS,
# as well as its comparison with VMS output.
xccdf_ns = "http://checklists.nist.gov/xccdf/1.1"
disa_cciuri = "http://iase.disa.mil/stigs/cci/Pages/index.aspx"
disa_srguri = "http://iase.disa.mil/stigs/srgs/Pages/index.aspx"
def parse_xml_file(xmlfile):
with open(xmlfile, 'r') as xml_file:
filestring = xml_file.read()
tree = ET.fromstring(filestring)
return tree
def reflist(refs):
refstring = ', '.join(refs)
return refstring
|
def node_to_text(node):
textslist = node.xpath(".//text()")
return ''.join(textslist)
def main():
if len(sys.argv) < 2:
print "Provide an XCCDF file to convert into a CSV file."
sys.exit(1)
xccdffile = sys.argv[1]
xccdftree = parse_xml_file(xccdffile)
rules = xccdftree.findall(".//{%s}Rule" % xccdf_ns)
rulewriter = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
|
for rule in rules:
cci_refs = [ref.text for ref in rule.findall("{%s}ident[@system='%s']"
% (xccdf_ns, disa_cciuri))]
srg_refs = [ref.text for ref in rule.findall("{%s}ident[@system='%s']"
% (xccdf_ns, disa_srguri))]
title = rule.find("{%s}title" % xccdf_ns).text
description = node_to_text(rule.find("{%s}description" % xccdf_ns))
fixtext = node_to_text(rule.find("{%s}fixtext" % xccdf_ns))
checktext = node_to_text(rule.find(".//{%s}check-content" % xccdf_ns))
row = [reflist(cci_refs), reflist(srg_refs), title, description, fixtext, checktext]
rulewriter.writerow(row)
sys.exit(0)
if __name__ == "__main__":
main()
|
ReproducibleBuilds/diffoscope
|
diffoscope/presenters/html/html.py
|
Python
|
gpl-3.0
| 30,030
| 0.001632
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2014-2015 Jérémy Bobbio <lunar@debian.org>
# © 2015 Reiner Herrmann <reiner@reiner-h.de>
# © 2012-2013 Olivier Matz <zer0@droids-corp.org>
# © 2012 Alan De Smet <adesmet@cs.wisc.edu>
# © 2012 Sergey Satskiy <sergey.satskiy@gmail.com>
# © 2012 scito <info@scito.ch>
#
#
|
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
#
#
# Most of the code is borrowed from diff2html.py available at:
# http://git.droids-corp.org/?p=diff2html.git
#
# Part of the code is inspired by diff2html.rb from
# Dave Burt <dave (at) burt.id.au> (mainly for html theme)
#
import base64
import codecs
import collections
import contextlib
import hashlib
import html
import io
import logging
import os
import re
import sys
from urllib.parse import urlparse
from diffoscope import VERSION
from diffoscope.config import Config
from diffoscope.diff import SideBySideDiff, DIFFON, DIFFOFF
from ..icon import FAVICON_BASE64
from ..utils import sizeof_fmt, PrintLimitReached, DiffBlockLimitReached, \
Presenter, make_printer, PartialString
from . import templates
# minimum line size, we add a zero-sized breakable space every
# LINESIZE characters
LINESIZE = 20
TABSIZE = 8
# Characters we're willing to word wrap on
WORDBREAK = " \t;.,/):-"
JQUERY_SYSTEM_LOCATIONS = (
'/usr/share/javascript/jquery/jquery.js',
)
logger = logging.getLogger(__name__)
re_anchor_prefix = re.compile(r'^[^A-Za-z]')
re_anchor_suffix = re.compile(r'[^A-Za-z-_:\.]')
def send_and_exhaust(iterator, arg, default):
"""Send a single value to a coroutine, exhaust it, and return the final
element or a default value if it was empty."""
# Python's coroutine syntax is still a bit rough when you want to do
# slightly more complex stuff. Watch this logic closely.
output = default
try:
output = iterator.send(arg)
except StopIteration:
pass
for output in iterator:
pass
return output
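# Example (editor's illustration with a hypothetical generator): the coroutine must
# already be advanced to its first yield before a value can be sent into it.
#
#     def _double():
#         x = yield "ready"
#         yield x * 2
#
#     g = _double()
#     next(g)                        # advance to the first yield ("ready")
#     send_and_exhaust(g, 5, None)   # resumes with x = 5 and returns 10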
def md5(s):
return hashlib.md5(s.encode('utf-8')).hexdigest()
def escape_anchor(val):
"""
ID and NAME tokens must begin with a letter ([A-Za-z]) and may be followed
by any number of letters, digits ([0-9]), hyphens ("-"), underscores ("_"),
colons (":"), and periods (".").
"""
for pattern, repl in (
(re_anchor_prefix, 'D'),
(re_anchor_suffix, '-'),
):
val = pattern.sub(repl, val)
return val
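# Example (editor's illustration): a leading non-letter becomes 'D' and every other
# character outside [A-Za-z-_:.] becomes '-', e.g.
#     escape_anchor("1 file.txt")  ->  "D-file.txt"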
def output_diff_path(path):
return ' / '.join(n.source1 for n in path[1:])
def output_anchor(path):
return escape_anchor(output_diff_path(path))
def convert(s, ponct=0, tag=''):
i = 0
t = io.StringIO()
for c in s:
# used by diffs
if c == DIFFON:
t.write('<%s>' % tag)
elif c == DIFFOFF:
t.write('</%s>' % tag)
# special highlighted chars
elif c == "\t" and ponct == 1:
n = TABSIZE - (i % TABSIZE)
if n == 0:
n = TABSIZE
t.write('<span class="diffponct">\xbb</span>'+'\xa0'*(n-1))
elif c == " " and ponct == 1:
t.write('<span class="diffponct">\xb7</span>')
elif c == "\n" and ponct == 1:
t.write('<br/><span class="diffponct">\</span>')
elif ord(c) < 32:
conv = u"\\x%x" % ord(c)
t.write('<em>%s</em>' % conv)
i += len(conv)
else:
t.write(html.escape(c))
i += 1
if WORDBREAK.count(c) == 1:
t.write('\u200b')
i = 0
if i > LINESIZE:
i = 0
t.write('\u200b')
return t.getvalue()
def output_visual(visual, path, indentstr, indentnum):
logger.debug('including image for %s', visual.source)
indent = tuple(indentstr * (indentnum + x) for x in range(3))
anchor = output_anchor(path)
return u"""{0[0]}<div class="difference">
{0[1]}<div class="diffheader">
{0[1]}<div class="diffcontrol">⊟</div>
{0[1]}<div><span class="source">{1}</span>
{0[2]}<a class="anchor" href="#{2}" name="{2}">\xb6</a>
{0[1]}</div>
{0[1]}</div>
{0[1]}<div class="difference"><img src=\"data:{3},{4}\" alt=\"compared images\" /></div>
{0[0]}</div>""".format(indent, html.escape(visual.source), anchor, visual.data_type, visual.content)
def output_node_frame(difference, path, indentstr, indentnum, body):
indent = tuple(indentstr * (indentnum + x) for x in range(3))
anchor = output_anchor(path)
dctrl_class, dctrl = ("diffcontrol", u'⊟') if difference.has_visible_children() else ("diffcontrol-nochildren", u'⊡')
if difference.source1 == difference.source2:
header = u"""{0[1]}<div class="{1}">{2}</div>
{0[1]}<div><span class="diffsize">{3}</span></div>
{0[1]}<div><span class="source">{5}</span>
{0[2]}<a class="anchor" href="#{4}" name="{4}">\xb6</a>
{0[1]}</div>
""".format(indent, dctrl_class, dctrl, sizeof_fmt(difference.size()), anchor,
html.escape(difference.source1))
else:
header = u"""{0[1]}<div class="{1} diffcontrol-double">{2}</div>
{0[1]}<div><span class="diffsize">{3}</span></div>
{0[1]}<div><span class="source">{5}</span> vs.</div>
{0[1]}<div><span class="source">{6}</span>
{0[2]}<a class="anchor" href="#{4}" name="{4}">\xb6</a>
{0[1]}</div>
""".format(indent, dctrl_class, dctrl, sizeof_fmt(difference.size()), anchor,
html.escape(difference.source1),
html.escape(difference.source2))
return PartialString.numl(u"""{0[1]}<div class="diffheader">
{1}{0[1]}</div>
{2}""", 3).pformatl(indent, header, body)
def output_node(ctx, difference, path, indentstr, indentnum):
"""Returns a tuple (parent, continuation) where
- parent is a PartialString representing the body of the node, including
its comments, visuals, unified_diff and headers for its children - but
not the bodies of the children
- continuation is either None or (only in html-dir mode) a function which
when called with a single integer arg, the maximum size to print, will
print any remaining "split" pages for unified_diff up to the given size.
"""
indent = tuple(indentstr * (indentnum + x) for x in range(3))
t, cont = PartialString.cont()
comments = u""
if difference.comments:
comments = u'{0[1]}<div class="comment">\n{1}{0[1]}</div>\n'.format(
indent, "".join(u"{0[2]}{1}<br/>\n".format(indent, html.escape(x)) for x in difference.comments))
visuals = u""
for visual in difference.visuals:
visuals += output_visual(visual, path, indentstr, indentnum+1)
udiff = u""
ud_cont = None
if difference.unified_diff:
ud_cont = HTMLSideBySidePresenter().output_unified_diff(
ctx, difference.unified_diff, difference.has_internal_linenos)
udiff = next(ud_cont)
if isinstance(udiff, PartialString):
ud_cont = ud_cont.send
udiff = udiff.pformatl(PartialString.of(ud_cont))
else:
for _ in ud_cont:
pass # exhaust the iterator, avoids GeneratorExit
ud_cont = None
# PartialString for this node
body = PartialString.numl(u"{0}{1}{2}{-1}", 3, cont).pformatl(comments, visuals, udiff)
if len(path) == 1:
# root node, frame it
body = output_node_frame(difference, path, indentstr, indentnum, body)
t = cont(t, body)
# Add holes for child nodes
for d in difference.details:
child = output_node_frame(d, path + [d], indentstr, i
|
phenopolis/phenopolis
|
tests/test_my_patients.py
|
Python
|
mit
| 2,878
| 0.009382
|
# Uncomment to run this module directly. TODO comment out.
#import sys, os
#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# End of uncomment.
import unittest
import subprocess
import runserver
from flask import Flask, current_app, jsonify
from views import neo4j_driver
from views import my_patients
from views import session
import helper
import json
class MyPatientsPageTestCase(unittest.TestCase):
def setUp(self):
runserver.app.config['TESTING'] = True
        runserver.app.config['DB_NAME_USERS'] = 'test_users'
self.app = runserver.app.test_client()
#helper.create_neo4j_demo_user()
helper.login(self.app)
helper.my_patients_neo4j_data()
def tearDown(self):
self.app.get('/logout', follow_redirects=True)
def test_my_patients_page(self):
page = self.app.get('/my_patients', follow_redirects=True)
assert page.status_code == 200 # NB this test doesn't wait for the data to load.
|
def test_my_patients_functionality(self):
app = Flask(__name__)
with app.test_request_context():
records = my_patients.get_individuals('demo')
# Here we create the Flask Response object, containing json,
# that the /my_patients page receives. We then test
# that the expected data is available.
data=jsonify(result=records)
assert data.status == '200 OK'
parsed_json = json.loads(data.data)
# First person.
i=0
assert parsed_json['result'][i]['individual'] == 'person2'
assert parsed_json['result'][i]['gender'] == 'F'
for pheno in parsed_json['result'][i]['phenotypes'] :
assert (pheno['name'] == 'Abnormality of the retina' or
pheno['name'] == 'Visual impairment' or
pheno['name'] == 'Macular dystrophy')
assert parsed_json['result'][i]['phenotypeScore'] == 0.69
assert parsed_json['result'][i]['hom_count'] == 1
assert parsed_json['result'][i]['het_count'] == 2
for gene in parsed_json['result'][i]['genes'] :
assert gene == 'RPGR' or gene == 'TTLL5' or gene == 'DRAM2' or gene == 'TRIM32'
# Next person.
i=1
assert parsed_json['result'][i]['individual'] == 'person1'
assert parsed_json['result'][i]['gender'] == 'M'
assert parsed_json['result'][i]['phenotypes'][0]['name'] == 'Visual impairment'
assert parsed_json['result'][i]['phenotypeScore'] == 0.69
assert parsed_json['result'][i]['hom_count'] == 1
assert parsed_json['result'][i]['het_count'] == 1
assert parsed_json['result'][i]['genes'][0] == 'TTLL5'
if __name__ == '__main__':
unittest.main()
|
pomegranited/edx-platform
|
lms/djangoapps/course_api/serializers.py
|
Python
|
agpl-3.0
| 3,293
| 0.001518
|
"""
Course API Serializers. Representing course catalog data
"""
import urllib
from django.core.urlresolvers import reverse
from django.template import defaultfilters
from rest_framework import serializers
from lms.djangoapps.courseware.courses import course_image_url, get_course_about_section
from xmodule.course_module import DEFAULT_START_DATE
class _MediaSerializer(serializers.Serializer): # pylint: disable=abstract-method
"""
Nested serializer to represent a media object.
"""
def __init__(self, uri_parser, *args, **kwargs):
super(_MediaSerializer, self).__init__(*args, **kwargs)
self.uri_parser = uri_parser
uri = serializers.SerializerMethodField(source='*')
def get_uri(self, course):
"""
Get the representation for the media resource's URI
"""
return self.uri_parser(course)
class _CourseApiMediaCollectionSerializer(serializers.Serializer): # pylint: disable=abstract-method
"""
Nested serializer to represent a collection of media objects
"""
course_image = _MediaSerializer(source='*', uri_parser=course_image_url)
class CourseSerializer(serializers.Serializer): # pylint: disable=abstract-method
"""
    Serializer for Course objects
"""
course_id = serializers.CharField(source='id', read_only=True)
name = serializers.CharField(source='display_name_with_default')
number = serializers.CharField(source='display_number_with_default')
org = serializers.CharField(source='display_org_with_default')
description = serializers.SerializerMethodField()
media = _CourseApiMediaCollectionSerializer(source='*')
start = serializers.DateTimeField()
start_type = serializers.SerializerMethodField()
start_display = serializers.SerializerMethodField()
end = serializers.DateTimeField()
enrollment_start = serializers.DateTimeField()
enrollment_end = serializers.DateTimeField()
blocks_url = serializers.SerializerMethodField()
def get_start_type(self, course):
"""
Get the representation for SerializerMethodField `start_type`
"""
if course.advertised_start is not None:
return u'string'
elif course.start != DEFAULT_START_DATE:
return u'timestamp'
else:
return u'empty'
def get_start_display(self, course):
"""
Get the representation for SerializerMethodField `start_display`
"""
if course.advertised_start is not None:
return course.advertised_start
elif course.start != DEFAULT_START_DATE:
return defaultfilters.date(course.start, "DATE_FORMAT")
else:
return None
def get_description(self, course):
"""
Get the representation for SerializerMethodField `description`
"""
return get_course_about_section(self.context['request'], course, 'short_description').strip()
def get_blocks_url(self, course):
"""
Get the representation for SerializerMethodField `blocks_url`
"""
base_url = '?'.join([
reverse('blocks_in_course'),
urllib.urlencode({'course_id': course.id}),
])
return self.context['request'].build_absolute_uri(base_url)
|
ddico/odoo
|
addons/l10n_ch/models/account_invoice.py
|
Python
|
agpl-3.0
| 11,966
| 0.005265
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError
from odoo.tools.float_utils import float_split_str
from odoo.tools.misc import mod10r
l10n_ch_ISR_NUMBER_LENGTH = 27
l10n_ch_ISR_NUMBER_ISSUER_LENGTH = 12
class AccountMove(models.Model):
_inherit = 'account.move'
l10n_ch_isr_subscription = fields.Char(compute='_compute_l10n_ch_isr_subscription', help='ISR subscription number identifying your company or your bank to generate ISR.')
l10n_ch_isr_subscription_formatted = fields.Char(compute='_compute_l10n_ch_isr_subscription', help="ISR subscription number your company or your bank, formated with '-' and without the padding zeros, to generate ISR report.")
l10n_ch_isr_number = fields.Char(compute='_compute_l10n_ch_isr_number', store=True, help='The reference number associated with this invoice')
l10n_ch_isr_number_spaced = fields.Char(compute='_compute_l10n_ch_isr_number_spaced', help="ISR number split in blocks of 5 characters (right-justified), to generate ISR report.")
l10n_ch_isr_optical_line = fields.Char(compute="_compute_l10n_ch_isr_optical_line", help='Optical reading line, as it will be printed on ISR')
l10n_ch_isr_valid = fields.Boolean(compute='_compute_l10n_ch_isr_valid', help='Boolean value. True iff all the data required to generate the ISR are present')
l10n_ch_isr_sent = fields.Boolean(default=False, help="Boolean value telling whether or not the ISR corresponding to this invoice has already been printed or sent by mail.")
l10n_ch_currency_name = fields.Char(related='currency_id.name', readonly=True, string="Currency Name", help="The name of this invoice's currency") #This field is used in the "invisible" condition field of the 'Print ISR' button.
@api.depends('partner_bank_id.l10n_ch_isr_subscription_eur', 'partner_bank_id.l10n_ch_isr_subscription_chf')
def _compute_l10n_ch_isr_subscription(self):
""" Computes the ISR subscription identifying your company or the bank that allows to generate ISR. And formats it accordingly"""
def _format_isr_subscription(isr_subscription):
#format the isr as per specifications
currency_code = isr_subscription[:2]
middle_part = isr_subscription[2:-1]
trailing_cipher = isr_subscription[-1]
middle_part = re.sub('^0*', '', middle_part)
return currency_code + '-' + middle_part + '-' + trailing_cipher
def _format_isr_subscription_scanline(isr_subscription):
# format the isr for scanline
return isr_subscription[:2] + isr_subscription[2:-1].rjust(6, '0') + isr_subscription[-1:]
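        # Worked example (editor's illustration, hypothetical subscription number
        # "010001628"): _format_isr_subscription() yields "01-162-8" (currency code,
        # middle part with leading zeros stripped, trailing check digit), while
        # _format_isr_subscription_scanline() pads the middle part back out to six
        # digits and yields "010001628".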
for record in self:
record.l10n_ch_isr_subscription = False
record.l10n_ch_isr_subscription_formatted = False
if record.partner_bank_id:
if record.currency_id.name == 'EUR':
isr_subscription = record.partner_bank_id.l10n_ch_isr_subscription_eur
elif record.currency_id.name == 'CHF':
                    isr_subscription = record.partner_bank_id.l10n_ch_isr_subscription_chf
else:
#we don't format if in another currency as EUR or CHF
continue
if isr_subscription:
isr_subscription = isr_subscription.replace("-", "") # In case the user put the -
record.l10n_ch_isr_subscription = _format_isr_subscription_scanline(isr_subscription)
record.l10n_ch_isr_subscription_formatted = _format_isr_subscription(isr_subscription)
@api.depends('name', 'partner_bank_id.l10n_ch_postal')
def _compute_l10n_ch_isr_number(self):
""" The ISR reference number is 27 characters long. The first 12 of them
contain the postal account number of this ISR's issuer, removing the zeros
at the beginning and filling the empty places with zeros on the right if it is
too short. The next 14 characters contain an internal reference identifying
the invoice. For this, we use the invoice sequence number, removing each
of its non-digit characters, and pad the unused spaces on the left of
this number with zeros. The last character of the ISR number is the result
of a recursive modulo 10 on its first 26 characters.
"""
for record in self:
if record.name and record.partner_bank_id and record.partner_bank_id.l10n_ch_postal:
invoice_issuer_ref = record.partner_bank_id.l10n_ch_postal.ljust(l10n_ch_ISR_NUMBER_ISSUER_LENGTH, '0')
invoice_ref = re.sub('[^\d]', '', record.name)
#We only keep the last digits of the sequence number if it is too long
invoice_ref = invoice_ref[-l10n_ch_ISR_NUMBER_ISSUER_LENGTH:]
internal_ref = invoice_ref.zfill(l10n_ch_ISR_NUMBER_LENGTH - l10n_ch_ISR_NUMBER_ISSUER_LENGTH - 1) # -1 for mod10r check character
record.l10n_ch_isr_number = mod10r(invoice_issuer_ref + internal_ref)
else:
record.l10n_ch_isr_number = False
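    # Worked example (editor's illustration, hypothetical values): with a postal
    # account number of "010001628" and an invoice name of "INV/2021/0042", the
    # issuer part becomes "010001628000" (left-justified to 12 characters), the
    # digits "20210042" are zero-padded on the left to 14 characters
    # ("00000020210042"), and mod10r() appends the final check digit to the
    # resulting 26-character string, giving the 27-character ISR number.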
@api.depends('l10n_ch_isr_number')
def _compute_l10n_ch_isr_number_spaced(self):
def _space_isr_number(isr_number):
to_treat = isr_number
res = ''
while to_treat:
res = to_treat[-5:] + res
to_treat = to_treat[:-5]
if to_treat:
res = ' ' + res
return res
for record in self:
if record.name and record.partner_bank_id and record.partner_bank_id.l10n_ch_postal:
record.l10n_ch_isr_number_spaced = _space_isr_number(record.l10n_ch_isr_number)
else:
record.l10n_ch_isr_number_spaced = False
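    # Worked example (editor's illustration): _space_isr_number("123456789012")
    # groups the digits in blocks of five from the right and returns "12 34567 89012".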
@api.depends(
'currency_id.name', 'amount_residual', 'name',
'partner_bank_id.l10n_ch_postal',
'partner_bank_id.l10n_ch_isr_subscription_eur',
'partner_bank_id.l10n_ch_isr_subscription_chf')
def _compute_l10n_ch_isr_optical_line(self):
""" The optical reading line of the ISR looks like this :
left>isr_ref+ bank_ref>
Where:
- left is composed of two ciphers indicating the currency (01 for CHF,
03 for EUR), followed by ten characters containing the total of the
invoice (with the dot between units and cents removed, everything being
right-aligned and empty places filled with zeros). After the total,
left contains a last cipher, which is the result of a recursive modulo
10 function ran over the rest of it.
- isr_ref is the ISR reference number
- bank_ref is the full postal bank code (aka clearing number) of the
bank supporting the ISR (including the zeros).
"""
for record in self:
record.l10n_ch_isr_optical_line = ''
if record.l10n_ch_isr_number and record.l10n_ch_isr_subscription and record.currency_id.name:
#Left part
currency_code = None
if record.currency_id.name == 'CHF':
currency_code = '01'
elif record.currency_id.name == 'EUR':
currency_code = '03'
units, cents = float_split_str(record.amount_residual, 2)
amount_to_display = units + cents
amount_ref = amount_to_display.zfill(10)
left = currency_code + amount_ref
left = mod10r(left)
#Final assembly (the space after the '+' is no typo, it stands in the specs.)
record.l10n_ch_isr_optical_line = left + '>' + record.l10n_ch_isr_number + '+ ' + record.l10n_ch_isr_subscription + '>'
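    # Worked example (editor's illustration, hypothetical amounts): for a CHF invoice
    # with 123.45 still due, the currency code is '01', the amount is rendered as
    # '0000012345' (units and cents concatenated and zero-padded to ten digits), and
    # mod10r('010000012345') appends a check digit to form the 'left' part; the full
    # line is then left + '>' + ISR number + '+ ' + subscription + '>'.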
@api.depends(
'move_type', 'name', 'currency_id.name',
'partner_bank_id.l10n_ch_postal',
'partner_bank_id.l10n_ch_isr_subscription_eur',
'partner_bank_id.l10n_ch_isr_subscription_chf')
def _comp
|
damiencalloway/djtut
|
mysite/polls/admin.py
|
Python
|
mit
| 570
| 0.014035
|
from django.contrib import admin
from polls.models import Choice, Poll
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class PollAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
list_display = ('question','pub_date')
list_filter = ['pub_date']
search_fields = ['question']
    date_hierarchy = 'pub_date'
admin.site.register(Poll, PollAdmin)
|
Pennapps-XV/backend
|
root/parse-server.py
|
Python
|
gpl-3.0
| 2,081
| 0.005766
|
import json
import sys
import requests
from collections import Counter
from wapy.api import Wapy
from http.server import BaseHTTPRequestHandler, HTTPServer
wapy = Wapy('frt6ajvkqm4aexwjksrukrey')
def removes(yes):
no = ["Walmart.com", ".", ","]
for x in no:
yes = yes.replace(x, '')
return yes
def post_some_dict(dict):
headers = {'Content-type': 'application/json'}
r = requests.post("http://127.0.0.1:5000/search", data=json.dumps(dict), headers=headers)
return r.text
def parse_image(image):
out = json.loads(post_some_dict({"image_url": image}))['titles']
print(out)
#out = [x for x in out if 'walmart' in x]
threshold = len(out)-1
#out = [x[27:-9] for x in out]
#print(out)
large = []
for line in out:
line = line.replace('-', '')
line = removes(line)
        line = line.split(' ')
for word in line:
large.append(word)
#print(large)
    c = Counter(large).most_common()
keywords = []
for x in c:
if x[1] > threshold:
keywords.append(x[0])
print(keywords)
return ' '.join(keywords)
def parse_wallmart(keywords):
products = wapy.search(' '.join(keywords))
out = {}
out['name'] = products[0].name
out['rating'] = products[0].customer_rating
out['price'] = products[0].sale_price
return json.dumps(out)
class StoreHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(fh.read().encode())
def do_POST(self):
self.send_response(200)
length = self.headers['content-length']
data = self.rfile.read(int(length))
with open('/var/www/html/image.jpg', 'wb') as fh:
fh.write(data)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(parse_wallmart(parse_image('http://45.33.95.66/image.jpg')).encode())
server = HTTPServer(('', 8081), StoreHandler)
server.serve_forever()
|
nafitzgerald/allennlp
|
allennlp/modules/elmo.py
|
Python
|
apache-2.0
| 18,830
| 0.002921
|
import json
from typing import Union, List, Dict, Any
import torch
from torch.autograd import Variable
from torch.nn.modules import Dropout
import numpy
import h5py
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.common.checks import ConfigurationError
from allennlp.common import Registrable, Params
from allennlp.modules.elmo_lstm import ElmoLstm
from allennlp.modules.highway import Highway
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.nn.util import remove_sentence_boundaries, add_sentence_boundary_token_ids
from allennlp.data import Vocabulary
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper
# pylint: disable=attribute-defined-outside-init
@Registrable.register('elmo')
class Elmo(torch.nn.Module, Registrable):
"""
Compute ELMo representations using a pre-trained bidirectional language model.
See "Deep contextualized word representations", Peters et al. for details.
This module takes character id input and computes ``num_output_representations`` different layers
of ELMo representations. Typically ``num_output_representations`` is 1 or 2. For example, in
the case of the SRL model in the above paper, ``num_output_representations=1`` where ELMo was included at
the input token representation layer. In the case of the SQuAD model, ``num_output_representations=2``
as ELMo was also included at the GRU output layer.
In the implementation below, we learn separate scalar weights for each output layer,
but only run the biLM once on each input sequence for efficiency.
Parameters
----------
options_file : ``str``, required.
ELMo JSON options file
weight_file : ``str``, required.
ELMo hdf5 weight file
num_output_representations: ``int``, required.
The number of ELMo representation layers to output.
do_layer_norm : ``bool``, optional, (default=False).
Should we apply layer normalization (passed to ``ScalarMix``)?
dropout : ``float``, optional, (default = 0.5).
The dropout to be applied to the ELMo representations.
"""
def __init__(self,
options_file: str,
weight_file: str,
num_output_representations: int,
do_layer_norm: bool = False,
dropout: float = 0.5) -> None:
super(Elmo, self).__init__()
self._elmo_lstm = _ElmoBiLm(options_file, weight_file)
self._dropout = Dropout(p=dropout)
self._scalar_mixes: Any = []
for k in range(num_output_representations):
scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
self.add_module('scalar_mix_{}'.format(k), scalar_mix)
self._scalar_mixes.append(scalar_mix)
def forward(self, # pylint: disable=arguments-differ
inputs: torch.Tensor) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
"""
Parameters
----------
inputs : ``torch.autograd.Variable``
Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
We also accept tensors with additional optional dimensions:
``(batch_size, dim0, dim1, ..., dimn, timesteps, 50)``
Returns
-------
Dict with keys:
``'elmo_representations'``: ``List[torch.autograd.Variable]``
A ``num_output_representations`` list of ELMo representations for the input sequence.
Each representation is shape ``(batch_size, timesteps, embedding_dim)``
``'mask'``: ``torch.autograd.Variable``
Shape ``(batch_size, timesteps)`` long tensor with sequence mask.
"""
# reshape the input if needed
original_shape = inputs.size()
timesteps, num_characters = original_shape[-2:]
if len(original_shape) > 3:
reshaped_inputs = inputs.view(-1, timesteps, num_characters)
else:
reshaped_inputs = inputs
# run the biLM
bilm_output = self._elmo_lstm(reshaped_inputs)
layer_activations = bilm_output['activations']
mask_with_bos_eos = bilm_output['mask']
# compute the elmo representations
representations = []
for scalar_mix in self._scalar_mixes:
representation_with_bos_eos = scalar_mix(layer_activations, mask_with_bos_eos)
representation_without_bos_eos, mask_without_bos_eos = remove_sentence_boundaries(
representation_with_bos_eos, mask_with_bos_eos
)
representations.append(self._dropout(representation_without_bos_eos))
# reshape if necessary
if len(original_shape) > 3:
mask = mask_without_bos_eos.view(original_shape[:-1])
elmo_representations = [representation.view(original_shape[:-1] + (-1, ))
for representation in representations]
else:
mask = mask_without_bos_eos
elmo_representations = representations
return {'elmo_representations': elmo_representations, 'mask': mask}
@classmethod
def from_params(cls, params: Params) -> 'Elmo':
# Add files to archive
params.add_file_to_archive('options_file')
params.add_file_to_archive('weight_file')
options_file = params.pop('options_file')
weight_file = params.pop('weight_file')
num_output_representations = params.pop('num_output_representations')
do_layer_norm = params.pop('do_layer_norm', False)
params.assert_empty(cls.__name__)
return cls(options_file, weight_file, num_output_representations, do_layer_norm)
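# Illustrative usage sketch (editor's note, not part of the original module). The file
# names are placeholders; `character_ids` is assumed to be a (batch_size, timesteps, 50)
# tensor produced by the ELMo character indexer.
#
#     elmo = Elmo("elmo_options.json", "elmo_weights.hdf5",
#                 num_output_representations=2, dropout=0.5)
#     outputs = elmo(character_ids)
#     representations = outputs['elmo_representations']   # list of 2 tensors,
#                                                         # each (batch, timesteps, dim)
#     mask = outputs['mask']                              # (batch, timesteps)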
class _ElmoCharacterEncoder(torch.nn.Module):
"""
Compute context sensitive token representation using pretrained biLM.
This embedder has input character ids of size (batch_size, sequence_length, 50)
and returns (batch_size, sequence_length + 2, embedding_dim), where embedding_dim
is specified in the options file (typically 512).
We add special entries at the beginning and end of each sequence corresponding
to <S> and </S>, the beginning and end of sentence tokens.
Note: this is a lower level class useful for advanced usage. Most users should
use ``ElmoTokenEmbedder`` or ``allennlp.modules.Elmo`` instead.
Parameters
----------
options_file : ``str``
ELMo JSON options file
weight_file : ``str``
ELMo hdf5 weight file
    The relevant section of the options file is something like:
.. example-code::
|
.. code-block:: python
{'char_cnn': {
'activation': 'relu',
'embedding': {'dim': 4},
'filters': [[1, 4], [2, 8], [3, 16], [4, 32], [5, 64]],
'max_characters_per_token': 50,
'n_characters': 262,
'n_highway': 2
}
}
"""
def __init__(self,
options_file: str,
weight_file: str) -> None:
super(_ElmoCharacterEncoder, self).__init__()
with open(cached_path(options_file), 'r') as fin:
self._options = json.load(fin)
self._weight_file = weight_file
self.output_dim = self._options['lstm']['projection_dim']
self._load_weights()
# Cache the arrays for use in forward -- +1 due to masking.
self._beginning_of_sentence_characters = Variable(torch.from_numpy(
numpy.array(ELMoCharacterMapper.beginning_of_sentence_characters) + 1
))
self._end_of_sentence_characters = Variable(torch.from_numpy(
numpy.array(ELMoCharacterMapper.end_of_sentence_characters) + 1
))
def get_output_dim(self):
return self.output_dim
@overrides
def forward(self, inputs: torch.Tensor) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ
"""
Compute context insensitive token embeddings for ELMo representations.
Parameters
----------
inputs: ``torch.autograd.Variable``
|
tomsilver/nupic
|
examples/opf/tools/MirrorImageViz/mirrorImageViz.py
|
Python
|
gpl-3.0
| 7,336
| 0.023719
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
import sys
import numpy as np
import matplotlib.pylab as pyl
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
'''Mirror Image Visualization: Shows the encoding space juxtaposed against the
coincidence space. The encoding space is the bottom-up sensory encoding and
the coincidence space depicts the corresponding activation of coincidences in
the SP. Hence, the mirror image visualization is a visual depiction of the
mapping of SP cells to the input representations.
Note:
* The files spBUOut and sensorBUOut are assumed to be in the output format
used for LPF experiment outputs.
* BU outputs for some sample datasets are provided. Specify the name of the
dataset as an option while running this script.
'''
lines = activeCoincsFile.readlines()
inputs = encodingsFile.readlines()
w = len(inputs[0].split(' '))-1
patterns = set([])
encodings = set([])
coincs = [] #The set of all coincidences that have won at least once
reUsedCoincs = []
firstLine = inputs[0].split(' ')
size = int(firstLine.pop(0))
spOutput = np.zeros((len(lines),40))
inputBits = np.zeros((len(lines),w))
print 'Total n:', size
print 'Total number of records in the file:', len(lines), '\n'
print 'w:', w
count = 0
for x in xrange(len(lines)):
inputSpace = [] #Encoded representation for each input
spBUout = [int(z) for z in lines[x].split(' ')]
spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
temp = set(spBUout)
spOutput[x]=spBUout
input = [int(z) for z in inputs[x].split(' ')]
input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
tempInput = set(input)
inputBits[x]=input
#Creating the encoding space
for m in xrange(size):
if m in tempInput:
inputSpace.append(m)
else:
inputSpace.append('|') #A non-active bit
repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active
reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active
#Dividing the coincidences into two difference categories.
if len(reUsed)==0:
coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
else:
reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once
encodings = encodings.union(tempInput)
count +=1
overlap = {}
overlapVal = 0
seen = []
seen = (printOverlaps(coincs, coincs, seen))
print len(seen), 'sets of 40 cells'
seen = printOverlaps(reUsedCoincs, coincs, seen)
Summ=[]
for z in coincs:
c=0
for y in reUsedCoincs:
c += len(z[1].intersection(y[1]))
Summ.append(c)
print 'Sum: ', Summ
for m in xrange(3):
displayLimit = min(51, len(spOutput[m*200:]))
if displayLimit>0:
drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
else:
print 'No more records to display'
pyl.show()
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
""" Compare the results and return True if success, False if failure
Parameters:
--------------------------------------------------------------------
coincs: Which cells are we comparing?
comparedTo: The set of 40 cells we being compared to (they have no overlap with seen)
seen: Which of the cells we are comparing to have already been encountered.
This helps glue together the unique and reused coincs
"""
inputOverlap = 0
cellOverlap = 0
for y in comparedTo:
closestInputs = []
closestCells = []
if len(seen)>0:
            inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
for m in xrange( len(seen) ):
if len(seen[m][1].intersection(y[4]))==inputOverlap:
closestInputs.append(seen[m][2])
if len(seen[m][0].intersection(y[1]))==cellOverlap:
closestCells.append(seen[m][2])
seen.append((y[1], y[4], y[0]))
print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)
return seen
################################################################################################################
if __name__=='__main__':
if len(sys.argv)<2: #Use basil if no dataset specified
print ('Input files required. Read documentation for details.')
else:
dataset = sys.argv[1]
activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt'
encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt'
activeCoincsFile=open(activeCoincsPath, 'r')
encodingsFile=open(encodingsPath, 'r')
analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
|
vesche/HotC
|
old/server_proto.py
|
Python
|
unlicense
| 1,750
| 0
|
#
# HotC Server
# CTN2 Jackson
#
import socket
def _recv_data(conn):
data = conn.recv(1024)
command, _, arguments = data.partition(' ')
return command, arguments
def game(conn):
print 'success'
def login_loop(conn):
while True:
command, arguments = _recv_data(conn)
if command == 'login':
username, password = arguments.split()
# check if username and password is correct
with open('login.d', 'r') as f:
logins = eval(f.read())
for k, v in logins.items():
if (k == username) and (v == password):
conn.send('login_success')
return
conn.send('login_failure')
elif command == 'register':
username, password = arguments.split()
# check if username already registered
with open('login.d', 'r') as f:
logins = eval(f.read())
for k, _ in logins.items():
if k == username:
conn.send('register_failure')
continue
# register new user
logins[username] = password
|
with open('login.d', 'w') as f:
f.write(str(logins))
            conn.send('register_success')
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 1337))
sock.listen(5)
while True:
conn, addr = sock.accept()
login_loop(conn)
game(conn)
break
if __name__ == '__main__':
main()
|
ashh87/caffeine
|
caffeine/core.py
|
Python
|
gpl-3.0
| 20,950
| 0.008497
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2009 The Caffeine Developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from gi.repository import Gtk, GObject, Gio, Notify
import os
import os.path
import commands
import time
import sys
import dbus
import threading
import applicationinstance
import caffeine
import utils
import procmanager
import caffeinelogging as logging
import Xlib.display
#import kaa.metadata
os.chdir(os.path.abspath(os.path.dirname(__file__)))
class Caffeine(GObject.GObject):
def __init__(self):
GObject.GObject.__init__(self)
## object to manage processes to activate for.
self.ProcMan = caffeine.get_ProcManager()
## Status string.
self.status_string = ""
## Makes sure that only one instance of Caffeine is run for
## each user on the system.
self.pid_name = '/tmp/caffeine' + str(os.getuid()) + '.pid'
self.appInstance = applicationinstance.ApplicationInstance( self.pid_name )
## This variable is set to a string describing the type of screensaver and
## powersaving systems used on this computer. It is detected when the user
## first attempts to inhibit the screensaver and powersaving, and can be set
        ## to one of the following values: "Gnome", "KDE", "XSS+DPMS" or "DPMS".
self.screensaverAndPowersavingType = None
# Set to True when the detection routine is in progress
        self.attemptingToDetect = False
self.dbusDetectionTimer = None
self.dbusDetectionFailures = 0
# Set to True when sleep seems to be prevented from the perspective of the user.
# This does not necessarily mean that sleep really is prevented, because the
# detection routine could be in progress.
self.sleepAppearsPrevented = False
# Set to True when sleep mode has been successfully inhibited somehow. This should
# match up with "self.sleepAppearsPrevented" most of the time.
self.sleepIsPrevented = False
self.preventedForProcess = False
self.preventedForQL = False
self.preventedForFlash = False
self.screenSaverCookie = None
self.powerManagementCookie = None
self.timer = None
self.inhibit_id = None
self.note = None
## check for processes to activate for.
id = GObject.timeout_add(10000, self._check_for_process)
settings = Gio.Settings.new(caffeine.BASE_KEY)
## check for Quake Live.
self.ql_id = None
if settings.get_boolean("act-for-quake"):
self.setActivateForQL(True)
## check for Flash video.
self.flash_durations = {}
self.flash_id = None
if settings.get_boolean("act-for-flash"):
self.setActivateForFlash(True)
print self.status_string
def setActivateForFlash(self, do_activate):
## In case caffeine is currently activated for Flash
self._check_for_Flash()
if self.flash_id != None:
GObject.source_remove(self.flash_id)
self.flash_id = None
if do_activate:
self.flash_id = GObject.timeout_add(15000,
self._check_for_Flash)
def _check_for_Flash(self):
class escape(Exception):pass
try:
## look for files opened by flashplayer that begin with 'Flash'
output = commands.getoutput("python flash_detect.py")
if output.startswith("1"):
if self.preventedForFlash:
self.setActivated(False)
self.preventedForFlash = False
self.status_string = ""
raise escape
elif output.startswith("2\n"):
data = output.split("\n")[-1]
logging.error("Exception: " + str(data))
raise escape
if not self.getActivated():
logging.info("Caffeine has detected "+
"that Flash video is playing")
self.status_string = _("Activated for Flash video")
self.setActivated(True)
self.preventedForFlash = True
else:
logging.info("Caffeine has detected "+
"that Flash video is playing but will "+
"NOT activate because Caffeine is already "+
"activated for a different reason.")
return True
except escape:
pass
except Exception, data:
logging.error("Exception: " + str(data))
return True
def setActivateForQL(self, do_activate):
## In case caffeine is currently activated for QL
self._check_for_QL()
if self.ql_id != None:
GObject.source_remove(self.ql_id)
self.ql_id = None
if do_activate:
self.ql_id = GObject.timeout_add(15000, self._check_for_QL)
def _check_for_QL(self):
dsp = None
try:
dsp = Xlib.display.Display()
screen = dsp.screen()
root_win = screen.root
activate = False
## iterate through all of the X windows
for window in root_win.query_tree()._data['children']:
window_name = window.get_wm_name()
width = window.get_geometry()._data["width"]
height = window.get_geometry()._data["height"]
if window_name == "QuakeLive":
activate = True
if self.preventedForQL or not self.getActivated():
self.status_string = _("Activated for Quake Live")
logging.info("Caffeine has detected that 'QuakeLive' is running, and will auto-activate")
self.setActivated(True)
self.preventedForQL = True
if not activate and self.preventedForQL:
logging.info("Caffeine had previously auto-activated for QuakeLive, but it is no longer running; deactivating...")
self.setActivated(False)
except Exception, data:
logging.error("Exception: " + str(data))
finally:
if dsp != None:
dsp.close()
return True
def _check_for_process(self):
activate = False
for proc in self.ProcMan.get_process_list():
if utils.isProcessRunning(proc):
activate = True
if self.preventedForProcess or not self.getActivated():
logging.info("Caffeine has detected that the process '" + proc + "' is running, and will auto-activate")
self.setActivated(True)
self.preventedForProcess = True
else:
logging.info("Caffeine has detected that the process '"+
proc + "' is running, but will NOT auto-activate"+
" as Caffeine has already been activated for a different"+
" reason.")
### No process in the list is running, deactivate.
if not activate and self.preventedForProcess:
logging.info("Caffeine had previously auto-activated for a process, but that process is no longer running; deactivating...")
self.setActivated(False)
return True
def quit(self):
"""Cancels any timer thread running
so the progr
|
sargas/scipy
|
scipy/ndimage/interpolation.py
|
Python
|
bsd-3-clause
| 25,990
| 0.001578
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
"""
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
input : array_like
The input array.
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is `numpy.float64`.
Returns
-------
spline_filter1d : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
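# Example (editor's illustration): filtering each row of a small 2-D array along the
# last axis; the output has the same shape as the input.
#
#     >>> import numpy as np
#     >>> from scipy import ndimage
#     >>> data = np.arange(12, dtype=np.float64).reshape(3, 4)
#     >>> ndimage.spline_filter1d(data, order=3, axis=-1).shape
#     (3, 4)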
def spline_filter(input, order=3, output = numpy.float64):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
    Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
input : array_like
The input array.
mapping : callable
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Examples
--------
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> sp.ndimage.geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
|
Yordan92/Pac-man-multiplayer
|
MakeGraph.py
|
Python
|
gpl-3.0
| 3,015
| 0.038143
|
import pygame
from PacManMap import *
class MakeGraph:
def __init__(self):
self.shortest_path_from_one_to_other = {}
self.nodes = self.find_nodes()
def get_shortest_path(self):
return self.shortest_path_from_one_to_other
def get_nodes(self):
return self.nodes
def find_nodes(self):
nodes = []
for row_n in range(1, len(Map) - 1):
for col_n in range(2, len(Map[0]) - 1):
if (Map[row_n][col_n] != 0 and Map[row_n][col_n + 1] != 0 and
Map[row_n][col_n - 1] != 0):
if ((row_n > 0 and Map[row_n - 1][col_n] != 0) or
(row_n < len(Map[0]) - 2 and Map[row_n + 1][col_n] != 0)):
nodes.append((row_n, col_n))
Map[row_n][col_n] = 3
Map1 = list(zip(*Map))
for row_n in range(1, len(Map1) - 1):
for col_n in range(2, len(Map1[0]) - 1):
if (Map1[row_n][col_n] != 0 and Map1[row_n][col_n + 1] != 0 and
                    Map1[row_n][col_n - 1] != 0):
if ((row_n > 0 and Map1[row_n - 1][col_n] != 0) or
(row_n < len(Map1[0]) - 2 and Map1[row_n + 1][col_n] != 0)):
nodes.append((col_n, row_n))
Map[col_n][row_n] = 3
return nodes
def is_p_vertex(self, vertex):
if ((vertex[0] < 0 or vertex[0] >= len(Map)) or
(vertex[1] < 0 or vertex[1] >= len(Map[0]))):
return False
if Map[vertex[0]][vertex[1]] == 0:
            return False
return True
def bfs(self, vertex):
Path_all_in_Matrix = {}
Path_all_in_Matrix[vertex] = vertex
Path_to_Nodes = {}
Path_to_Nodes[vertex] = vertex
queue = [vertex]
Visited = [vertex]
all_Nodes = self.find_nodes()
all_Nodes.remove(vertex)
while queue != []:
new_v = queue.pop(0)
new_v_adj = [(new_v[0] - 1, new_v[1]),
(new_v[0] + 1, new_v[1]),
(new_v[0], new_v[1] - 1),
(new_v[0], new_v[1] + 1)]
if new_v in all_Nodes:
full_path = [new_v]
temp_v = new_v
while Path_all_in_Matrix[temp_v] != vertex:
full_path.append(Path_all_in_Matrix[temp_v])
temp_v = Path_all_in_Matrix[temp_v]
full_path.reverse()
temp_full = []
for i in full_path:
if i in all_Nodes:
temp_full.append(i)
break
temp_full.append(i)
Path_to_Nodes[new_v] = temp_full
all_Nodes.remove(new_v)
for v_adj in new_v_adj:
if self.is_p_vertex(v_adj) and v_adj not in Visited:
queue.append(v_adj)
Path_all_in_Matrix[v_adj] = new_v
Visited.append(v_adj)
return Path_to_Nodes
def make_all_paths(self):
all_Nodes = self.find_nodes()
for node in all_Nodes:
self.shortest_path_from_one_to_other[node] = self.bfs(node)
return self.shortest_path_from_one_to_other
def draw_shortest_path(self, screen, v1, v2):
if not self.shortest_path_from_one_to_other:
self.make_all_paths()
l = self.shortest_path_from_one_to_other[v1][v2]
full = l
while l[-1] != v2:
print(l)
l = self.shortest_path_from_one_to_other[full[-1]][v2]
full += l
# print (full)
for node in full:
# print(node)
pygame.draw.rect(screen, (0, 255, 0),
(node[1] * MOVE, node[0] * MOVE, 23, 23))
|
MyRobotLab/pyrobotlab
|
home/kwatters/harry/gestures/cyclegesture2.py
|
Python
|
apache-2.0
| 481
| 0.079002
|
def cyclegesture2():
    ##for x in range(5):
welcome()
sleep(1)
relax()
sleep(2)
fingerright()
sleep(1)
isitaball()
sleep(2)
removeleftarm()
sleep(2)
handdown()
sleep(1)
fullspeed()
i01.giving()
sleep(5)
removeleftarm()
sleep(4)
takeball()
sleep(1)
surrender()
sleep(6)
isitaball()
sleep(6)
dropit()
sleep(2)
removeleftarm()
sleep(5)
relax()
sleep()
fullspeed()
sleep(5)
madeby()
relax()
    sleep(5)
i01.disable()
|
lzanuz/django-watermark
|
setup.py
|
Python
|
bsd-3-clause
| 1,660
| 0.001205
|
import os
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
from watermarker import __version__
setup(
name='django-watermark',
version=__version__,
packages=find_packages(exclude=['example']),
include_package_data=True,
license='BSD License',
description="Quick and efficient way to apply watermarks to images in Django.",
long_description=README,
keywords='django, watermark, image, photo, logo',
url='http://github.com/bashu/django-watermark/',
author='Josh VanderLinden',
author_email='codekoala@gmail.com',
maintainer='Basil Shubin',
maintainer_email='basil.shubin@gmail.com',
install_requires=[
'django>=1.4',
'django-appconf',
'pillow',
'six',
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Artistic Software',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Multimedia :: Graphics'
],
zip_safe=False
)
|
Alidron/alidron-isac
|
isac/transport/pyre_node.py
|
Python
|
mpl-2.0
| 6,714
| 0.00134
|
# Copyright (c) 2015-2020 Contributors as noted in the AUTHORS file
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# System imports
import json
import logging
import re
import uuid
from threading import Event
# Third-party imports
from pyre import Pyre
# Local imports
from ..tools import zmq, green # , spy_call, w_spy_call, spy_object
logger = logging.getLogger(__name__)
class PyreNode(Pyre):
def __init__(self, *args, **kwargs):
# spy_object(self, class_=Pyre, except_=['name', 'uuid'], with_caller=False)
# spy_call(self.__init__, args, kwargs, with_caller=False); print
self._name = None
self._uuid = None
super(self.__class__, self).__init__(*args, **kwargs)
self.request_results = {} # TODO: Fuse the two dicts
self.request_events = {}
self.poller = zmq.Poller()
self.poller.register(self.inbox, zmq.POLLIN)
self.join('SURVEY')
def run(self):
self.task = green.spawn(self._run, 100)
def _run(self, timeout=None):
        self._running = True
self.start()
while self._running:
try:
                # logger.debug('Polling')
                items = dict(self.poller.poll(timeout))
# logger.debug('polled out: %s, %s', len(items), items)
while len(items) > 0:
for fd, ev in items.items():
if (self.inbox == fd) and (ev == zmq.POLLIN):
self._process_message()
# logger.debug('quick polling')
items = dict(self.poller.poll(0))
# logger.debug('qpoll: %s, %s', len(items), items)
except (KeyboardInterrupt, SystemExit):
logger.debug('(%s) KeyboardInterrupt or SystemExit', self.name())
break
logger.debug('(%s) Exiting loop and stopping', self.name())
self.stop()
def _process_message(self):
logger.debug('(%s) processing message', self.name())
msg = self.recv()
logger.debug('(%s) received stuff: %s', self.name(), msg)
msg_type = msg.pop(0)
logger.debug('(%s) msg_type: %s', self.name(), msg_type)
peer_id = uuid.UUID(bytes=msg.pop(0))
logger.debug('(%s) peer_id: %s', self.name(), peer_id)
peer_name = msg.pop(0)
logger.debug('(%s) peer_name: %s', self.name(), peer_name)
if msg_type == b'ENTER':
self.on_peer_enter(peer_id, peer_name, msg)
elif msg_type == b'EXIT':
self.on_peer_exit(peer_id, peer_name, msg)
elif msg_type == b'SHOUT':
self.on_peer_shout(peer_id, peer_name, msg)
elif msg_type == b'WHISPER':
self.on_peer_whisper(peer_id, peer_name, msg)
def on_peer_enter(self, peer_id, peer_name, msg):
logger.debug('(%s) ZRE ENTER: %s, %s', self.name(), peer_name, peer_id)
pub_endpoint = self.get_peer_endpoint(peer_id, 'pub')
rpc_endpoint = self.get_peer_endpoint(peer_id, 'rpc')
self.on_new_peer(peer_id, peer_name, pub_endpoint, rpc_endpoint)
def on_new_peer(self, peer_id, peer_name, pub_endpoint, rpc_endpoint):
pass
def on_peer_exit(self, peer_id, peer_name, msg):
logger.debug('(%s) ZRE EXIT: %s, %s', self.name(), peer_name, peer_id)
self.on_peer_gone(peer_id, peer_name)
def on_peer_gone(self, peer_id, peer_name):
pass
def on_peer_shout(self, peer_id, peer_name, msg):
group = msg.pop(0)
data = msg.pop(0)
logger.debug('(%s) ZRE SHOUT: %s, %s > (%s) %s',
self.name(), peer_name, peer_id, group, data)
if group == b'SURVEY':
self.on_survey(peer_id, peer_name, json.loads(data))
elif group == b'EVENT':
self.on_event(peer_id, peer_name, json.loads(data))
def on_survey(self, peer_id, peer_name, request):
pass
def on_event(self, peer_id, peer_name, request):
pass
def on_peer_whisper(self, peer_id, peer_name, msg):
logger.debug('(%s) ZRE WHISPER: %s, %s > %s', self.name(), peer_name, peer_id, msg)
reply = json.loads(msg[0])
if reply['req_id'] in self.request_results:
logger.debug('(%s) Received reply from %s: %s', self.name(), peer_name, reply['data'])
self.request_results[reply['req_id']].append((peer_name, reply['data']))
ev, limit_peers = self.request_events[reply['req_id']]
if limit_peers and (len(self.request_results[reply['req_id']]) >= limit_peers):
ev.set()
green.sleep(0) # Yield
else:
logger.warning(
'(%s) Discarding reply from %s because the request ID is unknown',
self.name(), peer_name
)
def get_peer_endpoint(self, peer, prefix):
pyre_endpoint = self.peer_address(peer)
ip = re.search('.*://(.*):.*', pyre_endpoint).group(1)
return '%s://%s:%s' % (
self.peer_header_value(peer, prefix + '_proto'),
ip,
self.peer_header_value(peer, prefix + '_port')
)
def join_event(self):
self.join('EVENT')
def leave_event(self):
self.leave('EVENT')
def send_survey(self, request, timeout, limit_peers):
# request['req_id'] = ('%x' % randint(0, 0xFFFFFFFF)).encode()
self.request_results[request['req_id']] = []
ev = Event()
self.request_events[request['req_id']] = (ev, limit_peers)
self.shout('SURVEY', json.dumps(request).encode())
ev.wait(timeout)
result = self.request_results[request['req_id']]
del self.request_results[request['req_id']]
del self.request_events[request['req_id']]
return result
def send_event(self, request):
self.shout('EVENT', json.dumps(request).encode())
def reply_survey(self, peer_id, reply):
self.whisper(peer_id, json.dumps(reply).encode())
def shutdown(self):
self._running = False
def name(self):
if self._name is None:
# f = w_spy_call(super(self.__class__, self).name, with_caller=False)
f = super(self.__class__, self).name
self._name = f()
return self._name
def uuid(self):
if self._uuid is None:
# f = w_spy_call(super(self.__class__, self).uuid, with_caller=False)
f = super(self.__class__, self).uuid
self._uuid = f()
return self._uuid
|
hehongliang/tensorflow
|
tensorflow/python/training/checkpointable/util_test.py
|
Python
|
apache-2.0
| 72,153
| 0.005502
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import momentum
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util as checkpointable_utils
class NonLayerCheckpointable(tracking.Checkpointable):
def __init__(self):
super(NonLayerCheckpointable, self).__init__()
self.a_variable = checkpointable_utils.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Checkpointables which aren't Layers.
self._non_layer = NonLayerCheckpointable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class InterfaceTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testAddVariable(self):
obj = NonLayerCheckpointable()
with self.assertRaisesRegexp(ValueError, "do not specify shape"):
checkpointable_utils.add_variable(
obj, name="shape_specified_twice", shape=[], initializer=1)
constant_initializer = checkpointable_utils.add_variable(
obj, name="constant_initializer", initializer=1)
with variable_scope.variable_scope("some_variable_scope"):
ones_initializer = checkpointable_utils.add_variable(
obj,
name="ones_initializer",
shape=[2],
initializer=init_ops.ones_initializer(dtype=dtypes.float32))
bare_initializer = checkpointable_utils.add_variable(
obj,
name="bare_initializer",
shape=[2, 2],
dtype=dtypes.float64,
initializer=init_ops.zeros_initializer)
# Even in graph mode, there are no naming conflicts between objects, only
# naming conflicts within an object.
other_duplicate = resource_variable_ops.ResourceVariable(
name="duplicate", initial_value=1.)
duplicate = checkpointable_utils.add_variable(
obj, name="duplicate", shape=[])
with self.assertRaisesRegexp(ValueError, "'duplicate'.*already declared"):
checkpointable_utils.add_variable(obj, name="duplicate", shape=[])
self.evaluate(checkpointable_utils.gather_initializers(obj))
self.assertEqual("constant_initializer:0", constant_initializer.name)
self.assertEqual(1, self.evaluate(constant_initializer))
self.assertEqual("some_variable_scope/ones_initializer:0",
ones_initializer.name)
self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
self.assertAllEqual([[0., 0.],
[0., 0.]], self.evaluate(bare_initializer))
self.assertEqual("a_variable:0", obj.a_variable.name)
self.assertEqual("duplicate:0", other_duplicate.name)
if context.executing_eagerly():
# When executing eagerly, there's no uniquification of variable names. The
# checkpoint name will be the same.
self.assertEqual("duplicate:0", duplicate.name)
else:
# The .name attribute may be globally influenced, but the checkpoint name
# won't be (tested below).
self.assertEqual("duplicate_1:0", duplicate.name)
named_variables, _, _ = checkpointable_utils._serialize_object_graph(
obj, saveables_cache=None)
expected_checkpoint_names = (
"a_variable/.ATTRIBUTES/VARIABLE_VALUE",
"bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"duplicate/.ATTRIBUTES/VARIABLE_VALUE",
"ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
)
six.assertCountEqual(
self, expected_checkpoint_names, [v.name for v in named_variables])
def testInitNotCalled(self):
class NoInit(tracking.Checkpointable):
def __init__(self):
pass
# __init__ for Checkpointable will be called implicitly.
checkpointable_utils.add_variable(NoInit(), "var", shape=[])
def testShapeDtype(self):
root = tracking.Checkpointable()
v1 = checkpointable_utils.add_variable(
root, name="v1", initializer=3., dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v1.dtype)
v2 = checkpointable_utils.add_variable(
root,
name="v2",
shape=[3],
initializer=init_ops.ones_initializer,
dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v2.dtype)
self.assertAllEqual([1., 1., 1.], self.evaluate(v2))
def testObjectMetadata(self):
with context.eager_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dense = core.Dense(1)
checkpoint = checkpointable_utils.Checkpoint(dense=dense)
dense(constant_op.constant([[1.]]))
save_path = checkpoint.save(checkpoint_prefix)
objects = checkpointable_utils.object_metadata(save_path)
all_variable_names = []
for obj in objects.nodes:
for attribute in obj.attributes:
all_variable_names.append(attribute.full_name)
self.assertIn("dense/kernel", all_variable_names)
def testNotCheckpointable(self):
class CallsFunctionalStuff(
tracking.NotCheckpointable, tracking.Checkpointable):
pass
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
checkpoint = checkpointable_utils.Checkpoint(x=CallsFunctionalStuff())
with self.assertRaises(NotImplementedError):
checkpoint.save(prefix)
class CallsFunctionalStuffOtherMRO(
tracking.Checkpointable, tracking.NotCheckpointable):
pass
checkpoint_reversed = checkpointable_utils.Checkpoint(
x=CallsFunctionalStuffOtherMRO())
with self.assertRaises(NotImplementedError):
checkpoint_reversed.save(prefix)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_object_graph_no_attributes(self):
root = tracking.Checkpointable()
root.v = resource_variable_ops.ResourceVariable(1.)
root.opt =
|
sysadminmatmoz/ingadhoc
|
account_invoice_commercial/__init__.py
|
Python
|
agpl-3.0
| 366
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from . import account_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kutu/pyirsdk
|
setup.py
|
Python
|
mit
| 783
| 0
|
#!python3
from setuptools import setup
from irsdk import VERSION
setup(
name='pyirsdk',
version=VERSION,
description='Python 3 implementation of iRacing SDK',
author='Mihail Latyshov',
author_email='kutu182@gmail.com',
url='https://github.com/kutu/pyirsdk',
py_modules=['irsdk'],
license='MIT',
platforms=['win64'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python :: 3.7',
'Topic :: Utilities',
],
entry_points={
'console_scripts': ['irsdk = irsdk:main'],
},
install_requires=[
'PyYAML >= 5.3',
],
)
|
OpenTSDB/tcollector
|
collectors/0/riak.py
|
Python
|
lgpl-3.0
| 5,780
| 0.000519
|
#!/usr/bin/env python
# This file is part of tcollector.
# Copyright (C) 2013 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
#
# Written by Mark Smith <mark@qq.is>.
#
"""A collector to gather statistics from a Riak node.
The following all have tags of 'type' which can be 'get' or 'put'. Latency
is measured in fractional seconds. All latency values are calculated over the
last 60 seconds and are moving values.
- riak.vnode.requests
- riak.node.requests
- riak.node.latency.mean
- riak.node.latency.median
- riak.node.latency.95th
- riak.node.latency.99th
- riak.node.latency.100th
These metrics have no tags and are global:
 - riak.memory.total
- riak.memory.allocated
- riak.executing_mappers
- riak.sys_process_count
- riak.read_repairs
- riak.connections
 - riak.connected_nodes
"""
import json
import os
import sys
import time
from collectors.etc import riak_conf
from collectors.lib import utils
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
CONFIG = riak_conf.get_default_config()
MAP = {
'vnode_gets_total': ('vnode.requests', 'type=get'),
'vnode_puts_total': ('vnode.requests', 'type=put'),
'vnode_gets': ('vnode.requests.last.minute', 'type=get'),
'vnode_puts': ('vnode.requests.last.minute', 'type=put'),
'vnode_index_reads': ('vnode.indexing', 'type=read'),
'vnode_index_writes': ('vnode.indexing', 'type=write'),
'vnode_index_deletes': ('vnode.indexing', 'type=delete'),
'vnode_index_writes_postings': ('vnode.index.posting', 'type=write'),
'vnode_index_deletes_postings': ('vnode.index.posting', 'type=delete'),
'node_gets_total': ('node.requests', 'type=get'),
'node_puts_total': ('node.requests', 'type=put'),
'node_gets': ('node.requests.last.minute', 'type=get'),
'node_puts': ('node.requests.last.minute', 'type=put'),
'node_get_fsm_active': ('node.active.fsm', 'type=get'),
'node_put_fsm_active': ('node.active.fsm', 'type=put'),
'node_get_fsm_time_mean': ('node.latency.mean', 'type=get'),
'node_get_fsm_time_median': ('node.latency.median', 'type=get'),
'node_get_fsm_time_95': ('node.latency.95th', 'type=get'),
'node_get_fsm_time_99': ('node.latency.99th', 'type=get'),
'node_get_fsm_time_100': ('node.latency.100th', 'type=get'),
'node_put_fsm_time_mean': ('node.latency.mean', 'type=put'),
'node_put_fsm_time_median': ('node.latency.median', 'type=put'),
'node_put_fsm_time_95': ('node.latency.95th', 'type=put'),
'node_put_fsm_time_99': ('node.latency.99th', 'type=put'),
'node_put_fsm_time_100': ('node.latency.100th', 'type=put'),
'node_get_fsm_rejected': ('node.rejected.fsm', 'type=get'),
'node_put_fsm_rejected': ('node.rejected.fsm', 'type=put'),
'node_get_fsm_siblings_mean': ('node.siblings.mean', ''),
'node_get_fsm_siblings_median': ('node.siblings.median', ''),
'node_get_fsm_siblings_95': ('node.siblings.95th', ''),
'node_get_fsm_siblings_99': ('node.siblings.99th', ''),
'node_get_fsm_siblings_100': ('node.siblings.100th', ''),
'node_get_fsm_objsize_mean': ('node.object.size.mean', ''),
'node_get_fsm_objsize_median': ('node.object.size.median', ''),
'node_get_fsm_objsize_95': ('node.object.size.95th', ''),
'node_get_fsm_objsize_99': ('node.object.size.99th', ''),
'node_get_fsm_objsize_100': ('node.object.size.100th', ''),
'pbc_connects_total': ('connections', ''),
'pbc_active': ('pbc.active', ''),
'read_repairs_total': ('read_repairs', ''),
'sys_process_count': ('sys_process_count', ''),
'executing_mappers': ('executing_mappers', ''),
'mem_allocated': ('memory.allocated', ''),
'mem_total': ('memory.total', ''),
'memory_processes_used': ('memory.erlang', ''),
'index_fsm_active': ('index.active.fsm', ''),
'list_fsm_active': ('key.listing.active', ''),
'cpu_nprocs': ('os.processes', '')
#connected_nodes is calculated
}
def main():
"""Main loop"""
# don't run if we're not a riak node
if not os.path.exists("/usr/lib/riak"):
sys.exit(13)
utils.drop_privileges()
sys.stdin.close()
interval = 15
def print_stat(metric, value, tags=""):
if value is not None:
print("riak.%s %d %s %s" % (metric, ts, value, tags))
while True:
ts = int(time.time())
req = urlopen(CONFIG['stats_endpoint'])
if req is not None:
obj = json.loads(req.read())
for key in obj:
if key not in MAP:
continue
# this is a hack, but Riak reports latencies in microseconds. they're fairly useless
# to our human operators, so we're going to convert them to seconds.
if 'latency' in MAP[key][0]:
obj[key] = obj[key] / 1000000.0
print_stat(MAP[key][0], obj[key], MAP[key][1])
if 'connected_nodes' in obj:
print_stat('connected_nodes', len(obj['connected_nodes']), '')
req.close()
sys.stdout.flush()
time.sleep(interval)
if __name__ == "__main__":
sys.exit(main())
|
privacyidea/privacyidea
|
privacyidea/api/audit.py
|
Python
|
agpl-3.0
| 4,483
| 0.002903
|
# -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
# (c) cornelius kölbel, privacyidea.org
#
# 2018-11-21 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Remove the audit log based statistics
# 2016-12-20 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Restrict download to certain time
# 2015-07-16 Cornelius Kölbel, <cornelius.koelbel@netknights.it>
# Add statistics endpoint
# 2015-01-20 Cornelius Kölbel, <cornelius@privacyidea.org>
# Complete rewrite during flask migration
# Try to provide REST API
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__="""This is the audit REST API that can be used to search the audit log.
It only provides the method
GET /audit
"""
from flask import (Blueprint, request, current_app, stream_with_context)
from .lib.utils import (send_result, send_file)
from ..api.lib.prepolicy import (prepolicy, check_base_action, auditlog_age,
allowed_audit_realm, hide_audit_columns)
from ..api.auth import admin_required
from ..lib.policy import ACTION
from flask import g
import logging
from ..lib.audit import search, getAudit
from privacyidea.lib.utils import parse_timedelta
log = logging.getLogger(__name__)
audit_blueprint = Blueprint('audit_blueprint', __name__)
@audit_blueprint.route('/', methods=['GET'])
@prepolicy(check_base_action, request, ACTION.AUDIT)
@prepolicy(allowed_audit_realm, request, ACTION.AUDIT)
@prepolicy(auditlog_age, request)
@prepolicy(hide_audit_columns, request)
def search_audit():
"""
return a paginated list of audit entries.
Params can be passed as key-value-pairs.
:httpparam timelimit: A timelimit, that limits the recent audit entries.
This param gets overwritten by a policy auditlog_age. Can be 1d, 1m, 1h.
**Example request**:
.. sourcecode:: http
GET /audit?realm=realm1 HTTP/1.1
Host: example.com
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": [
{
"serial": "....",
"missing_line": "..."
}
]
},
"version": "privacyIDEA unknown"
}
"""
audit_dict = search(current_app.config, request.all_data)
g.audit_object.log({'success': True})
return send_result(audit_dict)
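# The timelimit parameter described in the docstring above can be combined with
# other filters in the query string; a hedged example request (host and realm
# are illustrative, mirroring the docstring's own example):
#
#   GET /audit/?timelimit=1d&realm=realm1 HTTP/1.1
#   Host: example.com
#   Accept: application/json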
@audit_blueprint.route('/<csvfile>', methods=['GET'])
@prepolicy(check_base_action, request, ACTION.AUDIT_DOWNLOAD)
@prepolicy(auditlog_age, request)
@admin_required
def download_csv(csvfile=None):
"""
Download the audit entry as CSV file.
Params can be passed as key-value-pairs.
**Example request**:
.. sourcecode:: http
GET /audit/audit.csv?realm=realm1 HTTP/1.1
Host: example.com
Accept: text/csv
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: text/csv
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": [
{
"serial": "....",
"missing_line": "..."
}
]
},
"version": "privacyIDEA unknown"
}
"""
audit = getAudit(current_app.config)
g.audit_object.log({'success': True})
param = request.all_data
if "timelimit" in param:
timelimit = parse_timedelta(param["timelimit"])
del param["timelimit"]
else:
timelimit = None
return send_file(stream_with_context(audit.csv_generator(param=param,
timelimit=timelimit)),
csvfile)
|
OpenChemistry/avogadrolibs
|
avogadro/qtplugins/scriptfileformats/formatScripts/zyx.py
|
Python
|
bsd-3-clause
| 2,841
| 0.001408
|
"""
/******************************************************************************
This source file is part of the Avogadro project.
Copyright 2013 Kitware, Inc.
This source code is released under the New BSD License, (the "License").
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
"""
import argparse
import json
import sys
def getMetaData():
metaData = {}
metaData['inputFormat'] = 'xyz'
metaData['outputFormat'] = 'xyz'
metaData['operations'] = ['read', 'write']
metaData['identifier'] = 'ZYX Example Format'
metaData['name'] = 'ZYX'
    metaData['description'] = "Mostly useless file format that reads xyz-style " +\
                              "files with reversed coordinates. Demonstrates " +\
"the implementation of a user-scripted file format."
metaData['fileExtensions'] = ['zyx']
    metaData['mimeTypes'] = ['chemical/x-zyx']
return metaData
def write():
result = ""
# Just copy the first two lines: numAtoms and comment/title
result += sys.stdin.readline()
result += sys.stdin.readline()
for line in sys.stdin:
words = line.split()
result += '%-3s %9.5f %9.5f %9.5f' %\
(words[0], float(words[3]), float(words[2]), float(words[1]))
if len(words) > 4:
            result += ' ' + ' '.join(words[4:])
result += '\n'
return result
def read():
result = ""
# Just copy the first two lines: numAtoms and comment/title
result += sys.stdin.readline()
result += sys.stdin.readline()
for line in sys.stdin:
words = line.split()
result += '%-3s %9.5f %9.5f %9.5f' %\
(words[0], float(words[3]), float(words[2]), float(words[1]))
if len(words) > 4:
            result += ' ' + ' '.join(words[4:])
result += '\n'
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser('Example file format script.')
parser.add_argument('--metadata', action='store_true')
parser.add_argument('--read', action='store_true')
parser.add_argument('--write', action='store_true')
parser.add_argument('--display-name', action='store_true')
parser.add_argument('--lang', nargs='?', default='en')
args = vars(parser.parse_args())
if args['metadata']:
print(json.dumps(getMetaData()))
elif args['display_name']:
print(getMetaData()['name'])
elif args['read']:
print(read())
elif args['write']:
print(write())
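# A hedged invocation sketch for the argument flags handled above (the input
# file name is illustrative; --read and --write consume the data on stdin):
#
#   python zyx.py --metadata
#   python zyx.py --display-name
#   python zyx.py --read < benzene.zyx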
|
miiila/hungry-in-karlin
|
decide.py
|
Python
|
mit
| 218
| 0
|
import random
from subprocess import call
import yaml
with open('./venues.yml') as f:
venues = yaml.load(f)
venue = random.choice(venues)
print(venue['name'])
print(venue['url'])
call(['open', venue['url']])
|
dipapaspyros/bdo_platform
|
query_designer/migrations/0013_remove_query_dataset_query.py
|
Python
|
mit
| 405
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-19 14:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
        ('query_designer', '0012_query_dataset_query'),
]
operations = [
migrations.RemoveField(
model_name='query',
name='dataset_query',
),
]
|
dsparrow27/zoocore
|
zoo/libs/pyqt/errors.py
|
Python
|
gpl-3.0
| 777
| 0.002574
|
from widgets import messagebox as msg
class QtBaseException(Exception):
"""
    Custom Exception base class used to handle exceptions with our own subset of options
"""
def __init__(self, message, displayPopup=False, *args):
"""initializes the exception, use cause to di
|
splay the cause of the exception
:param message: The exception to display
:param cause: the cause of the error eg. a variable/class etc
:param args: std Exception args
"""
self.message = message
if displayPopup:
self.showDialog()
super(self.__class__, self).__init__(message, *args)
def showDialog(self):
messageBox = msg.MessageBox()
        messageBox.setText(self.message)
messageBox.exec_()
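# A minimal usage sketch for the exception defined above (the message text is
# illustrative); passing displayPopup=True shows the message box dialog while
# the exception is being constructed:
#
#   raise QtBaseException("Failed to build the rig", displayPopup=True)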
|
ethanbao/artman
|
artman/tasks/requirements/ruby_requirements.py
|
Python
|
apache-2.0
| 1,142
| 0
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Requirements for Ruby codegen."""
from artman.tasks.requirements import task_requirement_base
class RubyFormatRequirements(task_requirement_base.TaskRequirementBase):
@classmethod
def require(cls):
return ['rubocop']
@classmethod
def install(cls):
# Intentionally do nothing
pass
class RakeRequirements(task_requirement_base.TaskRequirementBase):
@classmethod
def require(cls):
return ['rake']
@classmethod
def install(cls):
# Intentionally do nothing
pass
|
PIVX-Project/PIVX
|
test/functional/rpc_bind.py
|
Python
|
mit
| 6,476
| 0.004324
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running pivxd with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import addr_to_hex, all_interfaces, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import PivxTestFramework, SkipTest
from test_framework.util import (
    assert_equal,
assert_raises_rpc_error,
get_datadir_path,
get_rpc_proxy,
rpc_port,
rpc_url
)
class RPCBindTest(PivxTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
        self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_option("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_option("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_option("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
self.log.info("Using interface %s for testing" % self.non_loopback_ip)
# check only non-loopback interface
self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
[(self.non_loopback_ip, self.defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
if __name__ == '__main__':
RPCBindTest().main()
|
repotvsupertuga/tvsupertuga.repository
|
script.module.streamtvsupertuga/lib/resources/lib/sources/en_torrents/__init__.py
|
Python
|
gpl-2.0
| 199
| 0.01005
|
# -*- coding: utf-8 -*-
import os.path
files = os.listdir(os.path.dirname(__file__))
__all__ = [filename[:-3] for filename in files if not filename.startswith('__') and filename.endswith('.py')]
| |
GabrielBrascher/cloudstack
|
test/integration/component/test_multiple_nic_support.py
|
Python
|
apache-2.0
| 24,109
| 0.00141
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" tests for supporting multiple NIC's in advanced zone with security groups in cloudstack 4.14.0.0
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.sshClient import SshClient
from marvin.lib.utils import (validateList,
cleanup_resources,
get_host_credentials,
get_process_status,
execute_command_in_host,
random_gen)
from marvin.lib.base import (PhysicalNetwork,
Account,
Host,
TrafficType,
Domain,
Network,
NetworkOffering,
VirtualMachine,
ServiceOffering,
Zone,
NIC,
SecurityGroup)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_virtual_machines,
list_routers,
list_hosts,
get_free_vlan)
from marvin.codes import (PASS, FAILED)
import logging
import random
import time
class TestMulipleNicSupport(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestMulipleNicSupport,
cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.services = cls.testClient.getParsedTestDataConfig()
zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.zone = Zone(zone.__dict__)
cls._cleanup = []
cls.skip = False
if str(cls.zone.securitygroupsenabled) != "True":
cls.skip = True
return
cls.logger = logging.getLogger("TestMulipleNicSupport")
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
# Get Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.services['mode'] = cls.zone.networktype
cls.template = get_template(cls.apiclient, cls.zone.id, hypervisor="KVM")
if cls.template == FAILED:
cls.skip = True
return
# Create new domain, account, network and VM
cls.user_domain = Domain.create(
cls.apiclient,
services=cls.testdata["acl"]["domain2"],
parentdomainid=cls.domain.id)
# Create account
cls.account1 = Account.create(
cls.apiclient,
cls.testdata["acl"]["accountD2"],
admin=True,
domainid=cls.user_domain.id
)
# Create small service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.testdata["service_offerings"]["small"]
)
cls._cleanup.append(cls.service_offering)
cls.services["network"]["zoneid"] = cls.zone.id
cls.network_offering = NetworkOffering.create(
cls.apiclient,
cls.services["network_offering"],
)
# Enable Network offering
cls.network_offering.update(cls.apiclient, state='Enabled')
cls._cleanup.append(cls.network_offering)
cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls.testdata["virtual_machine"]["template"] = cls.template.id
if cls.zone.securitygroupsenabled:
            # Enable networking for reaching the VM through SSH
security_group = SecurityGroup.create(
cls.apiclient,
cls.testdata["security_group"],
account=cls.account1.name,
domainid=cls.account1.domainid
)
# Authorize Security group to SSH to VM
ingress_rule = security_group.authorize(
cls.apiclient,
cls.testdata["ingress_rule"],
account=cls.account1.name,
domainid=cls.account1.domainid
)
# Authorize Security group to SSH to VM
ingress_rule2 = security_group.authorize(
cls.apiclient,
cls.testdata["ingress_rule_ICMP"],
account=cls.account1.name,
domainid=cls.account1.domainid
)
cls.testdata["shared_network_offering_sg"]["specifyVlan"] = 'True'
cls.testdata["shared_network_offering_sg"]["specifyIpRanges"] = 'True'
cls.shared_network_offering = NetworkOffering.create(
cls.apiclient,
cls.testdata["shared_network_offering_sg"],
conservemode=False
)
NetworkOffering.update(
cls.shared_network_offering,
cls.apiclient,
id=cls.shared_network_offering.id,
state="enabled"
)
physical_network, vlan = get_free_vlan(cls.apiclient, cls.zone.id)
cls.testdata["shared_network_sg"]["physicalnetworkid"] = physical_network.id
        random_subnet_number = random.randrange(90, 99)
        cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
cls.network1 = Network.create(
cls.apiclient,
cls.testdata["shared_network_sg"],
networkofferingid=cls.shared_network_offering.id,
zoneid=cls.zone.id,
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
random_subnet_number = random.randrange(100, 110)
cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
cls.network2 = Network.create(
cls.apiclient,
cls.testdata["shared_network_sg"],
networkofferingid=cls.shared_network_offering.id,
zoneid=cls.zone.id,
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
random_subnet_number = random.randrange(111, 120)
|
Gab-km/papylon
|
tests/test_gen.py
|
Python
|
mit
| 4,520
| 0.002434
|
def test_gen_generate_returns_generated_value():
from papylon.gen import Gen
def gen():
while True:
yield 1
sut = Gen(gen)
actual = sut.generate()
assert actual == 1
def test_such_that_returns_new_ranged_gen_instance():
from papylon.gen import choose
gen = choose(-20, 20)
new_gen = gen.such_that(lambda x: 0 <= x <= 20)
actual = new_gen.generate()
assert 0 <= actual <= 20
def test_such_that_returns_no_hit_gen_and_raise_stop_generation_when_generate_called():
from papylon.gen import choose, StopGeneration
gen = choose(-30, 30)
new_gen = gen.such_that(lambda x: 31 <= x)
try:
new_gen.generate()
except StopGeneration:
assert True
return
assert False
def test_when_one_of_takes_a_gen_list_then_returns_one_of_the_gen_instance_in_the_list():
from papylon.gen import one_of, constant
sut = one_of(list(map(constant, [1, 4, 9])))
actual = sut.generate()
assert actual in [1, 4, 9]
def test_when_choose_takes_a_string_argument_as_min_value_then_raises_type_error():
from papylon.gen import choose
try:
choose("1", 2)
except TypeError:
assert True
return
assert False
def test_when_choose_takes_a_list_argument_as_max_value_then_raises_type_error():
from papylon.gen import choose
try:
choose(1, [2])
except TypeError:
assert True
return
assert False
def test_when_choose_takes_arguments_where_min_value_is_greater_than_max_value_then_raises_value_error():
from papylon.gen import choose
try:
choose(3, 2.0)
except ValueError:
assert True
return
assert False
def test_when_choose_takes_arguments_where_min_value_is_equal_to_max_value_then_raises_value_error():
    from papylon.gen import choose
try:
choose(-1, -1)
except ValueError:
assert True
return
assert False
def test_when_choose_takes_arguments_where_min_value_is_float_then_returns_gen_instance_which_generates_float_value():
from papylon.gen import choose
sut = choose(-2.0, 2)
actual = sut.generate()
    assert type(actual) == float
assert -2.0 <= actual <= 2.0
def test_when_choose_takes_arguments_where_max_value_is_float_then_returns_gen_instance_which_generates_float_value():
from papylon.gen import choose
sut = choose(-5, 10.0)
actual = sut.generate()
assert type(actual) == float
assert -5.0 <= actual <= 10.0
def test_when_choose_takes_arguments_both_of_which_are_int_then_returns_gen_instance_which_generates_int_value():
from papylon.gen import choose
sut = choose(-50, 50)
actual = sut.generate()
assert type(actual) == int
assert -50 <= actual <= 50
def test_when_frequency_runs_10000_times_then_its_choices_should_be_satisfied_with_accuracy_ge94_percents():
from papylon.gen import frequency, constant
weighted_gens = [(5, constant(1)), (3, constant(10)), (2, constant(100))]
count_1, count_10, count_100 = 0, 0, 0
parameter = 10000
for i in range(parameter):
sut = frequency(weighted_gens)
value = sut.generate()
if value == 1:
count_1 += 1
elif value == 10:
count_10 += 1
elif value == 100:
count_100 += 1
else:
assert False
def assert_frequency(actual, param, weight, accuracy):
return actual >= param * weight * accuracy
assuring_accuracy = 0.94
assert assert_frequency(count_1, parameter, 0.5, assuring_accuracy)
assert assert_frequency(count_10, parameter, 0.3, assuring_accuracy)
assert assert_frequency(count_100, parameter, 0.2, assuring_accuracy)
def test_map_should_create_new_gen_instance_with_mapper_function():
from papylon.gen import choose
gen = choose(1, 10)
new_gen = gen.map(lambda x: x * 2)
generated_by_new_gen = new_gen.generate()
assert type(generated_by_new_gen) == int
assert generated_by_new_gen in range(2, 21, 2)
generated_by_gen = gen.generate()
assert type(generated_by_gen) == int
assert generated_by_gen in range(1, 11)
def test_given_a_value_v_when_constant_v_then_returns_gen_instance_which_generates_only_v():
from papylon.gen import constant
value = 6
sut = constant(value)
count = 0
trial = 10
for i in range(trial):
result = sut.generate()
if result == value:
count += 1
assert count == trial
|
ColtonProvias/pytest-watch
|
pytest_watch/watcher.py
|
Python
|
mit
| 6,256
| 0.00016
|
from __future__ import print_function
import os
import time
import subprocess
from colorama import Fore, Style
from watchdog.events import (
FileSystemEventHandler, FileModifiedEvent, FileCreatedEvent,
FileMovedEvent, FileDeletedEvent)
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from .spooler import EventSpooler
EVENT_NAMES = {
FileModifiedEvent: 'modified',
FileCreatedEvent: 'created',
FileMovedEvent: 'moved',
FileDeletedEvent: 'deleted',
}
WATCHED_EVENTS = list(EVENT_NAMES)
DEFAULT_EXTENSIONS = ['.py']
CLEAR_COMMAND = 'cls' if os.name == 'nt' else 'clear'
BEEP_CHARACTER = '\a'
STYLE_NORMAL = Fore.RESET
STYLE_HIGHLIGHT = Fore.CYAN + Style.NORMAL + Style.BRIGHT
class ChangeHandler(FileSystemEventHandler):
"""Listens for changes to files and re-runs tests after each change."""
def __init__(self, auto_clear=False, beep_on_failure=True,
onpass=None, onfail=None, beforerun=None, extensions=[],
args=None, spool=True, verbose=False, quiet=False):
super(ChangeHandler, self).__init__()
self.auto_clear = auto_clear
self.beep_on_failure = beep_on_failure
self.onpass = onpass
self.onfail = onfail
self.beforerun = beforerun
self.extensions = extensions or DEFAULT_EXTENSIONS
self.args = args or []
self.spooler = None
if spool:
self.spooler = EventSpooler(0.2, self.on_queued_events)
self.verbose = verbose
self.quiet = quiet
def on_queued_events(self, events):
summary = []
for event in events:
paths = [event.src_path]
if isinstance(event, FileMovedEvent):
paths.append(event.dest_path)
event_name = EVENT_NAMES[type(event)]
paths = tuple(map(os.path.relpath, paths))
if any(os.path.splitext(path)[1].lower() in self.extensions
for path in paths):
summary.append((event_name, paths))
        if summary:
            self.run(sorted(set(summary)))
def on_any_event(self, event):
if isinstance(event, tuple(WATCHED_EVENTS)):
if self.spooler is not None:
self.spooler.enqueue(event)
else:
self.on_queued_events([event])
def run(self, summary=None):
"""Called when a file is changed to re-run the tests with py.test."""
if self.auto_clear:
subprocess.call(CLEAR_COMMAND, shell=True)
command = ' '.join(['py.test'] + self.args)
if summary and not self.auto_clear:
print()
if not self.quiet:
highlight = lambda arg: STYLE_HIGHLIGHT + arg + STYLE_NORMAL
msg = 'Running: {}'.format(highlight(command))
if summary:
if self.verbose:
file_lines = [' {:9s}'.format(event_name + ':') + ' ' +
' -> '.join(map(highlight, paths))
for event_name, paths in summary]
msg = ('Changes detected in files:\n{}\n\nRerunning: {}'
.format('\n'.join(file_lines), highlight(command)))
else:
msg = ('Changes detected, rerunning: {}'
.format(highlight(command)))
print(STYLE_NORMAL + msg + Fore.RESET + Style.NORMAL)
if self.beforerun:
os.system(self.beforerun)
exit_code = subprocess.call(['py.test'] + self.args,
shell=subprocess.mswindows)
passed = exit_code == 0
# Beep if failed
if not passed and self.beep_on_failure:
print(BEEP_CHARACTER, end='')
# Run custom commands
if passed and self.onpass:
os.system(self.onpass)
elif not passed and self.onfail:
os.system(self.onfail)
def watch(directories=[], ignore=[], auto_clear=False, beep_on_failure=True,
onpass=None, onfail=None, beforerun=None, poll=False, extensions=[],
args=[], spool=True, verbose=False, quiet=False):
if not directories:
directories = ['.']
directories = [os.path.abspath(directory) for directory in directories]
for directory in directories:
if not os.path.isdir(directory):
raise ValueError('Directory not found: ' + directory)
if ignore:
recursive_dirs, non_recursive_dirs = split_recursive(
directories, ignore)
else:
recursive_dirs = directories
non_recursive_dirs = []
# Initial run
event_handler = ChangeHandler(auto_clear, beep_on_failure,
onpass, onfail, beforerun, extensions, args,
spool, verbose, quiet)
event_handler.run()
# Setup watchdog
observer = PollingObserver() if poll else Observer()
for directory in recursive_dirs:
observer.schedule(event_handler, path=directory, recursive=True)
for directory in non_recursive_dirs:
observer.schedule(event_handler, path=directory, recursive=False)
# Watch and run tests until interrupted by user
try:
observer.start()
while True:
time.sleep(1)
observer.join()
except KeyboardInterrupt:
observer.stop()
def samepath(left, right):
return (os.path.abspath(os.path.normcase(left)) ==
os.path.abspath(os.path.normcase(right)))
def split_recursive(directories, ignore):
non_recursive_dirs = []
recursive_dirs = []
for directory in directories:
subdirs = [os.path.join(directory, d)
for d in os.listdir(directory)
if os.path.isdir(d)]
filtered = [subdir for subdir in subdirs
if not any(samepath(os.path.join(directory, d), subdir)
for d in ignore)]
if len(subdirs) == len(filtered):
recursive_dirs.append(directory)
else:
non_recursive_dirs.append(directory)
recursive_dirs.extend(filtered)
return sorted(set(recursive_dirs)), sorted(set(non_recursive_dirs))
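# A minimal programmatic sketch of the watch() entry point above (the directory,
# extension and py.test arguments are illustrative; the command-line interface
# normally supplies them from its own flags):
#
#   watch(directories=['.'], extensions=['.py'], args=['-x', 'tests/'],
#         auto_clear=True, verbose=True)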
|