code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# Copyright 2015 StackHut Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
StackHut interface and modifications to Barrister RPC library
"""
import os
import json
import uuid
import signal
from enum import Enum
import sh
from ..barrister import err_response, ERR_PARSE, ERR_INVALID_REQ, ERR_METHOD_NOT_FOUND, \
ERR_INVALID_PARAMS, ERR_INTERNAL, ERR_UNKNOWN, ERR_INVALID_RESP, \
parse, contract_from_file, RpcException
from ..utils import log
CONTRACTFILE = '.api.json'
IDLFILE = 'api.idl'
REQ_FIFO = '.req.json'
RESP_FIFO = '.resp.json'
"""
High-level interface into the IDL file
- based on the JSON compiled output that is parsed into an AST
- used from runtime introspection
"""
class ContactTypes(Enum):
    # Basic IDL value types used for runtime introspection of the contract.
    # NOTE(review): the name looks like a typo for 'ContractTypes' -- confirm
    # no external callers rely on it before renaming.
    int = 1
    string = 2
    bool = 3
    array = 4
    obj = 5
def render_signature(func):
    """Return a human-readable signature string for an IDL function.

    Produces e.g. ``add(int a, []int b) int r``; functions without a return
    value render as ``name(params)``.

    Args:
        func: parsed IDL function node with ``name``, ``params`` and
            ``returns`` attributes (``returns`` may be None).

    Returns:
        The formatted signature string.
    """
    def render_params(p):
        # array-typed params/returns are prefixed with '[]'
        pp_p = "{} {}".format(p.type, p.name)
        return '[]' + pp_p if p.is_array else pp_p

    params_t = str.join(', ', [render_params(p) for p in func.params])
    if func.returns is not None:
        return "{}({}) {}".format(func.name, params_t, render_params(func.returns))
    else:
        # BUG FIX: original used "{}({}) {}" here with only two arguments,
        # raising IndexError for any function without a return value.
        return "{}({})".format(func.name, params_t)
def load_contract_file():
    """Load the compiled JSON contract from the default contract file.

    Returns the parsed contract object produced by the barrister library.
    """
    contract_path = CONTRACTFILE
    return contract_from_file(contract_path)
def generate_contract_file():
    """Compile the IDL definition into the JSON contract file.

    Main interface into the barrister parser.

    Raises:
        AssertionError: if the IDL file is missing from the working directory.
    """
    if not os.path.exists(IDLFILE):
        raise AssertionError("Cannot find 'api.idl' interface definition file")
    # both files opened together: the contract file is truncated up-front,
    # matching the original behavior even if parsing fails
    with open(IDLFILE, 'r') as idl_file, open(CONTRACTFILE, "w") as contract_file:
        json.dump(parse(idl_file, IDLFILE), contract_file, indent=4)
####################################################################################################
# Error handling

# StackHut-specific service error code (JSON-RPC implementation-defined
# server-error range).
ERR_SERVICE = -32002
class ParseError(RpcException):
    """Request payload could not be parsed (JSON-RPC ERR_PARSE)."""
    def __init__(self, data=None):
        super().__init__(ERR_PARSE, 'Parse Error', data)
class InvalidReqError(RpcException):
    """Request object is malformed (JSON-RPC ERR_INVALID_REQ)."""
    def __init__(self, data=None):
        super().__init__(ERR_INVALID_REQ, 'Invalid Request', data)
class MethodNotFoundError(RpcException):
    """Requested method does not exist (JSON-RPC ERR_METHOD_NOT_FOUND)."""
    def __init__(self, data=None):
        super().__init__(ERR_METHOD_NOT_FOUND, 'Method Not Found', data)
class InternalError(RpcException):
    """Unexpected internal failure (JSON-RPC ERR_INTERNAL); msg is appended."""
    def __init__(self, msg='', data=None):
        super().__init__(ERR_INTERNAL, 'Internal Error - {}'.format(msg), data)
class ServiceError(RpcException):
    """Error raised by the user's service code (StackHut ERR_SERVICE code)."""
    def __init__(self, msg, data=None):
        super().__init__(ERR_SERVICE, 'Service Error - {}'.format(msg), data)
class CustomError(RpcException):
    """Error with a caller-supplied JSON-RPC error code."""
    def __init__(self, code, msg, data=None):
        super().__init__(code, 'Error - {}'.format(msg), data)
class NonZeroExitError(RpcException):
    """A spawned sub-command exited non-zero; carries exit code and stderr."""
    def __init__(self, exit_code, stderr):
        data = dict(exit_code=exit_code, stderr=stderr)
        super().__init__(-32001, 'Sub-command returned a non-zero exit', data)
def exc_to_json_error(e, req_id=None):
    """Convert an RpcException into a JSON-RPC error-response dict.

    Args:
        e: the RpcException carrying code/msg/data.
        req_id: id of the originating request, or None if unknown.
    """
    code, msg, data = e.code, e.msg, e.data
    return err_response(req_id, code, msg, data)
# NOTE(review): duplicate import -- Enum is already imported at the top of
# the module; kept as-is to avoid touching code in a doc-only pass.
from enum import Enum
class SHCmds(Enum):
    # Lifecycle commands sent to the shim process; the member *names* are
    # what goes over the wire (see _cmd_call usage of SHCmds.<cmd>.name).
    startup = 1
    shutdown = 2
    preBatch = 3
    postBatch = 4
def add_get_id(d):
    """Ensure the JSON-RPC request dict has an 'id' field and return it.

    Generates a random UUID string when no id is present (EAFP form of the
    original membership test).
    """
    try:
        return d['id']
    except KeyError:
        new_id = str(uuid.uuid4())
        d['id'] = new_id
        return new_id
class StackHutRPC:
    """
    Alt. implementation of Barrister.server modified for StackHut needs

    Performs
    * 'Type'-checking of requests and responses per interface def
    * loading the lang-specific shim/client
    * passing messages between the runner and shim/client process
    """

    def __init__(self, backend, shim_cmd):
        """
        Args:
            backend: storage backend providing create_request_dir/del_request_dir.
            shim_cmd: argv list used to launch the language shim subprocess.
        """
        self.contract = contract_from_file(CONTRACTFILE)
        self.backend = backend
        # setup the named pipes used to exchange JSON messages with the shim
        os.mkfifo(REQ_FIFO)
        os.mkfifo(RESP_FIFO)
        # run the shim in the background, forwarding its stdout/stderr to our log
        cmd = sh.Command(shim_cmd[0])
        self.p = cmd(shim_cmd[1:], _bg=True,
                     _out=lambda x: log.debug("Runner - {}".format(x.rstrip())),
                     _err=lambda x: log.error("Runner - {}".format(x.rstrip())))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Shut the shim down cleanly, force-killing it if it takes > 5 seconds.
        def handler(signum, frame):
            log.error("Force-quitting RPC subprocess")
            self.p.kill()
            raise TimeoutError()

        # Set the signal handler and a 5-second alarm
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(5)
        # send shutdown msg to each iface
        for iface in self.contract.interfaces.keys():
            log.debug("Send shutdown to {}".format(iface))
            self._cmd_call('{}.{}'.format(iface, SHCmds.shutdown.name))
        log.debug("Terminating RPC sub-process")
        try:
            self.p.terminate()
            self.p.wait()
        except sh.SignalException_15:
            # terminate() delivers SIGTERM; sh surfaces it as this exception
            log.warn("RPC subprocess shutdown uncleanly")
        signal.alarm(0)  # cancel the watchdog alarm

    def _cmd_call(self, cmd):
        """Send a lifecycle command (see SHCmds) to the shim and log the reply."""
        log.debug('Sending cmd message - {}'.format(cmd))
        resp = self._sub_call(cmd, [], 'shcmd')
        log.debug("Cmd response - {}".format(resp))

    def _req_call(self, req):
        """Make RPC call for a single request; always returns a response dict."""
        req_id = None
        try:
            if type(req) is not dict:
                # BUG FIX: original used a '%s' placeholder with str.format,
                # so the offending request was never interpolated into the msg
                raise InvalidReqError(dict(msg="{} is not an object.".format(req)))
            # massage the data (if needed)
            req_id = add_get_id(req)
            if 'jsonrpc' not in req:
                req['jsonrpc'] = "2.0"
            if "method" not in req:
                raise InvalidReqError(dict(msg="No method"))
            # return the idl - TODO - move into Scala
            if req['method'] == "common.barrister-idl" or req['method'] == "getIdl":
                return self.contract.idl_parsed
            # add the default interface if none exists
            if req['method'].find('.') < 0:
                req['method'] = "{}.{}".format('Default', req['method'])
            # NOTE - would setup context and run pre/post filters here in Barrister
            # Ok, - we're good to go
            method = req["method"]
            iface_name, func_name = method.split('.')
            params = req.get('params', [])
            self.contract.validate_request(iface_name, func_name, params)
            result = self._sub_call(method, params, req_id)
            self.contract.validate_response(iface_name, func_name, result)
            resp = dict(jsonrpc="2.0", id=req_id, result=result)
        except RpcException as e:
            resp = exc_to_json_error(e, req_id)
        except Exception as e:
            # wrap anything unexpected so the caller always gets JSON-RPC
            _e = InternalError('Exception', dict(exception=repr(e)))
            resp = exc_to_json_error(_e, req_id)
        return resp

    def _sub_call(self, method, params, req_id):
        """Actual call to the shim/client subprocess via the req/resp fifos."""
        self.backend.create_request_dir(req_id)
        # create the (sub-)req
        sub_req = dict(method=method, params=params, req_id=req_id)
        # blocking-wait to send the request
        with open(REQ_FIFO, "w") as f:
            f.write(json.dumps(sub_req))
        # blocking-wait to read the resp
        with open(RESP_FIFO, "r") as f:
            sub_resp = json.loads(f.read())
        # check the response, mapping shim error codes back onto exceptions
        if 'error' in sub_resp:
            error_code = sub_resp['error']
            log.debug(sub_resp)
            if error_code == ERR_METHOD_NOT_FOUND:
                raise MethodNotFoundError()
            elif error_code == ERR_INTERNAL:
                raise InternalError(sub_resp['msg'], sub_resp['data'])
            else:
                raise CustomError(error_code, sub_resp['msg'], sub_resp['data'])
        # NOTE: the request dir is only deleted on success (errors raise above)
        self.backend.del_request_dir(req_id)
        # validate and return the response
        result = sub_resp['result']
        return result

    def call(self, task_req):
        """Make RPC call for given task (a single request dict or a batch list)."""
        # Massage the data
        try:
            req = task_req['request']
            if type(req) is list:
                if len(req) < 1:
                    return exc_to_json_error(InvalidReqError(data=dict(msg="Empty Batch")))
                # find batch interface from the first request's method prefix
                iface_name = None
                first_method = req[0].get('method', None)
                if first_method:
                    iface_name = 'Default' if first_method.find('.') < 0 else first_method.split('.')[0]
                if iface_name:
                    self._cmd_call('{}.{}'.format(iface_name, SHCmds.preBatch.name))
                task_resp = [self._req_call(r) for r in req]
                if iface_name:
                    self._cmd_call('{}.{}'.format(iface_name, SHCmds.postBatch.name))
            else:
                task_resp = self._req_call(req)
        except Exception as e:
            task_resp = exc_to_json_error(InternalError(repr(e)))
        return task_resp
| StackHut/stackhut-toolkit | stackhut_toolkit/common/runtime/rpc.py | Python | apache-2.0 | 9,386 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.contrib.nvcc import have_fp16
def test_basic_build():
    """Build z = relu(dense(a, b)) + c for CPU and verify the numeric result."""
    tgt = "llvm"
    ctx = tvm.cpu()
    # func
    a = relay.var("a", dtype="float32", shape=(16, 8))
    b = relay.var("b", dtype="float32", shape=(8, 8))
    c = relay.var("c", dtype="float32", shape=(16, 8))
    x = relay.nn.dense(a, b)
    y = relay.nn.relu(x)
    z = y + c
    func = relay.Function([a, b, c], z)
    A = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype("float32"), ctx=ctx)
    B = tvm.nd.array(np.random.uniform(-1, 1, (8, 8)).astype("float32"), ctx=ctx)
    C = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype("float32"), ctx=ctx)
    # b and c are bound as constant params; only 'a' is fed at runtime
    params = {
        "b" : B,
        "c" : C
    }
    # build, keying the target on the context's device type
    targets = {
        tvm.expr.IntImm("int32", ctx.device_type): tgt
    }
    g_json, mmod, params = relay.build(relay.Module.from_expr(func), targets, "llvm", params=params)
    # test
    rt = tvm.contrib.graph_runtime.create(g_json, mmod, ctx)
    rt.set_input("a", A)
    rt.load_params(relay.save_param_dict(params))
    rt.run()
    out = rt.get_output(0)
    # reference: dense is A @ B.T, relu clamps at 0, then C is added
    np.testing.assert_allclose(out.asnumpy(), np.maximum(np.dot(A.asnumpy(),
                                                                B.asnumpy().T),
                                                         0) + C.asnumpy(),
                               atol=1e-5, rtol=1e-5)
def test_fp16_build():
    """Build a float16 elementwise-add graph on CUDA and verify the result."""
    dtype = "float16"
    # skip unless a CUDA device is both compiled in and present
    if not tvm.module.enabled("cuda") or not tvm.gpu(0).exist:
        print("skip because cuda is not enabled.")
        return
    ctx = tvm.gpu(0)
    # skip on GPUs whose compute capability lacks fp16 support
    if dtype == "float16" and not have_fp16(ctx.compute_version):
        print("skip because gpu does not support fp16")
        return
    x = relay.var("x", dtype=dtype, shape=(4, 4))
    y = relay.var("y", dtype=dtype, shape=(4, 4))
    z = x + y
    func = relay.Function([x, y], z)
    X = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), ctx=ctx)
    Y = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), ctx=ctx)
    # both inputs bound as params and restored via load_params below
    params = {
        "x": X,
        "y": Y,
    }
    # build
    g_json, mmod, params = relay.build(func, "cuda", params=params)
    # test
    rt = tvm.contrib.graph_runtime.create(g_json, mmod, ctx)
    rt.load_params(relay.save_param_dict(params))
    rt.run()
    out = rt.get_output(0)
    np.testing.assert_allclose(out.asnumpy(), X.asnumpy() + Y.asnumpy(),
                               atol=1e-5, rtol=1e-5)
def test_fp16_conversion():
    """Check float32<->float16 casts build and run on both llvm and cuda."""
    def check_conversion(tgt, ctx):
        # skip targets that are not compiled in, or GPUs without fp16
        if not tvm.module.enabled(tgt):
            print("skip because {} is not enabled.".format(tgt))
            return
        elif tgt == "cuda" and ctx.exist and not have_fp16(ctx.compute_version):
            print("skip because gpu does not support fp16")
            return

        n = 10
        # exercise the cast in both directions
        for (src, dst) in [('float32', 'float16'), ('float16', 'float32')]:
            x = relay.var("x", relay.TensorType((n,), src))
            y = x.astype(dst)
            func = relay.Function([x], y)
            # init input: values spread around zero
            X = tvm.nd.array(n * np.random.randn(n).astype(src) - n / 2)
            # build
            with relay.build_config(opt_level=1):
                g_json, mmod, params = relay.build(relay.Module.from_expr(func), tgt)
            # test
            rt = tvm.contrib.graph_runtime.create(g_json, mmod, ctx)
            rt.set_input("x", X)
            rt.run()
            out = rt.get_output(0)
            # reference: numpy's own astype of the same data
            np.testing.assert_allclose(out.asnumpy(), X.asnumpy().astype(dst),
                                       atol=1e-5, rtol=1e-5)

    for target, ctx in [('llvm', tvm.cpu()), ('cuda', tvm.gpu())]:
        check_conversion(target, ctx)
if __name__ == "__main__":
    # run all graph-build smoke tests when executed directly
    test_basic_build()
    test_fp16_build()
    test_fp16_conversion()
| Huyuwei/tvm | tests/python/relay/test_cpp_build_module.py | Python | apache-2.0 | 4,614 |
'''
Implements the targetcli root UI.
This file is part of targetcli.
Copyright (c) 2011-2013 by Datera, Inc
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from datetime import datetime
from glob import glob
import os
import shutil
import stat
from configshell_fb import ExecutionError
from rtslib_fb import RTSRoot
from rtslib_fb.utils import ignored
from .ui_backstore import complete_path, UIBackstores
from .ui_node import UINode
from .ui_target import UIFabricModule
default_save_file = "/etc/target/saveconfig.json"
kept_backups = 10
class UIRoot(UINode):
    '''
    The targetcli hierarchy root node.
    '''

    def __init__(self, shell, as_root=False):
        # as_root gates privileged detail (e.g. auth status in `sessions detail`)
        UINode.__init__(self, '/', shell=shell)
        self.as_root = as_root
        self.rtsroot = RTSRoot()

    def refresh(self):
        '''
        Refreshes the tree of target fabric modules.
        '''
        self._children = set([])
        UIBackstores(self)
        # only show fabrics present in the system
        for fm in self.rtsroot.fabric_modules:
            # NOTE(review): `== None` would idiomatically be `is None`;
            # behavior is unchanged, left as-is in this doc-only pass
            if fm.wwns == None or any(fm.wwns):
                UIFabricModule(fm, self)

    def ui_command_saveconfig(self, savefile=default_save_file):
        '''
        Saves the current configuration to a file so that it can be restored
        on next boot.
        '''
        self.assert_root()
        savefile = os.path.expanduser(savefile)
        # Only save backups if saving to default location
        if savefile == default_save_file:
            backup_dir = os.path.dirname(savefile) + "/backup"
            backup_name = "saveconfig-" + \
                datetime.now().strftime("%Y%m%d-%H:%M:%S") + ".json"
            backupfile = backup_dir + "/" + backup_name
            # best-effort copy: the save file may not exist yet
            with ignored(IOError):
                shutil.copy(savefile, backupfile)
            # Kill excess backups, keeping only the newest `kept_backups`
            backups = sorted(glob(os.path.dirname(savefile) + "/backup/*.json"))
            files_to_unlink = list(reversed(backups))[kept_backups:]
            for f in files_to_unlink:
                os.unlink(f)
            self.shell.log.info("Last %d configs saved in %s." % \
                                (kept_backups, backup_dir))
        self.rtsroot.save_to_file(savefile)
        self.shell.log.info("Configuration saved to %s" % savefile)

    def ui_command_restoreconfig(self, savefile=default_save_file, clear_existing=False):
        '''
        Restores configuration from a file.
        '''
        self.assert_root()
        savefile = os.path.expanduser(savefile)
        if not os.path.isfile(savefile):
            self.shell.log.info("Restore file %s not found" % savefile)
            return
        errors = self.rtsroot.restore_from_file(savefile, clear_existing)
        self.refresh()
        if errors:
            # restore continues past recoverable errors; surface them all at once
            raise ExecutionError("Configuration restored, %d recoverable errors:\n%s" % \
                                 (len(errors), "\n".join(errors)))
        self.shell.log.info("Configuration restored from %s" % savefile)

    def ui_complete_saveconfig(self, parameters, text, current_param):
        '''
        Auto-completes the file name
        '''
        if current_param != 'savefile':
            return []
        completions = complete_path(text, stat.S_ISREG)
        if len(completions) == 1 and not completions[0].endswith('/'):
            # single exact match: append a space to terminate the completion
            completions = [completions[0] + ' ']
        return completions

    # restoreconfig takes the same savefile parameter, so share the completer
    ui_complete_restoreconfig = ui_complete_saveconfig

    def ui_command_clearconfig(self, confirm=None):
        '''
        Removes entire configuration of backstores and targets
        '''
        self.assert_root()
        # confirm is parsed as a boolean (default False); rtslib enforces it
        confirm = self.ui_eval_param(confirm, 'bool', False)
        self.rtsroot.clear_existing(confirm=confirm)
        self.shell.log.info("All configuration cleared")
        self.refresh()

    def ui_command_version(self):
        '''
        Displays the targetcli and support libraries versions.
        '''
        # NOTE(review): imported inside the method, presumably to avoid a
        # circular import at module load time -- confirm before hoisting
        from targetcli import __version__ as targetcli_version
        self.shell.log.info("targetcli version %s" % targetcli_version)

    def ui_command_sessions(self, action="list", sid=None):
        '''
        Displays a detailed list of all open sessions.

        PARAMETERS
        ==========

        I{action}
        ---------
        The I{action} is one of:
            - B{list} gives a short session list
            - B{detail} gives a detailed list

        I{sid}
        ------
        You can specify an I{sid} to only list this one,
        with or without details.

        SEE ALSO
        ========
        status
        '''
        indent_step = 4
        base_steps = 0
        action_list = ("list", "detail")
        if action not in action_list:
            raise ExecutionError("action must be one of: %s" %
                                 ", ".join(action_list))
        if sid is not None:
            # validate early so a bad sid fails before any output
            try:
                int(sid)
            except ValueError:
                raise ExecutionError("sid must be a number, '%s' given" % sid)

        def indent_print(text, steps):
            # print through the shell console with hierarchical indentation
            console = self.shell.con
            console.display(console.indent(text, indent_step * steps),
                            no_lf=True)

        def print_session(session):
            acl = session['parent_nodeacl']
            indent_print("alias: %(alias)s\tsid: %(id)i type: " \
                         "%(type)s session-state: %(state)s" % session,
                         base_steps)
            if action == 'detail':
                # auth status is only shown when running privileged
                if self.as_root:
                    if acl.authenticate_target:
                        auth = " (authenticated)"
                    else:
                        auth = " (NOT AUTHENTICATED)"
                else:
                    auth = ""
                indent_print("name: %s%s" % (acl.node_wwn, auth),
                             base_steps + 1)
                for mlun in acl.mapped_luns:
                    plugin = mlun.tpg_lun.storage_object.plugin
                    name = mlun.tpg_lun.storage_object.name
                    if mlun.write_protect:
                        mode = "r"
                    else:
                        mode = "rw"
                    indent_print("mapped-lun: %d backstore: %s/%s mode: %s" %
                                 (mlun.mapped_lun, plugin, name, mode),
                                 base_steps + 1)
                for connection in session['connections']:
                    indent_print("address: %(address)s (%(transport)s) cid: " \
                                 "%(cid)i connection-state: %(cstate)s" % \
                                 connection, base_steps + 1)

        if sid:
            printed_sessions = [x for x in self.rtsroot.sessions if x['id'] == int(sid)]
        else:
            printed_sessions = list(self.rtsroot.sessions)

        if len(printed_sessions):
            for session in printed_sessions:
                print_session(session)
        else:
            if sid is None:
                indent_print("(no open sessions)", base_steps)
            else:
                raise ExecutionError("no session found with sid %i" % int(sid))
| cloud4life/targetcli-fb | targetcli/ui_root.py | Python | apache-2.0 | 7,712 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib import exceptions as lib_exc
from tempest.api.image import base
from tempest import test
class ImagesNegativeTest(base.BaseV2ImageTest):
    """Negative tests for the image v2 get_image and delete_image APIs.

    Tests
    ** get non-existent image
    ** get image with image_id=NULL
    ** get the deleted image
    ** delete non-existent image
    ** delete image with image_id=NULL
    ** delete the deleted image
    """

    @test.attr(type=['negative'])
    @test.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81d9f')
    def test_get_non_existent_image(self):
        # get the non-existent image: a random UUID should never exist
        non_existent_id = str(uuid.uuid4())
        self.assertRaises(lib_exc.NotFound, self.client.get_image,
                          non_existent_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2562ad')
    def test_get_image_null_id(self):
        # get image with image_id = NULL (empty string)
        image_id = ""
        self.assertRaises(lib_exc.NotFound, self.client.get_image, image_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('e57fc127-7ba0-4693-92d7-1d8a05ebcba9')
    def test_get_delete_deleted_image(self):
        # get and delete the deleted image
        # create and delete image
        body = self.client.create_image(name='test',
                                        container_format='bare',
                                        disk_format='raw')
        image_id = body['id']
        self.client.delete_image(image_id)
        self.client.wait_for_resource_deletion(image_id)

        # get the deleted image
        self.assertRaises(lib_exc.NotFound, self.client.get_image, image_id)

        # delete the deleted image: a second delete must also 404
        self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                          image_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('6fe40f1c-57bd-4918-89cc-8500f850f3de')
    def test_delete_non_existing_image(self):
        # delete non-existent image
        non_existent_image_id = str(uuid.uuid4())
        self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                          non_existent_image_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('32248db1-ab88-4821-9604-c7c369f1f88c')
    def test_delete_image_null_id(self):
        # delete image with image_id=NULL (empty string)
        image_id = ""
        self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                          image_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('292bd310-369b-41c7-a7a3-10276ef76753')
    def test_register_with_invalid_container_format(self):
        # Negative tests for invalid data supplied to POST /images
        self.assertRaises(lib_exc.BadRequest, self.client.create_image,
                          'test', 'wrong', 'vhd')

    @test.attr(type=['negative'])
    @test.idempotent_id('70c6040c-5a97-4111-9e13-e73665264ce1')
    def test_register_with_invalid_disk_format(self):
        # invalid disk_format must be rejected with 400
        self.assertRaises(lib_exc.BadRequest, self.client.create_image,
                          'test', 'bare', 'wrong')
| danielmellado/tempest | tempest/api/image/v2/test_images_negative.py | Python | apache-2.0 | 3,822 |
# -*- coding: utf-8 -*-
"""This file contains utility functions."""
import logging
import re
# Illegal Unicode characters for XML.
ILLEGAL_XML_RE = re.compile(
ur'[\x00-\x08\x0b-\x1f\x7f-\x84\x86-\x9f'
ur'\ud800-\udfff\ufdd0-\ufddf\ufffe-\uffff]')
def IsText(bytes_in, encoding=None):
    """Examine the bytes in and determine if they are indicative of a text.

    Parsers need quick and at least semi reliable method of discovering whether
    or not a particular byte stream is a text or resembles text or not. This can
    be used in text parsers to determine if a file is a text file or not for
    instance.

    The method assumes the byte sequence is either ASCII, UTF-8, UTF-16 or method
    supplied character encoding. Otherwise it will make the assumption the byte
    sequence is not text, but a byte sequence.

    Args:
        bytes_in: The byte sequence passed to the method that needs examination.
        encoding: Optional encoding to test, if not defined only ASCII, UTF-8 and
                  UTF-16 are tried.

    Returns:
        Boolean value indicating whether or not the byte sequence is a text or not.
    """
    # NOTE: this module is Python 2 code (`unicode` type below).
    # TODO: Improve speed and accuracy of this method.
    # Start with the assumption we are dealing with a text.
    is_ascii = True

    # Check if this is ASCII text string.
    # NOTE(review): ord < 32 also rejects \t, \r and \n, so any multi-line
    # ASCII input falls through to the UTF-8 check below (which accepts it).
    for char in bytes_in:
        if not 31 < ord(char) < 128:
            is_ascii = False
            break

    # We have an ASCII string.
    if is_ascii:
        return is_ascii

    # Is this already a unicode text?
    if isinstance(bytes_in, unicode):
        return True

    # Check if this is UTF-8
    try:
        _ = bytes_in.decode('utf-8')
        return True
    except UnicodeDecodeError:
        pass

    # TODO: UTF 16 decode is successful in too
    # many edge cases where we are not really dealing with
    # a text at all. Leaving this out for now, consider
    # re-enabling or making a better determination.
    #try:
    #  _ = bytes_in.decode('utf-16-le')
    #  return True
    #except UnicodeDecodeError:
    #  pass

    if encoding:
        try:
            _ = bytes_in.decode(encoding)
            return True
        except UnicodeDecodeError:
            pass
        except LookupError:
            # the supplied codec name is unknown to Python
            logging.error(
                u'String encoding not recognized: {0:s}'.format(encoding))

    return False
def GetUnicodeString(string):
    """Converts the string to Unicode if necessary.

    Non-unicode input is stringified and decoded as UTF-8 with undecodable
    bytes silently dropped (errors='ignore'). Python 2 only.
    """
    if not isinstance(string, unicode):
        return str(string).decode('utf8', 'ignore')
    return string
def GetInodeValue(inode_raw):
    """Read in a 'raw' inode value and try to convert it into an integer.

    Args:
        inode_raw: A string or an int inode value.

    Returns:
        An integer inode value, or -1 when conversion fails entirely.
    """
    # Python 2 code: `long` no longer exists on Python 3.
    if isinstance(inode_raw, (int, long)):
        return inode_raw

    if isinstance(inode_raw, float):
        return int(inode_raw)

    try:
        return int(inode_raw)
    except ValueError:
        # Let's do one more attempt: take the digits before the first dash,
        # e.g. values of the form '123-45-67' -- presumably an
        # inode-sequence style notation; TODO confirm against callers.
        inode_string, _, _ = str(inode_raw).partition('-')
        try:
            return int(inode_string)
        except ValueError:
            return -1
def RemoveIllegalXMLCharacters(string, replacement=u'\ufffd'):
    """Removes illegal Unicode characters for XML.

    Args:
        string: A string to replace all illegal characters for XML.
        replacement: A replacement character to use in replacement of all
                     found illegal characters (defaults to U+FFFD).

    Returns:
        A string where all illegal Unicode characters for XML have been
        replaced. If the input is not a string it will be returned unchanged.
    """
    # Python 2 code: `basestring` covers both str and unicode.
    if isinstance(string, basestring):
        return ILLEGAL_XML_RE.sub(replacement, string)
    return string
| ostree/plaso | plaso/lib/utils.py | Python | apache-2.0 | 3,494 |
import re
import sqlite3
import csv
import ast
import os
import sys
import fnmatch
import datetime
import xlrd
import win32com.client
def question_marks(st):
    """Return a '?,?,...' placeholder string with one '?' per comma-separated
    field of *st*.

    Used to build parameterized sqlite INSERT statements matching a column
    header string; an empty header still yields a single '?' (split('') -> ['']).
    """
    # one placeholder per column; replaces the original's quadratic string
    # append loop (which also shadowed the function's own name)
    return ','.join(['?'] * len(st.split(',')))
def xlsx_to_arr(xlsx_file, worksheet=0, row_start=0, col_start=0, row_end=-1, col_end=-1):
    """Read a worksheet of an .xlsx file into (header, rows).

    The first row of the selected range is treated as the header; characters
    awkward in sqlite column names are stripped from it.

    NOTE(review): xlrd's ``end_colx`` is exclusive, so ``col_end-1`` drops
    the final column of the requested range -- confirm this is intentional.
    """
    arr = []
    wb = xlrd.open_workbook(xlsx_file)
    ws = wb.sheet_by_index(worksheet)
    # -1 sentinels mean "through the end of the sheet"
    row_end = ws.nrows if row_end == -1 else row_end
    col_end = ws.ncols if col_end == -1 else col_end
    arr = [ws.row_values(row, start_colx=col_start, end_colx=col_end-1) for row in range(row_start, row_end)]
    header = ','.join(arr[0])
    return re.sub(r"[\*\.#/\$%\"\(\)&\- ]", "", header), arr[1:]
def csv_to_arr(csv_file, start=0, end=0, has_header=True, delim=',', ignore_col=""):
    """Read a delimited file into (header, rows) or just rows.

    Args:
        csv_file: path of the file to read.
        start: index of the first row to use (the header row when
            has_header is True).
        end: exclusive end row index; 0 means "to the end of the file".
        has_header: when True, return (cleaned_header, data_rows); the
            header has characters awkward in sqlite column names stripped.
        delim: field delimiter.
        ignore_col: unused; kept for interface compatibility.

    Returns:
        (header, rows) when has_header is True, else rows.
    """
    # newline='' is the csv-module idiom; the original's 'rU' mode was
    # deprecated and removed in Python 3.11
    with open(csv_file, newline='') as f:
        arr = list(csv.reader(f, delimiter=delim))
    if has_header:
        header = ','.join(arr[start])
        body = arr[start + 1:] if end == 0 else arr[start + 1:end]
        return re.sub(r"[\*\.#/\$%\d\" ]", "", header), body
    # no header requested: return the raw rows from `start` onwards
    # (the original's trailing unreachable `return` has been removed)
    return arr[start:]
def arr_to_csv(file_name, header, data_arr):
    """Write a header line plus data rows to a CSV file, quoting all fields.

    Args:
        file_name: destination path (overwritten).
        header: comma-separated header string; split into the first row.
        data_arr: iterable of rows; every cell is stringified before writing.
    """
    # 'w' + newline='' is the Python 3 csv idiom; the original opened the
    # file in 'wb', which makes csv.writer raise TypeError on Python 3.
    # The `with` block also guarantees the file is closed on error.
    with open(file_name, 'w', newline='') as csv_file:
        wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
        wr.writerow(header.split(','))
        for data_row in data_arr:
            wr.writerow([str(ele) for ele in data_row])
    return
def arrs_to_xlsx(filename, header=[], arr=[]):
    """Write parallel header/rows lists into the worksheets of a new Excel
    workbook via COM automation (Windows only), then apply recon formatting.

    header[x] is a comma-separated header string for worksheet x+1;
    arr[x] holds that sheet's data rows (cells are stringified).
    """
    i = 1
    xl = win32com.client.Dispatch('Excel.Application')
    wb = xl.Workbooks.Add()
    for x in range(0, len(header)):
        ws = wb.Worksheets(x+1)
        # header goes in row 1, data from row 2; COM indices are 1-based
        for i, cell in enumerate(header[x].split(',')):
            ws.Cells(1,i+1).Value = cell
        for i, row in enumerate(arr[x]):
            for j, cell in enumerate(row):
                ws.Cells(i+2,j+1).Value = str(cell)
    wb.Worksheets(1).Columns.AutoFit()
    # highlight rows where the paired quantity columns disagree (C vs D, E vs F)
    wb.Worksheets(1).UsedRange.FormatConditions.Add(win32com.client.constants.xlExpression, "", '=OR(AND(ISNUMBER($C1),$C1<>$D1),AND(ISNUMBER($E1),$E1<>$F1))')
    wb.Worksheets(1).UsedRange.FormatConditions(1).Interior.ColorIndex = 6
    wb.Worksheets(1).UsedRange.FormatConditions(1).StopIfTrue = False
    wb.Worksheets(1).Columns("C:F").NumberFormat = "#,##0_);[Red](#,##0);0;@"
    # suppress Excel's overwrite prompt while saving
    xl.DisplayAlerts = False
    wb.SaveAs(filename)
    xl.DisplayAlerts = True
    wb.Close(True)
    return
def db_cur(source = ":memory:"):
conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
# conn.row_factory = sqlite3.Row
cur = conn.cursor()
return conn, cur
def create_tbl(cur, tbl_name, header, arr=[], index_arr=[]):
    """Create table *tbl_name* with columns from *header* if it does not
    already exist, optionally adding indexes, then bulk-insert *arr*.

    A leading 'id' column in the header is promoted to PRIMARY KEY.
    """
    cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
    (existing,) = cur.fetchone()
    if not existing:
        cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
        for index in index_arr:
            cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
    if arr != []:
        cur.executemany("INSERT INTO " + tbl_name + " VALUES (" + question_marks(header) + ")", arr)
    return
def files_lookup(tgt_dir, pattern, recur_list=False, sub_folder=False, most_recent=True):
    """Find entries in *tgt_dir* whose names match the fnmatch *pattern*.

    Full paths are sorted descending when most_recent is True, so the newest
    date-stamped filename sorts first. Returns the whole list when
    recur_list is True, otherwise only the first match.

    NOTE(review): with sub_folder=True and recur_list=False the recursive
    call returns a single path string and `+=` splices its characters into
    the result; callers in this script only use the defaults, so the quirk
    is preserved rather than changed here.
    """
    found = []
    for entry in os.listdir(tgt_dir):
        full_path = os.path.join(tgt_dir, entry)
        if sub_folder and os.path.isdir(full_path):
            found += files_lookup(full_path, pattern, recur_list, sub_folder, most_recent)
        if fnmatch.fnmatch(entry, pattern):
            found.append(full_path)
    found.sort(reverse=most_recent)
    return found if recur_list else found[0]
def recon_sbl(cur):
    """Build G1 and FA inventory tables from the previously-loaded raw
    tables (fasbl, os, pd, sblmap) and return (header, rows) for the recon:
    outstanding and pending quantities per contract/counterparty/stock from
    both systems, keeping only rows where any quantity is non-zero.
    """
    sbl_header = "Contract,CPCode,Client,StockCode,G1 O/S,FA O/S,G1 Pending,FA Pending"
    create_tbl(cur, "g1_inv", "SBLCODE,CPTY,STOCK,OS,PD")
    create_tbl(cur, "fa_inv", "EXT,DESK,STOCK,OS,PD")
    # FA side: bucket quantities into outstanding (value date passed)
    # vs pending (value date today or later)
    cur.execute("""
        insert into fa_inv
        select ExternalReference, ClientCode, StockCode, sum(case when date('now') > ValueDate then Qty else 0 end), sum(case when date('now') <= ValueDate then Qty else 0 end)
        from fasbl
        group by ExternalReference, ClientCode, StockCode
        """)
    # G1 side: combine the outstanding (os) and pending (pd) extracts,
    # flipping the sign for PTYPE 'B' / borrow-return legs, and mapping the
    # counterparty code to a business name via sblmap
    cur.execute("""
        insert into g1_inv
        select SBLCODE, business, STOCK||' HK Equity', sum(case when source = 'os' then QTY else 0 end), sum(case when source = 'pd' then QTY else 0 end)
        from (
        select sblmap.SBLCode as SBLCODE, sblmap.Name as business, cast(STOCK as int) as STOCK, case when PTYPE = 'B' then -QTY else QTY end as QTY, 'os' as source
        from os join sblmap on os.CPTY = sblmap.SBLCode
        where cast(STOCK as int) <> 0
        UNION ALL
        select sblmap.SBLCode as SBLCODE, sblmap.Name as business, cast(STOCK as int) as STOCK, case when (BL = 'L' and STATUS = 'R') or (BL = 'B' and STATUS = 'L') then -QTY else QTY end as QTY, 'pd' as source
        from pd join sblmap on pd.CPTY = sblmap.SBLCode
        where cast(STOCK as int) <> 0
        ) aggrg
        where STOCK <> ''
        group by business, STOCK
        """)
    # full outer join of the two inventories (emulated with two left joins
    # unioned together), dropping rows where every quantity is zero
    cur.execute("""
        select EXT, SBLCode, CPTY, STOCK, sbl_os, fa_os, sbl_pd, fa_pd
        from (
        select EXT, SBLCODE, g1_inv.CPTY as CPTY, g1_inv.STOCK as STOCK, g1_inv.OS as sbl_os, ifnull(fa_inv.OS, 0) as fa_os, g1_inv.PD as sbl_pd, ifnull(fa_inv.PD, 0) as fa_pd
        from g1_inv left join fa_inv
        on g1_inv.CPTY = fa_inv.DESK
        and g1_inv.STOCK = fa_inv.STOCK
        union
        select EXT, SBLCODE, fa_inv.DESK as CPTY, fa_inv.STOCK as STOCK, ifnull(g1_inv.OS, 0) as sbl_os, fa_inv.OS as fa_os, ifnull(g1_inv.PD, 0) as sbl_pd, fa_inv.PD as fa_pd
        from fa_inv left join g1_inv
        on g1_inv.CPTY = fa_inv.DESK
        and g1_inv.STOCK = fa_inv.STOCK
        ) consol
        where sbl_os <> 0 or fa_os <> 0 or sbl_pd <> 0 or fa_pd <> 0
        """)
    sbl_arr = cur.fetchall()
    return sbl_header, sbl_arr
def conv_xl_dt(xl_dt):
    """Convert an Excel serial date number to an ISO 'YYYY-MM-DD' string.

    Uses 1899-12-30 as day zero: this single offset absorbs both Excel's
    1-based day count and its fictitious 1900-02-29, and is arithmetically
    identical to the ordinal-based formula it replaces for all inputs.
    Fractional day parts are truncated via int().
    """
    excel_epoch = datetime.date(1899, 12, 30)
    return (excel_epoch + datetime.timedelta(days=int(xl_dt))).strftime("%Y-%m-%d")
def conv_xl_dt_arr(arr, cols):
    """Return a copy of *arr* (list of rows) with the columns listed in
    *cols* converted from Excel serial dates to ISO date strings."""
    converted_rows = []
    for row in arr:
        converted_rows.append(
            [conv_xl_dt(cell) if col in cols else cell
             for col, cell in enumerate(row)])
    return converted_rows
def main():
    """Locate the latest input extracts, load them into an in-memory sqlite
    database, run the SBL/FA recon and write the result workbook."""
    conn, cur = db_cur()
    # production paths are the commented UNC shares; local dir used otherwise
    pb_dir = os.path.dirname(os.path.abspath(__file__))
    # pb_dir = "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data"
    sbl_dir = os.path.dirname(os.path.abspath(__file__))
    # sbl_dir = "\\\\P7FS0001\\ED\\SBL\\Reports\\Daily SBL Report\\ReportData"
    output_dir = "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\SBL FA Deltaone Recon"
    # files_lookup sorts descending, so each call returns the newest file
    sblmap_file = files_lookup(pb_dir, "ClientDetails_????????.xlsx")
    fasbl_file = files_lookup(pb_dir, "RepoSBLTrade_????????.xlsx")
    os_file = files_lookup(sbl_dir, "OS_Trades_Extract_*.CSV")
    pd_file = files_lookup(sbl_dir, "Pending_Trades_Extract_*.CSV")
    print (sblmap_file)
    print (fasbl_file)
    print (os_file)
    print (pd_file)
    # trade date is the YYYYMMDD stamp embedded in the ClientDetails filename
    trd_date = sblmap_file[-13:-5]
    inv_file = os.path.join(output_dir, "FA_G1_SBL_recon_"+trd_date+".xlsx")
    sblmap_header, sblmap_arr = xlsx_to_arr(sblmap_file, row_start=1)
    # rename the first occurrence -- presumably to de-duplicate a repeated
    # 'ClientId' column so the header is a valid sqlite column list; confirm
    sblmap_header = sblmap_header.replace("ClientId", "ClientId1", 1)
    fasbl_header, fasbl_arr = xlsx_to_arr(fasbl_file, row_start=1)
    # columns 3 and 4 hold Excel serial dates
    fasbl_arr = conv_xl_dt_arr(fasbl_arr, [3, 4])
    os_header, os_arr = csv_to_arr(os_file, 1, -1, True, '\t')
    pd_header, pd_arr = csv_to_arr(pd_file, 1, -1, True, '\t')
    # rename first 'BL' -- presumably avoids a duplicate column name; confirm
    pd_header = pd_header.replace("BL","B_L",1)
    create_tbl(cur, "sblmap", sblmap_header, sblmap_arr)
    create_tbl(cur, "os", os_header, os_arr)
    create_tbl(cur, "pd", pd_header, pd_arr)
    create_tbl(cur, "fasbl", fasbl_header, fasbl_arr)
    sbl_header, sbl_arr = recon_sbl(cur)
    arrs_to_xlsx(inv_file, [sbl_header], [sbl_arr])
    return
if __name__ == "__main__":
print ("D1 G1 SBL Recon")
try:
main()
except KeyboardInterrupt:
print ("Ctrl+C pressed. Stopping...") | frederick623/pb | deltaone/d1_sbl_recon.py | Python | apache-2.0 | 7,739 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
from st2common.util import isotime
class TestTimeUtil(unittest.TestCase):
    """Unit tests for ``st2common.util.isotime`` (add_utc_tz, validate,
    parse, format)."""

    def test_add_utc_tz_info(self):
        """add_utc_tz() attaches a UTC tzinfo to a naive datetime."""
        dt = datetime.datetime.utcnow()
        self.assertIsNone(dt.tzinfo)
        dt = isotime.add_utc_tz(dt)
        self.assertIsNotNone(dt.tzinfo)
        self.assertEqual(dt.tzinfo.tzname(None), 'UTC')

    def test_validate(self):
        """validate() accepts the supported ISO 8601 variants and rejects
        strings with no timezone designator (or non-strings)."""
        self.assertTrue(isotime.validate('2000-01-01 12:00:00Z'))
        self.assertTrue(isotime.validate('2000-01-01 12:00:00+00'))
        self.assertTrue(isotime.validate('2000-01-01 12:00:00+0000'))
        self.assertTrue(isotime.validate('2000-01-01 12:00:00+00:00'))
        self.assertTrue(isotime.validate('2000-01-01 12:00:00.000000Z'))
        self.assertTrue(isotime.validate('2000-01-01 12:00:00.000000+00'))
        self.assertTrue(isotime.validate('2000-01-01 12:00:00.000000+0000'))
        self.assertTrue(isotime.validate('2000-01-01 12:00:00.000000+00:00'))
        self.assertTrue(isotime.validate('2000-01-01T12:00:00Z'))
        self.assertTrue(isotime.validate('2000-01-01T12:00:00.000000Z'))
        self.assertTrue(isotime.validate('2000-01-01T12:00:00+00:00'))
        self.assertTrue(isotime.validate('2000-01-01T12:00:00.000000+00:00'))
        self.assertTrue(isotime.validate('2015-02-10T21:21:53.399Z'))
        self.assertFalse(isotime.validate('2000-01-01', raise_exception=False))
        self.assertFalse(isotime.validate('2000-01-01T12:00:00', raise_exception=False))
        self.assertFalse(isotime.validate('2000-01-01T12:00:00+00:00Z', raise_exception=False))
        self.assertFalse(isotime.validate('2000-01-01T12:00:00.000000', raise_exception=False))
        self.assertFalse(isotime.validate('Epic!', raise_exception=False))
        self.assertFalse(isotime.validate(object(), raise_exception=False))
        self.assertRaises(ValueError, isotime.validate, 'Epic!', True)

    def test_parse(self):
        """parse() maps every supported representation to the same
        timezone-aware datetime."""
        dt = isotime.add_utc_tz(datetime.datetime(2000, 1, 1, 12))
        self.assertEqual(isotime.parse('2000-01-01 12:00:00Z'), dt)
        self.assertEqual(isotime.parse('2000-01-01 12:00:00+00'), dt)
        self.assertEqual(isotime.parse('2000-01-01 12:00:00+0000'), dt)
        self.assertEqual(isotime.parse('2000-01-01 12:00:00+00:00'), dt)
        self.assertEqual(isotime.parse('2000-01-01 12:00:00.000000Z'), dt)
        self.assertEqual(isotime.parse('2000-01-01 12:00:00.000000+00'), dt)
        self.assertEqual(isotime.parse('2000-01-01 12:00:00.000000+0000'), dt)
        self.assertEqual(isotime.parse('2000-01-01 12:00:00.000000+00:00'), dt)
        self.assertEqual(isotime.parse('2000-01-01T12:00:00Z'), dt)
        self.assertEqual(isotime.parse('2000-01-01T12:00:00+00:00'), dt)
        self.assertEqual(isotime.parse('2000-01-01T12:00:00.000000Z'), dt)
        self.assertEqual(isotime.parse('2000-01-01T12:00:00.000000+00:00'), dt)
        self.assertEqual(isotime.parse('2000-01-01T12:00:00.000Z'), dt)

    def test_format(self):
        """format() honors the usec/offset flags and passes strings through."""
        dt = isotime.add_utc_tz(datetime.datetime(2000, 1, 1, 12))
        dt_str_usec_offset = '2000-01-01T12:00:00.000000+00:00'
        dt_str_usec = '2000-01-01T12:00:00.000000Z'
        dt_str_offset = '2000-01-01T12:00:00+00:00'
        dt_str = '2000-01-01T12:00:00Z'
        dt_unicode = u'2000-01-01T12:00:00Z'
        self.assertEqual(isotime.format(dt, usec=True, offset=True), dt_str_usec_offset)
        self.assertEqual(isotime.format(dt, usec=True, offset=False), dt_str_usec)
        self.assertEqual(isotime.format(dt, usec=False, offset=True), dt_str_offset)
        self.assertEqual(isotime.format(dt, usec=False, offset=False), dt_str)
        self.assertEqual(isotime.format(dt_str, usec=False, offset=False), dt_str)
        self.assertEqual(isotime.format(dt_unicode, usec=False, offset=False), dt_unicode)

    def test_format_tz_naive(self):
        """A naive datetime round-trips through format()/parse() as UTC."""
        dt1 = datetime.datetime.utcnow()
        dt2 = isotime.parse(isotime.format(dt1, usec=True))
        self.assertEqual(dt2, isotime.add_utc_tz(dt1))

    def test_format_tz_aware(self):
        """A timezone-aware datetime round-trips unchanged."""
        dt1 = isotime.add_utc_tz(datetime.datetime.utcnow())
        dt2 = isotime.parse(isotime.format(dt1, usec=True))
        self.assertEqual(dt2, dt1)

    def test_format_sec_truncated(self):
        """Formatting without usec truncates to whole seconds."""
        dt1 = isotime.add_utc_tz(datetime.datetime.utcnow())
        dt2 = isotime.parse(isotime.format(dt1, usec=False))
        dt3 = datetime.datetime(dt1.year, dt1.month, dt1.day, dt1.hour, dt1.minute, dt1.second)
        # BUG FIX: this used assertLess(), which flaked whenever utcnow()
        # happened to land exactly on a whole second (microsecond == 0) --
        # truncation is then a no-op and dt2 == dt1.
        self.assertLessEqual(dt2, dt1)
        self.assertEqual(dt2, isotime.add_utc_tz(dt3))
| jtopjian/st2 | st2common/tests/unit/test_isotime.py | Python | apache-2.0 | 5,334 |
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
import json
from intranet.models import User, Project, Part, STATE_CREATED
class Test(TestCase):
    """Integration tests for the intranet JSON API (auth, projects, parts
    and imputations).

    ``setUpClass`` builds a user/project/part fixture shared by every test
    and logs in once, caching the session token in ``self.token_auth``.
    """

    @classmethod
    def setUpClass(self):
        # NOTE(review): Django passes the *class* here; the author named the
        # parameter ``self`` instead of the conventional ``cls`` -- kept so
        # the attribute accesses below stay untouched.
        self.c = Client()
        User.objects.all().delete()
        #Create users
        user = User(
            username = 'user1',
            first_name = 'first',
            last_name = 'last',
            email = 'user@test.es'
        )
        user.set_password('dummy')
        user.save()
        self.user = user
        # BUG FIX: the login below was commented out, yet nearly every test
        # reads ``self.token_auth`` and therefore died with AttributeError.
        # Log in once and cache the token for the whole test case.
        response = self.c.post(reverse('auth-login'), {'username': self.user.username, 'password': 'dummy'})
        json_response = json.loads(response.content)
        self.token_auth = json_response['token_auth']
        self.project = Project(
            name = 'project 1',
            description = 'description project 1',
        )
        self.project.save()
        self.part = Part(
            # BUG FIX: was ``month = 06`` -- an octal-style literal that is a
            # syntax error on Python 3; plain 6 has the same value everywhere.
            month = 6,
            year = 2011,
            employee = self.user,
            state = 1,
        )
        self.part.save()
        # NOTE(review): no Imputation is created (Imputation is not even
        # imported at the top of this module), so test_imputation_list's
        # expectation of exactly one imputation cannot currently hold.
        # Left commented because enabling it requires an import change.
        #self.imputation = Imputation(
        #    part = self.part,
        #    day = 13,
        #    hours = 5,
        #    project = self.project,
        #)
        #self.imputation.save()

    def test_login_logout_ok(self):
        """A valid login returns a token and that token can log out."""
        self.c = Client()
        response = self.c.post(reverse('auth-login'), {'username': self.user.username, 'password':'dummy'})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        token_auth = json_response['token_auth']
        self.c = Client()
        response = self.c.get(reverse('auth-logout'), {'token_auth': token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)

    def test_logout_invalid(self):
        """Logging out without a token is rejected."""
        self.c = Client()
        response = self.c.get(reverse('api:logout'))
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], False)

    def test_project_list(self):
        """The single fixture project is listed."""
        self.c = Client()
        response = self.c.get(reverse('api:project-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['projects']), 1)

    def test_part_list(self):
        """The single fixture part is listed."""
        self.c = Client()
        response = self.c.get(reverse('api:part-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['parts']), 1)

    def test_imputation_list(self):
        """One imputation is expected (see the NOTE in setUpClass)."""
        self.c = Client()
        response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['imputations']), 1)

    def test_imputation_create(self):
        """Create, filter, and delete an imputation via the API."""
        self.c = Client()
        response = self.c.post(reverse('api:imputation-add'), {'project': self.project.id, 'day':3, 'hours':5, 'part':self.part.id, 'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        # Portability fix: dict.has_key() is Python-2-only; assertIn works
        # on both Python 2.7 and 3.
        self.assertIn('id', json_response)
        id_imp = json_response['id']
        #Invalid part
        response = self.c.post(reverse('api:imputation-add'), {'project': self.project.id, 'day':3, 'hours':5, 'part':222, 'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], False)
        #Invalid day
        response = self.c.post(reverse('api:imputation-add'), {'token_auth': self.token_auth, 'day':33, 'part':self.part.id, 'project': self.project.id})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], False)
        response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth, 'day':3, 'part':self.part.id, 'project': self.project.id})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['imputations']), 1)
        response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth, 'day':1, 'part':self.part.id, 'project': self.project.id})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['imputations']), 0)
        #Delete
        response = self.c.get(reverse('api:imputation-delete', args=[id_imp]), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth, 'day':3, 'part':self.part.id, 'project': self.project.id})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['imputations']), 0)

    def test_part_creation(self):
        """Create a part, check its initial state, then delete it."""
        self.c = Client()
        response = self.c.post(reverse('api:part-add'), {'month': 3, 'year':2008, 'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        # Portability fix: has_key() replaced (see test_imputation_create).
        self.assertIn('id', json_response)
        id_part = json_response['id']
        response = self.c.get(reverse('api:part-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['parts']), 2)
        parts = json_response['parts']
        for part in parts:
            if part['id'] == id_part:
                self.assertEqual(part['state'], STATE_CREATED)
        response = self.c.get(reverse('api:part-delete', args=[id_part]), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(json_response['valid'], True)
        response = self.c.get(reverse('api:part-list'), {'token_auth': self.token_auth})
        self.assertEqual(response.status_code,200)
        json_response = json.loads(response.content)
        self.assertEqual(len(json_response['parts']), 1)
| fatihzkaratana/intranet | backend/intranet/tests/api.py | Python | apache-2.0 | 7,125 |
from rest_framework.routers import SimpleRouter
from api.formattedmetadatarecords import views
# Router exposing the FormattedMetadataRecord endpoints under
# /formattedmetadatarecords/.
router = SimpleRouter()
# basename is given explicitly -- presumably because the viewset does not
# define a ``queryset`` for the router to derive it from (TODO confirm).
router.register(r'formattedmetadatarecords', views.FormattedMetadataRecordViewSet, basename='formattedmetadatarecord')
urlpatterns = router.urls
| CenterForOpenScience/SHARE | api/formattedmetadatarecords/urls.py | Python | apache-2.0 | 266 |
# -*- coding: utf-8 -*-
#
# Copyright 2015 Thomas Amland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple

# Unique sentinel messages understood by the actor runtime; compared by
# identity, so object() instances are sufficient.
Start = object()
Resume = object()

# BUG FIX / cleanup: Restart and Terminate were first bound to object()
# sentinels and then immediately re-bound to the strings below, so the
# sentinel assignments were dead code.  Only the (final) string values are
# kept; observable values are unchanged.
Restart = "restart"
Terminate = "terminate"

# Structured system messages exchanged between actors and their supervisors.
Failure = namedtuple('Failure', ['ref', 'exception', 'traceback'])
Supervise = namedtuple('Supervise', ['ref'])
Terminated = namedtuple('Terminated', ['ref'])
DeadLetter = namedtuple('DeadLetter', ['message', 'sender', 'recipient'])
| tamland/python-actors | actors/internal/messages.py | Python | apache-2.0 | 995 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import os
from sqoop.conf import SQOOP_CONF_DIR
LOG = logging.getLogger(__name__)
_PROPERTIES_DICT = None
_CONF_SQOOP_AUTHENTICATION_TYPE = 'org.apache.sqoop.security.authentication.type'
def reset():
    """Drop the cached properties so the next get_props() re-reads the file."""
    global _PROPERTIES_DICT
    _PROPERTIES_DICT = None
def get_props():
    """Return the parsed sqoop.properties mapping, loading it on first use.

    The result is memoized in the module-level ``_PROPERTIES_DICT``; call
    reset() to force a re-read.
    """
    if _PROPERTIES_DICT is None:
        _parse_properties()
    return _PROPERTIES_DICT
def has_sqoop_has_security():
    """Return True when the Sqoop server is configured for Kerberos auth."""
    auth_type = get_props().get(_CONF_SQOOP_AUTHENTICATION_TYPE, 'SIMPLE')
    return auth_type.upper() == 'KERBEROS'
def _parse_properties():
    # Load <SQOOP_CONF_DIR>/sqoop.properties into the module-level cache.
    global _PROPERTIES_DICT
    properties_file = os.path.join(SQOOP_CONF_DIR.get(), 'sqoop.properties')
    _PROPERTIES_DICT = _parse_site(properties_file)
def _parse_site(site_path):
try:
with open(site_path, 'r') as f:
data = f.read()
except IOError as err:
if err.errno != errno.ENOENT:
LOG.error('Cannot read from "%s": %s' % (site_path, err))
return
data = ""
return dict([line.split('=', 1) for line in data.split('\n') if '=' in line and not line.startswith('#')])
| cloudera/hue | apps/sqoop/src/sqoop/sqoop_properties.py | Python | apache-2.0 | 1,837 |
from a10sdk.common.A10BaseClass import A10BaseClass
class Udp(A10BaseClass):

    """Set UDP STUN timeout for a port range (AXAPI v3).

    Supports CRUD operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.

    :param port_start: Port Range start, 1-65535 (required)
    :param port_end: Port Range end, 1-65535 (required)
    :param timeout: STUN timeout in minutes, 0-60 (default: 2; optional)
    :param uuid: uuid of the object (optional; modify not allowed)
    :param DeviceProxy: device proxy for REST operations and session
        handling, see `common/device_proxy.py`

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/cgnv6/lsn/stun-timeout/udp/{port_start}+{port_end}`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = ["port_start", "port_end"]
        self.b_key = "udp"
        self.a10_url = "/axapi/v3/cgnv6/lsn/stun-timeout/udp/{port_start}+{port_end}"
        # Empty-string placeholders; actual values arrive via kwargs.
        for attr in ("DeviceProxy", "port_start", "port_end", "timeout", "uuid"):
            setattr(self, attr, "")
        for key, value in kwargs.items():
            setattr(self, key, value)
| amwelch/a10sdk-python | a10sdk/core/cgnv6/cgnv6_lsn_stun_timeout_udp.py | Python | apache-2.0 | 1,644 |
#!/usr/bin/env python3
import sys
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input', dest='input',
                    help='the input file')
parser.add_argument('--output', dest='output',
                    help='the output file')
parser.add_argument('--upper', dest='upper', action='store_true', default=False,
                    help='Convert to upper case.')

# Template for the generated C source: a function that just returns 0.
c_templ = '''int %s() {
return 0;
}
'''

options = parser.parse_args(sys.argv[1:])

# BUG FIX: both files were opened without ever being closed; use context
# managers so the handles are released and the output is flushed reliably
# on every Python implementation.
with open(options.input) as f:
    # The first line of the input file names the function to generate.
    funcname = f.readline().strip()
if options.upper:
    funcname = funcname.upper()
with open(options.output, 'w') as f:
    f.write(c_templ % funcname)
| germandiagogomez/meson | test cases/common/98 gen extra/srcgen.py | Python | apache-2.0 | 650 |
from typing import Any, Dict, Mapping, Optional, Tuple
from .exceptions import UnknownUpdateCardAction
# Card-level Trello webhook actions this integration knows how to render.
SUPPORTED_CARD_ACTIONS = [
    u'updateCard',
    u'createCard',
    u'addLabelToCard',
    u'removeLabelFromCard',
    u'addMemberToCard',
    u'removeMemberFromCard',
    u'addAttachmentToCard',
    u'addChecklistToCard',
    u'commentCard',
    u'updateCheckItemStateOnCard',
]

# Actions that are recognized but deliberately produce no message.
IGNORED_CARD_ACTIONS = [
    'createCheckItem',
]

# Resolved action identifiers.  Most mirror a raw webhook action name; the
# CHANGE_*/SET_*/REMOVE_* values are synthesized from 'updateCard' payloads
# by get_proper_action().
CREATE = u'createCard'
CHANGE_LIST = u'changeList'
CHANGE_NAME = u'changeName'
SET_DESC = u'setDesc'
CHANGE_DESC = u'changeDesc'
REMOVE_DESC = u'removeDesc'
ARCHIVE = u'archiveCard'
REOPEN = u'reopenCard'
SET_DUE_DATE = u'setDueDate'
CHANGE_DUE_DATE = u'changeDueDate'
REMOVE_DUE_DATE = u'removeDueDate'
ADD_LABEL = u'addLabelToCard'
REMOVE_LABEL = u'removeLabelFromCard'
ADD_MEMBER = u'addMemberToCard'
REMOVE_MEMBER = u'removeMemberFromCard'
ADD_ATTACHMENT = u'addAttachmentToCard'
ADD_CHECKLIST = u'addChecklistToCard'
COMMENT = u'commentCard'
UPDATE_CHECK_ITEM_STATE = u'updateCheckItemStateOnCard'

# Markdown link used to reference the affected card in every message.
TRELLO_CARD_URL_TEMPLATE = u'[{card_name}]({card_url})'

# Message template per resolved action; placeholders are filled in by the
# get_*_body() builders via fill_appropriate_message_content().
ACTIONS_TO_MESSAGE_MAPPER = {
    CREATE: u'created {card_url_template}.',
    CHANGE_LIST: u'moved {card_url_template} from {old_list} to {new_list}.',
    CHANGE_NAME: u'renamed the card from "{old_name}" to {card_url_template}.',
    SET_DESC: u'set description for {card_url_template} to:\n~~~ quote\n{desc}\n~~~\n',
    CHANGE_DESC: (u'changed description for {card_url_template} from\n' +
                  '~~~ quote\n{old_desc}\n~~~\nto\n~~~ quote\n{desc}\n~~~\n'),
    REMOVE_DESC: u'removed description from {card_url_template}.',
    ARCHIVE: u'archived {card_url_template}.',
    REOPEN: u'reopened {card_url_template}.',
    SET_DUE_DATE: u'set due date for {card_url_template} to {due_date}.',
    CHANGE_DUE_DATE: u'changed due date for {card_url_template} from {old_due_date} to {due_date}.',
    REMOVE_DUE_DATE: u'removed the due date from {card_url_template}.',
    ADD_LABEL: u'added a {color} label with \"{text}\" to {card_url_template}.',
    REMOVE_LABEL: u'removed a {color} label with \"{text}\" from {card_url_template}.',
    ADD_MEMBER: u'added {member_name} to {card_url_template}.',
    REMOVE_MEMBER: u'removed {member_name} from {card_url_template}.',
    ADD_ATTACHMENT: u'added [{attachment_name}]({attachment_url}) to {card_url_template}.',
    ADD_CHECKLIST: u'added the {checklist_name} checklist to {card_url_template}.',
    COMMENT: u'commented on {card_url_template}:\n~~~ quote\n{text}\n~~~\n',
    UPDATE_CHECK_ITEM_STATE: u'{action} **{item_name}** in **{checklist_name}** ({card_url_template}).'
}
def prettify_date(date_string: str) -> str:
    """Turn Trello's ISO timestamp into a human-friendly UTC string."""
    for old, new in (('T', ' '), ('.000', ''), ('Z', ' UTC')):
        date_string = date_string.replace(old, new)
    return date_string
def process_card_action(payload: Mapping[str, Any], action_type: str) -> Optional[Tuple[str, str]]:
    """Resolve the concrete action and build (subject, body).

    Returns None when the resolved action is one we deliberately ignore.
    """
    resolved = get_proper_action(payload, action_type)
    if resolved is None:
        return None
    return get_subject(payload), get_body(payload, resolved)
def get_proper_action(payload: Mapping[str, Any], action_type: str) -> Optional[str]:
    """Map a raw webhook action to the specific action constant to render.

    For 'updateCard', Trello only says "the card changed"; data['old'] holds
    the previous values of exactly the fields that changed, so the checks
    below sniff which field that was.  Returns None for changes we ignore
    (reordering within a list); any other action type passes through as-is.
    """
    if action_type == 'updateCard':
        data = get_action_data(payload)
        old_data = data['old']
        card_data = data['card']
        if data.get('listBefore'):
            return CHANGE_LIST
        if old_data.get('name'):
            return CHANGE_NAME
        # Previous desc was the empty string -> a description was first set.
        if old_data.get('desc') == "":
            return SET_DESC
        if old_data.get('desc'):
            if card_data.get('desc') == "":
                return REMOVE_DESC
            else:
                return CHANGE_DESC
        # Previous due is explicitly None (not merely absent, hence the
        # False default) -> a due date was newly set.
        if old_data.get('due', False) is None:
            return SET_DUE_DATE
        if old_data.get('due'):
            if card_data.get('due', False) is None:
                return REMOVE_DUE_DATE
            else:
                return CHANGE_DUE_DATE
        if old_data.get('closed') is False and card_data.get('closed'):
            return ARCHIVE
        if old_data.get('closed') and card_data.get('closed') is False:
            return REOPEN
        # we don't support events for when a card is moved up or down
        # within a single list
        if old_data.get('pos'):
            return None
        raise UnknownUpdateCardAction()
    return action_type
def get_subject(payload: Mapping[str, Any]) -> str:
    """Topic for the Zulip message: the Trello board's name."""
    return get_action_data(payload)['board'].get('name')

def get_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Message body: '<actor full name> <action-specific text>'."""
    message_body = ACTIONS_TO_FILL_BODY_MAPPER[action_type](payload, action_type)
    creator = payload['action']['memberCreator'].get('fullName')
    return u'{full_name} {rest}'.format(full_name=creator, rest=message_body)
def get_added_checklist_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for addChecklistToCard: names the checklist that was added."""
    data = {
        'checklist_name': get_action_data(payload)['checklist'].get('name'),
    }
    return fill_appropriate_message_content(payload, action_type, data)

def get_update_check_item_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for updateCheckItemStateOnCard: an item was (un)checked."""
    action = get_action_data(payload)
    state = action['checkItem']['state']
    data = {
        # Trello reports the state as 'complete' or 'incomplete'.
        'action': 'checked' if state == 'complete' else 'unchecked',
        'checklist_name': action['checklist'].get('name'),
        'item_name': action['checkItem'].get('name'),
    }
    return fill_appropriate_message_content(payload, action_type, data)

def get_added_attachment_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for addAttachmentToCard: links the new attachment."""
    data = {
        'attachment_url': get_action_data(payload)['attachment'].get('url'),
        'attachment_name': get_action_data(payload)['attachment'].get('name'),
    }
    return fill_appropriate_message_content(payload, action_type, data)

def get_updated_card_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for a card moved between lists (CHANGE_LIST)."""
    data = {
        # NOTE(review): 'card_name' is not referenced by the CHANGE_LIST
        # template (the card link comes from 'card_url_template', filled in
        # by fill_appropriate_message_content); harmless extra context.
        'card_name': get_card_name(payload),
        'old_list': get_action_data(payload)['listBefore'].get('name'),
        'new_list': get_action_data(payload)['listAfter'].get('name'),
    }
    return fill_appropriate_message_content(payload, action_type, data)
def get_renamed_card_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for a card rename (CHANGE_NAME).

    The rendered message only uses 'old_name' (the new name appears via the
    card link), so fixing 'new_name' below does not change current output.
    """
    data = {
        'old_name': get_action_data(payload)['old'].get('name'),
        # BUG FIX: the new name lives on the updated card, not on the 'old'
        # snapshot -- the original read 'old' for both keys.
        'new_name': get_action_data(payload)['card'].get('name'),
    }
    return fill_appropriate_message_content(payload, action_type, data)
def get_added_label_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for add/remove label: the label's color and text.

    Shared by ADD_LABEL and REMOVE_LABEL (both templates use the same keys).
    """
    data = {
        # For label actions Trello puts the color in data['value'].
        'color': get_action_data(payload).get('value'),
        'text': get_action_data(payload).get('text'),
    }
    return fill_appropriate_message_content(payload, action_type, data)

def get_managed_member_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for add/remove member: the member's full name."""
    data = {
        'member_name': payload['action']['member'].get('fullName')
    }
    return fill_appropriate_message_content(payload, action_type, data)

def get_comment_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for commentCard: quotes the comment text."""
    data = {
        'text': get_action_data(payload)['text'],
    }
    return fill_appropriate_message_content(payload, action_type, data)

def get_managed_due_date_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for setting/removing a due date (only 'due_date' is needed)."""
    data = {
        'due_date': prettify_date(get_action_data(payload)['card'].get('due'))
    }
    return fill_appropriate_message_content(payload, action_type, data)

def get_changed_due_date_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for changing a due date (shows old and new dates)."""
    data = {
        'due_date': prettify_date(get_action_data(payload)['card'].get('due')),
        'old_due_date': prettify_date(get_action_data(payload)['old'].get('due'))
    }
    return fill_appropriate_message_content(payload, action_type, data)
def get_managed_desc_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for setting/removing a card description.

    BUG FIX: the description used to be passed through prettify_date(),
    whose replacements ('T' -> ' ', '.000' -> '', 'Z' -> ' UTC') are only
    meaningful for timestamps and silently mangled any description
    containing those substrings.  Descriptions are free text; use them
    verbatim.
    """
    data = {
        'desc': get_action_data(payload)['card']['desc']
    }
    return fill_appropriate_message_content(payload, action_type, data)

def get_changed_desc_body(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for editing a card description (shows old and new text).

    Same fix as get_managed_desc_body: don't timestamp-prettify free text.
    """
    data = {
        'desc': get_action_data(payload)['card']['desc'],
        'old_desc': get_action_data(payload)['old']['desc']
    }
    return fill_appropriate_message_content(payload, action_type, data)
def get_body_by_action_type_without_data(payload: Mapping[str, Any], action_type: str) -> str:
    """Body for actions whose template only needs the card link itself."""
    return fill_appropriate_message_content(payload, action_type)

def fill_appropriate_message_content(payload: Mapping[str, Any],
                                     action_type: str,
                                     data: Optional[Dict[str, Any]]=None) -> str:
    """Render the template for action_type with the given data.

    The 'card_url_template' placeholder is injected here unless the caller
    already provided one.
    """
    data = {} if data is None else data
    data['card_url_template'] = data.get('card_url_template', get_filled_card_url_template(payload))
    message_body = get_message_body(action_type)
    return message_body.format(**data)
def get_filled_card_url_template(payload: Mapping[str, Any]) -> str:
    """Markdown link '[card name](card url)' for the affected card."""
    return TRELLO_CARD_URL_TEMPLATE.format(card_name=get_card_name(payload), card_url=get_card_url(payload))

def get_card_url(payload: Mapping[str, Any]) -> str:
    """Permalink built from the card's shortLink id."""
    return u'https://trello.com/c/{}'.format(get_action_data(payload)['card'].get('shortLink'))

def get_message_body(action_type: str) -> str:
    """Template string registered for the given resolved action."""
    return ACTIONS_TO_MESSAGE_MAPPER[action_type]

def get_card_name(payload: Mapping[str, Any]) -> str:
    """The card's display name."""
    return get_action_data(payload)['card'].get('name')
def get_action_data(payload: Mapping[str, Any]) -> Mapping[str, Any]:
    """Shortcut for the 'data' sub-object of the webhook's action."""
    action = payload['action']
    return action.get('data')
# Dispatch table: resolved action name -> builder that assembles the template
# data for that action.  Every value get_proper_action() can return (other
# than None) must have an entry here.
ACTIONS_TO_FILL_BODY_MAPPER = {
    CREATE: get_body_by_action_type_without_data,
    CHANGE_LIST: get_updated_card_body,
    CHANGE_NAME: get_renamed_card_body,
    SET_DESC: get_managed_desc_body,
    CHANGE_DESC: get_changed_desc_body,
    REMOVE_DESC: get_body_by_action_type_without_data,
    ARCHIVE: get_body_by_action_type_without_data,
    REOPEN: get_body_by_action_type_without_data,
    SET_DUE_DATE: get_managed_due_date_body,
    CHANGE_DUE_DATE: get_changed_due_date_body,
    REMOVE_DUE_DATE: get_body_by_action_type_without_data,
    ADD_LABEL: get_added_label_body,
    REMOVE_LABEL: get_added_label_body,
    ADD_MEMBER: get_managed_member_body,
    REMOVE_MEMBER: get_managed_member_body,
    ADD_ATTACHMENT: get_added_attachment_body,
    ADD_CHECKLIST: get_added_checklist_body,
    COMMENT: get_comment_body,
    UPDATE_CHECK_ITEM_STATE: get_update_check_item_body,
}
| tommyip/zulip | zerver/webhooks/trello/view/card_actions.py | Python | apache-2.0 | 10,437 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import array
import math
import os
import random
import sys
import subprocess
def create_graph(nodes, edges, verbose):
    # Uniform random directed graph: each edge picks independent random
    # endpoints; lengths are exponentially distributed (mean 1.0).
    # Self-loops and duplicate edges are possible by construction.
    # NOTE(review): Python 2 code (xrange).
    if verbose: print('Creating random graph with {} nodes and {} edges...'.format(nodes, edges))
    n1 = [ random.randint(0, nodes - 1) for x in xrange(edges) ]
    n2 = [ random.randint(0, nodes - 1) for x in xrange(edges) ]
    length = [ random.expovariate(1.0) for x in xrange(edges) ]
    return { 'nodes': nodes,
             'edges': edges,
             'n1': n1,
             'n2': n2,
             'length': length }
def compute_subgraphs(n, p):
    """Split nodes 0..n-1 into p contiguous, inclusive (start, end) ranges.

    The first n % p ranges get one extra node so sizes differ by at most 1.

    BUG FIX (portability): the original relied on Python 2 integer '/'
    division and xrange; under Python 3 it returned float bounds.  Given
    the file's ``from __future__ import print_function`` intent, use
    divmod/range so the result is integral on both versions.
    """
    base, extra = divmod(n, p)
    return [(x * base + min(x, extra), (x + 1) * base - 1 + min(x + 1, extra))
            for x in range(p)]
def find_subgraph_index(n, subgraphs):
    """Return the index of the unique (start, end) range containing node n.

    Asserts that exactly one range matches.  (Portability: uses enumerate
    instead of the Python-2-only zip(xrange(...)) idiom.)
    """
    matches = [i for i, (start, end) in enumerate(subgraphs) if start <= n <= end]
    assert len(matches) == 1
    return matches[0]
def find_subgraph(n, subgraphs):
    # Convenience wrapper: the (start, end) range that contains node n.
    return subgraphs[find_subgraph_index(n, subgraphs)]
def create_clustered_DAG_graph(nodes, edges, nsubgraphs, cluster_factor, verbose):
    """Random mostly-forward graph clustered into nsubgraphs node ranges.

    With probability cluster_factor% an edge stays inside its source node's
    subgraph (and may point backwards within it); otherwise it targets a
    node with index >= its source, giving the global DAG-like structure.
    """
    if verbose: print('Creating clustered DAG graph with {} nodes and {} edges...'.format(nodes, edges))
    subgraphs = compute_subgraphs(nodes, nsubgraphs)
    def make_edge():
        n1 = random.randint(0, nodes - 1)
        if random.randint(1, 100) <= cluster_factor:
            s = find_subgraph(n1, subgraphs)
            n2 = random.randint(*s)
        else:
            n2 = random.randint(min(n1, nodes-1), nodes-1)
        return (n1, n2)
    n1, n2 = zip(*(make_edge() for x in xrange(edges)))
    length = [random.expovariate(1.0) for x in xrange(edges)]
    return { 'nodes': nodes,
             'edges': edges,
             'n1': n1,
             'n2': n2,
             'length': length }
def create_clustered_geometric_graph(nodes, edges, nsubgraphs, cluster_factor, verbose):
    """Clustered geometric graph on the unit square.

    nsubgraphs must be a perfect square: the square is tiled into
    sqrt(nsubgraphs) x sqrt(nsubgraphs) blocks, one node range per block.
    Edge lengths are the Euclidean distance between endpoints plus a small
    exponential perturbation.
    """
    if verbose: print('Creating clustered geometric graph with {} nodes and {} edges...'.format(nodes, edges))
    blocks = int(math.sqrt(nsubgraphs))
    assert blocks**2 == nsubgraphs
    # ((x_lo, x_hi), (y_lo, y_hi)) bounding box of each block.
    bounds = [((1.0*(i%blocks)/blocks, 1.0*(i%blocks + 1)/blocks),
               (1.0*(i/blocks)/blocks, 1.0*(i/blocks + 1)/blocks))
              for i in xrange(nsubgraphs)]
    subgraphs = compute_subgraphs(nodes, nsubgraphs)
    # One uniform-random position inside its block for every node.
    pos = [(random.uniform(*x), random.uniform(*y))
           for (lo, hi), (x, y) in zip(subgraphs, bounds)
           for _ in xrange(lo, hi+1)]
    def make_edge():
        n1 = random.randint(0, nodes - 1)
        if random.randint(1, 100) <= cluster_factor:
            s = find_subgraph(n1, subgraphs)
            n2 = random.randint(*s)
        else:
            # Cross-block edge: target a horizontally or vertically adjacent
            # block, wrapping around at the grid edge.
            i = find_subgraph_index(n1, subgraphs)
            ix, iy = i%blocks, i/blocks
            if random.randint(0, 1) == 0:
                s2 = subgraphs[((ix+1)%blocks) + iy*blocks]
            else:
                s2 = subgraphs[ix + ((iy+1)%blocks)*blocks]
            n2 = random.randint(*s2)
        return (n1, n2)
    n1, n2 = zip(*(make_edge() for x in xrange(edges)))
    # xlen is the Euclidean distance; the expovariate term adds noise that
    # shrinks as the distance grows (rate 1000/xlen), with a guard for
    # near-zero distances.
    length = [xlen + random.expovariate(1000/xlen if xlen > 0.0001 else 1)
              for x in xrange(edges)
              for xlen in [math.sqrt(sum((a - b)**2 for a, b in zip(pos[n1[x]], pos[n2[x]])))]]
    return { 'nodes': nodes,
             'edges': edges,
             'n1': n1,
             'n2': n2,
             'length': length }
def metis_graph(g, metis, subgraphs, outdir, verbose):
    """Partition g with the external gpmetis binary and relabel nodes so
    that each partition's nodes receive contiguous ids."""
    if verbose: print('Running METIS...')
    with open(os.path.join(outdir, 'graph.metis'), 'wb') as f:
        # METIS header: node count, edge count, format flags.
        f.write('{:3d} {:3d} 000\n'.format(g['nodes'], g['edges']))
        for n in xrange(g['nodes']):
            # Adjacency line for node n: 1-based neighbor id and weight 1.
            f.write(' '.join('{:3d} 1'.format(n2+1) for n1, n2 in zip(g['n1'], g['n2']) if n1 == n))
            f.write('\n')
    subprocess.check_call([metis, os.path.join(outdir, 'graph.metis'), str(subgraphs)])
    with open(os.path.join(outdir, 'graph.metis.part.{}'.format(subgraphs)), 'rb') as f:
        colors = [int(x) for x in f.read().split()]
    # mapping: old node id -> new id (its rank when nodes are sorted by
    # partition color), then rewrite both endpoint arrays in place.
    mapping = dict(zip(sorted(xrange(g['nodes']), key = lambda x: colors[x]), range(g['nodes'])))
    g['n1'] = [mapping[g['n1'][x]] for x in xrange(g['edges'])]
    g['n2'] = [mapping[g['n2'][x]] for x in xrange(g['edges'])]
def sort_graph(g, verbose):
    """Reorder g's parallel edge arrays in place, sorted by (n1, n2).

    BUG FIX: the original built ``mapping`` as original-index -> rank and
    then indexed ``old[mapping[x]]`` (i.e. old[rank(x)]), applying the
    permutation in the wrong direction -- the result was a consistent
    shuffle of the edges but generally NOT sorted (it only looked right
    when the permutation happened to be an involution).  Indexing by the
    sorted order directly fixes this.
    """
    if verbose: print('Sorting graph...')
    order = sorted(range(g['edges']), key=lambda x: (g['n1'][x], g['n2'][x]))
    g['n1'] = [g['n1'][i] for i in order]
    g['n2'] = [g['n2'][i] for i in order]
    g['length'] = [g['length'][i] for i in order]
def solve_graph(g, source, verbose):
    """Single-source shortest paths via Bellman-Ford-style relaxation.

    Repeatedly sweeps every edge, relaxing distances, until a full sweep
    yields no improvement.  Returns the list of distances from ``source``;
    unreachable nodes keep the 1e100 sentinel.  ``parent`` tracks each
    node's predecessor for debugging but is not returned.

    (Portability fix: xrange -> range, matching the file's
    ``from __future__ import print_function`` Python-3 intent; the dead
    commented-out debug dumps were removed.)
    """
    if verbose: print('Solving graph...')
    parent = [ -1 for x in range(g['nodes']) ]
    dist = [ 1e100 for x in range(g['nodes']) ]
    dist[source] = 0
    while True:
        count = 0
        for n1, n2, length in zip(g['n1'], g['n2'], g['length']):
            c2 = length + dist[n1]
            if c2 < dist[n2]:
                dist[n2] = c2
                parent[n2] = n1
                count += 1
        if count == 0:
            break
    return dist
def write_graph(g, problems, outdir, verbose):
    """Write the graph binary + manifest and precompute reference solutions.

    Emits edges.dat (all n1 ints, then all n2 ints, then all lengths as
    floats), graph.dot (back edges drawn dotted), the graph.txt manifest,
    and for ``problems`` random source nodes a result_<s>.dat holding the
    reference distance array.
    """
    if verbose: print('Writing graph...')
    with open(os.path.join(outdir, 'edges.dat'), 'wb') as f:
        array.array('i', g['n1']).tofile(f)
        array.array('i', g['n2']).tofile(f)
        array.array('f', g['length']).tofile(f)
    with open(os.path.join(outdir, 'graph.dot'), 'wb') as f:
        f.write('digraph {\n')
        f.write('\n'.join('{} -> {} [ style = "{}"]'.format(e1, e2, 'dotted' if e2 <= e1 else 'solid') for e1, e2 in zip(g['n1'], g['n2'])))
        f.write('\n}\n')
    with open(os.path.join(outdir, 'graph.txt'), 'w') as f:
        f.write('nodes {:d}\n'.format(g['nodes']))
        f.write('edges {:d}\n'.format(g['edges']))
        f.write('data edges.dat\n')
        sources = random.sample(xrange(g['nodes']), problems)
        for s in sources:
            # NOTE(review): solve_graph returns distances despite the
            # 'parents' name here (it ends with ``return dist``).
            parents = solve_graph(g, s, verbose)
            with open(os.path.join(outdir, 'result_{:d}.dat'.format(s)), 'wb') as f2:
                array.array('f', parents).tofile(f2)
            f.write('source {:d} result_{:d}.dat\n'.format(s, s))
if __name__ == '__main__':
    # Command-line driver: build the requested graph type, optionally
    # partition it with METIS, then sort and write it with reference
    # solutions.
    p = argparse.ArgumentParser(description='graph generator')
    p.add_argument('--nodes', '-n', type=int, default=10)
    p.add_argument('--edges', '-e', type=int, default=20)
    p.add_argument('--type', '-t', default='random', choices=['random', 'clustered_DAG', 'clustered_geometric'])
    p.add_argument('--subgraphs', '-s', type=int, default=1)
    p.add_argument('--cluster-factor', '-c', type=int, default=95)
    p.add_argument('--problems', '-p', type=int, default=1)
    p.add_argument('--randseed', '-r', type=int, default=12345)
    p.add_argument('--metis-path', default='./metis-install/bin/gpmetis')
    p.add_argument('--metis', '-m', action='store_true')
    p.add_argument('--outdir', '-o', required=True)
    p.add_argument('--verbose', '-v', action='store_true')
    args = p.parse_args()
    random.seed(args.randseed)
    if args.type == 'random':
        G = create_graph(args.nodes, args.edges, args.verbose)
    elif args.type == 'clustered_DAG':
        G = create_clustered_DAG_graph(args.nodes, args.edges, args.subgraphs, args.cluster_factor, args.verbose)
    elif args.type == 'clustered_geometric':
        G = create_clustered_geometric_graph(args.nodes, args.edges, args.subgraphs, args.cluster_factor, args.verbose)
    else:
        # BUG FIX: this read ``assert false`` (lowercase), which would raise
        # NameError rather than AssertionError if ever reached.  Unreachable
        # in practice because argparse 'choices' restricts --type.
        assert False, 'unhandled graph type: {}'.format(args.type)
    try:
        os.mkdir(args.outdir)
    except OSError:
        # BUG FIX (narrowed): was a bare ``except: pass`` that hid every
        # error.  Only OS-level failures (typically "already exists") are
        # ignored; the isdir assert below still catches a missing outdir.
        pass
    assert os.path.isdir(args.outdir)
    if args.metis:
        assert os.path.isfile(args.metis_path)
        metis_graph(G, args.metis_path, args.subgraphs, args.outdir, args.verbose)
    sort_graph(G, args.verbose)
    write_graph(G, args.problems, args.outdir, args.verbose)
| chuckatkins/legion | language/examples/mssp/gen_graph.py | Python | apache-2.0 | 8,232 |
#------------------------------------------------------------------------------
# type_converter.py (Section 6.2)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Copyright 2017, 2018, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import decimal
import db_config
con = cx_Oracle.connect(db_config.user, db_config.pw, db_config.dsn)
cur = con.cursor()
def ReturnNumbersAsDecimal(cursor, name, defaultType, size, precision, scale):
if defaultType == cx_Oracle.NUMBER:
return cursor.var(str, 9, cursor.arraysize, outconverter = decimal.Decimal)
cur.outputtypehandler = ReturnNumbersAsDecimal
for value, in cur.execute("select 0.1 from dual"):
print("Value:", value, "* 3 =", value * 3)
| cloudera/hue | desktop/core/ext-py/cx_Oracle-6.4.1/samples/tutorial/solutions/type_converter.py | Python | apache-2.0 | 959 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Chrome Cache files parser."""
import unittest
from plaso.parsers import chrome_cache
from tests.parsers import test_lib
class ChromeCacheParserTest(test_lib.ParserTestCase):
  """Tests for the Chrome Cache files parser."""

  def testParse(self):
    """Tests the Parse function."""
    cache_parser = chrome_cache.ChromeCacheParser()
    storage_writer = self._ParseFile(
        ['chrome_cache', 'index'], cache_parser)

    # The test fixture is expected to yield exactly 217 events and no
    # extraction or recovery warnings.
    self.assertEqual(
        storage_writer.GetNumberOfAttributeContainers('event'), 217)
    self.assertEqual(
        storage_writer.GetNumberOfAttributeContainers('extraction_warning'),
        0)
    self.assertEqual(
        storage_writer.GetNumberOfAttributeContainers('recovery_warning'), 0)

    # Spot-check the first event's values.
    expected_event_values = {
        'data_type': 'chrome:cache:entry',
        'date_time': '2014-04-30 16:44:36.226091',
        'original_url': (
            'https://s.ytimg.com/yts/imgbin/player-common-vfliLfqPT.webp')}

    first_event = list(storage_writer.GetEvents())[0]
    self.CheckEventValues(storage_writer, first_event, expected_event_values)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| joachimmetz/plaso | tests/parsers/chrome_cache.py | Python | apache-2.0 | 1,283 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase):
  """Tests core tf.Variable behavior: construction, initialization,
  assignment, operator overloading, collections and serialization."""
  def testInitialization(self):
    with self.test_session():
      var0 = variables.Variable(0.0)
      self.assertEqual("Variable:0", var0.name)
      self.assertEqual([], var0.get_shape())
      self.assertEqual([], var0.get_shape())
      self.assertEqual([], var0.shape)
      var1 = variables.Variable(1.1)
      self.assertEqual("Variable_1:0", var1.name)
      self.assertEqual([], var1.get_shape())
      self.assertEqual([], var1.get_shape())
      self.assertEqual([], var1.shape)
      # Reading before running the initializer must fail.
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        var0.eval()
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        var1.eval()
      variables.global_variables_initializer().run()
      self.assertAllClose(0.0, var0.eval())
      self.assertAllClose(1.1, var1.eval())
  def testInitializationOrder(self):
    # Variables whose initial values depend on other variables must be
    # initializable in one global_variables_initializer() run via
    # initialized_value().
    with self.test_session():
      rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
      self.assertEqual("rnd:0", rnd.name)
      self.assertEqual([3, 6], rnd.get_shape())
      self.assertEqual([3, 6], rnd.get_shape())
      self.assertEqual([3, 6], rnd.shape)
      dep = variables.Variable(rnd.initialized_value(), name="dep")
      self.assertEqual("dep:0", dep.name)
      self.assertEqual([3, 6], dep.get_shape())
      self.assertEqual([3, 6], dep.get_shape())
      self.assertEqual([3, 6], dep.shape)
      # Currently have to set the shape manually for Add.
      added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
      added_val.set_shape(rnd.get_shape())
      depdep = variables.Variable(added_val, name="depdep")
      self.assertEqual("depdep:0", depdep.name)
      self.assertEqual([3, 6], depdep.get_shape())
      self.assertEqual([3, 6], depdep.get_shape())
      self.assertEqual([3, 6], depdep.shape)
      variables.global_variables_initializer().run()
      self.assertAllClose(rnd.eval(), dep.eval())
      self.assertAllClose(rnd.eval() + dep.eval() + 2.0, depdep.eval())
  def testIterable(self):
    # Variables are not iterable; iterating must raise TypeError.
    with self.assertRaisesRegexp(TypeError, "not iterable"):
      for _ in variables.Variable(0.0):
        pass
    with self.assertRaisesRegexp(TypeError, "not iterable"):
      for _ in variables.Variable([0.0, 1.0]):
        pass
  def testAssignments(self):
    with self.test_session():
      var = variables.Variable(0.0)
      plus_one = var.assign_add(1.0)
      minus_one = var.assign_sub(2.0)
      four = var.assign(4.0)
      variables.global_variables_initializer().run()
      self.assertAllClose(0.0, var.eval())
      # Each assign op mutates the variable when evaluated and returns
      # the new value.
      self.assertAllClose(1.0, plus_one.eval())
      self.assertAllClose(1.0, var.eval())
      self.assertAllClose(-1.0, minus_one.eval())
      self.assertAllClose(-1.0, var.eval())
      self.assertAllClose(4.0, four.eval())
      self.assertAllClose(4.0, var.eval())
  def testResourceAssignments(self):
    # Same as testAssignments but for the resource-variable implementation.
    with self.test_session(use_gpu=True):
      var = resource_variable_ops.ResourceVariable(0.0)
      plus_one = var.assign_add(1.0)
      minus_one = var.assign_sub(2.0)
      four = var.assign(4.0)
      variables.global_variables_initializer().run()
      self.assertAllClose(0.0, var.eval())
      plus_one.eval()
      self.assertAllClose(1.0, var.eval())
      minus_one.eval()
      self.assertAllClose(-1.0, var.eval())
      four.eval()
      self.assertAllClose(4.0, var.eval())
  def testZeroSizeStringAssign(self):
    with self.test_session() as sess:
      array = variables.Variable(
          initial_value=array_ops.zeros((0,), dtype=dtypes.string),
          name="foo",
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES])
      sess.run(variables.local_variables_initializer())
      old_value = array.value()
      copy_op = array.assign(old_value)
      self.assertEqual([], list(sess.run(copy_op)))
  def _countUpToTest(self, dtype):
    # Shared body for the count_up_to tests below; parameterized on dtype.
    with self.test_session():
      zero = constant_op.constant(0, dtype=dtype)
      var = variables.Variable(zero)
      count_up_to = var.count_up_to(3)
      variables.global_variables_initializer().run()
      self.assertEqual(0, var.eval())
      self.assertEqual(0, count_up_to.eval())
      self.assertEqual(1, var.eval())
      self.assertEqual(1, count_up_to.eval())
      self.assertEqual(2, var.eval())
      self.assertEqual(2, count_up_to.eval())
      self.assertEqual(3, var.eval())
      # Once the limit is reached, further evaluations must error and
      # must not increment past the limit.
      with self.assertRaisesOpError("Reached limit of 3"):
        count_up_to.eval()
      self.assertEqual(3, var.eval())
      with self.assertRaisesOpError("Reached limit of 3"):
        count_up_to.eval()
      self.assertEqual(3, var.eval())
  def testCountUpToInt32(self):
    self._countUpToTest(dtypes.int32)
  def testCountUpToInt64(self):
    self._countUpToTest(dtypes.int64)
  def testControlDepsNone(self):
    with self.test_session():
      c = constant_op.constant(1.0)
      with ops.control_dependencies([c]):
        # d get the control dep.
        d = constant_op.constant(2.0)
        # variables do not.
        var_x = variables.Variable(2.0)
      self.assertEqual([c.op], d.op.control_inputs)
      self.assertEqual([], var_x.initializer.control_inputs)
      self.assertEqual([], var_x.value().op.control_inputs)
      self.assertEqual([], var_x._ref().op.control_inputs)  # pylint: disable=protected-access
  def testControlFlow(self):
    with self.test_session() as sess:
      v0 = variables.Variable(0, name="v0")
      var_dict = {}
      # Call get_variable in each of the cond clauses.
      def var_in_then_clause():
        v1 = variables.Variable(1, name="v1")
        var_dict["v1"] = v1
        return v1 + v0
      def var_in_else_clause():
        v2 = variables.Variable(2, name="v2")
        var_dict["v2"] = v2
        return v2 + v0
      add = control_flow_ops.cond(
          math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
      v1 = var_dict["v1"]
      v2 = var_dict["v2"]
      # We should be able to initialize and run v1 and v2 without initializing
      # v0, even if the variable was created with a control dep on v0.
      sess.run(v1.initializer)
      self.assertEqual([1], sess.run(v1))
      sess.run(v2.initializer)
      self.assertEqual([2], sess.run(v2))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
        sess.run(v0)
      # We should not be able to run 'add' yet.
      with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
        sess.run(add)
      # If we initialize v0 we should be able to run 'add'.
      sess.run(v0.initializer)
      sess.run(add)
  def testControlFlowInitialization(self):
    """Expects an error if an initializer is in a control-flow scope."""
    def cond(i, _):
      return i < 10
    def body(i, _):
      zero = array_ops.zeros([], dtype=dtypes.int32)
      v = variables.Variable(initial_value=zero)
      return (i + 1, v.read_value())
    with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
      control_flow_ops.while_loop(cond, body, [0, 0])
  def testUseVariableAsTensor(self):
    with self.test_session():
      var_x = variables.Variable(2.0)
      var_y = variables.Variable(3.0)
      variables.global_variables_initializer().run()
      self.assertAllClose(2.0, var_x.eval())
      self.assertAllClose(3.0, var_y.eval())
      self.assertAllClose(5.0, math_ops.add(var_x, var_y).eval())
  def testZeroSizeVarSameAsConst(self):
    with self.test_session():
      zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
      zero_size_const = array_ops.ones([2, 0])
      variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
      const_mul = math_ops.matmul(
          zero_size_const, zero_size_const, transpose_b=True)
      variables.global_variables_initializer().run()
      variable_output = variable_mul.eval()
      self.assertAllClose(const_mul.eval(), variable_output)
      self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
  def testCachingDevice(self):
    with self.test_session():
      var = variables.Variable(2.0)
      self.assertEqual(var.device, var.value().device)
      self.assertEqual(var.device, var.initialized_value().device)
      # With caching_device set, reads are placed on the caching device
      # while the variable itself is not.
      var_cached = variables.Variable(2.0, caching_device="/job:foo")
      self.assertFalse(var_cached.device.startswith("/job:foo"))
      self.assertTrue(var_cached.value().device.startswith("/job:foo"))
  def testCollections(self):
    with self.test_session():
      var_x = variables.Variable(2.0)
      var_y = variables.Variable(2.0, trainable=False)
      var_z = variables.Variable(2.0, trainable=True)
      var_t = variables.Variable(
          2.0,
          trainable=True,
          collections=[
              ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
          ])
      self.assertEqual([var_x, var_y, var_z, var_t],
                       variables.global_variables())
      self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
  def testCollectionsWithScope(self):
    with self.test_session():
      with ops.name_scope("scope_1"):
        var_x = variables.Variable(2.0)
      with ops.name_scope("scope_2"):
        var_y = variables.Variable(2.0)
      self.assertEqual([var_x, var_y], variables.global_variables())
      self.assertEqual([var_x], variables.global_variables("scope_1"))
      self.assertEqual([var_y], variables.global_variables("scope_2"))
      self.assertEqual([var_x, var_y], variables.trainable_variables())
      self.assertEqual([var_x], variables.trainable_variables("scope_1"))
      self.assertEqual([var_y], variables.trainable_variables("scope_2"))
  def testOperators(self):
    # Exercises the full operator-overloading surface of Variable:
    # arithmetic, comparisons, logical ops, slicing and matmul.
    with self.test_session():
      var_f = variables.Variable([2.0])
      add = var_f + 0.0
      radd = 1.0 + var_f
      sub = var_f - 1.0
      rsub = 1.0 - var_f
      mul = var_f * 10.0
      rmul = 10.0 * var_f
      div = var_f / 10.0
      rdiv = 10.0 / var_f
      lt = var_f < 3.0
      rlt = 3.0 < var_f
      le = var_f <= 2.0
      rle = 2.0 <= var_f
      gt = var_f > 3.0
      rgt = 3.0 > var_f
      ge = var_f >= 2.0
      rge = 2.0 >= var_f
      neg = -var_f
      abs_v = abs(var_f)
      var_i = variables.Variable([20])
      mod = var_i % 7
      rmod = 103 % var_i
      var_b = variables.Variable([True, False])
      and_v = operator.and_(var_b, [True, True])
      or_v = operator.or_(var_b, [False, True])
      xor_v = operator.xor(var_b, [False, False])
      invert_v = ~var_b
      rnd = np.random.rand(4, 4).astype("f")
      var_t = variables.Variable(rnd)
      slice_v = var_t[2, 0:0]
      var_m = variables.Variable([[2.0, 3.0]])
      matmul = var_m.__matmul__([[10.0], [20.0]])
      rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
      variables.global_variables_initializer().run()
      self.assertAllClose([2.0], add.eval())
      self.assertAllClose([3.0], radd.eval())
      self.assertAllClose([1.0], sub.eval())
      self.assertAllClose([-1.0], rsub.eval())
      self.assertAllClose([20.0], mul.eval())
      self.assertAllClose([20.0], rmul.eval())
      self.assertAllClose([0.2], div.eval())
      self.assertAllClose([5.0], rdiv.eval())
      self.assertAllClose([-2.0], neg.eval())
      self.assertAllClose([2.0], abs_v.eval())
      self.assertAllClose([True], lt.eval())
      self.assertAllClose([False], rlt.eval())
      self.assertAllClose([True], le.eval())
      self.assertAllClose([True], rle.eval())
      self.assertAllClose([False], gt.eval())
      self.assertAllClose([True], rgt.eval())
      self.assertAllClose([True], ge.eval())
      self.assertAllClose([True], rge.eval())
      self.assertAllClose([6], mod.eval())
      self.assertAllClose([3], rmod.eval())
      self.assertAllClose([True, False], and_v.eval())
      self.assertAllClose([True, True], or_v.eval())
      self.assertAllClose([True, False], xor_v.eval())
      self.assertAllClose([False, True], invert_v.eval())
      self.assertAllClose(rnd[2, 0:0], slice_v.eval())
      self.assertAllClose([[80.0]], matmul.eval())
      self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], rmatmul.eval())
  def testSession(self):
    with self.test_session() as sess:
      var = variables.Variable([1, 12])
      variables.global_variables_initializer().run()
      self.assertAllClose([1, 12], sess.run(var))
  def testDevicePlacement(self):
    with self.test_session() as sess:
      with ops.device("/cpu:0"):
        var = variables.Variable([1, 12])
      init_value = var.initialized_value()
      init_op = variables.global_variables_initializer()
      self.assertEqual(var.op.device, init_value.device)
      self.assertEqual(var.op.device, init_op.device)
      sess.run(init_op)
  def testColocation(self):
    # Ops that touch a variable are colocated with it, overriding the
    # ambient device scope.
    with ops.device("/job:ps"):
      var = variables.Variable(0, name="v")
    with ops.device("/job:worker/task:7"):
      assign_op = var.assign(1)
    self.assertDeviceEqual("/job:ps", assign_op.device)
    self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
  def testInitializerFunction(self):
    value = [[-42], [133.7]]
    shape = [2, 1]
    with self.test_session():
      initializer = lambda: constant_op.constant(value)
      v1 = variables.Variable(initializer, dtype=dtypes.float32)
      self.assertEqual(shape, v1.get_shape())
      self.assertEqual(shape, v1.shape)
      self.assertAllClose(value, v1.initial_value.eval())
      with self.assertRaises(errors_impl.FailedPreconditionError):
        v1.eval()
      v2 = variables.Variable(
          math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
      self.assertEqual(v1.get_shape(), v2.get_shape())
      self.assertEqual(v1.shape, v2.shape)
      self.assertAllClose(np.negative(value), v2.initial_value.eval())
      with self.assertRaises(errors_impl.FailedPreconditionError):
        v2.eval()
      variables.global_variables_initializer().run()
      self.assertAllClose(np.negative(value), v2.eval())
  def testConstraintArg(self):
    # A callable constraint is accepted and stored; non-callables raise.
    constraint = lambda x: x
    v = variables.Variable(
        lambda: constant_op.constant(1.),
        constraint=constraint)
    self.assertEqual(v.constraint, constraint)
    constraint = 0
    with self.assertRaises(ValueError):
      v = variables.Variable(
          lambda: constant_op.constant(1.),
          constraint=constraint)
  def testNoRefDataRace(self):
    with self.test_session():
      a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
      b = variables.Variable(a.initialized_value() + 2)
      c = variables.Variable(b.initialized_value() + 2)
      variables.global_variables_initializer().run()
      self.assertAllEqual(a.eval(), [1, 2, 3])
      self.assertAllEqual(b.eval(), [3, 4, 5])
      self.assertAllEqual(c.eval(), [5, 6, 7])
  def testInitializerFunctionDevicePlacement(self):
    with self.test_session():
      initializer = lambda: constant_op.constant(42.0)
      with ops.device("/cpu:100"):
        v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
      expected_device = "/device:CPU:100"
      expected_group_v1 = [b"loc:@v1"]
      self.assertEqual(expected_device, v1.op.device)
      self.assertEqual(expected_group_v1, v1.op.colocation_groups())
      for i in v1.initializer.inputs:
        self.assertEqual(expected_group_v1, i.op.colocation_groups())
      v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
      expected_group_v2 = [b"loc:@v2"]
      self.assertEqual(expected_group_v2, v2.op.colocation_groups())
      for i in v2.initializer.inputs:
        self.assertEqual(expected_group_v2, i.op.colocation_groups())
  def testVariableDefInitializedInstances(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v_def = variables.Variable(
          initial_value=constant_op.constant(3.0)).to_proto()
    with ops.Graph().as_default(), self.test_session() as sess:
      # v describes a VariableDef-based variable without an initial value.
      v = variables.Variable(variable_def=v_def)
      self.assertEqual(3.0, sess.run(v.initialized_value()))
      # initialized_value should not rerun the initializer_op if the variable
      # has already been initialized elsewhere.
      sess.run(v.assign(1.0))
      self.assertEqual(1.0, v.initialized_value().eval())
    v_def.ClearField("initial_value_name")
    with ops.Graph().as_default(), self.test_session() as sess:
      # Restoring a legacy VariableDef proto that does not have
      # initial_value_name set should still work.
      v = variables.Variable(variable_def=v_def)
      # We should also be able to re-export the variable to a new meta graph.
      self.assertProtoEquals(v_def, v.to_proto())
      # But attempts to use initialized_value will result in errors.
      with self.assertRaises(ValueError):
        sess.run(v.initialized_value())
  def testLoad(self):
    with self.test_session():
      var = variables.Variable(np.zeros((5, 5), np.float32))
      variables.global_variables_initializer().run()
      var.load(np.ones((5, 5), np.float32))
      self.assertAllClose(np.ones((5, 5), np.float32), var.eval())
  def testRepr(self):
    var = variables.Variable(np.zeros((5, 5), np.float32), name='noop')
    self.assertEqual(
        "<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
        repr(var))
class IsInitializedTest(test.TestCase):
  """Tests for variables.report_uninitialized_variables."""
  def testNoVars(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      uninited = variables.report_uninitialized_variables()
      self.assertEqual(0, sess.run(uninited).size)
  def testAssertVariablesInitialized(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable([1, 2], name="v")
      w = variables.Variable([3, 4], name="w")
      _ = v, w
      # Both variables are reported by name until initialized.
      uninited = variables.report_uninitialized_variables()
      self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited))
      variables.global_variables_initializer().run()
      self.assertEqual(0, sess.run(uninited).size)
  def testVariableList(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable([1, 2], name="v")
      w = variables.Variable([3, 4], name="w")
      uninited = variables.report_uninitialized_variables()
      self.assertAllEqual(np.array([b"v", b"w"]), sess.run(uninited))
      # Initializing one variable at a time shrinks the report.
      sess.run(w.initializer)
      self.assertAllEqual(np.array([b"v"]), sess.run(uninited))
      v.initializer.run()
      self.assertEqual(0, sess.run(uninited).size)
  def testZeroSizeVarInitialized(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable(array_ops.zeros([0, 2]), name="v")
      uninited = variables.report_uninitialized_variables()
      v.initializer.run()  # not strictly necessary
      self.assertEqual(0, sess.run(uninited).size)
  def testTrainingWithZeroSizeVar(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      a = variables.Variable(array_ops.zeros([0, 2]))
      b = variables.Variable(array_ops.ones([2, 2]))
      objective = math_ops.reduce_sum(b + math_ops.matmul(
          a, a, transpose_a=True))
      variables.global_variables_initializer().run()
      do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
          objective)
      sess.run([do_opt])
      self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], b.eval())
class ObsoleteIsInitializedTest(test.TestCase):
  """Tests for the deprecated variables.assert_variables_initialized."""
  def testNoVars(self):
    with ops.Graph().as_default():
      # With no variables in the graph there is nothing to assert on.
      self.assertEqual(None, variables.assert_variables_initialized())
  def testVariables(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable([1, 2])
      w = variables.Variable([3, 4])
      _ = v, w
      inited = variables.assert_variables_initialized()
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        sess.run(inited)
      variables.global_variables_initializer().run()
      sess.run(inited)
  def testVariableList(self):
    with ops.Graph().as_default(), self.test_session() as sess:
      v = variables.Variable([1, 2])
      w = variables.Variable([3, 4])
      # Assert only on v; initializing w alone must still fail.
      inited = variables.assert_variables_initialized([v])
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        inited.op.run()
      sess.run(w.initializer)
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        inited.op.run()
      v.initializer.run()
      inited.op.run()
class PartitionedVariableTest(test.TestCase):
  """Tests for variables.PartitionedVariable construction and validation."""
  def testPartitionedVariable(self):
    with ops.Graph().as_default():
      v0 = variables.Variable([0])
      v1 = variables.Variable([1])
      # Save-slice info marks each variable as one slice of a [2] tensor.
      v0._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
      v1._set_save_slice_info(
          variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
      partitions = [2]
      # Pass variable_list as [v1, v0] to ensure they are properly
      # re-sorted to [v0, v1] based on their slice info offsets.
      partitioned_variable = variables.PartitionedVariable(
          name="two_vars",
          shape=[2],
          dtype=v0.dtype,
          variable_list=[v1, v0],
          partitions=partitions)
      concatenated = ops.convert_to_tensor(partitioned_variable)
      num_partitions = len(partitioned_variable)
      iterated_partitions = list(partitioned_variable)
      self.assertEqual(2, num_partitions)
      self.assertEqual([v0, v1], iterated_partitions)
      self.assertEqual([2], concatenated.get_shape())
      self.assertEqual([2], concatenated.shape)
  def testPartitionedVariableFailures(self):
    # Each invalid construction below must raise ValueError with the
    # matching message fragment.
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, "empty"):
        variables.PartitionedVariable(
            name="fail",
            shape=2,
            dtype=dtypes.int32,
            variable_list=[],
            partitions=[])
      with self.assertRaisesRegexp(ValueError, "must have a save_slice_info"):
        v0 = variables.Variable([0])
        partitions = [1]
        variables.PartitionedVariable(
            name="two_vars",
            shape=[1],
            dtype=v0.dtype,
            variable_list=[v0],
            partitions=partitions)
      with self.assertRaisesRegexp(ValueError, "full shapes must match"):
        v0 = variables.Variable([0])
        v1 = variables.Variable([1])
        v0._set_save_slice_info(
            variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
        v1._set_save_slice_info(
            variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
        partitions = [2]
        variables.PartitionedVariable(
            name="two_vars",
            shape=[3],
            dtype=v0.dtype,
            variable_list=[v1, v0],
            partitions=partitions)
      with self.assertRaisesRegexp(ValueError, "must be positive"):
        v0 = variables.Variable([0])
        v0._set_save_slice_info(
            variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
        partitions = [0]
        variables.PartitionedVariable(
            name="two_vars",
            shape=[2],
            dtype=v0.dtype,
            variable_list=[v0],
            partitions=partitions)
class VariableContainerTest(test.TestCase):
  """Tests that ops.container scopes set the variable 'container' attr."""
  def testContainer(self):
    with ops.Graph().as_default():
      v0 = variables.Variable([0])
      with ops.container("l1"):
        v1 = variables.Variable([1])
        with ops.container("l2"):
          v2 = variables.Variable([2])
          # A raw variable op can override the ambient container scope.
          special_v = gen_state_ops._variable(
              shape=[1],
              dtype=dtypes.float32,
              name="VariableInL3",
              container="l3",
              shared_name="")
        v3 = variables.Variable([3])
      v4 = variables.Variable([4])
      self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
      self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
      self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
if __name__ == "__main__":
  # Allow running this test module directly.
  test.main()
| with-git/tensorflow | tensorflow/python/kernel_tests/variables_test.py | Python | apache-2.0 | 25,860 |
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
# Technically maas_common isn't third-party but our own thing but hacking
# consideres it third-party
from maas_common import get_auth_ref
from maas_common import get_keystone_client
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
import requests
from requests import exceptions as exc
# NOTE(mancdaz): until https://review.openstack.org/#/c/111051/
# lands, there is no way to pass a custom (local) endpoint to
# cinderclient. Only way to test local is direct http. :sadface:
def check(auth_ref, args):
    """Query the Cinder os-services endpoint and emit MaaS metrics.

    Emits a `client_success` boolean metric plus one `*_status` boolean
    metric per cinder service (optionally filtered to args.host).

    :param auth_ref: Keystone auth reference from get_auth_ref().
    :param args: parsed CLI namespace (hostname, host, protocol, ...).
    """
    keystone = get_keystone_client(auth_ref)
    auth_token = keystone.auth_token
    VOLUME_ENDPOINT = (
        '{protocol}://{hostname}:8776/v1/{tenant}'.format(
            protocol=args.protocol,
            hostname=args.hostname,
            tenant=keystone.tenant_id)
    )
    s = requests.Session()
    s.headers.update(
        {'Content-type': 'application/json',
         'x-auth-token': auth_token})
    try:
        # We cannot do /os-services?host=X as cinder returns a hostname of
        # X@lvm for cinder-volume binary
        # NOTE: verify=False disables TLS certificate verification —
        # presumably acceptable for local monitoring endpoints.
        r = s.get('%s/os-services' % VOLUME_ENDPOINT, verify=False, timeout=5)
    except (exc.ConnectionError,
            exc.HTTPError,
            exc.Timeout) as e:
        metric_bool('client_success', False, m_name='maas_cinder')
        # status_err appears to terminate the check here (code after it is
        # only reached on success) — TODO confirm against maas_common.
        status_err(str(e), m_name='maas_cinder')
    if not r.ok:
        metric_bool('client_success', False, m_name='maas_cinder')
        status_err(
            'Could not get response from Cinder API',
            m_name='cinder'
        )
    else:
        metric_bool('client_success', True, m_name='maas_cinder')
    services = r.json()['services']
    # We need to match against a host of X and X@lvm (or whatever backend)
    if args.host:
        backend = ''.join((args.host, '@'))
        services = [service for service in services
                    if (service['host'].startswith(backend) or
                        service['host'] == args.host)]
    if len(services) == 0:
        status_err(
            'No host(s) found in the service list',
            m_name='maas_cinder'
        )
    status_ok(m_name='maas_cinder')
    if args.host:
        # Per-binary metric names; backend-qualified hosts (X@lvm) get the
        # backend folded into the metric name.
        for service in services:
            service_is_up = True
            name = '%s_status' % service['binary']
            if service['status'] == 'enabled' and service['state'] != 'up':
                service_is_up = False
            if '@' in service['host']:
                [host, backend] = service['host'].split('@')
                name = '%s-%s_status' % (service['binary'], backend)
            metric_bool(name, service_is_up)
    else:
        # No host filter: emit one metric per (binary, host) pair.
        for service in services:
            service_is_up = True
            if service['status'] == 'enabled' and service['state'] != 'up':
                service_is_up = False
            name = '%s_on_host_%s' % (service['binary'], service['host'])
            metric_bool(name, service_is_up)
def main(args):
    """Obtain a Keystone auth reference and run the Cinder service check."""
    check(get_auth_ref(), args)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the check inside the MaaS
    # output context manager (handles telegraf vs. plain formatting).
    parser = argparse.ArgumentParser(description="Check Cinder API against"
                                                 " local or remote address")
    parser.add_argument('hostname',
                        type=str,
                        help='Cinder API hostname or IP address')
    parser.add_argument('--host',
                        type=str,
                        help='Only return metrics for the specified host')
    parser.add_argument('--telegraf-output',
                        action='store_true',
                        default=False,
                        help='Set the output format to telegraf')
    parser.add_argument('--protocol',
                        type=str,
                        default='http',
                        help='Protocol to use for cinder client')
    args = parser.parse_args()
    with print_output(print_telegraf=args.telegraf_output):
        main(args)
| briancurtin/rpc-maas | playbooks/files/rax-maas/plugins/cinder_service_check.py | Python | apache-2.0 | 4,635 |
import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_import as h2i, h2o_jobs, h2o_exec as h2e
DO_POLL = False
class Basic(unittest.TestCase):
    """Repeatedly halves an iris dataset frame via H2O's frame_split API."""
    def tearDown(self):
        # Fail the test if the H2O sandbox logged any errors.
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        # Build a local single-node cloud, or a multi-host cloud when not
        # running locally.
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(1,java_heap_GB=4, base_port=54323)
        else:
            h2o_hosts.build_cloud_with_hosts(base_port=54323)
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_frame_split(self):
        h2o.beta_features = True
        csvFilename = 'iris22.csv'
        csvPathname = 'iris/' + csvFilename
        hex_key = "iris.hex"
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key, schema='local', timeoutSecs=10)
        print "Just split away and see if anything blows up"
        splitMe = hex_key
        # don't split
        for s in range(10):
            # 50/50 split; keep splitting the first half until it is too
            # small to split further.
            fs = h2o.nodes[0].frame_split(source=splitMe, ratios=0.5)
            split0_key = fs['split_keys'][0]
            split1_key = fs['split_keys'][1]
            split0_rows = fs['split_rows'][0]
            split1_rows = fs['split_rows'][1]
            split0_ratio = fs['split_ratios'][0]
            split1_ratio = fs['split_ratios'][1]
            print "Iteration", s, "split0_rows:", split0_rows, "split1_rows:", split1_rows
            splitMe = split0_key
            if split0_rows<=2:
                break
if __name__ == '__main__':
    # H2O's test entry point (wraps unittest.main with cloud handling).
    h2o.unit_main()
| woobe/h2o | py/testdir_single_jvm/test_frame_split_iris.py | Python | apache-2.0 | 1,612 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Gandi Live driver base classes
"""
import json
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.common.types import ProviderError
from libcloud.utils.py3 import httplib
__all__ = [
"API_HOST",
"GandiLiveBaseError",
"JsonParseError",
"ResourceNotFoundError",
"InvalidRequestError",
"ResourceConflictError",
"GandiLiveResponse",
"GandiLiveConnection",
"BaseGandiLiveDriver",
]
API_HOST = "dns.api.gandi.net"
class GandiLiveBaseError(ProviderError):
    """Base exception type for the Gandi Live DNS driver."""
class JsonParseError(GandiLiveBaseError):
    """Raised when a successful HTTP response body is not valid JSON."""
    pass
# Example:
# {
# "code": 404,
# "message": "Unknown zone",
# "object": "LocalizedHTTPNotFound",
# "cause": "Not Found"
# }
class ResourceNotFoundError(GandiLiveBaseError):
    """Raised for HTTP 404 responses (e.g. unknown zone or record)."""
    pass
# Example:
# {
# "code": 400,
# "message": "zone or zone_uuid must be set",
# "object": "HTTPBadRequest",
# "cause": "No zone set.",
# "errors": [
# {
# "location": "body",
# "name": "zone_uuid",
# "description": "\"FAKEUUID\" is not a UUID"
# }
# ]
# }
class InvalidRequestError(GandiLiveBaseError):
    """Raised for HTTP 400 responses describing a malformed request."""
    pass
# Examples:
# {
# "code": 409,
# "message": "Zone Testing already exists",
# "object": "HTTPConflict",
# "cause": "Duplicate Entry"
# }
# {
# "code": 409,
# "message": "The domain example.org already exists",
# "object": "HTTPConflict",
# "cause": "Duplicate Entry"
# }
# {
# "code": 409,
# "message": "This zone is still used by 1 domains",
# "object": "HTTPConflict",
# "cause": "In use"
# }
class ResourceConflictError(GandiLiveBaseError):
    """Raised for HTTP 409 responses (duplicate or in-use resources)."""
    pass
class GandiLiveResponse(JsonResponse):
    """
    A Base Gandi Live Response class to derive from.
    """
    def success(self):
        """
        Determine if our request was successful.
        For the Gandi Live response class, tag all responses as successful and
        raise appropriate Exceptions from parse_body.
        :return: C{True}
        """
        return True
    def parse_body(self):
        """
        Parse the JSON response body, or raise exceptions as appropriate.
        :return: JSON dictionary
        :rtype: ``dict``
        """
        json_error = False
        try:
            body = json.loads(self.body)
        except Exception:
            # If there is both a JSON parsing error and an unsuccessful http
            # response (like a 404), we want to raise the http error and not
            # the JSON one, so don't raise JsonParseError here.
            body = self.body
            json_error = True
        # Service does not appear to return HTTP 202 Accepted for anything.
        valid_http_codes = [
            httplib.OK,
            httplib.CREATED,
        ]
        if self.status in valid_http_codes:
            # Successful status: only a JSON parse failure is an error now.
            if json_error:
                raise JsonParseError(body, self.status)
            else:
                return body
        elif self.status == httplib.NO_CONTENT:
            # Parse error for empty body is acceptable, but a non-empty body
            # is not.
            if len(body) > 0:
                msg = '"No Content" response contained content'
                raise GandiLiveBaseError(msg, self.status)
            else:
                return {}
        elif self.status == httplib.NOT_FOUND:
            message = self._get_error(body, json_error)
            raise ResourceNotFoundError(message, self.status)
        elif self.status == httplib.BAD_REQUEST:
            message = self._get_error(body, json_error)
            raise InvalidRequestError(message, self.status)
        elif self.status == httplib.CONFLICT:
            message = self._get_error(body, json_error)
            raise ResourceConflictError(message, self.status)
        else:
            # Any other status maps to the generic driver error.
            message = self._get_error(body, json_error)
            raise GandiLiveBaseError(message, self.status)
# Errors are not described at all in Gandi's official documentation.
# It appears when an error arises, a JSON object is returned along with
# an HTTP 4xx class code. The object is structured as:
# {
# code: <code>,
# object: <object>,
# message: <message>,
# cause: <cause>,
# errors: [
# {
# location: <error-location>,
# name: <error-name>,
# description: <error-description>
# }
# ]
# }
# where
# <code> is a number equal to the HTTP response status code
# <object> is a string with some internal name for the status code
# <message> is a string detailing what the problem is
# <cause> is a string that comes from a set of succinct problem summaries
# errors is optional; if present:
# <error-location> is a string for which part of the request to look in
# <error-name> is a string naming the parameter
# <error-description> is a string detailing what the problem is
# Here we ignore object and combine message and cause along with an error
# if one or more exists.
def _get_error(self, body, json_error):
"""
Get the error code and message from a JSON response.
Incorporate the first error if there are multiple errors.
:param body: The body of the JSON response dictionary
:type body: ``dict``
:return: String containing error message
:rtype: ``str``
"""
if not json_error and "cause" in body:
message = "%s: %s" % (body["cause"], body["message"])
if "errors" in body:
err = body["errors"][0]
message = "%s (%s in %s: %s)" % (
message,
err.get("location"),
err.get("name"),
err.get("description"),
)
else:
message = body
return message
class GandiLiveConnection(ConnectionKey):
    """
    Connection class for the Gandi Live driver
    """
    responseCls = GandiLiveResponse
    host = API_HOST
    def add_default_headers(self, headers):
        """
        Returns default headers as a dictionary.
        """
        # All requests/responses are JSON; authentication is the API key
        # this connection was created with, sent via the X-Api-Key header.
        headers["Content-Type"] = "application/json"
        headers["X-Api-Key"] = self.key
        return headers
    def encode_data(self, data):
        """Encode data to JSON"""
        return json.dumps(data)
class BaseGandiLiveDriver(object):
    """
    Gandi Live base driver
    """
    # Connection class and provider display name for this driver family.
    connectionCls = GandiLiveConnection
    name = "GandiLive"
| apache/libcloud | libcloud/common/gandi_live.py | Python | apache-2.0 | 7,334 |
import json
import pytest
class TestNFSs(object):
    """Smoke tests for the nfs-ganesha service deployed by ceph-ansible."""
    @pytest.mark.no_docker
    def test_nfs_ganesha_is_installed(self, node, host):
        assert host.package("nfs-ganesha").is_installed
    @pytest.mark.no_docker
    def test_nfs_ganesha_rgw_package_is_installed(self, node, host):
        assert host.package("nfs-ganesha-rgw").is_installed
    @pytest.mark.no_docker
    def test_nfs_services_are_running(self, node, host):
        assert host.service("nfs-ganesha").is_running
    @pytest.mark.no_docker
    def test_nfs_services_are_enabled(self, node, host):
        assert host.service("nfs-ganesha").is_enabled
    @pytest.mark.no_docker
    def test_nfs_config_override(self, node, host):
        # The override should have injected an Entries_HWMark setting.
        assert host.file("/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
    def test_nfs_is_up(self, node, host):
        # Query cluster status with the RGW keyring and verify this host is
        # registered as an rgw-nfs daemon in the service map.
        hostname = node["vars"]["inventory_hostname"]
        cluster = node['cluster_name']
        if node['docker']:
            docker_exec_cmd = 'docker exec ceph-nfs-{hostname}'.format(hostname=hostname)
        else:
            docker_exec_cmd = ''
        cmd = "sudo {docker_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
            docker_exec_cmd=docker_exec_cmd,
            hostname=hostname,
            cluster=cluster
        )
        output = host.check_output(cmd)
        daemons = [i for i in json.loads(output)["servicemap"]["services"]["rgw-nfs"]["daemons"]]
        assert hostname in daemons
#NOTE (guits): This check must be fixed. (Permission denied error)
# @pytest.mark.no_docker
# def test_nfs_rgw_fsal_export(self, node, host):
# if(host.mount_point("/mnt").exists):
# cmd = host.run("sudo umount /mnt")
# assert cmd.rc == 0
# cmd = host.run("sudo mount.nfs localhost:/ceph /mnt/")
# assert cmd.rc == 0
# assert host.mount_point("/mnt").exists
| font/ceph-ansible | tests/functional/tests/nfs/test_nfs_ganesha.py | Python | apache-2.0 | 1,987 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class ServerRescueV3Test(base.BaseV3ComputeTest):
    @classmethod
    def resource_setup(cls):
        # Skip the whole class when the rescue feature is disabled.
        if not CONF.compute_feature_enabled.rescue:
            msg = "Server rescue not available."
            raise cls.skipException(msg)
        super(ServerRescueV3Test, cls).resource_setup()
        # Server for positive tests
        resp, server = cls.create_test_server(wait_until='BUILD')
        cls.server_id = server['id']
        cls.password = server['admin_password']
        cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
    @test.attr(type='smoke')
    def test_rescue_unrescue_instance(self):
        # Rescue, wait for RESCUE state, then unrescue back to ACTIVE.
        resp, body = self.servers_client.rescue_server(
            self.server_id, admin_password=self.password)
        self.assertEqual(202, resp.status)
        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
        resp, body = self.servers_client.unrescue_server(self.server_id)
        self.assertEqual(202, resp.status)
        self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
| queria/my-tempest | tempest/api/compute/v3/servers/test_server_rescue.py | Python | apache-2.0 | 1,828 |
# WSDL endpoints of the NFS-e web service, keyed by municipality code
# (3304557 appears to be Rio de Janeiro, per the notacarioca.rio.gov.br host).
URL = {
    3304557: {
        "production": "https://notacarioca.rio.gov.br/WSNacional/nfse.asmx?wsdl",
        "sandbox": "https://homologacao.notacarioca.rio.gov.br/WSNacional/nfse.asmx?wsdl"
    }
}
# XML request template filenames, keyed by operation name.
TEMPLATES = {
    'send_rps': "GerarNfseEnvio.xml",
    'status': "ConsultarNfseEnvio.xml",
    'get_nfse': "ConsultarNfseEnvio.xml",
    'cancel': "CancelarNfseEnvio.xml"
} | adrianomargarin/py-notacarioca | notacarioca/settings.py | Python | apache-2.0 | 377 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from notebook.conf import get_interpreters
LOG = logging.getLogger(__name__)
class SessionExpired(Exception):
  """Raised when no live session matches a snippet (see _get_snippet_session)."""
  pass
class QueryExpired(Exception):
  """Marker exception; connectors raise this when a query is no longer available."""
  pass
class AuthenticationRequired(Exception):
  """Marker exception; connectors raise this when credentials are needed."""
  pass
class QueryError(Exception):
  """Error raised while executing a query.

  Carries an optional connector ``handle`` and an ``extra`` dict; falls back
  to a generic translated message when none is provided.
  """
  def __init__(self, message, handle=None):
    self.message = message or _('No error message, please check the logs.')
    # Pass the message to Exception so str(e) and e.args are populated
    # (previously the base initializer was never called and str(e) was '').
    super(QueryError, self).__init__(self.message)
    self.handle = handle
    self.extra = {}
  def __unicode__(self):
    return smart_unicode(self.message)
class Notebook(object):
  """Thin wrapper around a notebook Document and its JSON payload."""
  def __init__(self, document=None):
    self.document = None
    if document is None:
      # Fresh, empty notebook payload.
      self.data = json.dumps({
        'name': 'My Notebook',
        'description': '',
        'type': 'notebook',
        'snippets': [],
      })
    else:
      self.data = document.data
      self.document = document
  def get_json(self):
    """Serialized form of get_data()."""
    return json.dumps(self.get_data())
  def get_data(self):
    """Payload dict, annotated with the document id/history flag when attached."""
    data = json.loads(self.data)
    if self.document is not None:
      data['id'] = self.document.id
      data['is_history'] = self.document.is_history
    return data
  def get_str(self):
    """All snippet statements, joined by blank lines."""
    statements = [snippet['statement_raw'] for snippet in self.get_data()['snippets']]
    return '\n\n'.join(statements)
def get_api(request, snippet):
  """Return the connector API instance for the snippet's interpreter type.

  The interpreter is looked up from the configured interpreters (hue.ini);
  its ``interface`` field selects the concrete connector class.
  Raises PopupException for unconfigured snippet types or unknown interfaces.
  """
  # Imports are local to avoid importing every connector at module load.
  from notebook.connectors.hiveserver2 import HS2Api
  from notebook.connectors.jdbc import JdbcApi
  from notebook.connectors.rdbms import RdbmsApi
  from notebook.connectors.pig_batch import PigApi
  from notebook.connectors.solr import SolrApi
  from notebook.connectors.spark_shell import SparkApi
  from notebook.connectors.spark_batch import SparkBatchApi
  from notebook.connectors.text import TextApi
  interpreter = [interpreter for interpreter in get_interpreters(request.user) if interpreter['type'] == snippet['type']]
  if not interpreter:
    raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)
  interpreter = interpreter[0]
  interface = interpreter['interface']
  # Dispatch on the interface name to the matching connector implementation.
  if interface == 'hiveserver2':
    return HS2Api(user=request.user, request=request)
  elif interface == 'livy':
    return SparkApi(request.user)
  elif interface == 'livy-batch':
    return SparkBatchApi(request.user)
  elif interface == 'text' or interface == 'markdown':
    return TextApi(request.user)
  elif interface == 'rdbms':
    return RdbmsApi(request.user, interpreter=snippet['type'])
  elif interface == 'jdbc':
    return JdbcApi(request.user, interpreter=interpreter)
  elif interface == 'solr':
    return SolrApi(request.user, interpreter=interpreter)
  elif interface == 'pig':
    return PigApi(user=request.user, request=request)
  else:
    raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
def _get_snippet_session(notebook, snippet):
session = [session for session in notebook['sessions'] if session['type'] == snippet['type']]
if not session:
raise SessionExpired()
else:
return session[0]
# Base API
class Api(object):
  """Base class for notebook connector APIs.

  Concrete connectors (HS2Api, SparkApi, ...) override these hooks; the
  defaults here are safe no-ops so a minimal connector only has to
  implement what it supports.
  """
  def __init__(self, user, interpreter=None, request=None):
    self.user = user
    self.interpreter = interpreter
    self.request = request
  def create_session(self, lang, properties=None):
    """Describe a new session for language *lang*.

    Fixed: the previous ``properties if not None else []`` always evaluated
    to ``properties`` (``not None`` is True), so a None value leaked through
    instead of defaulting to an empty list.
    """
    return {
        'type': lang,
        'id': None,
        'properties': properties if properties is not None else []
    }
  def close_session(self, session):
    pass
  def fetch_result(self, notebook, snippet, rows, start_over):
    pass
  def download(self, notebook, snippet, format):
    pass
  def get_log(self, notebook, snippet, startFrom=None, size=None):
    return 'No logs'
  def autocomplete(self, snippet, database=None, table=None, column=None, nested=None):
    return {}
  def progress(self, snippet, logs=None):
    # Connectors that cannot compute real progress report a generic midpoint.
    return 50
  def get_jobs(self, notebook, snippet, logs):
    return []
  def export_data_as_hdfs_file(self, snippet, target_file, overwrite): raise NotImplementedError()
  def export_data_as_table(self, notebook, snippet, destination): raise NotImplementedError()
  def export_large_data_to_hdfs(self, notebook, snippet, destination): raise NotImplementedError()
| Peddle/hue | desktop/libs/notebook/src/notebook/connectors/base.py | Python | apache-2.0 | 5,042 |
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Test of the cost functions
'''
import numpy as np
from neon import NervanaObject
from neon.transforms import (CrossEntropyBinary, CrossEntropyMulti, SumSquared,
Misclassification)
def compare_tensors(func, y, t, outputs, deriv=False, tol=0.):
    """
    Evaluate ``func`` (or its ``bprop`` when ``deriv`` is True) on the
    backend and assert every element matches ``outputs`` within ``tol``.

    Arguments:
        func: cost/transform object under test
        y: model output array
        t: target array
        outputs: expected result (numpy array)
        deriv (bool): compare the derivative (bprop) instead of the forward cost
        tol (float): absolute elementwise tolerance
    """
    be = NervanaObject.be
    temp = be.empty(outputs.shape)
    dtypeu = np.float32
    if deriv is True:
        temp[:] = func.bprop(be.array(dtypeu(y)), be.array(dtypeu(t)))
    else:
        temp[:] = func(be.array(dtypeu(y)), be.array(dtypeu(t)))
    # Every element must be within tol of the expected value.
    cond = np.sum(np.abs(temp.get() - outputs) <= tol)
    assert cond == np.prod(outputs.shape)
"""
CrossEntropyBinary
"""
def test_cross_entropy_binary(backend_default):
outputs = np.array([0.5, 0.9, 0.1, 0.0001]).reshape((4, 1))
targets = np.array([0.5, 0.99, 0.01, 0.2]).reshape((4, 1))
eps = 2 ** -23
expected_result = np.sum((-targets * np.log(outputs + eps)) -
(1 - targets) * np.log(1 - outputs + eps),
keepdims=True)
compare_tensors(CrossEntropyBinary(),
outputs, targets, expected_result, tol=1e-6)
def test_cross_entropy_binary_limits(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
eps = 2 ** -23
expected_result = np.sum((-targets * np.log(outputs + eps)) -
(1 - targets) * np.log(1 - outputs + eps),
keepdims=True)
compare_tensors(CrossEntropyBinary(),
outputs, targets, expected_result, tol=1e-5)
def test_cross_entropy_binary_derivative(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
# bprop assumes shortcut
expected_result = ((outputs - targets) / outputs.shape[1])
compare_tensors(
CrossEntropyBinary(), outputs, targets, expected_result, deriv=True,
tol=1e-6)
"""
CrossEntropyMulti
"""
def test_cross_entropy_multi(backend_default):
outputs = np.array([0.5, 0.9, 0.1, 0.0001]).reshape((4, 1))
targets = np.array([0.5, 0.99, 0.01, 0.2]).reshape((4, 1))
eps = 2 ** -23
expected_result = np.sum(-targets * np.log(np.clip(outputs, eps, 1.0)),
axis=0, keepdims=True)
compare_tensors(CrossEntropyMulti(),
outputs, targets, expected_result, tol=1e-6)
def test_cross_entropy_multi_limits(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
eps = 2 ** -23
expected_result = np.sum(-targets * np.log(np.clip(outputs, eps, 1.0)),
axis=0, keepdims=True)
compare_tensors(CrossEntropyMulti(),
outputs, targets, expected_result, tol=1e-5)
def test_cross_entropy_multi_derivative(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
expected_result = ((outputs - targets) / outputs.shape[1])
compare_tensors(CrossEntropyMulti(), outputs, targets, expected_result,
deriv=True, tol=1e-6)
"""
SumSquared
"""
def test_sum_squared(backend_default):
outputs = np.array([0.5, 0.9, 0.1, 0.0001]).reshape((4, 1))
targets = np.array([0.5, 0.99, 0.01, 0.2]).reshape((4, 1))
expected_result = np.sum((outputs - targets) ** 2, axis=0, keepdims=True) / 2.
compare_tensors(SumSquared(), outputs, targets, expected_result, tol=1e-8)
def test_sum_squared_limits(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
expected_result = np.sum((outputs - targets) ** 2, axis=0, keepdims=True) / 2.
compare_tensors(SumSquared(), outputs, targets, expected_result, tol=1e-7)
def test_sum_squared_derivative(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
expected_result = (outputs - targets) / outputs.shape[1]
compare_tensors(SumSquared(), outputs,
targets, expected_result, deriv=True, tol=1e-8)
"""
Misclassification
"""
def compare_metric(func, y, t, outputs, deriv=False, tol=0.):
be = NervanaObject.be
dtypeu = np.float32
temp = func(be.array(dtypeu(y)), be.array(dtypeu(t)))
cond = np.sum(np.abs(temp - outputs) <= tol)
assert cond == np.prod(outputs.shape)
def test_misclassification(backend_default):
NervanaObject.be.bsz = 3
outputs = np.array(
[[0.25, 0.99, 0.33], [0.5, 0.005, 0.32], [0.25, 0.005, 0.34]])
targets = np.array([[0, 1, 0], [1, 0, 1], [0, 0, 0]])
expected_result = np.ones((1, 1)) / 3.
compare_metric(Misclassification(),
outputs, targets, expected_result, tol=1e-7)
| jfsantos/neon | tests/test_costs.py | Python | apache-2.0 | 5,749 |
"""
Switches on Zigbee Home Automation networks.
For more details on this platform, please refer to the documentation
at https://home-assistant.io/components/switch.zha/
"""
import logging
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .core.const import (
DATA_ZHA, DATA_ZHA_DISPATCHERS, ZHA_DISCOVERY_NEW, ON_OFF_CHANNEL,
SIGNAL_ATTR_UPDATED
)
from .entity import ZhaEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['zha']
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Old way of setting up Zigbee Home Automation switches."""
    # Intentionally a no-op: setup happens via config entries instead.
    pass
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Zigbee Home Automation switch from config entry."""
    async def async_discover(discovery_info):
        await _async_setup_entities(hass, config_entry, async_add_entities,
                                    [discovery_info])
    # Subscribe to switches discovered after startup.
    unsub = async_dispatcher_connect(
        hass, ZHA_DISCOVERY_NEW.format(DOMAIN), async_discover)
    hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)
    # Add switches already discovered at startup, then drop the staging dict.
    switches = hass.data.get(DATA_ZHA, {}).get(DOMAIN)
    if switches is not None:
        await _async_setup_entities(hass, config_entry, async_add_entities,
                                    switches.values())
    del hass.data[DATA_ZHA][DOMAIN]
async def _async_setup_entities(hass, config_entry, async_add_entities,
                                discovery_infos):
    """Create a Switch entity per discovery record and register them all."""
    async_add_entities(
        [Switch(**discovery_info) for discovery_info in discovery_infos],
        update_before_add=True)
class Switch(ZhaEntity, SwitchDevice):
    """ZHA switch."""
    _domain = DOMAIN
    def __init__(self, **kwargs):
        """Initialize the ZHA switch."""
        super().__init__(**kwargs)
        # Channel used for all on/off commands and state reads.
        self._on_off_channel = self.cluster_channels.get(ON_OFF_CHANNEL)
    @property
    def is_on(self) -> bool:
        """Return if the switch is on based on the statemachine."""
        if self._state is None:
            # Unknown state is reported as off.
            return False
        return self._state
    async def async_turn_on(self, **kwargs):
        """Turn the entity on."""
        success = await self._on_off_channel.on()
        if not success:
            # Command failed: leave the cached state untouched.
            return
        self._state = True
        self.async_schedule_update_ha_state()
    async def async_turn_off(self, **kwargs):
        """Turn the entity off."""
        success = await self._on_off_channel.off()
        if not success:
            return
        self._state = False
        self.async_schedule_update_ha_state()
    def async_set_state(self, state):
        """Handle state update from channel."""
        self._state = bool(state)
        self.async_schedule_update_ha_state()
    @property
    def device_state_attributes(self):
        """Return state attributes."""
        return self.state_attributes
    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        # Re-sync cached state whenever the on/off cluster reports a change.
        await self.async_accept_signal(
            self._on_off_channel, SIGNAL_ATTR_UPDATED, self.async_set_state)
    @callback
    def async_restore_last_state(self, last_state):
        """Restore previous state."""
        self._state = last_state.state == STATE_ON
    async def async_update(self):
        """Attempt to retrieve on off state from the switch."""
        await super().async_update()
        if self._on_off_channel:
            self._state = await self._on_off_channel.get_attribute_value(
                'on_off')
| nugget/home-assistant | homeassistant/components/zha/switch.py | Python | apache-2.0 | 3,817 |
import asyncio
import functools
import io
import unittest
import zlib
from unittest import mock
import pytest
import aiohttp.multipart
from aiohttp import helpers, payload
from aiohttp.hdrs import (CONTENT_DISPOSITION, CONTENT_ENCODING,
CONTENT_TRANSFER_ENCODING, CONTENT_TYPE)
from aiohttp.helpers import parse_mimetype
from aiohttp.multipart import (content_disposition_filename,
parse_content_disposition)
from aiohttp.streams import DEFAULT_LIMIT as stream_reader_default_limit
from aiohttp.streams import StreamReader
@pytest.fixture
def buf():
    # Shared output buffer the ``stream`` fixture writes into.
    return bytearray()
@pytest.fixture
def stream(buf):
    # Mock writer whose ``write`` appends every chunk to ``buf``.
    writer = mock.Mock()
    def write(chunk):
        buf.extend(chunk)
        return ()
    writer.write.side_effect = write
    return writer
@pytest.fixture
def writer():
    # Multipart writer with a fixed ':' boundary for deterministic output.
    return aiohttp.multipart.MultipartWriter(boundary=':')
def run_in_loop(f):
    """Wrap generator-based test *f* so it runs as a coroutine on the testcase's loop."""
    @functools.wraps(f)
    def wrapper(testcase, *args, **kwargs):
        coro = asyncio.coroutine(f)
        # Fail the test if it does not complete within 5 seconds.
        future = asyncio.wait_for(coro(testcase, *args, **kwargs), timeout=5)
        return testcase.loop.run_until_complete(future)
    return wrapper
class MetaAioTestCase(type):
    """Metaclass wrapping every ``test_*`` method with ``run_in_loop``."""
    def __new__(cls, name, bases, attrs):
        for key, obj in attrs.items():
            if key.startswith('test_'):
                attrs[key] = run_in_loop(obj)
        return super().__new__(cls, name, bases, attrs)
class TestCase(unittest.TestCase, metaclass=MetaAioTestCase):
    """Base test case with a private event loop per test."""
    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
    def tearDown(self):
        self.loop.close()
    def future(self, obj):
        # Helper: a future already resolved with *obj*.
        fut = helpers.create_future(self.loop)
        fut.set_result(obj)
        return fut
class Response(object):
    """Minimal response stub carrying only headers and content."""
    def __init__(self, headers, content):
        self.headers = headers
        self.content = content
class Stream(object):
    """In-memory stand-in for a content stream, backed by BytesIO."""
    def __init__(self, content):
        self.content = io.BytesIO(content)
    @asyncio.coroutine
    def read(self, size=None):
        return self.content.read(size)
    def at_eof(self):
        # EOF when the cursor has reached the end of the underlying buffer.
        return self.content.tell() == len(self.content.getbuffer())
    @asyncio.coroutine
    def readline(self):
        return self.content.readline()
    def unread_data(self, data):
        # Push bytes back onto the front of the stream.
        self.content = io.BytesIO(data + self.content.read())
class StreamWithShortenRead(Stream):
    """Stream whose first sized read returns only half the requested bytes."""
    def __init__(self, content):
        self._first = True
        super().__init__(content)
    @asyncio.coroutine
    def read(self, size=None):
        if size is not None and self._first:
            # Simulate a short read once, to exercise re-read logic.
            self._first = False
            size = size // 2
        return (yield from super().read(size))
class MultipartResponseWrapperTestCase(TestCase):
    """MultipartResponseWrapper must delegate to its resp/stream mocks."""
    def setUp(self):
        super().setUp()
        wrapper = aiohttp.multipart.MultipartResponseWrapper(mock.Mock(),
                                                             mock.Mock())
        self.wrapper = wrapper
    def test_at_eof(self):
        self.wrapper.at_eof()
        self.assertTrue(self.wrapper.resp.content.at_eof.called)
    def test_next(self):
        self.wrapper.stream.next.return_value = self.future(b'')
        self.wrapper.stream.at_eof.return_value = False
        yield from self.wrapper.next()
        self.assertTrue(self.wrapper.stream.next.called)
    def test_release(self):
        self.wrapper.resp.release.return_value = self.future(None)
        yield from self.wrapper.release()
        self.assertTrue(self.wrapper.resp.release.called)
    def test_release_when_stream_at_eof(self):
        # Exhausting the stream should also release the response.
        self.wrapper.resp.release.return_value = self.future(None)
        self.wrapper.stream.next.return_value = self.future(b'')
        self.wrapper.stream.at_eof.return_value = True
        yield from self.wrapper.next()
        self.assertTrue(self.wrapper.stream.next.called)
        self.assertTrue(self.wrapper.resp.release.called)
class PartReaderTestCase(TestCase):
    def setUp(self):
        super().setUp()
        # All parts in these tests use ':' as the multipart boundary.
        self.boundary = b'--:'
    def test_next(self):
        # next() yields the part payload up to the boundary marker.
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {}, Stream(b'Hello, world!\r\n--:'))
        result = yield from obj.next()
        self.assertEqual(b'Hello, world!', result)
        self.assertTrue(obj.at_eof())
    def test_next_next(self):
        # A second next() after EOF returns None.
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {}, Stream(b'Hello, world!\r\n--:'))
        result = yield from obj.next()
        self.assertEqual(b'Hello, world!', result)
        self.assertTrue(obj.at_eof())
        result = yield from obj.next()
        self.assertIsNone(result)
    def test_read(self):
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {}, Stream(b'Hello, world!\r\n--:'))
        result = yield from obj.read()
        self.assertEqual(b'Hello, world!', result)
        self.assertTrue(obj.at_eof())
    def test_read_chunk_at_eof(self):
        # read_chunk() at EOF returns empty bytes.
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {}, Stream(b'--:'))
        obj._at_eof = True
        result = yield from obj.read_chunk()
        self.assertEqual(b'', result)
    def test_read_chunk_without_content_length(self):
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {}, Stream(b'Hello, world!\r\n--:'))
        c1 = yield from obj.read_chunk(8)
        c2 = yield from obj.read_chunk(8)
        c3 = yield from obj.read_chunk(8)
        self.assertEqual(c1 + c2, b'Hello, world!')
        self.assertEqual(c3, b'')
    def test_read_incomplete_chunk(self):
        # A payload delivered in several short reads must be reassembled.
        stream = Stream(b'')
        def prepare(data):
            f = helpers.create_future(self.loop)
            f.set_result(data)
            return f
        with mock.patch.object(stream, 'read', side_effect=[
            prepare(b'Hello, '),
            prepare(b'World'),
            prepare(b'!\r\n--:'),
            prepare(b'')
        ]):
            obj = aiohttp.multipart.BodyPartReader(
                self.boundary, {}, stream)
            c1 = yield from obj.read_chunk(8)
            self.assertEqual(c1, b'Hello, ')
            c2 = yield from obj.read_chunk(8)
            self.assertEqual(c2, b'World')
            c3 = yield from obj.read_chunk(8)
            self.assertEqual(c3, b'!')
    def test_read_all_at_once(self):
        stream = Stream(b'Hello, World!\r\n--:--\r\n')
        obj = aiohttp.multipart.BodyPartReader(self.boundary, {}, stream)
        result = yield from obj.read_chunk()
        self.assertEqual(b'Hello, World!', result)
        result = yield from obj.read_chunk()
        self.assertEqual(b'', result)
        self.assertTrue(obj.at_eof())
    def test_read_incomplete_body_chunked(self):
        # A truncated body (no closing boundary) must raise.
        stream = Stream(b'Hello, World!\r\n-')
        obj = aiohttp.multipart.BodyPartReader(self.boundary, {}, stream)
        result = b''
        with self.assertRaises(AssertionError):
            for _ in range(4):
                result += yield from obj.read_chunk(7)
        self.assertEqual(b'Hello, World!\r\n-', result)
    def test_read_boundary_with_incomplete_chunk(self):
        # A boundary split across reads must still terminate the part.
        stream = Stream(b'')
        def prepare(data):
            f = helpers.create_future(self.loop)
            f.set_result(data)
            return f
        with mock.patch.object(stream, 'read', side_effect=[
            prepare(b'Hello, World'),
            prepare(b'!\r\n'),
            prepare(b'--:'),
            prepare(b'')
        ]):
            obj = aiohttp.multipart.BodyPartReader(
                self.boundary, {}, stream)
            c1 = yield from obj.read_chunk(12)
            self.assertEqual(c1, b'Hello, World')
            c2 = yield from obj.read_chunk(8)
            self.assertEqual(c2, b'!')
            c3 = yield from obj.read_chunk(8)
            self.assertEqual(c3, b'')
    def test_multi_read_chunk(self):
        # Reading stops at the first boundary; the next part stays untouched.
        stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--')
        obj = aiohttp.multipart.BodyPartReader(self.boundary, {}, stream)
        result = yield from obj.read_chunk(8)
        self.assertEqual(b'Hello,', result)
        result = yield from obj.read_chunk(8)
        self.assertEqual(b'', result)
        self.assertTrue(obj.at_eof())
    def test_read_chunk_properly_counts_read_bytes(self):
        # Short reads must not confuse the Content-Length accounting.
        expected = b'.' * 10
        size = len(expected)
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {'CONTENT-LENGTH': size},
            StreamWithShortenRead(expected + b'\r\n--:--'))
        result = bytearray()
        while True:
            chunk = yield from obj.read_chunk()
            if not chunk:
                break
            result.extend(chunk)
        self.assertEqual(size, len(result))
        self.assertEqual(b'.' * size, result)
        self.assertTrue(obj.at_eof())
    def test_read_does_not_read_boundary(self):
        # The boundary itself must remain in the stream after read().
        stream = Stream(b'Hello, world!\r\n--:')
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {}, stream)
        result = yield from obj.read()
        self.assertEqual(b'Hello, world!', result)
        self.assertEqual(b'--:', (yield from stream.read()))
    def test_multiread(self):
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {}, Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--'))
        result = yield from obj.read()
        self.assertEqual(b'Hello,', result)
        result = yield from obj.read()
        self.assertEqual(b'', result)
        self.assertTrue(obj.at_eof())
    def test_read_multiline(self):
        # Bare LF inside the payload is preserved; only CRLF+boundary ends it.
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {}, Stream(b'Hello\n,\r\nworld!\r\n--:--'))
        result = yield from obj.read()
        self.assertEqual(b'Hello\n,\r\nworld!', result)
        result = yield from obj.read()
        self.assertEqual(b'', result)
        self.assertTrue(obj.at_eof())
    def test_read_respects_content_length(self):
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {'CONTENT-LENGTH': 100500},
            Stream(b'.' * 100500 + b'\r\n--:--'))
        result = yield from obj.read()
        self.assertEqual(b'.' * 100500, result)
        self.assertTrue(obj.at_eof())
    def test_read_with_content_encoding_gzip(self):
        # decode=True must gunzip the payload.
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {CONTENT_ENCODING: 'gzip'},
            Stream(b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU'
                   b'(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00'
                   b'\r\n--:--'))
        result = yield from obj.read(decode=True)
        self.assertEqual(b'Time to Relax!', result)
    def test_read_with_content_encoding_deflate(self):
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {CONTENT_ENCODING: 'deflate'},
            Stream(b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--'))
        result = yield from obj.read(decode=True)
        self.assertEqual(b'Time to Relax!', result)
    def test_read_with_content_encoding_identity(self):
        # 'identity' applies no transformation at all.
        thing = (b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU'
                 b'(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00'
                 b'\r\n')
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {CONTENT_ENCODING: 'identity'},
            Stream(thing + b'--:--'))
        result = yield from obj.read(decode=True)
        self.assertEqual(thing[:-2], result)
    def test_read_with_content_encoding_unknown(self):
        # Unsupported content encodings must raise RuntimeError.
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {CONTENT_ENCODING: 'snappy'},
            Stream(b'\x0e4Time to Relax!\r\n--:--'))
        with self.assertRaises(RuntimeError):
            yield from obj.read(decode=True)
    def test_read_with_content_transfer_encoding_base64(self):
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {CONTENT_TRANSFER_ENCODING: 'base64'},
            Stream(b'VGltZSB0byBSZWxheCE=\r\n--:--'))
        result = yield from obj.read(decode=True)
        self.assertEqual(b'Time to Relax!', result)
    def test_read_with_content_transfer_encoding_quoted_printable(self):
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {CONTENT_TRANSFER_ENCODING: 'quoted-printable'},
            Stream(b'=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,'
                   b' =D0=BC=D0=B8=D1=80!\r\n--:--'))
        result = yield from obj.read(decode=True)
        self.assertEqual(b'\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,'
                         b' \xd0\xbc\xd0\xb8\xd1\x80!', result)
@pytest.mark.parametrize('encoding', [])
def test_read_with_content_transfer_encoding_binary(self):
data = b'\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,' \
b' \xd0\xbc\xd0\xb8\xd1\x80!'
for encoding in ('binary', '8bit', '7bit'):
with self.subTest(encoding):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TRANSFER_ENCODING: encoding},
Stream(data + b'\r\n--:--'))
result = yield from obj.read(decode=True)
self.assertEqual(data, result)
def test_read_with_content_transfer_encoding_unknown(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TRANSFER_ENCODING: 'unknown'},
Stream(b'\x0e4Time to Relax!\r\n--:--'))
with self.assertRaises(RuntimeError):
yield from obj.read(decode=True)
def test_read_text(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {}, Stream(b'Hello, world!\r\n--:--'))
result = yield from obj.text()
self.assertEqual('Hello, world!', result)
def test_read_text_default_encoding(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {},
Stream('Привет, Мир!\r\n--:--'.encode('utf-8')))
result = yield from obj.text()
self.assertEqual('Привет, Мир!', result)
def test_read_text_encoding(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {},
Stream('Привет, Мир!\r\n--:--'.encode('cp1251')))
result = yield from obj.text(encoding='cp1251')
self.assertEqual('Привет, Мир!', result)
def test_read_text_guess_encoding(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TYPE: 'text/plain;charset=cp1251'},
Stream('Привет, Мир!\r\n--:--'.encode('cp1251')))
result = yield from obj.text()
self.assertEqual('Привет, Мир!', result)
    def test_read_text_compressed(self):
        # Deflated text payload: Content-Encoding must be undone before the
        # charset decoding happens, yielding the original message.
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {CONTENT_ENCODING: 'deflate',
                            CONTENT_TYPE: 'text/plain'},
            Stream(b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--'))
        result = yield from obj.text()
        self.assertEqual('Time to Relax!', result)
def test_read_text_while_closed(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TYPE: 'text/plain'}, Stream(b''))
obj._at_eof = True
result = yield from obj.text()
self.assertEqual('', result)
def test_read_json(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TYPE: 'application/json'},
Stream(b'{"test": "passed"}\r\n--:--'))
result = yield from obj.json()
self.assertEqual({'test': 'passed'}, result)
def test_read_json_encoding(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TYPE: 'application/json'},
Stream('{"тест": "пассед"}\r\n--:--'.encode('cp1251')))
result = yield from obj.json(encoding='cp1251')
self.assertEqual({'тест': 'пассед'}, result)
def test_read_json_guess_encoding(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TYPE: 'application/json; charset=cp1251'},
Stream('{"тест": "пассед"}\r\n--:--'.encode('cp1251')))
result = yield from obj.json()
self.assertEqual({'тест': 'пассед'}, result)
    def test_read_json_compressed(self):
        # Deflated JSON body: Content-Encoding is undone before parsing.
        obj = aiohttp.multipart.BodyPartReader(
            self.boundary, {CONTENT_ENCODING: 'deflate',
                            CONTENT_TYPE: 'application/json'},
            Stream(b'\xabV*I-.Q\xb2RP*H,.NMQ\xaa\x05\x00\r\n--:--'))
        result = yield from obj.json()
        self.assertEqual({'test': 'passed'}, result)
def test_read_json_while_closed(self):
stream = Stream(b'')
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TYPE: 'application/json'}, stream)
obj._at_eof = True
result = yield from obj.json()
self.assertEqual(None, result)
def test_read_form(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TYPE: 'application/x-www-form-urlencoded'},
Stream(b'foo=bar&foo=baz&boo=\r\n--:--'))
result = yield from obj.form()
self.assertEqual([('foo', 'bar'), ('foo', 'baz'), ('boo', '')],
result)
def test_read_form_encoding(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {CONTENT_TYPE: 'application/x-www-form-urlencoded'},
Stream('foo=bar&foo=baz&boo=\r\n--:--'.encode('cp1251')))
result = yield from obj.form(encoding='cp1251')
self.assertEqual([('foo', 'bar'), ('foo', 'baz'), ('boo', '')],
result)
def test_read_form_guess_encoding(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary,
{CONTENT_TYPE: 'application/x-www-form-urlencoded; charset=utf-8'},
Stream('foo=bar&foo=baz&boo=\r\n--:--'.encode('utf-8')))
result = yield from obj.form()
self.assertEqual([('foo', 'bar'), ('foo', 'baz'), ('boo', '')],
result)
def test_read_form_while_closed(self):
stream = Stream(b'')
obj = aiohttp.multipart.BodyPartReader(
self.boundary,
{CONTENT_TYPE: 'application/x-www-form-urlencoded'}, stream)
obj._at_eof = True
result = yield from obj.form()
self.assertEqual(None, result)
def test_readline(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {}, Stream(b'Hello\n,\r\nworld!\r\n--:--'))
result = yield from obj.readline()
self.assertEqual(b'Hello\n', result)
result = yield from obj.readline()
self.assertEqual(b',\r\n', result)
result = yield from obj.readline()
self.assertEqual(b'world!', result)
result = yield from obj.readline()
self.assertEqual(b'', result)
self.assertTrue(obj.at_eof())
def test_release(self):
stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--')
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {}, stream)
yield from obj.release()
self.assertTrue(obj.at_eof())
self.assertEqual(b'--:\r\n\r\nworld!\r\n--:--', stream.content.read())
def test_release_respects_content_length(self):
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {'CONTENT-LENGTH': 100500},
Stream(b'.' * 100500 + b'\r\n--:--'))
result = yield from obj.release()
self.assertIsNone(result)
self.assertTrue(obj.at_eof())
def test_release_release(self):
stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--')
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {}, stream)
yield from obj.release()
yield from obj.release()
self.assertEqual(b'--:\r\n\r\nworld!\r\n--:--', stream.content.read())
def test_filename(self):
part = aiohttp.multipart.BodyPartReader(
self.boundary,
{CONTENT_DISPOSITION: 'attachment; filename=foo.html'},
None)
self.assertEqual('foo.html', part.filename)
def test_reading_long_part(self):
size = 2 * stream_reader_default_limit
stream = StreamReader()
stream.feed_data(b'0' * size + b'\r\n--:--')
stream.feed_eof()
obj = aiohttp.multipart.BodyPartReader(
self.boundary, {}, stream)
data = yield from obj.read()
self.assertEqual(len(data), size)
class MultipartReaderTestCase(TestCase):
    """Tests for MultipartReader dispatch, traversal and release logic."""

    def test_from_response(self):
        """from_response() wraps the reader in MultipartResponseWrapper."""
        resp = Response({CONTENT_TYPE: 'multipart/related;boundary=":"'},
                        Stream(b'--:\r\n\r\nhello\r\n--:--'))
        res = aiohttp.multipart.MultipartReader.from_response(resp)
        self.assertIsInstance(res,
                              aiohttp.multipart.MultipartResponseWrapper)
        self.assertIsInstance(res.stream,
                              aiohttp.multipart.MultipartReader)

    def test_bad_boundary(self):
        """Overlong boundaries (>70 chars per RFC 2046) are rejected."""
        resp = Response(
            {CONTENT_TYPE: 'multipart/related;boundary=' + 'a' * 80},
            Stream(b''))
        with self.assertRaises(ValueError):
            aiohttp.multipart.MultipartReader.from_response(resp)

    def test_dispatch(self):
        """Plain parts dispatch to part_reader_cls."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        res = reader._get_part_reader({CONTENT_TYPE: 'text/plain'})
        self.assertIsInstance(res, reader.part_reader_cls)

    # NOTE(review): this test is byte-for-byte identical to test_dispatch
    # above; it presumably was meant to exercise a custom part_reader_cls —
    # kept as-is pending clarification of intent.
    def test_dispatch_bodypart(self):
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        res = reader._get_part_reader({CONTENT_TYPE: 'text/plain'})
        self.assertIsInstance(res, reader.part_reader_cls)

    def test_dispatch_multipart(self):
        """A nested multipart content-type dispatches to a nested reader."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'----:--\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'----:--\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'----:----\r\n'
                   b'--:--'))
        res = reader._get_part_reader(
            {CONTENT_TYPE: 'multipart/related;boundary=--:--'})
        self.assertIsInstance(res, reader.__class__)

    def test_dispatch_custom_multipart_reader(self):
        """multipart_reader_cls overrides the nested reader class."""
        class CustomReader(aiohttp.multipart.MultipartReader):
            pass
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'----:--\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'----:--\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'----:----\r\n'
                   b'--:--'))
        reader.multipart_reader_cls = CustomReader
        res = reader._get_part_reader(
            {CONTENT_TYPE: 'multipart/related;boundary=--:--'})
        self.assertIsInstance(res, CustomReader)

    def test_emit_next(self):
        """next() yields a body-part reader for the first part."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        res = yield from reader.next()
        self.assertIsInstance(res, reader.part_reader_cls)

    def test_invalid_boundary(self):
        """A stream that never shows the declared boundary is an error."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'---:\r\n\r\necho\r\n---:--'))
        with self.assertRaises(ValueError):
            yield from reader.next()

    def test_release(self):
        """release() drains the whole (nested) message."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/mixed;boundary=":"'},
            Stream(b'--:\r\n'
                   b'Content-Type: multipart/related;boundary=--:--\r\n'
                   b'\r\n'
                   b'----:--\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'----:--\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'----:----\r\n'
                   b'\r\n'
                   b'--:--'))
        yield from reader.release()
        self.assertTrue(reader.at_eof())

    def test_release_release(self):
        """A second release() after EOF is a harmless no-op."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        yield from reader.release()
        self.assertTrue(reader.at_eof())
        yield from reader.release()
        self.assertTrue(reader.at_eof())

    def test_release_next(self):
        """next() after release() returns None."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n\r\necho\r\n--:--'))
        yield from reader.release()
        self.assertTrue(reader.at_eof())
        res = yield from reader.next()
        self.assertIsNone(res)

    def test_second_next_releases_previous_object(self):
        """Advancing to the next part implicitly drains the previous one."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'--:\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'--:--'))
        first = yield from reader.next()
        self.assertIsInstance(first, aiohttp.multipart.BodyPartReader)
        second = yield from reader.next()
        self.assertTrue(first.at_eof())
        self.assertFalse(second.at_eof())

    def test_release_without_read_the_last_object(self):
        """Reading past the final part leaves every part at EOF."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'--:\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'--:--'))
        first = yield from reader.next()
        second = yield from reader.next()
        third = yield from reader.next()
        self.assertTrue(first.at_eof())
        # The original asserted second.at_eof() twice in a row (copy-paste);
        # the duplicate assertion was removed.
        self.assertTrue(second.at_eof())
        self.assertIsNone(third)

    def test_read_chunk_by_length_doesnt_breaks_reader(self):
        """Chunked reads bounded by Content-Length keep framing intact."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n'
                   b'Content-Length: 4\r\n\r\n'
                   b'test'
                   b'\r\n--:\r\n'
                   b'Content-Length: 6\r\n\r\n'
                   b'passed'
                   b'\r\n--:--'))
        body_parts = []
        while True:
            read_part = b''
            part = yield from reader.next()
            if part is None:
                break
            while not part.at_eof():
                read_part += yield from part.read_chunk(3)
            body_parts.append(read_part)
        self.assertListEqual(body_parts, [b'test', b'passed'])

    def test_read_chunk_from_stream_doesnt_breaks_reader(self):
        """Chunked reads without Content-Length keep framing intact."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'--:\r\n'
                   b'\r\n'
                   b'chunk'
                   b'\r\n--:\r\n'
                   b'\r\n'
                   b'two_chunks'
                   b'\r\n--:--'))
        body_parts = []
        while True:
            read_part = b''
            part = yield from reader.next()
            if part is None:
                break
            while not part.at_eof():
                chunk = yield from part.read_chunk(5)
                self.assertTrue(chunk)
                read_part += chunk
            body_parts.append(read_part)
        self.assertListEqual(body_parts, [b'chunk', b'two_chunks'])

    def test_reading_skips_prelude(self):
        """Bytes before the first boundary (the prelude) are ignored."""
        reader = aiohttp.multipart.MultipartReader(
            {CONTENT_TYPE: 'multipart/related;boundary=":"'},
            Stream(b'Multi-part data is not supported.\r\n'
                   b'\r\n'
                   b'--:\r\n'
                   b'\r\n'
                   b'test\r\n'
                   b'--:\r\n'
                   b'\r\n'
                   b'passed\r\n'
                   b'--:--'))
        first = yield from reader.next()
        self.assertIsInstance(first, aiohttp.multipart.BodyPartReader)
        second = yield from reader.next()
        self.assertTrue(first.at_eof())
        self.assertFalse(second.at_eof())
@asyncio.coroutine
def test_writer(writer):
    """A fresh writer is empty and carries the ':' boundary."""
    assert writer.size == 0
    assert writer.boundary == b':'
@asyncio.coroutine
def test_writer_serialize_io_chunk(buf, stream, writer):
    """A BytesIO part is serialized as octet-stream with its length."""
    source = io.BytesIO(b'foobarbaz')
    writer.append(source)
    yield from writer.write(stream)
    assert (buf == b'--:\r\nContent-Type: application/octet-stream'
            b'\r\nContent-Length: 9\r\n\r\nfoobarbaz\r\n--:--\r\n')
@asyncio.coroutine
def test_writer_serialize_json(buf, stream, writer):
    # Non-ASCII keys/values are serialized as \uXXXX escapes by json.dumps.
    writer.append_json({'привет': 'мир'})
    yield from writer.write(stream)
    assert (b'{"\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442":'
            b' "\\u043c\\u0438\\u0440"}' in buf)
@asyncio.coroutine
def test_writer_serialize_form(buf, stream, writer):
    """A list of pairs is urlencoded, preserving repeated keys."""
    fields = [('foo', 'bar'), ('foo', 'baz'), ('boo', 'zoo')]
    writer.append_form(fields)
    yield from writer.write(stream)
    assert b'foo=bar&foo=baz&boo=zoo' in buf
@asyncio.coroutine
def test_writer_serialize_form_dict(buf, stream, writer):
    """Dict form data is urlencoded with percent-escaped UTF-8 values."""
    fields = {'hello': 'мир'}
    writer.append_form(fields)
    yield from writer.write(stream)
    assert b'hello=%D0%BC%D0%B8%D1%80' in buf
@asyncio.coroutine
def test_writer_write(buf, stream, writer):
    # End-to-end serialization: plain text, JSON, two form payloads and a
    # nested multipart, each framed by the ':' boundary with auto-generated
    # Content-Type/Content-Length headers, emitted in append order.
    writer.append('foo-bar-baz')
    writer.append_json({'test': 'passed'})
    writer.append_form({'test': 'passed'})
    writer.append_form([('one', 1), ('two', 2)])

    sub_multipart = aiohttp.multipart.MultipartWriter(boundary='::')
    sub_multipart.append('nested content')
    sub_multipart.headers['X-CUSTOM'] = 'test'
    writer.append(sub_multipart)
    yield from writer.write(stream)

    assert (
        (b'--:\r\n'
         b'Content-Type: text/plain; charset=utf-8\r\n'
         b'Content-Length: 11\r\n\r\n'
         b'foo-bar-baz'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: application/json\r\n'
         b'Content-Length: 18\r\n\r\n'
         b'{"test": "passed"}'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: application/x-www-form-urlencoded\r\n'
         b'Content-Length: 11\r\n\r\n'
         b'test=passed'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: application/x-www-form-urlencoded\r\n'
         b'Content-Length: 11\r\n\r\n'
         b'one=1&two=2'
         b'\r\n'
         b'--:\r\n'
         b'Content-Type: multipart/mixed; boundary="::"\r\n'
         b'X-Custom: test\r\nContent-Length: 93\r\n\r\n'
         b'--::\r\n'
         b'Content-Type: text/plain; charset=utf-8\r\n'
         b'Content-Length: 14\r\n\r\n'
         b'nested content\r\n'
         b'--::--\r\n'
         b'\r\n'
         b'--:--\r\n') == bytes(buf))
@asyncio.coroutine
def test_writer_serialize_with_content_encoding_gzip(buf, stream, writer):
    # The part body is gzip-compressed on write; decompressing it (wbits
    # 16+MAX_WBITS selects the gzip wrapper) must restore the original text.
    writer.append('Time to Relax!', {CONTENT_ENCODING: 'gzip'})
    yield from writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)

    assert (b'--:\r\nContent-Encoding: gzip\r\n'
            b'Content-Type: text/plain; charset=utf-8' == headers)

    decompressor = zlib.decompressobj(wbits=16+zlib.MAX_WBITS)
    data = decompressor.decompress(message.split(b'\r\n')[0])
    data += decompressor.flush()
    assert b'Time to Relax!' == data
@asyncio.coroutine
def test_writer_serialize_with_content_encoding_deflate(buf, stream, writer):
    # Deflate-compressed body must equal the known raw-deflate stream for
    # 'Time to Relax!' followed by the closing boundary.
    writer.append('Time to Relax!', {CONTENT_ENCODING: 'deflate'})
    yield from writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)

    assert (b'--:\r\nContent-Encoding: deflate\r\n'
            b'Content-Type: text/plain; charset=utf-8' == headers)

    thing = b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--\r\n'
    assert thing == message
@asyncio.coroutine
def test_writer_serialize_with_content_encoding_identity(buf, stream, writer):
    # 'identity' means no transformation: the raw bytes pass through and
    # Content-Length reflects the unmodified payload size.
    thing = b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00'
    writer.append(thing, {CONTENT_ENCODING: 'identity'})
    yield from writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)

    assert (b'--:\r\nContent-Encoding: identity\r\n'
            b'Content-Type: application/octet-stream\r\n'
            b'Content-Length: 16' == headers)
    assert thing == message.split(b'\r\n')[0]
def test_writer_serialize_with_content_encoding_unknown(buf, stream, writer):
    # Unsupported Content-Encoding values are rejected at append() time,
    # before any serialization happens (hence no coroutine decorator here).
    with pytest.raises(RuntimeError):
        writer.append('Time to Relax!', {CONTENT_ENCODING: 'snappy'})
@asyncio.coroutine
def test_writer_with_content_transfer_encoding_base64(buf, stream, writer):
    """Text appended with base64 transfer encoding is encoded on write."""
    writer.append('Time to Relax!', {CONTENT_TRANSFER_ENCODING: 'base64'})
    yield from writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)
    expected_headers = (b'--:\r\nContent-Transfer-Encoding: base64\r\n'
                        b'Content-Type: text/plain; charset=utf-8')
    assert expected_headers == headers
    assert b'VGltZSB0byBSZWxheCE=' == message.split(b'\r\n')[0]
@asyncio.coroutine
def test_writer_content_transfer_encoding_quote_printable(buf, stream, writer):
    # Non-ASCII text with quoted-printable transfer encoding is emitted as
    # =XX escapes of its UTF-8 bytes.
    writer.append('Привет, мир!',
                  {CONTENT_TRANSFER_ENCODING: 'quoted-printable'})
    yield from writer.write(stream)
    headers, message = bytes(buf).split(b'\r\n\r\n', 1)
    assert (b'--:\r\nContent-Transfer-Encoding: quoted-printable\r\n'
            b'Content-Type: text/plain; charset=utf-8' == headers)
    assert (b'=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,'
            b' =D0=BC=D0=B8=D1=80!' == message.split(b'\r\n')[0])
def test_writer_content_transfer_encoding_unknown(buf, stream, writer):
    # Unknown transfer encodings are rejected synchronously at append().
    with pytest.raises(RuntimeError):
        writer.append('Time to Relax!', {CONTENT_TRANSFER_ENCODING: 'unknown'})
class MultipartWriterTestCase(unittest.TestCase):
    """Tests for MultipartWriter part management and serialization."""

    def setUp(self):
        # Capture writer output into a plain bytearray via a mock stream
        # whose write() records each chunk synchronously.
        self.buf = bytearray()
        self.stream = mock.Mock()

        def write(chunk):
            self.buf.extend(chunk)
            return ()

        self.stream.write.side_effect = write
        self.writer = aiohttp.multipart.MultipartWriter(boundary=':')

    def test_default_subtype(self):
        # Default Content-Type is multipart/mixed.
        mtype, stype, *_ = parse_mimetype(
            self.writer.headers.get(CONTENT_TYPE))
        self.assertEqual('multipart', mtype)
        self.assertEqual('mixed', stype)

    def test_bad_boundary(self):
        # Non-ASCII boundaries are rejected.
        with self.assertRaises(ValueError):
            aiohttp.multipart.MultipartWriter(boundary='тест')

    def test_default_headers(self):
        # The boundary is advertised, quoted, in the writer's headers.
        self.assertEqual({CONTENT_TYPE: 'multipart/mixed; boundary=":"'},
                         self.writer.headers)

    def test_iter_parts(self):
        # Iterating the writer yields one entry per appended part.
        self.writer.append('foo')
        self.writer.append('bar')
        self.writer.append('baz')
        self.assertEqual(3, len(list(self.writer)))

    def test_append(self):
        # append() wraps plain values in a Payload instance.
        self.assertEqual(0, len(self.writer))
        self.writer.append('hello, world!')
        self.assertEqual(1, len(self.writer))
        self.assertIsInstance(self.writer._parts[0][0], payload.Payload)

    def test_append_with_headers(self):
        # Extra headers passed to append() end up on the part.
        self.writer.append('hello, world!', {'x-foo': 'bar'})
        self.assertEqual(1, len(self.writer))
        self.assertIn('x-foo', self.writer._parts[0][0].headers)
        self.assertEqual(self.writer._parts[0][0].headers['x-foo'], 'bar')

    def test_append_json(self):
        # append_json() sets Content-Type: application/json.
        self.writer.append_json({'foo': 'bar'})
        self.assertEqual(1, len(self.writer))
        part = self.writer._parts[0][0]
        self.assertEqual(part.headers[CONTENT_TYPE], 'application/json')

    def test_append_part(self):
        # Headers passed alongside an existing Payload override its own.
        part = payload.get_payload(
            'test', headers={CONTENT_TYPE: 'text/plain'})
        self.writer.append(part, {CONTENT_TYPE: 'test/passed'})
        self.assertEqual(1, len(self.writer))
        part = self.writer._parts[0][0]
        self.assertEqual(part.headers[CONTENT_TYPE], 'test/passed')

    def test_append_json_overrides_content_type(self):
        # Explicit Content-Type wins over the application/json default.
        self.writer.append_json({'foo': 'bar'}, {CONTENT_TYPE: 'test/passed'})
        self.assertEqual(1, len(self.writer))
        part = self.writer._parts[0][0]
        self.assertEqual(part.headers[CONTENT_TYPE], 'test/passed')

    def test_append_form(self):
        # Explicit Content-Type wins over x-www-form-urlencoded too.
        self.writer.append_form({'foo': 'bar'}, {CONTENT_TYPE: 'test/passed'})
        self.assertEqual(1, len(self.writer))
        part = self.writer._parts[0][0]
        self.assertEqual(part.headers[CONTENT_TYPE], 'test/passed')

    def test_append_multipart(self):
        # A nested writer can be appended as a part with custom headers.
        subwriter = aiohttp.multipart.MultipartWriter(boundary=':')
        subwriter.append_json({'foo': 'bar'})
        self.writer.append(subwriter, {CONTENT_TYPE: 'test/passed'})
        self.assertEqual(1, len(self.writer))
        part = self.writer._parts[0][0]
        self.assertEqual(part.headers[CONTENT_TYPE], 'test/passed')

    def test_write(self):
        # Writing an empty multipart produces no chunks.
        self.assertEqual([], list(self.writer.write(self.stream)))

    def test_with(self):
        # The writer works as a context manager, accumulating parts.
        with aiohttp.multipart.MultipartWriter(boundary=':') as writer:
            writer.append('foo')
            writer.append(b'bar')
            writer.append_json({'baz': True})
        self.assertEqual(3, len(writer))

    def test_append_int_not_allowed(self):
        # Scalars with no payload mapping (int) are rejected.
        with self.assertRaises(TypeError):
            with aiohttp.multipart.MultipartWriter(boundary=':') as writer:
                writer.append(1)

    def test_append_float_not_allowed(self):
        # Scalars with no payload mapping (float) are rejected.
        with self.assertRaises(TypeError):
            with aiohttp.multipart.MultipartWriter(boundary=':') as writer:
                writer.append(1.1)

    def test_append_none_not_allowed(self):
        # None is not an appendable payload.
        with self.assertRaises(TypeError):
            with aiohttp.multipart.MultipartWriter(boundary=':') as writer:
                writer.append(None)
class ParseContentDispositionTestCase(unittest.TestCase):
# http://greenbytes.de/tech/tc2231/
def test_parse_empty(self):
disptype, params = parse_content_disposition(None)
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_inlonly(self):
disptype, params = parse_content_disposition('inline')
self.assertEqual('inline', disptype)
self.assertEqual({}, params)
def test_inlonlyquoted(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition('"inline"')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_semicolon(self):
disptype, params = parse_content_disposition(
'form-data; name="data"; filename="file ; name.mp4"')
self.assertEqual(disptype, 'form-data')
self.assertEqual(
params, {'name': 'data', 'filename': 'file ; name.mp4'})
def test_inlwithasciifilename(self):
disptype, params = parse_content_disposition(
'inline; filename="foo.html"')
self.assertEqual('inline', disptype)
self.assertEqual({'filename': 'foo.html'}, params)
def test_inlwithfnattach(self):
disptype, params = parse_content_disposition(
'inline; filename="Not an attachment!"')
self.assertEqual('inline', disptype)
self.assertEqual({'filename': 'Not an attachment!'}, params)
def test_attonly(self):
disptype, params = parse_content_disposition('attachment')
self.assertEqual('attachment', disptype)
self.assertEqual({}, params)
def test_attonlyquoted(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition('"attachment"')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attonlyucase(self):
disptype, params = parse_content_disposition('ATTACHMENT')
self.assertEqual('attachment', disptype)
self.assertEqual({}, params)
def test_attwithasciifilename(self):
disptype, params = parse_content_disposition(
'attachment; filename="foo.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo.html'}, params)
def test_inlwithasciifilenamepdf(self):
disptype, params = parse_content_disposition(
'attachment; filename="foo.pdf"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo.pdf'}, params)
def test_attwithasciifilename25(self):
disptype, params = parse_content_disposition(
'attachment; filename="0000000000111111111122222"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': '0000000000111111111122222'}, params)
def test_attwithasciifilename35(self):
disptype, params = parse_content_disposition(
'attachment; filename="00000000001111111111222222222233333"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': '00000000001111111111222222222233333'},
params)
def test_attwithasciifnescapedchar(self):
disptype, params = parse_content_disposition(
r'attachment; filename="f\oo.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo.html'}, params)
def test_attwithasciifnescapedquote(self):
disptype, params = parse_content_disposition(
'attachment; filename="\"quoting\" tested.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': '"quoting" tested.html'}, params)
@unittest.skip('need more smart parser which respects quoted text')
def test_attwithquotedsemicolon(self):
disptype, params = parse_content_disposition(
'attachment; filename="Here\'s a semicolon;.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'Here\'s a semicolon;.html'}, params)
def test_attwithfilenameandextparam(self):
disptype, params = parse_content_disposition(
'attachment; foo="bar"; filename="foo.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo.html', 'foo': 'bar'}, params)
def test_attwithfilenameandextparamescaped(self):
disptype, params = parse_content_disposition(
'attachment; foo="\"\\";filename="foo.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo.html', 'foo': '"\\'}, params)
def test_attwithasciifilenameucase(self):
disptype, params = parse_content_disposition(
'attachment; FILENAME="foo.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo.html'}, params)
def test_attwithasciifilenamenq(self):
disptype, params = parse_content_disposition(
'attachment; filename=foo.html')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo.html'}, params)
def test_attwithtokfncommanq(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo,bar.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attwithasciifilenamenqs(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo.html ;')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attemptyparam(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; ;filename=foo')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attwithasciifilenamenqws(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo bar.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attwithfntokensq(self):
disptype, params = parse_content_disposition(
"attachment; filename='foo.html'")
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': "'foo.html'"}, params)
def test_attwithisofnplain(self):
disptype, params = parse_content_disposition(
'attachment; filename="foo-ä.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo-ä.html'}, params)
def test_attwithutf8fnplain(self):
disptype, params = parse_content_disposition(
'attachment; filename="foo-ä.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo-ä.html'}, params)
def test_attwithfnrawpctenca(self):
disptype, params = parse_content_disposition(
'attachment; filename="foo-%41.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo-%41.html'}, params)
def test_attwithfnusingpct(self):
disptype, params = parse_content_disposition(
'attachment; filename="50%.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': '50%.html'}, params)
def test_attwithfnrawpctencaq(self):
disptype, params = parse_content_disposition(
r'attachment; filename="foo-%\41.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': r'foo-%41.html'}, params)
def test_attwithnamepct(self):
disptype, params = parse_content_disposition(
'attachment; filename="foo-%41.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo-%41.html'}, params)
def test_attwithfilenamepctandiso(self):
disptype, params = parse_content_disposition(
'attachment; filename="ä-%41.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'ä-%41.html'}, params)
def test_attwithfnrawpctenclong(self):
disptype, params = parse_content_disposition(
'attachment; filename="foo-%c3%a4-%e2%82%ac.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo-%c3%a4-%e2%82%ac.html'}, params)
def test_attwithasciifilenamews1(self):
disptype, params = parse_content_disposition(
'attachment; filename ="foo.html"')
self.assertEqual('attachment', disptype)
self.assertEqual({'filename': 'foo.html'}, params)
def test_attwith2filenames(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename="foo.html"; filename="bar.html"')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attfnbrokentoken(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo[1](2).html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attfnbrokentokeniso(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo-ä.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attfnbrokentokenutf(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo-ä.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attmissingdisposition(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'filename=foo.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attmissingdisposition2(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'x=y; filename=foo.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attmissingdisposition3(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'"foo; filename=bar;baz"; filename=qux')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attmissingdisposition4(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'filename=foo.html, filename=bar.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_emptydisposition(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'; filename=foo.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_doublecolon(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
': inline; attachment; filename=foo.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attandinline(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'inline; attachment; filename=foo.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attandinline2(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; inline; filename=foo.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attbrokenquotedfn(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename="foo.html".txt')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attbrokenquotedfn2(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename="bar')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attbrokenquotedfn3(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo"bar;baz"qux')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attmultinstances(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=foo.html, attachment; filename=bar.html')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attmissingdelim(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; foo=foo filename=bar')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attmissingdelim2(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment; filename=bar foo=foo')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attmissingdelim3(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'attachment filename=bar')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
def test_attreversed(self):
with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
disptype, params = parse_content_disposition(
'filename=foo.html; attachment')
self.assertEqual(None, disptype)
self.assertEqual({}, params)
    # Well-formed (if unusual) headers: the declared disposition type is kept
    # and parameters -- including RFC 2231 'filename*' encodings -- are decoded.
    def test_attconfusedparam(self):
        disptype, params = parse_content_disposition(
            'attachment; xfilename=foo.html')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'xfilename': 'foo.html'}, params)
    def test_attabspath(self):
        # The leading '/' of an absolute path is stripped from the filename.
        disptype, params = parse_content_disposition(
            'attachment; filename="/foo.html"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename': 'foo.html'}, params)
    def test_attabspathwin(self):
        # Same for a Windows-style leading backslash.
        disptype, params = parse_content_disposition(
            'attachment; filename="\\foo.html"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename': 'foo.html'}, params)
    def test_attcdate(self):
        disptype, params = parse_content_disposition(
            'attachment; creation-date="Wed, 12 Feb 1997 16:29:51 -0500"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'creation-date': 'Wed, 12 Feb 1997 16:29:51 -0500'},
                         params)
    def test_attmdate(self):
        disptype, params = parse_content_disposition(
            'attachment; modification-date="Wed, 12 Feb 1997 16:29:51 -0500"')
        self.assertEqual('attachment', disptype)
        self.assertEqual(
            {'modification-date': 'Wed, 12 Feb 1997 16:29:51 -0500'},
            params)
    def test_dispext(self):
        # Unknown disposition types are passed through unchanged.
        disptype, params = parse_content_disposition('foobar')
        self.assertEqual('foobar', disptype)
        self.assertEqual({}, params)
    def test_dispextbadfn(self):
        disptype, params = parse_content_disposition(
            'attachment; example="filename=example.txt"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'example': 'filename=example.txt'}, params)
    def test_attwithisofn2231iso(self):
        disptype, params = parse_content_disposition(
            "attachment; filename*=iso-8859-1''foo-%E4.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*': 'foo-ä.html'}, params)
    def test_attwithfn2231utf8(self):
        disptype, params = parse_content_disposition(
            "attachment; filename*=UTF-8''foo-%c3%a4-%e2%82%ac.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*': 'foo-ä-€.html'}, params)
    def test_attwithfn2231noc(self):
        # Missing charset defaults to UTF-8 decoding here.
        disptype, params = parse_content_disposition(
            "attachment; filename*=''foo-%c3%a4-%e2%82%ac.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*': 'foo-ä-€.html'}, params)
    def test_attwithfn2231utf8comp(self):
        # NFD input (a + combining diaeresis) decodes to a composed 'ä'.
        disptype, params = parse_content_disposition(
            "attachment; filename*=UTF-8''foo-a%cc%88.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*': 'foo-ä.html'}, params)
    # Malformed RFC 2231 'filename*' values: the bad parameter is dropped with
    # a BadContentDispositionParam warning while the disposition type is kept.
    # Some cases are skipped -- see the reasons given in the decorators.
    @unittest.skip('should raise decoding error: %82 is invalid for latin1')
    def test_attwithfn2231utf8_bad(self):
        with self.assertWarns(aiohttp.multipart.BadContentDispositionParam):
            disptype, params = parse_content_disposition(
                "attachment; filename*=iso-8859-1''foo-%c3%a4-%e2%82%ac.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({}, params)
    @unittest.skip('should raise decoding error: %E4 is invalid for utf-8')
    def test_attwithfn2231iso_bad(self):
        with self.assertWarns(aiohttp.multipart.BadContentDispositionParam):
            disptype, params = parse_content_disposition(
                "attachment; filename*=utf-8''foo-%E4.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({}, params)
    def test_attwithfn2231ws1(self):
        # Whitespace before '*=' invalidates the parameter.
        with self.assertWarns(aiohttp.multipart.BadContentDispositionParam):
            disptype, params = parse_content_disposition(
                "attachment; filename *=UTF-8''foo-%c3%a4.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({}, params)
    def test_attwithfn2231ws2(self):
        # Whitespace after '=' is tolerated.
        disptype, params = parse_content_disposition(
            "attachment; filename*= UTF-8''foo-%c3%a4.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*': 'foo-ä.html'}, params)
    def test_attwithfn2231ws3(self):
        # Whitespace between '*' and '=' is tolerated.
        disptype, params = parse_content_disposition(
            "attachment; filename* =UTF-8''foo-%c3%a4.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*': 'foo-ä.html'}, params)
    def test_attwithfn2231quot(self):
        # Quoting an extended value is invalid.
        with self.assertWarns(aiohttp.multipart.BadContentDispositionParam):
            disptype, params = parse_content_disposition(
                "attachment; filename*=\"UTF-8''foo-%c3%a4.html\"")
        self.assertEqual('attachment', disptype)
        self.assertEqual({}, params)
    def test_attwithfn2231quot2(self):
        with self.assertWarns(aiohttp.multipart.BadContentDispositionParam):
            disptype, params = parse_content_disposition(
                "attachment; filename*=\"foo%20bar.html\"")
        self.assertEqual('attachment', disptype)
        self.assertEqual({}, params)
    def test_attwithfn2231singleqmissing(self):
        # Extended values need two single quotes (charset'lang'value).
        with self.assertWarns(aiohttp.multipart.BadContentDispositionParam):
            disptype, params = parse_content_disposition(
                "attachment; filename*=UTF-8'foo-%c3%a4.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({}, params)
    @unittest.skip('urllib.parse.unquote is tolerate to standalone % chars')
    def test_attwithfn2231nbadpct1(self):
        with self.assertWarns(aiohttp.multipart.BadContentDispositionParam):
            disptype, params = parse_content_disposition(
                "attachment; filename*=UTF-8''foo%")
        self.assertEqual('attachment', disptype)
        self.assertEqual({}, params)
    @unittest.skip('urllib.parse.unquote is tolerate to standalone % chars')
    def test_attwithfn2231nbadpct2(self):
        with self.assertWarns(aiohttp.multipart.BadContentDispositionParam):
            disptype, params = parse_content_disposition(
                "attachment; filename*=UTF-8''f%oo.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({}, params)
    # 'filename*N' continuations, combined 'filename'/'filename*' parameters
    # and RFC 2047 edge cases.
    def test_attwithfn2231dpct(self):
        # Percent-decoding is applied exactly once: %25 41 -> %41, not 'A'.
        disptype, params = parse_content_disposition(
            "attachment; filename*=UTF-8''A-%2541.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*': 'A-%41.html'}, params)
    def test_attwithfn2231abspathdisguised(self):
        disptype, params = parse_content_disposition(
            "attachment; filename*=UTF-8''%5cfoo.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*': '\\foo.html'}, params)
    def test_attfncont(self):
        # Continuation parts are stored verbatim; joining happens elsewhere.
        disptype, params = parse_content_disposition(
            'attachment; filename*0="foo."; filename*1="html"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*0': 'foo.',
                          'filename*1': 'html'}, params)
    def test_attfncontqs(self):
        # Quoted-string backslash escapes are resolved: \b\a\r -> bar.
        disptype, params = parse_content_disposition(
            r'attachment; filename*0="foo"; filename*1="\b\a\r.html"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*0': 'foo',
                          'filename*1': 'bar.html'}, params)
    def test_attfncontenc(self):
        # NOTE(review): 'UTF-8''foo-%c3%a4' here is adjacent-literal
        # concatenation ('UTF-8foo-%c3%a4'), not an embedded quote pair --
        # input and expectation agree, but presumably "UTF-8''foo-%c3%a4"
        # was intended; confirm against the tc2231 catalogue.
        disptype, params = parse_content_disposition(
            'attachment; filename*0*=UTF-8''foo-%c3%a4; filename*1=".html"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*0*': 'UTF-8''foo-%c3%a4',
                          'filename*1': '.html'}, params)
    def test_attfncontlz(self):
        disptype, params = parse_content_disposition(
            'attachment; filename*0="foo"; filename*01="bar"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*0': 'foo',
                          'filename*01': 'bar'}, params)
    def test_attfncontnc(self):
        disptype, params = parse_content_disposition(
            'attachment; filename*0="foo"; filename*2="bar"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*0': 'foo',
                          'filename*2': 'bar'}, params)
    def test_attfnconts1(self):
        disptype, params = parse_content_disposition(
            'attachment; filename*0="foo."; filename*2="html"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*0': 'foo.',
                          'filename*2': 'html'}, params)
    def test_attfncontord(self):
        # Out-of-order parts are kept as parsed; ordering is a later concern.
        disptype, params = parse_content_disposition(
            'attachment; filename*1="bar"; filename*0="foo"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*0': 'foo',
                          'filename*1': 'bar'}, params)
    def test_attfnboth(self):
        disptype, params = parse_content_disposition(
            'attachment; filename="foo-ae.html";'
            " filename*=UTF-8''foo-%c3%a4.html")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename': 'foo-ae.html',
                          'filename*': 'foo-ä.html'}, params)
    def test_attfnboth2(self):
        # Same as above with the parameter order reversed.
        disptype, params = parse_content_disposition(
            "attachment; filename*=UTF-8''foo-%c3%a4.html;"
            ' filename="foo-ae.html"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename': 'foo-ae.html',
                          'filename*': 'foo-ä.html'}, params)
    def test_attfnboth3(self):
        disptype, params = parse_content_disposition(
            "attachment; filename*0*=ISO-8859-15''euro-sign%3d%a4;"
            " filename*=ISO-8859-1''currency-sign%3d%a4")
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename*': 'currency-sign=¤',
                          'filename*0*': "ISO-8859-15''euro-sign%3d%a4"},
                         params)
    def test_attnewandfn(self):
        disptype, params = parse_content_disposition(
            'attachment; foobar=x; filename="foo.html"')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'foobar': 'x',
                          'filename': 'foo.html'}, params)
    def test_attrfc2047token(self):
        # An RFC 2047 encoded-word as a bare token is rejected outright.
        with self.assertWarns(aiohttp.multipart.BadContentDispositionHeader):
            disptype, params = parse_content_disposition(
                'attachment; filename==?ISO-8859-1?Q?foo-=E4.html?=')
        self.assertEqual(None, disptype)
        self.assertEqual({}, params)
    def test_attrfc2047quoted(self):
        # Quoted encoded-words are accepted verbatim, not decoded.
        disptype, params = parse_content_disposition(
            'attachment; filename="=?ISO-8859-1?Q?foo-=E4.html?="')
        self.assertEqual('attachment', disptype)
        self.assertEqual({'filename': '=?ISO-8859-1?Q?foo-=E4.html?='}, params)
    def test_bad_continuous_param(self):
        with self.assertWarns(aiohttp.multipart.BadContentDispositionParam):
            disptype, params = parse_content_disposition(
                'attachment; filename*0=foo bar')
        self.assertEqual('attachment', disptype)
        self.assertEqual({}, params)
class ContentDispositionFilenameTestCase(unittest.TestCase):
    """Tests for ``content_disposition_filename``: picking the effective
    filename out of already-parsed Content-Disposition parameters
    (``filename``, extended ``filename*`` and ``filename*N`` continuations).
    """
    # http://greenbytes.de/tech/tc2231/
    def test_no_filename(self):
        self.assertIsNone(content_disposition_filename({}))
        self.assertIsNone(content_disposition_filename({'foo': 'bar'}))
    def test_filename(self):
        params = {'filename': 'foo.html'}
        self.assertEqual('foo.html', content_disposition_filename(params))
    def test_filename_ext(self):
        params = {'filename*': 'файл.html'}
        self.assertEqual('файл.html', content_disposition_filename(params))
    def test_attfncont(self):
        # Continuation parts 0..N are concatenated in order.
        params = {'filename*0': 'foo.', 'filename*1': 'html'}
        self.assertEqual('foo.html', content_disposition_filename(params))
    def test_attfncontqs(self):
        params = {'filename*0': 'foo', 'filename*1': 'bar.html'}
        self.assertEqual('foobar.html', content_disposition_filename(params))
    def test_attfncontenc(self):
        params = {'filename*0*': "UTF-8''foo-%c3%a4",
                  'filename*1': '.html'}
        self.assertEqual('foo-ä.html', content_disposition_filename(params))
    def test_attfncontlz(self):
        # 'filename*01' is not a valid continuation ordinal; only part 0 counts.
        params = {'filename*0': 'foo',
                  'filename*01': 'bar'}
        self.assertEqual('foo', content_disposition_filename(params))
    def test_attfncontnc(self):
        # A gap in the sequence (0 then 2) stops the join after part 0.
        params = {'filename*0': 'foo',
                  'filename*2': 'bar'}
        self.assertEqual('foo', content_disposition_filename(params))
    def test_attfnconts1(self):
        # Continuations must start at 0; starting at 1 yields no filename.
        params = {'filename*1': 'foo',
                  'filename*2': 'bar'}
        self.assertEqual(None, content_disposition_filename(params))
    def test_attfnboth(self):
        # The extended 'filename*' form takes precedence over plain 'filename'.
        params = {'filename': 'foo-ae.html',
                  'filename*': 'foo-ä.html'}
        self.assertEqual('foo-ä.html', content_disposition_filename(params))
    def test_attfnboth3(self):
        params = {'filename*0*': "ISO-8859-15''euro-sign%3d%a4",
                  'filename*': 'currency-sign=¤'}
        self.assertEqual('currency-sign=¤',
                         content_disposition_filename(params))
    def test_attrfc2047quoted(self):
        # RFC 2047 encoded-words are returned raw, not decoded.
        params = {'filename': '=?ISO-8859-1?Q?foo-=E4.html?='}
        self.assertEqual('=?ISO-8859-1?Q?foo-=E4.html?=',
                         content_disposition_filename(params))
| juliatem/aiohttp | tests/test_multipart.py | Python | apache-2.0 | 67,357 |
from __future__ import absolute_import
from .telegram import TelegramService
| brantje/telegram-github-bot | captain_hook/services/telegram/__init__.py | Python | apache-2.0 | 77 |
"""Home Assistant command line scripts."""
from __future__ import annotations
import argparse
import asyncio
import importlib
import logging
import os
import sys
from typing import Sequence
from homeassistant import runner
from homeassistant.bootstrap import async_mount_local_lib_path
from homeassistant.config import get_default_config_dir
from homeassistant.requirements import pip_kwargs
from homeassistant.util.package import install_package, is_installed, is_virtual_env
# mypy: allow-untyped-defs, no-warn-return-any
def run(args: list) -> int:
    """Run one of the scripts bundled in this directory.

    ``args[0]`` names the script -- any sub-package, or any ``*.py`` module
    other than ``__init__.py``, located next to this file.  The remaining
    arguments are forwarded to that script's own ``run()``.

    Returns the script's exit code, or 1 for a usage error or a failed
    dependency install.
    """

    def _usage_error(message: str, scripts: list) -> int:
        """Report *message* plus the list of available scripts; exit code 1."""
        print(message)
        print("Available scripts:", ", ".join(scripts))
        return 1

    # Discover runnable scripts: sub-packages keep their name, modules drop
    # the ".py" suffix.
    path = os.path.dirname(__file__)
    scripts = []
    for fil in os.listdir(path):
        if fil == "__pycache__":
            continue
        if os.path.isdir(os.path.join(path, fil)):
            scripts.append(fil)
        elif fil != "__init__.py" and fil.endswith(".py"):
            scripts.append(fil[:-3])

    if not args:
        return _usage_error("Please specify a script to run.", scripts)
    if args[0] not in scripts:
        return _usage_error("Invalid script specified.", scripts)

    script = importlib.import_module(f"homeassistant.scripts.{args[0]}")

    config_dir = extract_config_dir()

    loop = asyncio.get_event_loop()
    if not is_virtual_env():
        # Outside a venv the local deps dir must be importable first.
        loop.run_until_complete(async_mount_local_lib_path(config_dir))

    _pip_kwargs = pip_kwargs(config_dir)

    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # Install any requirements the script declares before handing over.
    for req in getattr(script, "REQUIREMENTS", []):
        if is_installed(req):
            continue
        if not install_package(req, **_pip_kwargs):
            print("Aborting script, could not install dependency", req)
            return 1

    asyncio.set_event_loop_policy(runner.HassEventLoopPolicy(False))

    return script.run(args[1:])  # type: ignore
def extract_config_dir(args: Sequence[str] | None = None) -> str:
    """Return the config dir named on the command line, or the default.

    Only the ``-c``/``--config`` option is recognised; every other argument
    is ignored.  When *args* is ``None``, ``sys.argv`` is parsed.
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("-c", "--config", default=None)
    known_args, _unknown = parser.parse_known_args(args)
    if known_args.config:
        return os.path.join(os.getcwd(), known_args.config)
    return get_default_config_dir()
| w1ll1am23/home-assistant | homeassistant/scripts/__init__.py | Python | apache-2.0 | 2,358 |
# Names of the environment variables recognised by DVC.  Only the names are
# defined here; presumably the values are read from ``os.environ`` by the
# callers -- confirm against the modules that import these constants.
DVC_CHECKPOINT = "DVC_CHECKPOINT"
DVC_DAEMON = "DVC_DAEMON"
DVC_PAGER = "DVC_PAGER"
DVC_ROOT = "DVC_ROOT"
# DVCLive integration settings (names suggest output path / summary / HTML /
# resume behaviour for the dvclive logger; verify in callers).
DVCLIVE_PATH = "DVCLIVE_PATH"
DVCLIVE_SUMMARY = "DVCLIVE_SUMMARY"
DVCLIVE_HTML = "DVCLIVE_HTML"
DVCLIVE_RESUME = "DVCLIVE_RESUME"
DVC_IGNORE_ISATTY = "DVC_IGNORE_ISATTY"
# Experiment-related settings (names suggest a git remote and auto-push for
# experiments; verify in callers).
DVC_EXP_GIT_REMOTE = "DVC_EXP_GIT_REMOTE"
DVC_EXP_AUTO_PUSH = "DVC_EXP_AUTO_PUSH"
DVC_NO_ANALYTICS = "DVC_NO_ANALYTICS"
| dmpetrov/dataversioncontrol | dvc/env.py | Python | apache-2.0 | 396 |
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip\
as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class NaturesBestSpider(BaseSpider):
    """Scrapy spider for naturesbest.co.uk.

    Starts from the alphabetical product-directory pages, follows every
    product link found there and emits one ``Product`` item per
    (SKU, price) row on each product page.  (Python 2 / legacy Scrapy API.)
    """
    name = 'naturesbest.co.uk'
    allowed_domains = ['www.naturesbest.co.uk', 'naturesbest.co.uk']
    # The A-Z product directory, split into five alphabetical buckets.
    start_urls = ('http://www.naturesbest.co.uk/page/productdirectory/?alpha=abcde',
                  'http://www.naturesbest.co.uk/page/productdirectory/?alpha=fghij',
                  'http://www.naturesbest.co.uk/page/productdirectory/?alpha=klmno',
                  'http://www.naturesbest.co.uk/page/productdirectory/?alpha=pqrst',
                  'http://www.naturesbest.co.uk/page/productdirectory/?alpha=uvwxyz')
    def parse(self, response):
        """Queue every product link on a directory page, then also try to
        scrape the page itself via ``parse_product``."""
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        # getting product links from A-Z product list
        letter_links = hxs.select(u'//div[@class="content"]')
        for letter_link in letter_links:
            prod_urls = letter_link.select(u'./div/a/@href').extract()
            for prod_url in prod_urls:
                url = urljoin_rfc(get_base_url(response), prod_url)
                yield Request(url)
        # products
        for product in self.parse_product(response):
            yield product
    def parse_product(self, response):
        """Yield a loaded ``Product`` per (SKU, price) row on a product page.

        Yields nothing when the page has no product title element (i.e. it
        is a directory page rather than a product page).
        """
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        name = hxs.select(u'//div[@class="productTITLE"]/h1/text()').extract()
        if name:
            url = response.url
            url = urljoin_rfc(get_base_url(response), url)
            skus = hxs.select('//td[@class="skuname"]/text()').extract()
            prices = hxs.select('//td[@class="price"]/text()').extract()
            skus_prices = zip(skus, prices)
            for sku, price in skus_prices:
                loader = ProductLoader(item=Product(), selector=hxs)
                loader.add_value('url', url)
                # \xa0 is a non-breaking space; normalise it to a plain space.
                loader.add_value('name', (name[0].strip() + ' ' + sku.strip()).replace(u'\xa0', ' '))
                #loader.add_value('sku', sku)
                loader.add_value('price', price)
yield loader.load_item() | 0--key/lib | portfolio/Python/scrapy/naturebest/naturesbest.py | Python | apache-2.0 | 2,587 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.tasks_v2beta2.proto import (
cloudtasks_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2,
)
from google.cloud.tasks_v2beta2.proto import (
queue_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2,
)
from google.cloud.tasks_v2beta2.proto import (
task_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class CloudTasksStub(object):
    """Cloud Tasks allows developers to manage the execution of background
    work in their applications.
    """
    # NOTE(review): generated gRPC stub (see the "DO NOT EDIT" banner at the
    # top of this module) -- regenerate from the .proto definitions instead
    # of editing by hand.
    def __init__(self, channel):
        """Constructor.

        Creates one unary-unary callable per CloudTasks RPC, pairing each
        request serializer with the matching response deserializer.

        Args:
            channel: A grpc.Channel.
        """
        self.ListQueues = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/ListQueues",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesResponse.FromString,
        )
        self.GetQueue = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/GetQueue",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetQueueRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
        )
        self.CreateQueue = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/CreateQueue",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateQueueRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
        )
        self.UpdateQueue = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/UpdateQueue",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.UpdateQueueRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
        )
        self.DeleteQueue = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/DeleteQueue",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteQueueRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.PurgeQueue = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/PurgeQueue",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PurgeQueueRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
        )
        self.PauseQueue = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/PauseQueue",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PauseQueueRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
        )
        self.ResumeQueue = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/ResumeQueue",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ResumeQueueRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
        )
        self.GetIamPolicy = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/GetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.SetIamPolicy = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/SetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.TestIamPermissions = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/TestIamPermissions",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
        )
        self.ListTasks = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/ListTasks",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksResponse.FromString,
        )
        self.GetTask = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/GetTask",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetTaskRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
        )
        self.CreateTask = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/CreateTask",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateTaskRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
        )
        self.DeleteTask = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/DeleteTask",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteTaskRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.LeaseTasks = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/LeaseTasks",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksResponse.FromString,
        )
        self.AcknowledgeTask = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/AcknowledgeTask",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.AcknowledgeTaskRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.RenewLease = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/RenewLease",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RenewLeaseRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
        )
        self.CancelLease = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/CancelLease",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CancelLeaseRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
        )
        self.RunTask = channel.unary_unary(
            "/google.cloud.tasks.v2beta2.CloudTasks/RunTask",
            request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RunTaskRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
        )
class CloudTasksServicer(object):
"""Cloud Tasks allows developers to manage the execution of background
work in their applications.
"""
def ListQueues(self, request, context):
"""Lists queues.
Queues are returned in lexicographical order.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetQueue(self, request, context):
"""Gets a queue.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateQueue(self, request, context):
"""Creates a queue.
Queues created with this method allow tasks to live for a maximum of 31
days. After a task is 31 days old, the task will be deleted regardless of whether
it was dispatched or not.
WARNING: Using this method may have unintended side effects if you are
using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
Read
[Overview of Queue Management and
queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
this method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateQueue(self, request, context):
"""Updates a queue.
This method creates the queue if it does not exist and updates
the queue if it does exist.
Queues created with this method allow tasks to live for a maximum of 31
days. After a task is 31 days old, the task will be deleted regardless of whether
it was dispatched or not.
WARNING: Using this method may have unintended side effects if you are
using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
Read
[Overview of Queue Management and
queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
this method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteQueue(self, request, context):
"""Deletes a queue.
This command will delete the queue even if it has tasks in it.
Note: If you delete a queue, a queue with the same name can't be created
for 7 days.
WARNING: Using this method may have unintended side effects if you are
using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
Read
[Overview of Queue Management and
queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
this method.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PurgeQueue(self, request, context):
"""Purges a queue by deleting all of its tasks.
All tasks created before this method is called are permanently deleted.
Purge operations can take up to one minute to take effect. Tasks
might be dispatched before the purge takes effect. A purge is irreversible.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PauseQueue(self, request, context):
"""Pauses the queue.
If a queue is paused then the system will stop dispatching tasks
until the queue is resumed via
[ResumeQueue][google.cloud.tasks.v2beta2.CloudTasks.ResumeQueue]. Tasks can still be added
when the queue is paused. A queue is paused if its
[state][google.cloud.tasks.v2beta2.Queue.state] is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ResumeQueue(self, request, context):
"""Resume a queue.
This method resumes a queue after it has been
[PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED] or
[DISABLED][google.cloud.tasks.v2beta2.Queue.State.DISABLED]. The state of a queue is stored
in the queue's [state][google.cloud.tasks.v2beta2.Queue.state]; after calling this method it
will be set to [RUNNING][google.cloud.tasks.v2beta2.Queue.State.RUNNING].
WARNING: Resuming many high-QPS queues at the same time can
lead to target overloading. If you are resuming high-QPS
queues, follow the 500/50/5 pattern described in
[Managing Cloud Tasks Scaling
Risks](https://cloud.google.com/tasks/docs/manage-cloud-task-scaling).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetIamPolicy(self, request, context):
"""Gets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue].
Returns an empty policy if the resource exists and does not have a policy
set.
Authorization requires the following
[Google IAM](https://cloud.google.com/iam) permission on the specified
resource parent:
* `cloudtasks.queues.getIamPolicy`
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetIamPolicy(self, request, context):
"""Sets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue]. Replaces any existing
policy.
Note: The Cloud Console does not check queue-level IAM permissions yet.
Project-level permissions are required to use the Cloud Console.
Authorization requires the following
[Google IAM](https://cloud.google.com/iam) permission on the specified
resource parent:
* `cloudtasks.queues.setIamPolicy`
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def TestIamPermissions(self, request, context):
"""Returns permissions that a caller has on a [Queue][google.cloud.tasks.v2beta2.Queue].
If the resource does not exist, this will return an empty set of
permissions, not a [NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
Note: This operation is designed to be used for building permission-aware
UIs and command-line tools, not for authorization checking. This operation
may "fail open" without warning.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListTasks(self, request, context):
"""Lists the tasks in a queue.
By default, only the [BASIC][google.cloud.tasks.v2beta2.Task.View.BASIC] view is retrieved
due to performance considerations;
[response_view][google.cloud.tasks.v2beta2.ListTasksRequest.response_view] controls the
subset of information which is returned.
The tasks may be returned in any order. The ordering may change at any
time.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetTask(self, request, context):
"""Gets a task.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateTask(self, request, context):
"""Creates a task and adds it to a queue.
Tasks cannot be updated after creation; there is no UpdateTask command.
* For [App Engine queues][google.cloud.tasks.v2beta2.AppEngineHttpTarget], the maximum task size is
100KB.
* For [pull queues][google.cloud.tasks.v2beta2.PullTarget], the maximum task size is 1MB.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteTask(self, request, context):
"""Deletes a task.
A task can be deleted if it is scheduled or dispatched. A task
cannot be deleted if it has completed successfully or permanently
failed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def LeaseTasks(self, request, context):
    """Leases tasks from a pull queue for
    [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration].
    This method is invoked by the worker to obtain a lease. The
    worker must acknowledge the task via
    [AcknowledgeTask][google.cloud.tasks.v2beta2.CloudTasks.AcknowledgeTask] after they have
    performed the work associated with the task.
    The [payload][google.cloud.tasks.v2beta2.PullMessage.payload] is intended to store data that
    the worker needs to perform the work associated with the task. To
    return the payloads in the [response][google.cloud.tasks.v2beta2.LeaseTasksResponse], set
    [response_view][google.cloud.tasks.v2beta2.LeaseTasksRequest.response_view] to
    [FULL][google.cloud.tasks.v2beta2.Task.View.FULL].
    A maximum of 10 qps of [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks]
    requests are allowed per
    queue. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
    is returned when this limit is
    exceeded. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
    is also returned when
    [max_tasks_dispatched_per_second][google.cloud.tasks.v2beta2.RateLimits.max_tasks_dispatched_per_second]
    is exceeded.
    """
    # Auto-generated base-servicer stub: overridden by concrete servicers.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def AcknowledgeTask(self, request, context):
    """Acknowledges a pull task.
    The worker, that is, the entity that
    [leased][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks] this task must call this method
    to indicate that the work associated with the task has finished.
    The worker must acknowledge a task within the
    [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration] or the lease
    will expire and the task will become available to be leased
    again. After the task is acknowledged, it will not be returned
    by a later [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks],
    [GetTask][google.cloud.tasks.v2beta2.CloudTasks.GetTask], or
    [ListTasks][google.cloud.tasks.v2beta2.CloudTasks.ListTasks].
    """
    # Auto-generated base-servicer stub: overridden by concrete servicers.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def RenewLease(self, request, context):
    """Renew the current lease of a pull task.
    The worker can use this method to extend the lease by a new
    duration, starting from now. The new task lease will be
    returned in the task's [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time].
    """
    # Auto-generated base-servicer stub: overridden by concrete servicers.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def CancelLease(self, request, context):
    """Cancel a pull task's lease.
    The worker can use this method to cancel a task's lease by
    setting its [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] to now. This will
    make the task available to be leased to the next caller of
    [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks].
    """
    # Auto-generated base-servicer stub: overridden by concrete servicers.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def RunTask(self, request, context):
    """Forces a task to run now.
    When this method is called, Cloud Tasks will dispatch the task, even if
    the task is already running, the queue has reached its [RateLimits][google.cloud.tasks.v2beta2.RateLimits] or
    is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
    This command is meant to be used for manual debugging. For
    example, [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] can be used to retry a failed
    task after a fix has been made or to manually force a task to be
    dispatched now.
    The dispatched task is returned. That is, the task that is returned
    contains the [status][google.cloud.tasks.v2beta2.Task.status] after the task is dispatched but
    before the task is received by its target.
    If Cloud Tasks receives a successful response from the task's
    target, then the task will be deleted; otherwise the task's
    [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] will be reset to the time that
    [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] was called plus the retry delay specified
    in the queue's [RetryConfig][google.cloud.tasks.v2beta2.RetryConfig].
    [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] returns
    [NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
    task that has already succeeded or permanently failed.
    [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] cannot be called on a
    [pull task][google.cloud.tasks.v2beta2.PullMessage].
    """
    # Auto-generated base-servicer stub: overridden by concrete servicers.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details("Method not implemented!")
    raise NotImplementedError("Method not implemented!")
def add_CloudTasksServicer_to_server(servicer, server):
    """Register ``servicer``'s CloudTasks handlers on a ``grpc.Server``.

    Auto-generated: maps every RPC name of the
    ``google.cloud.tasks.v2beta2.CloudTasks`` service to the corresponding
    servicer method together with its protobuf (de)serializers.
    """
    rpc_method_handlers = {
        "ListQueues": grpc.unary_unary_rpc_method_handler(
            servicer.ListQueues,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesResponse.SerializeToString,
        ),
        "GetQueue": grpc.unary_unary_rpc_method_handler(
            servicer.GetQueue,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetQueueRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
        ),
        "CreateQueue": grpc.unary_unary_rpc_method_handler(
            servicer.CreateQueue,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateQueueRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
        ),
        "UpdateQueue": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateQueue,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.UpdateQueueRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
        ),
        "DeleteQueue": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteQueue,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteQueueRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "PurgeQueue": grpc.unary_unary_rpc_method_handler(
            servicer.PurgeQueue,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PurgeQueueRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
        ),
        "PauseQueue": grpc.unary_unary_rpc_method_handler(
            servicer.PauseQueue,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PauseQueueRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
        ),
        "ResumeQueue": grpc.unary_unary_rpc_method_handler(
            servicer.ResumeQueue,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ResumeQueueRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.SerializeToString,
        ),
        "GetIamPolicy": grpc.unary_unary_rpc_method_handler(
            servicer.GetIamPolicy,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
        ),
        "SetIamPolicy": grpc.unary_unary_rpc_method_handler(
            servicer.SetIamPolicy,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString,
        ),
        "TestIamPermissions": grpc.unary_unary_rpc_method_handler(
            servicer.TestIamPermissions,
            request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString,
            response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString,
        ),
        "ListTasks": grpc.unary_unary_rpc_method_handler(
            servicer.ListTasks,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksResponse.SerializeToString,
        ),
        "GetTask": grpc.unary_unary_rpc_method_handler(
            servicer.GetTask,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetTaskRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
        ),
        "CreateTask": grpc.unary_unary_rpc_method_handler(
            servicer.CreateTask,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateTaskRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
        ),
        "DeleteTask": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteTask,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteTaskRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "LeaseTasks": grpc.unary_unary_rpc_method_handler(
            servicer.LeaseTasks,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksResponse.SerializeToString,
        ),
        "AcknowledgeTask": grpc.unary_unary_rpc_method_handler(
            servicer.AcknowledgeTask,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.AcknowledgeTaskRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "RenewLease": grpc.unary_unary_rpc_method_handler(
            servicer.RenewLease,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RenewLeaseRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
        ),
        "CancelLease": grpc.unary_unary_rpc_method_handler(
            servicer.CancelLease,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CancelLeaseRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
        ),
        "RunTask": grpc.unary_unary_rpc_method_handler(
            servicer.RunTask,
            request_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RunTaskRequest.FromString,
            response_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.cloud.tasks.v2beta2.CloudTasks", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| tseaver/google-cloud-python | tasks/google/cloud/tasks_v2beta2/proto/cloudtasks_pb2_grpc.py | Python | apache-2.0 | 29,408 |
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from django.db.models import Count, QuerySet
from zerver.models import UserActivity, UserProfile, Realm, \
get_realm, get_user_profile_by_email
import datetime
class Command(BaseCommand):
    """Management command printing rough per-client activity counts."""

    help = """Report rough client activity globally, for a realm, or for a user
Usage examples:
./manage.py client_activity
./manage.py client_activity zulip.com
./manage.py client_activity jesstess@zulip.com"""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('arg', metavar='<arg>', type=str, nargs='?', default=None,
                            help="realm or user to estimate client activity for")

    def compute_activity(self, user_activity_objects):
        # type: (QuerySet) -> None
        """Print per-client activity counts for the past week, then a total.

        This is a rough report of client activity because we inconsistently
        register activity from various clients; think of it as telling you
        approximately how many people from a group have used a particular
        client recently. For example, this might be useful to get a sense of
        how popular different versions of a desktop client are.

        Importantly, this does NOT tell you anything about the relative
        volumes of requests from clients.
        """
        threshold = datetime.datetime.now() - datetime.timedelta(days=7)
        client_counts = user_activity_objects.filter(
            last_visit__gt=threshold).values("client__name").annotate(
            count=Count('client__name'))

        total = 0
        counts = []
        for client_type in client_counts:
            count = client_type["count"]
            client = client_type["client__name"]
            total += count
            counts.append((count, client))

        # Sort ascending by count so the most-used clients print last.
        counts.sort()

        for count, client in counts:
            print("%25s %15d" % (client, count))
        print("Total:", total)

    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        if options['arg'] is None:
            # No argument: report global activity.
            self.compute_activity(UserActivity.objects.all())
            return

        arg = options['arg']
        try:
            # First try to interpret the argument as a user email.
            user_profile = get_user_profile_by_email(arg)
            self.compute_activity(UserActivity.objects.filter(
                user_profile=user_profile))
        except UserProfile.DoesNotExist:
            try:
                # Fall back to interpreting it as a realm.
                realm = get_realm(arg)
                self.compute_activity(UserActivity.objects.filter(
                    user_profile__realm=realm))
            except Realm.DoesNotExist:
                print("Unknown user or domain %s" % (arg,))
                # Bug fix: the original called `exit(1)`, which relies on the
                # `site`-module-injected builtin that is absent under
                # `python -S` or frozen interpreters. Raise SystemExit
                # explicitly (identical behavior otherwise).
                raise SystemExit(1)
| calvinleenyc/zulip | analytics/management/commands/client_activity.py | Python | apache-2.0 | 3,040 |
#!/usr/bin/env python
# Copyright (c) 2014, Alessandro Gatti - frob.it
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import sys
DESCRIPTION = 'Dumps given data as a list of binary values of arbitrary length'
NAME = 'dumpbinary'
# Endianness selectors used throughout the dumper.
LITTLE_ENDIAN = 0
BIG_ENDIAN = 1
BOTH_ENDIAN = 2


def print_big_endian(strip_spaces, *data):
    """Render *data* bytes most-significant-byte-first as binary octets.

    Octets are space-separated unless ``strip_spaces`` is true.
    """
    sep = '' if strip_spaces else ' '
    return sep.join('{:08b}'.format(byte) for byte in data)


def print_little_endian(strip_spaces, *data):
    """Render *data* bytes least-significant-byte-first as binary octets.

    Octets are space-separated unless ``strip_spaces`` is true.
    """
    sep = '' if strip_spaces else ' '
    return sep.join('{:08b}'.format(byte) for byte in reversed(data))


def print_line(strip_spaces, offset, endian, *data):
    """Print one dump line: a zero-padded hex offset plus binary rendering(s).

    Bug fix: the original computed the ``'{:08X}: '`` offset prefix into a
    local variable but never used it, so offsets were silently dropped from
    the output. The prefix is now emitted before the data.
    """
    prefix = '{:08X}: '.format(offset)
    if endian == BIG_ENDIAN:
        print(prefix + print_big_endian(strip_spaces, *data))
    elif endian == LITTLE_ENDIAN:
        print(prefix + print_little_endian(strip_spaces, *data))
    elif endian == BOTH_ENDIAN:
        print('%s%s | %s' % (prefix,
                             print_big_endian(strip_spaces, *data),
                             print_little_endian(strip_spaces, *data)))
def dump_byte(input_file):
    """Dump ``input_file`` one byte per line until EOF.

    Endianness and space-stripping are irrelevant for a single byte, so
    fixed values are passed through to ``print_line``.
    """
    offset = 0
    while True:
        byte = input_file.read(1)
        if len(byte) == 0:
            break
        # Bug fix: the original called print_line(False, offset, ord(byte)),
        # omitting the `endian` argument. The byte's value was therefore
        # consumed as the endianness and no data bytes were ever printed.
        print_line(False, offset, BIG_ENDIAN, ord(byte))
        offset += 1
def dump_word(input_file, endian, strip_spaces):
    """Dump ``input_file`` as 16-bit values, two bytes per line.

    Raises if the file length is not a multiple of two.
    """
    offset = 0
    word = input_file.read(2)
    while word:
        if len(word) == 1:
            # A trailing lone byte cannot form a complete word.
            raise Exception('Unaligned data')
        print_line(strip_spaces, offset, endian, ord(word[0]), ord(word[1]))
        offset += 2
        word = input_file.read(2)
def dump_dword(input_file, endian, strip_spaces):
    """Dump ``input_file`` as 32-bit values, four bytes per line.

    Raises if the file length is not a multiple of four.
    """
    offset = 0
    dword = input_file.read(4)
    while dword:
        if len(dword) != 4:
            # A short tail cannot form a complete double word.
            raise Exception('Unaligned data')
        values = [ord(ch) for ch in dword]
        print_line(strip_spaces, offset, endian, *values)
        offset += 4
        dword = input_file.read(4)
def dump(input_file, length, endian, strip_spaces):
    """Dispatch to the dumper matching the requested value width.

    ``length`` is 1, 2 or 4 bytes; anything else is silently ignored
    (argparse restricts the choices upstream). Always returns 0 so the
    result can be used as a process exit code.
    """
    wide_dumpers = {2: dump_word, 4: dump_dword}
    if length == 1:
        # Single bytes need no endianness handling.
        dump_byte(input_file)
    elif length in wide_dumpers:
        wide_dumpers[length](input_file, endian, strip_spaces)
    return 0
if __name__ == '__main__':
    # Command-line entry point: parse options and dump the input.
    parser = argparse.ArgumentParser(prog=NAME, description=DESCRIPTION)
    # Exactly one endianness selection is required.
    endianness = parser.add_mutually_exclusive_group(required=True)
    endianness.add_argument('--endian', metavar='endian',
                            choices=('little', 'big', 'l', 'b'),
                            help='endianness of the input data')
    endianness.add_argument('-l', '--little',
                            help='shortcut for --endian little',
                            action='store_true')
    endianness.add_argument('-b', '--big', help='shortcut for --endian big',
                            action='store_true')
    endianness.add_argument('--both', action='store_true',
                            help='show both endianness data side by side')
    parser.add_argument('-c', '--compact', action='store_true',
                        help='do not print spaces between bytes')
    parser.add_argument('length', metavar='length', type=int,
                        choices=(1, 2, 4),
                        help='length in bytes of the binary values')
    # NOTE(review): FileType('r') opens the input in *text* mode; for
    # arbitrary binary input this can fail to decode on Python 3 and
    # mangles newlines on Windows. 'rb' would be safer, but the dumpers
    # index with ord() on str, so both would need changing together.
    parser.add_argument('infile', metavar='input_file', nargs='?',
                        type=argparse.FileType('r'), default=sys.stdin,
                        help='the file to read from, or STDIN')
    parser.add_argument('--version', action='version', version='0.0.1')
    arguments = parser.parse_args()
    # Map the mutually-exclusive flags onto one endianness constant.
    endian = None
    if arguments.both:
        endian = BOTH_ENDIAN
    elif arguments.big or arguments.endian in ('big', 'b'):
        endian = BIG_ENDIAN
    else:
        endian = LITTLE_ENDIAN
    sys.exit(dump(arguments.infile, arguments.length, endian,
                  arguments.compact))
| arthurpro/HopperPlugins | tools/dumpbinary.py | Python | bsd-2-clause | 5,539 |
import os
import errno
def delete_file(file_name, dry=False):
    """Remove ``file_name``, then its parent directory if now empty.

    With ``dry=True`` nothing is touched; the would-be deletion is only
    reported.
    """
    if dry:
        print(' DRY DELETED: {}'.format(file_name))
        return
    os.remove(file_name)
    parent = os.path.dirname(file_name)
    try:
        os.rmdir(parent)
    except OSError as ex:
        # A still-populated parent directory is expected; re-raise anything else.
        if ex.errno != errno.ENOTEMPTY:
            raise
    else:
        print(' DELETED DIR: {}'.format(parent))
    print(' DELETED: {}'.format(file_name))
def run_dircmpdel(dircmp_file, prompt=True, dry=False):
    """
    Parse dircmp file for groups of file names to be deleted.

    The file contains blank-line-separated groups; within each group the
    first file is kept and every subsequent one is deleted (interactively
    when ``prompt`` is true, and only simulated when ``dry`` is true).
    """
    with open(dircmp_file) as fp:
        lines = fp.read()
    groups = lines.strip().split('\n\n')
    print('Found {} duplicate groups'.format(len(groups)))
    groups = (group.split('\n') for group in groups)
    checked_proper_cwd = False
    for group in groups:
        for i, file_name in enumerate(group):
            if not i:
                # First entry of a group: the file to keep. Verify once that
                # the paths resolve from the current working directory.
                if not checked_proper_cwd:
                    if not os.path.exists(file_name):
                        raise RuntimeError('File {} could not be found. '
                                           'Please ensure you are in the '
                                           'correct directory.'
                                           ''.format(file_name))
                    checked_proper_cwd = True
                print('Deleting duplicates of {}'.format(file_name))
            else:
                # Remaining entries are duplicates to remove.
                if prompt:
                    # Ask until an unambiguous yes/no answer is given.
                    while True:
                        resp = input(' Delete {}? '.format(file_name))
                        resp = resp.lower()
                        if resp not in ('yes', 'no'):
                            print('Please answer "yes" or "no".')
                        elif resp == 'yes':
                            delete_file(file_name, dry=dry)
                            break
                        elif resp == 'no':
                            print(' Not deleted: {}'.format(file_name))
                            break
                else:
                    delete_file(file_name, dry=dry)
        # Blank line between groups for readability.
        print()
if __name__ == '__main__':
    # CLI wrapper around run_dircmpdel().
    import argparse
    parser = argparse.ArgumentParser(
        description='Utility for deleting duplicate files found by dircmp'
    )
    parser.add_argument('file')
    # --no-prompt stores False into `prompt` (deletion without confirmation).
    parser.add_argument('--no-prompt',
                        action='store_false', default=True, dest='prompt')
    parser.add_argument('-d', '--dry',
                        action='store_true', default=False, dest='dry')
    args = parser.parse_args()
    run_dircmpdel(args.file, prompt=args.prompt, dry=args.dry)
| logston/python-dircmp | dircmppy/dircmpdel.py | Python | bsd-2-clause | 2,648 |
# -*- coding: UTF-8 -*-
# Copyright 2009-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.conf import settings
from django.utils.encoding import force_str
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.apps import apps ; get_models = apps.get_models
from lino.api import dd, rt
from lino.core import actions
from lino.core.utils import full_model_name
from lino.core.roles import SiteStaff
from lino.modlib.printing.choicelists import BuildMethods
from lino.modlib.checkdata.choicelists import Checker
# import them here to have them on rt.models.system:
from .choicelists import YesNo, Genders, PeriodEvents
from .mixins import Lockable
class BuildSiteCache(dd.Action):
    """Admin action that forces a rebuild of the Javascript site cache."""
    label = _("Rebuild site cache")
    url_action_name = "buildjs"

    def run_from_ui(self, ar):
        # `True` forces the renderer to regenerate even up-to-date files.
        settings.SITE.kernel.default_renderer.build_site_cache(True)
        return ar.success(
            """\
Seems that it worked. Refresh your browser.
<br>
Note that other users might experience side effects because
of the unexpected .js update, but there are no known problems so far.
Please report any anomalies.""",
            alert=_("Success"))
class SiteConfigManager(models.Manager):
    """Manager whose ``get()`` returns the site-wide cached singleton."""
    def get(self, *args, **kwargs):
        # There is exactly one SiteConfig; lookup arguments are irrelevant
        # and the cached instance avoids a database query.
        return settings.SITE.site_config
class SiteConfig(dd.Model):
    """Singleton database model holding site-wide configuration values."""

    class Meta(object):
        abstract = dd.is_abstract_model(__name__, 'SiteConfig')
        verbose_name = _("Site configuration")

    # Default manager: returns the cached singleton (no DB query).
    objects = SiteConfigManager()
    # Escape hatch that really queries the database.
    real_objects = models.Manager()

    default_build_method = BuildMethods.field(
        verbose_name=_("Default build method"),
        blank=True, null=True)

    # When set, the application pretends "today" is this date.
    simulate_today = models.DateField(
        _("Simulated date"), blank=True, null=True)

    site_company = dd.ForeignKey(
        "contacts.Company",
        blank=True, null=True,
        verbose_name=_("Site owner"),
        related_name='site_company_sites')

    def __str__(self):
        return force_str(_("Site Parameters"))

    def update(self, **kw):
        """
        Set some field of the SiteConfig object and store it to the
        database.

        Raises if a keyword does not name an existing attribute.
        """
        # print("20180502 update({})".format(kw))
        for k, v in kw.items():
            if not hasattr(self, k):
                raise Exception("SiteConfig has no attribute %r" % k)
            setattr(self, k, v)
        self.full_clean()
        self.save()

    def save(self, *args, **kw):
        # print("20180502 save() {}".format(dd.obj2str(self, True)))
        super(SiteConfig, self).save(*args, **kw)
        # Saving invalidates the site-wide cached instance so the next
        # access reloads fresh values.
        settings.SITE.clear_site_config()
def my_handler(sender, **kw):
    """Signal receiver: drop the cached SiteConfig instance.

    Connected to every event after which the cached row may be stale
    (settings changed, test setup, new DB connection, migrations).
    """
    # print("20180502 {} my_handler calls clear_site_config()".format(
    #     settings.SITE))
    settings.SITE.clear_site_config()
    #~ kw.update(sender=sender)
    # dd.database_connected.send(sender)
    #~ dd.database_connected.send(sender,**kw)


from django.test.signals import setting_changed
from lino.core.signals import testcase_setup
# Invalidate the cached SiteConfig whenever its backing data may change.
setting_changed.connect(my_handler)
testcase_setup.connect(my_handler)
dd.connection_created.connect(my_handler)
models.signals.post_migrate.connect(my_handler)
class SiteConfigs(dd.Table):
    """Admin table exposing the single SiteConfig row for editing."""
    model = 'system.SiteConfig'
    required_roles = dd.login_required(SiteStaff)
    # default_action = actions.ShowDetail()
    #~ has_navigator = False
    # Navigation and deletion make no sense for a singleton row.
    hide_navigator = True
    allow_delete = False
    # hide_top_toolbar = True
    #~ can_delete = perms.never
    detail_layout = dd.DetailLayout("""
default_build_method
# lino.ModelsBySite
""", window_size=(60, 'auto'))

    @classmethod
    def get_default_action(cls):
        # Open the detail view directly: there is only one row to show.
        return cls.detail_action

    # Toolbar action to rebuild the Javascript site cache.
    do_build = BuildSiteCache()
# if settings.SITE.user_model == 'users.User':
# dd.inject_field(settings.SITE.user_model,
# 'user_type', UserTypes.field())
# dd.inject_field(settings.SITE.user_model, 'language', dd.LanguageField())
class BleachChecker(Checker):
    """Data checker that finds (and can fix) unbleached HTML content."""
    verbose_name = _("Find unbleached html content")
    model = dd.Model

    def get_checkable_models(self):
        # Only models that declare at least one bleached field can have
        # unbleached content.
        for m in super(BleachChecker, self).get_checkable_models():
            if len(m._bleached_fields):
                yield m

    def get_checkdata_problems(self, obj, fix=False):
        t = tuple(obj.fields_to_bleach())
        if len(t):
            fldnames = ', '.join([f.name for f, old, new in t])
            yield (True, _("Fields {} have unbleached content.").format(fldnames))
            if fix:
                # Saving through the normal UI path applies the bleaching.
                obj.before_ui_save(None, None)
                obj.full_clean()
                obj.save()


BleachChecker.activate()
| lino-framework/lino | lino/modlib/system/models.py | Python | bsd-2-clause | 4,683 |
# -*- coding: utf-8 -*-
"""
/*******************************************************************************
* Copyright (c) cortical.io GmbH. All rights reserved.
*
* This software is confidential and proprietary information.
* You shall use it only in accordance with the terms of the
* license agreement you entered into with cortical.io GmbH.
******************************************************************************/
"""
import unittest
from cortical.expressionsApi import ExpressionsApi
import testConfiguration as conf
oneTermInputJSON = '{ "term" : "apple" }'
class TestClientExpreissions(unittest.TestCase):
    """Live-API tests for the cortical.io /expressions endpoints.

    NOTE(review): the class name contains a typo ("Expreissions"); renaming
    would change discovery output, so it is only flagged here.
    NOTE(review): the ``unicode`` checks below make this Python-2-only.
    """

    def setUp(self):
        self.api = ExpressionsApi(conf.client)
        self.jsonBulkExpression = None
        # path relative to current working dir
        with open("bulkInput.json", "r") as f:
            self.jsonBulkExpression = "".join(f.readlines())

    def testExpressions(self):
        # A resolved expression should yield a dense fingerprint.
        fp = self.api.resolveExpression(conf.RETINA_NAME, oneTermInputJSON, sparsity=0.5)
        self.assertGreater(len(fp.positions), 100)

    def testContexts(self):
        # Request exactly three contexts, with fingerprints included.
        contexts = self.api.getContextsForExpression(conf.RETINA_NAME, oneTermInputJSON, get_fingerprint=True, start_index=0, max_results=3, sparsity=1.0)
        self.assertTrue(contexts != None)
        self.assertEqual(3, len(contexts))
        c0 = contexts[0]
        self.assertGreater(len(c0.fingerprint.positions), 100)
        self.assertTrue(isinstance(c0.context_label, unicode))
        self.assertTrue(c0.context_id == 0)

    def testSimilarTerms(self):
        # Similar nouns for the expression, capped at eight results.
        terms = self.api.getSimilarTermsForExpressionContext(conf.RETINA_NAME, oneTermInputJSON, context_id=None, pos_type="NOUN", get_fingerprint=True, start_index=0, max_results=8, sparsity=1.0)
        self.assertTrue(terms != None)
        self.assertEqual(8, len(terms))
        t0 = terms[0]
        self.assertGreater(len(t0.fingerprint.positions), 100)
        self.assertTrue(t0 != None)

    def testExpressionBulk(self):
        # bulkInput.json contains six expressions; one fingerprint each.
        fps = self.api.resolveBulkExpression(conf.RETINA_NAME, self.jsonBulkExpression)
        self.assertEqual(6, len(fps))
        for fp in fps:
            self.assertGreater(len(fp.positions), 50)

    def testExpressionContextsBulk(self):
        # Three contexts per bulk expression, ids numbered from zero.
        contextsList = self.api.getContextsForBulkExpression(conf.RETINA_NAME, self.jsonBulkExpression, get_fingerprint=True, start_index=0, max_results=3)
        self.assertEqual(len(contextsList), 6)
        for contextList in contextsList:
            for i, context in enumerate(contextList):
                self.assertGreater(len(context.fingerprint.positions), 50)
                self.assertTrue(isinstance(context.context_label, unicode))
                self.assertTrue(context.context_id == i)

    def testExpressionSimilarTermsBulk(self):
        # Seven similar terms per bulk expression.
        termsLists = self.api.getSimilarTermsForBulkExpressionContext(conf.RETINA_NAME, self.jsonBulkExpression, get_fingerprint=True, max_results=7)
        self.assertTrue(len(termsLists) == 6)
        for termList in termsLists:
            self.assertTrue(len(termList) == 7)
            self.assertGreater(len(termList[0].fingerprint.positions), 100)


if __name__ == "__main__":
    unittest.main()
| cortical-io/python-client-sdk | tests/unit/testExpressionsApi.py | Python | bsd-2-clause | 3,216 |
#!/usr/bin/env python
# reflect input bytes to output, printing as it goes
import serial, sys, optparse, time
# Tail a serial device and append each timestamped line to a log file,
# echoing the same output to stdout.
parser = optparse.OptionParser("davis_log")
parser.add_option("--baudrate", type='int', default=57600, help='baud rate')
opts, args = parser.parse_args()

if len(args) != 2:
    print("usage: reflector.py <DEVICE> <logfile>")
    sys.exit(1)

device, logfile = args

# Five-second read timeout; all flow control disabled.
port = serial.Serial(device, opts.baudrate, timeout=5,
                     dsrdtr=False, rtscts=False, xonxoff=False)
log = open(logfile, mode="a")

while True:
    line = port.readline().rstrip()
    out = "%s %.2f\n" % (line, time.time())
    log.write(out)
    log.flush()
    sys.stdout.write(out)
    sys.stdout.flush()
| tridge/DavisSi1000 | Firmware/tools/davis_log.py | Python | bsd-2-clause | 715 |
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Oleh Kozynets <ok7mailbox@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# jona-sassenhagen <jona.sassenhagen@gmail.com>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
import contextlib
from functools import partial
from io import BytesIO
import os
import os.path as op
import sys
import time
import copy
import traceback
import warnings
import numpy as np
from collections import OrderedDict
from .colormap import calculate_lut
from .surface import _Surface
from .view import views_dicts, _lh_views_dict
from .callback import (ShowView, TimeCallBack, SmartCallBack,
UpdateLUT, UpdateColorbarScale)
from ..utils import (_show_help_fig, _get_color_list, concatenate_images,
_generate_default_filename, _save_ndarray_img)
from .._3d import (_process_clim, _handle_time, _check_views,
_handle_sensor_types, _plot_sensors)
from ...defaults import _handle_default, DEFAULTS
from ...externals.decorator import decorator
from ...fixes import _point_data, _cell_data
from ..._freesurfer import (vertex_to_mni, read_talxfm, read_freesurfer_lut,
_get_head_surface, _get_skull_surface)
from ...io.pick import pick_types
from ...io.meas_info import Info
from ...surface import (mesh_edges, _mesh_borders, _marching_cubes,
get_meg_helmet_surf)
from ...source_space import SourceSpaces
from ...transforms import (apply_trans, invert_transform, _get_trans,
_get_transforms_to_coord_frame)
from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type,
use_log_level, Bunch, _ReuseCycle, warn,
get_subjects_dir, _check_fname, _to_rgb)
_ARROW_MOVE = 10 # degrees per press
@decorator
def safe_event(fun, *args, **kwargs):
    """Protect against PyQt5 exiting on event-handling errors."""
    try:
        return fun(*args, **kwargs)
    except Exception:
        # Deliberately broad: an exception escaping a Qt event handler can
        # abort the whole process, so log the traceback and swallow it.
        traceback.print_exc(file=sys.stderr)
class _Overlay(object):
    """One scalar layer (data + colormap + range + opacity) on a mesh."""

    def __init__(self, scalars, colormap, rng, opacity, name):
        self._scalars = scalars
        self._colormap = colormap
        assert rng is not None
        self._rng = rng
        self._opacity = opacity
        self._name = name

    def to_colors(self):
        """Return RGBA colors for this overlay's scalars.

        ``_colormap`` may be a colormap name, a Colormap instance, or an
        array of 0-255 color values (converted to a ListedColormap).
        """
        from .._3d import _get_cmap
        from matplotlib.colors import Colormap, ListedColormap

        if isinstance(self._colormap, str):
            cmap = _get_cmap(self._colormap)
        elif isinstance(self._colormap, Colormap):
            cmap = self._colormap
        else:
            cmap = ListedColormap(
                self._colormap / 255., name=str(type(self._colormap)))
        logger.debug(
            f'Color mapping {repr(self._name)} with {cmap.name} '
            f'colormap and range {self._rng}')

        rng = self._rng
        assert rng is not None
        # Normalize scalars into [0, 1] before applying the colormap.
        scalars = _norm(self._scalars, rng)

        colors = cmap(scalars)
        if self._opacity is not None:
            # Scale the alpha channel by the overlay-wide opacity.
            colors[:, 3] *= self._opacity
        return colors
def _norm(x, rng):
if rng[0] == rng[1]:
factor = 1 if rng[0] == 0 else 1e-6 * rng[0]
else:
factor = rng[1] - rng[0]
return (x - rng[0]) / factor
class _LayeredMesh(object):
    """A triangulated surface whose color is an alpha-composite of overlays.

    Overlays are kept in insertion order; the final vertex colors are the
    bottom-to-top "over" composition of all overlay colors. The composition
    of all-but-the-last overlay is cached so updating the topmost overlay
    (the common case) only needs one compositing step.
    """

    def __init__(self, renderer, vertices, triangles, normals):
        self._renderer = renderer
        self._vertices = vertices
        self._triangles = triangles
        self._normals = normals

        self._polydata = None
        self._actor = None
        self._is_mapped = False

        self._current_colors = None   # composite of all overlays
        self._cached_colors = None    # composite of all but the last overlay
        self._overlays = OrderedDict()

        self._default_scalars = np.ones(vertices.shape)
        self._default_scalars_name = 'Data'

    def map(self):
        """Create the renderer mesh (actor + polydata) for this surface."""
        kwargs = {
            "color": None,
            "pickable": True,
            "rgba": True,
        }
        mesh_data = self._renderer.mesh(
            x=self._vertices[:, 0],
            y=self._vertices[:, 1],
            z=self._vertices[:, 2],
            triangles=self._triangles,
            normals=self._normals,
            scalars=self._default_scalars,
            **kwargs
        )
        self._actor, self._polydata = mesh_data
        self._is_mapped = True

    def _compute_over(self, B, A):
        """Alpha-composite RGBA array ``A`` over ``B`` ("over" operator).

        Both are (n, 4) arrays with values in [0, 1]; returns the clipped
        composite without modifying the inputs.
        """
        assert A.ndim == B.ndim == 2
        assert A.shape[1] == B.shape[1] == 4
        A_w = A[:, 3:]  # * 1
        B_w = B[:, 3:] * (1 - A_w)
        C = A.copy()
        C[:, :3] *= A_w
        C[:, :3] += B[:, :3] * B_w
        C[:, 3:] += B_w
        C[:, :3] /= C[:, 3:]
        return np.clip(C, 0, 1, out=C)

    def _compose_overlays(self):
        """Composite all overlays bottom-to-top.

        Returns ``(final, cache)`` where ``cache`` is the composite of all
        but the last overlay (or None when there are fewer than two).
        """
        B = cache = None
        for overlay in self._overlays.values():
            A = overlay.to_colors()
            if B is None:
                B = A
            else:
                cache = B
                B = self._compute_over(cache, A)
        return B, cache

    def add_overlay(self, scalars, colormap, rng, opacity, name):
        """Add a named overlay on top of the stack and re-render."""
        overlay = _Overlay(
            scalars=scalars,
            colormap=colormap,
            rng=rng,
            opacity=opacity,
            name=name,
        )
        self._overlays[name] = overlay
        colors = overlay.to_colors()

        if self._current_colors is None:
            self._current_colors = colors
        else:
            # save previous colors to cache
            self._cached_colors = self._current_colors
            self._current_colors = self._compute_over(
                self._cached_colors, colors)

        # apply the texture
        self._apply()

    def remove_overlay(self, names):
        """Remove the overlays in ``names`` (str or list) and re-render."""
        to_update = False
        if not isinstance(names, list):
            names = [names]
        for name in names:
            if name in self._overlays:
                del self._overlays[name]
                to_update = True
        if to_update:
            self.update()

    def _apply(self):
        """Push the current composite colors to the renderer's polydata."""
        if self._current_colors is None or self._renderer is None:
            return
        self._renderer._set_mesh_scalars(
            mesh=self._polydata,
            scalars=self._current_colors,
            name=self._default_scalars_name,
        )

    def update(self, colors=None):
        """Recompute composite colors (fast path when ``colors`` is the
        topmost overlay's colors and a cache exists) and re-render."""
        if colors is not None and self._cached_colors is not None:
            self._current_colors = self._compute_over(
                self._cached_colors, colors)
        else:
            self._current_colors, self._cached_colors = \
                self._compose_overlays()
        self._apply()

    def _clean(self):
        """Break renderer/actor reference cycles before disposal."""
        mapper = self._actor.GetMapper()
        mapper.SetLookupTable(None)
        self._actor.SetMapper(None)
        self._actor = None
        self._polydata = None
        self._renderer = None

    def update_overlay(self, name, scalars=None, colormap=None,
                       opacity=None, rng=None):
        """Modify attributes of an existing overlay and re-render.

        Unknown names are ignored; only non-None arguments are applied.
        """
        overlay = self._overlays.get(name, None)
        if overlay is None:
            return
        if scalars is not None:
            overlay._scalars = scalars
        if colormap is not None:
            overlay._colormap = colormap
        if opacity is not None:
            overlay._opacity = opacity
        if rng is not None:
            overlay._rng = rng
        # partial update: use cache if possible
        if name == list(self._overlays.keys())[-1]:
            self.update(colors=overlay.to_colors())
        else:  # full update
            self.update()
@fill_doc
class Brain(object):
"""Class for visualizing a brain.
.. warning::
The API for this class is not currently complete. We suggest using
:meth:`mne.viz.plot_source_estimates` with the PyVista backend
enabled to obtain a ``Brain`` instance.
Parameters
----------
subject_id : str
Subject name in Freesurfer subjects dir.
hemi : str
Hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case
of 'both', both hemispheres are shown in the same window.
In the case of 'split' hemispheres are displayed side-by-side
in different viewing panes.
surf : str
FreeSurfer surface mesh name (ie 'white', 'inflated', etc.).
title : str
Title for the window.
cortex : str, list, dict
Specifies how the cortical surface is rendered. Options:
1. The name of one of the preset cortex styles:
``'classic'`` (default), ``'high_contrast'``,
``'low_contrast'``, or ``'bone'``.
2. A single color-like argument to render the cortex as a single
color, e.g. ``'red'`` or ``(0.1, 0.4, 1.)``.
3. A list of two color-like used to render binarized curvature
        values for gyral (first) and sulcal (second) regions, e.g.,
``['red', 'blue']`` or ``[(1, 0, 0), (0, 0, 1)]``.
4. A dict containing keys ``'vmin', 'vmax', 'colormap'`` with
values used to render the binarized curvature (where 0 is gyral,
1 is sulcal).
.. versionchanged:: 0.24
Add support for non-string arguments.
alpha : float in [0, 1]
Alpha level to control opacity of the cortical surface.
size : int | array-like, shape (2,)
The size of the window, in pixels. can be one number to specify
a square window, or a length-2 sequence to specify (width, height).
background : tuple(int, int, int)
The color definition of the background: (red, green, blue).
foreground : matplotlib color
Color of the foreground (will be used for colorbars and text).
None (default) will use black or white depending on the value
of ``background``.
figure : list of Figure | None
If None (default), a new window will be created with the appropriate
views.
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment
variable.
views : list | str
The views to use.
offset : bool | str
If True, shifts the right- or left-most x coordinate of the left and
right surfaces, respectively, to be at zero. This is useful for viewing
inflated surface where hemispheres typically overlap. Can be "auto"
(default) use True with inflated surfaces and False otherwise
(Default: 'auto'). Only used when ``hemi='both'``.
.. versionchanged:: 0.23
Default changed to "auto".
show_toolbar : bool
If True, toolbars will be shown for each view.
offscreen : bool
If True, rendering will be done offscreen (not shown). Useful
mostly for generating images or screenshots, but can be buggy.
Use at your own risk.
interaction : str
Can be "trackball" (default) or "terrain", i.e. a turntable-style
camera.
units : str
Can be 'm' or 'mm' (default).
%(view_layout)s
silhouette : dict | bool
As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity
and ``decimate`` (level of decimation between 0 and 1 or None) of the
brain's silhouette to display. If True, the default values are used
and if False, no silhouette will be displayed. Defaults to False.
theme : str | path-like
Can be "auto" (default), "light", or "dark" or a path-like to a
custom stylesheet. For Dark-Mode and automatic Dark-Mode-Detection,
        :mod:`qdarkstyle` and `darkdetect
        <https://github.com/albertosottile/darkdetect>`__ are required,
        respectively.
show : bool
Display the window as soon as it is ready. Defaults to True.
Attributes
----------
geo : dict
A dictionary of PyVista surface objects for each hemisphere.
overlays : dict
The overlays.
Notes
-----
This table shows the capabilities of each Brain backend ("✓" for full
support, and "-" for partial support):
.. table::
:widths: auto
+---------------------------+--------------+---------------+
| 3D function: | surfer.Brain | mne.viz.Brain |
+===========================+==============+===============+
| add_annotation | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_data | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_foci | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_head | | ✓ |
+---------------------------+--------------+---------------+
| add_label | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_sensors | | ✓ |
+---------------------------+--------------+---------------+
| add_skull | | ✓ |
+---------------------------+--------------+---------------+
| add_text | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_volume_labels | | ✓ |
+---------------------------+--------------+---------------+
| close | ✓ | ✓ |
+---------------------------+--------------+---------------+
| data | ✓ | ✓ |
+---------------------------+--------------+---------------+
| foci | ✓ | |
+---------------------------+--------------+---------------+
| labels | ✓ | ✓ |
+---------------------------+--------------+---------------+
| remove_data | | ✓ |
+---------------------------+--------------+---------------+
| remove_foci | ✓ | |
+---------------------------+--------------+---------------+
| remove_head | | ✓ |
+---------------------------+--------------+---------------+
| remove_labels | ✓ | ✓ |
+---------------------------+--------------+---------------+
| remove_annotations | - | ✓ |
+---------------------------+--------------+---------------+
| remove_sensors | | ✓ |
+---------------------------+--------------+---------------+
| remove_skull | | ✓ |
+---------------------------+--------------+---------------+
| remove_text | | ✓ |
+---------------------------+--------------+---------------+
| remove_volume_labels | | ✓ |
+---------------------------+--------------+---------------+
| scale_data_colormap | ✓ | |
+---------------------------+--------------+---------------+
| save_image | ✓ | ✓ |
+---------------------------+--------------+---------------+
| save_movie | ✓ | ✓ |
+---------------------------+--------------+---------------+
| screenshot | ✓ | ✓ |
+---------------------------+--------------+---------------+
| show_view | ✓ | ✓ |
+---------------------------+--------------+---------------+
| TimeViewer | ✓ | ✓ |
+---------------------------+--------------+---------------+
| enable_depth_peeling | | ✓ |
+---------------------------+--------------+---------------+
| get_picked_points | | ✓ |
+---------------------------+--------------+---------------+
| add_data(volume) | | ✓ |
+---------------------------+--------------+---------------+
| view_layout | | ✓ |
+---------------------------+--------------+---------------+
| flatmaps | | ✓ |
+---------------------------+--------------+---------------+
| vertex picking | | ✓ |
+---------------------------+--------------+---------------+
| label picking | | ✓ |
+---------------------------+--------------+---------------+
"""
def __init__(self, subject_id, hemi='both', surf='pial', title=None,
             cortex="classic", alpha=1.0, size=800, background="black",
             foreground=None, figure=None, subjects_dir=None,
             views='auto', offset='auto', show_toolbar=False,
             offscreen=False, interaction='trackball', units='mm',
             view_layout='vertical', silhouette=False, theme='auto',
             show=True):
    from ..backends.renderer import backend, _get_renderer
    # Normalize the hemisphere argument; None means volume-only.
    if hemi is None:
        hemi = 'vol'
    hemi = self._check_hemi(hemi, extras=('both', 'split', 'vol'))
    if hemi in ('both', 'split'):
        self._hemis = ('lh', 'rh')
    else:
        assert hemi in ('lh', 'rh', 'vol')
        self._hemis = (hemi, )
    self._view_layout = _check_option('view_layout', view_layout,
                                      ('vertical', 'horizontal'))
    if figure is not None and not isinstance(figure, int):
        backend._check_3d_figure(figure)
    if title is None:
        self._title = subject_id
    else:
        self._title = title
    self._interaction = 'trackball'
    # Foreground defaults to whichever of black/white contrasts with
    # the background color.
    self._bg_color = _to_rgb(background, name='background')
    if foreground is None:
        foreground = 'w' if sum(self._bg_color) < 2 else 'k'
    self._fg_color = _to_rgb(foreground, name='foreground')
    del background, foreground
    views = _check_views(surf, views, hemi)
    # Subplot grid: one row per view, one column per hemisphere shown
    # ('split' uses two columns); transposed for horizontal layout.
    col_dict = dict(lh=1, rh=1, both=1, split=2, vol=1)
    shape = (len(views), col_dict[hemi])
    if self._view_layout == 'horizontal':
        shape = shape[::-1]
    self._subplot_shape = shape
    size = tuple(np.atleast_1d(size).round(0).astype(int).flat)
    if len(size) not in (1, 2):
        raise ValueError('"size" parameter must be an int or length-2 '
                         'sequence of ints.')
    size = size if len(size) == 2 else size * 2  # 1-tuple to 2-tuple
    subjects_dir = get_subjects_dir(subjects_dir)
    self.theme = theme
    self.time_viewer = False
    self._hemi = hemi
    self._units = units
    self._alpha = float(alpha)
    self._subject_id = subject_id
    self._subjects_dir = subjects_dir
    self._views = views
    self._times = None
    self._vertex_to_label_id = dict()
    self._annotation_labels = dict()
    self._labels = {'lh': list(), 'rh': list()}
    self._unnamed_label_id = 0  # can only grow
    self._annots = {'lh': list(), 'rh': list()}
    self._layered_meshes = dict()
    self._actors = dict()
    self._elevation_rng = [15, 165]  # range of motion of camera on theta
    self._lut_locked = None
    self._cleaned = False
    # default values for silhouette
    self._silhouette = {
        'color': self._bg_color,
        'line_width': 2,
        'alpha': alpha,
        'decimate': 0.9,
    }
    _validate_type(silhouette, (dict, bool), 'silhouette')
    if isinstance(silhouette, dict):
        self._silhouette.update(silhouette)
        self.silhouette = True
    else:
        self.silhouette = silhouette
    self._scalar_bar = None
    # for now only one time label can be added
    # since it is the same for all figures
    self._time_label_added = False
    # array of data used by TimeViewer
    self._data = {}
    self.geo = {}
    self.set_time_interpolation('nearest')
    geo_kwargs = self._cortex_colormap(cortex)
    # evaluate at the midpoint of the used colormap
    val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin'])
    self._brain_color = geo_kwargs['colormap'](val)
    # load geometry for one or both hemispheres as necessary
    _validate_type(offset, (str, bool), 'offset')
    if isinstance(offset, str):
        _check_option('offset', offset, ('auto',), extra='when str')
        offset = (surf in ('inflated', 'flat'))
    offset = None if (not offset or hemi != 'both') else 0.0
    logger.debug(f'Hemi offset: {offset}')
    self._renderer = _get_renderer(name=self._title, size=size,
                                   bgcolor=self._bg_color,
                                   shape=shape,
                                   fig=figure)
    self._renderer._window_close_connect(self._clean)
    self._renderer._window_set_theme(theme)
    self.plotter = self._renderer.plotter
    self._setup_canonical_rotation()
    # plot hemis
    for h in ('lh', 'rh'):
        if h not in self._hemis:
            continue  # don't make surface if not chosen
        # Initialize a Surface object as the geometry
        geo = _Surface(self._subject_id, h, surf, self._subjects_dir,
                       offset, units=self._units, x_dir=self._rigid[0, :3])
        # Load in the geometry and curvature
        geo.load_geometry()
        geo.load_curvature()
        self.geo[h] = geo
        for _, _, v in self._iter_views(h):
            if self._layered_meshes.get(h) is None:
                mesh = _LayeredMesh(
                    renderer=self._renderer,
                    vertices=self.geo[h].coords,
                    triangles=self.geo[h].faces,
                    normals=self.geo[h].nn,
                )
                mesh.map()  # send to GPU
                # binarized curvature forms the base color layer
                mesh.add_overlay(
                    scalars=self.geo[h].bin_curv,
                    colormap=geo_kwargs["colormap"],
                    rng=[geo_kwargs["vmin"], geo_kwargs["vmax"]],
                    opacity=alpha,
                    name='curv',
                )
                self._layered_meshes[h] = mesh
                # add metadata to the mesh for picking
                mesh._polydata._hemi = h
            else:
                # additional views reuse the already-created actor
                actor = self._layered_meshes[h]._actor
                self._renderer.plotter.add_actor(actor, render=False)
            if self.silhouette:
                mesh = self._layered_meshes[h]
                self._renderer._silhouette(
                    mesh=mesh._polydata,
                    color=self._silhouette["color"],
                    line_width=self._silhouette["line_width"],
                    alpha=self._silhouette["alpha"],
                    decimate=self._silhouette["decimate"],
                )
            self._renderer.set_camera(update=False, reset_camera=False,
                                      **views_dicts[h][v])
    self.interaction = interaction
    self._closed = False
    if show:
        self.show()
    # update the views once the geometry is all set
    for h in self._hemis:
        for ri, ci, v in self._iter_views(h):
            self.show_view(v, row=ri, col=ci, hemi=h)
    if surf == 'flat':
        self._renderer.set_interaction("rubber_band_2d")
def _setup_canonical_rotation(self):
    """Estimate a rigid alignment of this subject toward Talairach space."""
    from ...coreg import fit_matched_points, _trans_from_params
    self._rigid = np.eye(4)
    try:
        xfm = read_talxfm(self._subject_id, self._subjects_dir)
    except Exception:
        return  # no Talairach transform available; keep the identity
    # Landmark set: XYZ unit points + origin, plus the halfway points.
    tal_pts = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5])
    subj_pts = apply_trans(invert_transform(xfm), tal_pts)
    # Fit with scaling enabled for a better match, then keep only the
    # rigid-body (rotation + translation) part of the solution.
    params = fit_matched_points(subj_pts, tal_pts, scale=3, out='params')
    self._rigid[:] = _trans_from_params((True, True, False), params[:6])
def setup_time_viewer(self, time_viewer=True, show_traces=True):
    """Configure the time viewer parameters.

    Parameters
    ----------
    time_viewer : bool
        If True, enable widgets interaction. Defaults to True.
    show_traces : bool
        If True, enable visualization of time traces. Defaults to True.

    Notes
    -----
    The keyboard shortcuts are the following:

    '?': Display help window
    'i': Toggle interface
    's': Apply auto-scaling
    'r': Restore original clim
    'c': Clear all traces
    'n': Shift the time forward by the playback speed
    'b': Shift the time backward by the playback speed
    'Space': Start/Pause playback
    'Up': Decrease camera elevation angle
    'Down': Increase camera elevation angle
    'Left': Decrease camera azimuth angle
    'Right': Increase camera azimuth angle
    """
    if self.time_viewer:
        return  # already configured
    if not self._data:
        raise ValueError("No data to visualize. See ``add_data``.")
    self.time_viewer = time_viewer
    self.orientation = list(_lh_views_dict.keys())
    self.default_smoothing_range = [-1, 15]
    # Default configuration
    self.playback = False
    self.visibility = False
    # timer period targeting ~60 FPS, at least 1 ms
    self.refresh_rate_ms = max(int(round(1000. / 60.)), 1)
    self.default_scaling_range = [0.2, 2.0]
    self.default_playback_speed_range = [0.01, 1]
    self.default_playback_speed_value = 0.01
    self.default_status_bar_msg = "Press ? for help"
    self.default_label_extract_modes = {
        "stc": ["mean", "max"],
        "src": ["mean_flip", "pca_flip", "auto"],
    }
    self.default_trace_modes = ('vertex', 'label')
    self.annot = None
    self.label_extract_mode = None
    all_keys = ('lh', 'rh', 'vol')
    self.act_data_smooth = {key: (None, None) for key in all_keys}
    self.color_list = _get_color_list()
    # remove grey for better contrast on the brain
    self.color_list.remove("#7f7f7f")
    self.color_cycle = _ReuseCycle(self.color_list)
    self.mpl_canvas = None
    self.help_canvas = None
    self.rms = None
    self.picked_patches = {key: list() for key in all_keys}
    self.picked_points = {key: list() for key in all_keys}
    self.pick_table = dict()
    self._spheres = list()
    self._mouse_no_mvt = -1
    self.callbacks = dict()
    self.widgets = dict()
    self.keys = ('fmin', 'fmid', 'fmax')
    # Derived parameters:
    self.playback_speed = self.default_playback_speed_value
    _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces')
    self.interactor_fraction = 0.25
    # show_traces may be a bool, a mode string, or a fraction in (0, 1)
    # giving the screen fraction devoted to the trace canvas.
    if isinstance(show_traces, str):
        self.show_traces = True
        self.separate_canvas = False
        self.traces_mode = 'vertex'
        if show_traces == 'separate':
            self.separate_canvas = True
        elif show_traces == 'label':
            self.traces_mode = 'label'
        else:
            assert show_traces == 'vertex'  # guaranteed above
    else:
        if isinstance(show_traces, bool):
            self.show_traces = show_traces
        else:
            show_traces = float(show_traces)
            if not 0 < show_traces < 1:
                raise ValueError(
                    'show traces, if numeric, must be between 0 and 1, '
                    f'got {show_traces}')
            self.show_traces = True
            self.interactor_fraction = show_traces
        self.traces_mode = 'vertex'
        self.separate_canvas = False
    del show_traces
    self._configure_time_label()
    self._configure_scalar_bar()
    self._configure_shortcuts()
    self._configure_picking()
    self._configure_tool_bar()
    self._configure_dock()
    self._configure_menu()
    self._configure_status_bar()
    self._configure_playback()
    self._configure_help()
    # show everything at the end
    self.toggle_interface()
    self._renderer.show()
    # sizes could change, update views
    for hemi in ('lh', 'rh'):
        for ri, ci, v in self._iter_views(hemi):
            self.show_view(view=v, row=ri, col=ci)
    self._renderer._process_events()
    self._renderer._update()
    # finally, show the MplCanvas
    if self.show_traces:
        self.mpl_canvas.show()
@safe_event
def _clean(self):
    """Tear down actors/widgets and break reference cycles on close."""
    # resolve the reference cycle
    self.clear_glyphs()
    self.remove_annotations()
    # clear init actors
    for hemi in self._hemis:
        self._layered_meshes[hemi]._clean()
    self._clear_callbacks()
    self._clear_widgets()
    if getattr(self, 'mpl_canvas', None) is not None:
        self.mpl_canvas.clear()
    if getattr(self, 'act_data_smooth', None) is not None:
        for key in list(self.act_data_smooth.keys()):
            self.act_data_smooth[key] = None
    # XXX this should be done in PyVista
    for renderer in self._renderer._all_renderers:
        renderer.RemoveAllLights()
    # app_window cannot be set to None because it is used in __del__
    for key in ('lighting', 'interactor', '_RenderWindow'):
        setattr(self.plotter, key, None)
    # Qt LeaveEvent requires _Iren so we use _FakeIren instead of None
    # to resolve the ref to vtkGenericRenderWindowInteractor
    self.plotter._Iren = _FakeIren()
    if getattr(self.plotter, 'picker', None) is not None:
        self.plotter.picker = None
    # XXX end PyVista
    # drop remaining references so the Brain itself can be collected
    for key in ('plotter', 'window', 'dock', 'tool_bar', 'menu_bar',
                'interactor', 'mpl_canvas', 'time_actor',
                'picked_renderer', 'act_data_smooth', '_scalar_bar',
                'actions', 'widgets', 'geo', '_data'):
        setattr(self, key, None)
    self._cleaned = True
def toggle_interface(self, value=None):
    """Toggle the interface.

    Parameters
    ----------
    value : bool | None
        If True, the widgets are shown and if False, they
        are hidden. If None, the state of the widgets is
        toggled. Defaults to None.
    """
    self.visibility = (not self.visibility) if value is None else value
    # update tool bar and dock
    with self._renderer._window_ensure_minimum_sizes():
        if self.visibility:
            self._renderer._dock_show()
            icon = "visibility_on"
        else:
            self._renderer._dock_hide()
            icon = "visibility_off"
        self._renderer._tool_bar_update_button_icon(
            name="visibility", icon_name=icon)
        self._renderer._update()
def apply_auto_scaling(self):
    """Detect automatically fitting scaling parameters."""
    # shared helper with restore_user_scaling (which passes restore=True)
    self._update_auto_scaling()
def restore_user_scaling(self):
    """Restore original scaling parameters."""
    # same helper as apply_auto_scaling, but restoring the saved limits
    self._update_auto_scaling(restore=True)
def toggle_playback(self, value=None):
    """Toggle time playback.

    Parameters
    ----------
    value : bool | None
        If True, automatic time playback is enabled and if False,
        it's disabled. If None, the state of time playback is toggled.
        Defaults to None.
    """
    self.playback = (not self.playback) if value is None else value
    # update tool bar icon
    icon = "pause" if self.playback else "play"
    self._renderer._tool_bar_update_button_icon(
        name="play", icon_name=icon)
    if self.playback:
        time_data = self._data['time']
        if self._current_time == np.max(time_data):  # start over
            self.set_time_point(0)  # first index
        self._last_tick = time.time()
def reset(self):
    """Reset view and time step."""
    self.reset_view()
    # Jump back to the initial time index when more than one time
    # point is available.
    if len(self._data['time']) > 1:
        self.callbacks["time"](
            self._data["initial_time_idx"], update_widget=True)
    self._renderer._update()
def set_playback_speed(self, speed):
    """Set the time playback speed.

    Parameters
    ----------
    speed : float
        The speed of the playback.
    """
    # read by _advance() to scale the elapsed wall-clock time
    self.playback_speed = speed
@safe_event
def _play(self):
    """Advance one playback step; invoked periodically by the timer."""
    if not self.playback:
        return
    try:
        self._advance()
    except Exception:
        # stop playback before propagating so the timer does not keep
        # firing into a broken state
        self.toggle_playback(value=False)
        raise
def _advance(self):
    """Move the current time forward according to the playback speed."""
    now = time.time()
    elapsed = now - self._last_tick
    self._last_tick = time.time()
    time_data = self._data['time']
    max_time = np.max(time_data)
    target = min(self._current_time + elapsed * self.playback_speed,
                 max_time)
    # always use linear here -- this does not determine the data
    # interpolation mode, it just finds where we are (in time) in
    # terms of the time indices
    idx = np.interp(target, time_data, np.arange(self._n_times))
    self.callbacks["time"](idx, update_widget=True)
    if target == max_time:
        self.toggle_playback(value=False)  # reached the end
def _configure_time_label(self):
    """Style the time-label actor, if one was created with the data."""
    self.time_actor = self._data.get('time_actor')
    if self.time_actor is None:
        return
    self.time_actor.SetPosition(0.5, 0.03)
    text_prop = self.time_actor.GetTextProperty()
    text_prop.SetJustificationToCentered()
    text_prop.BoldOn()
def _configure_scalar_bar(self):
    """Give the scalar bar a vertical layout, if one exists."""
    bar = self._scalar_bar
    if bar is None:
        return
    bar.SetOrientationToVertical()
    bar.SetHeight(0.6)
    bar.SetWidth(0.05)
    bar.SetPosition(0.02, 0.2)
def _configure_dock_time_widget(self, layout=None):
    """Add the min/current/max time labels to the dock."""
    if len(self._data['time']) - 1 < 1:
        return  # nothing to label for a single time point
    if layout is None:
        layout = self._renderer.dock_layout
    row = self._renderer._dock_add_layout(vertical=False)
    # min / current / max labels, separated by stretchable space
    for ii, (key, placeholder) in enumerate(
            (("min_time", "-"), ("current_time", "x"), ("max_time", "+"))):
        if ii:
            self._renderer._dock_add_stretch(row)
        self.widgets[key] = self._renderer._dock_add_label(
            value=placeholder, layout=row)
    self._renderer._layout_add_widget(layout, row)
    self.widgets["min_time"].set_value(
        f"{float(self._data['time'][0]): .3f}")
    self.widgets["max_time"].set_value(
        f"{float(self._data['time'][-1]): .3f}")
    self.widgets["current_time"].set_value(f"{self._current_time: .3f}")
def _configure_dock_playback_widget(self, name):
    """Add the time slider, time labels, and playback-speed controls."""
    layout = self._renderer._dock_add_group_box(name)
    # number of time steps beyond the first
    len_time = len(self._data['time']) - 1
    # Time widget
    if len_time < 1:
        # a single time point: no slider needed
        self.callbacks["time"] = None
        self.widgets["time"] = None
    else:
        self.callbacks["time"] = TimeCallBack(
            brain=self,
            callback=self.plot_time_line,
        )
        self.widgets["time"] = self._renderer._dock_add_slider(
            name="Time (s)",
            value=self._data['time_idx'],
            rng=[0, len_time],
            double=True,
            callback=self.callbacks["time"],
            compact=False,
            layout=layout,
        )
        self.callbacks["time"].widget = self.widgets["time"]
    # Time labels
    if len_time < 1:
        self.widgets["min_time"] = None
        self.widgets["max_time"] = None
        self.widgets["current_time"] = None
    else:
        self._configure_dock_time_widget(layout)
        self.callbacks["time"].label = self.widgets["current_time"]
    # Playback speed widget
    if len_time < 1:
        self.callbacks["playback_speed"] = None
        self.widgets["playback_speed"] = None
    else:
        self.callbacks["playback_speed"] = SmartCallBack(
            callback=self.set_playback_speed,
        )
        self.widgets["playback_speed"] = self._renderer._dock_add_spin_box(
            name="Speed",
            value=self.default_playback_speed_value,
            rng=self.default_playback_speed_range,
            callback=self.callbacks["playback_speed"],
            layout=layout,
        )
        self.callbacks["playback_speed"].widget = \
            self.widgets["playback_speed"]
    # Time label
    current_time = self._current_time
    assert current_time is not None  # should never be the case, float
    time_label = self._data['time_label']
    if callable(time_label):
        current_time = time_label(current_time)
    else:
        current_time = time_label
    if self.time_actor is not None:
        self.time_actor.SetInput(current_time)
    del current_time
def _configure_dock_orientation_widget(self, name):
    """Add renderer-selection and view-orientation combo boxes."""
    layout = self._renderer._dock_add_group_box(name)
    # Renderer widget
    rends = [str(i) for i in range(len(self._renderer._all_renderers))]
    if len(rends) > 1:
        # selecting a renderer switches the active subplot
        def select_renderer(idx):
            idx = int(idx)
            loc = self._renderer._index_to_loc(idx)
            self.plotter.subplot(*loc)
        self.callbacks["renderer"] = SmartCallBack(
            callback=select_renderer,
        )
        self.widgets["renderer"] = self._renderer._dock_add_combo_box(
            name="Renderer",
            value="0",
            rng=rends,
            callback=self.callbacks["renderer"],
            layout=layout,
        )
        self.callbacks["renderer"].widget = \
            self.widgets["renderer"]
    # Use 'lh' as a reference for orientation for 'both'
    if self._hemi == 'both':
        hemis_ref = ['lh']
    else:
        hemis_ref = self._hemis
    # per-renderer orientation info (None for flat views)
    orientation_data = [None] * len(rends)
    for hemi in hemis_ref:
        for ri, ci, v in self._iter_views(hemi):
            idx = self._renderer._loc_to_index((ri, ci))
            if v == 'flat':
                _data = None
            else:
                _data = dict(default=v, hemi=hemi, row=ri, col=ci)
            orientation_data[idx] = _data
    self.callbacks["orientation"] = ShowView(
        brain=self,
        data=orientation_data,
    )
    self.widgets["orientation"] = self._renderer._dock_add_combo_box(
        name=None,
        value=self.orientation[0],
        rng=self.orientation,
        callback=self.callbacks["orientation"],
        layout=layout,
    )
def _configure_dock_colormap_widget(self, name):
    """Add fmin/fmid/fmax sliders, spin boxes, and rescale buttons."""
    layout = self._renderer._dock_add_group_box(name)
    self._renderer._dock_add_label(
        value="min / mid / max",
        align=True,
        layout=layout,
    )
    up = UpdateLUT(brain=self)
    for key in self.keys:
        hlayout = self._renderer._dock_add_layout(vertical=False)
        rng = _get_range(self)
        # bind key as a default argument so each lambda keeps its own key
        self.callbacks[key] = lambda value, key=key: up(**{key: value})
        self.widgets[key] = self._renderer._dock_add_slider(
            name=None,
            value=self._data[key],
            rng=rng,
            callback=self.callbacks[key],
            double=True,
            layout=hlayout,
        )
        self.widgets[f"entry_{key}"] = self._renderer._dock_add_spin_box(
            name=None,
            value=self._data[key],
            callback=self.callbacks[key],
            rng=rng,
            layout=hlayout,
        )
        up.widgets[key] = [self.widgets[key], self.widgets[f"entry_{key}"]]
        self._renderer._layout_add_widget(layout, hlayout)
    # reset / minus / plus
    hlayout = self._renderer._dock_add_layout(vertical=False)
    self._renderer._dock_add_label(
        value="Rescale",
        align=True,
        layout=hlayout,
    )
    self.widgets["reset"] = self._renderer._dock_add_button(
        name="↺",
        callback=self.restore_user_scaling,
        layout=hlayout,
    )
    # scale down/up by a fixed multiplicative factor per click
    for key, char, val in (("fminus", "➖", 1.2 ** -0.25),
                           ("fplus", "➕", 1.2 ** 0.25)):
        self.callbacks[key] = UpdateColorbarScale(
            brain=self,
            factor=val,
        )
        self.widgets[key] = self._renderer._dock_add_button(
            name=char,
            callback=self.callbacks[key],
            layout=hlayout,
        )
    self._renderer._layout_add_widget(layout, hlayout)
    # register colorbar slider representations
    widgets = {key: self.widgets[key] for key in self.keys}
    for name in ("fmin", "fmid", "fmax", "fminus", "fplus"):
        self.callbacks[name].widgets = widgets
def _configure_dock_trace_widget(self, name):
    """Add annotation and label-extraction controls for time traces."""
    if not self.show_traces:
        return
    # do not show trace mode for volumes
    if (self._data.get('src', None) is not None and
            self._data['src'].kind == 'volume'):
        self._configure_vertex_time_course()
        return
    layout = self._renderer._dock_add_group_box(name)
    # setup candidate annots
    def _set_annot(annot):
        # switching the annotation resets glyphs/labels and trace mode
        self.clear_glyphs()
        self.remove_labels()
        self.remove_annotations()
        self.annot = annot
        if annot == 'None':
            self.traces_mode = 'vertex'
            self._configure_vertex_time_course()
        else:
            self.traces_mode = 'label'
            self._configure_label_time_course()
        self._renderer._update()
    # setup label extraction parameters
    def _set_label_mode(mode):
        if self.traces_mode != 'label':
            return
        glyphs = copy.deepcopy(self.picked_patches)
        self.label_extract_mode = mode
        self.clear_glyphs()
        # re-pick the previously selected labels under the new mode
        for hemi in self._hemis:
            for label_id in glyphs[hemi]:
                label = self._annotation_labels[hemi][label_id]
                vertex_id = label.vertices[0]
                self._add_label_glyph(hemi, None, vertex_id)
        self.mpl_canvas.axes.relim()
        self.mpl_canvas.axes.autoscale_view()
        self.mpl_canvas.update_plot()
        self._renderer._update()
    from ...source_estimate import _get_allowed_label_modes
    from ...label import _read_annot_cands
    dir_name = op.join(self._subjects_dir, self._subject_id, 'label')
    cands = _read_annot_cands(dir_name, raise_error=False)
    cands = cands + ['None']
    self.annot = cands[0]
    stc = self._data["stc"]
    modes = _get_allowed_label_modes(stc)
    if self._data["src"] is None:
        # source-space-only modes are unavailable without src
        modes = [m for m in modes if m not in
                 self.default_label_extract_modes["src"]]
    self.label_extract_mode = modes[-1]
    if self.traces_mode == 'vertex':
        _set_annot('None')
    else:
        _set_annot(self.annot)
    self.widgets["annotation"] = self._renderer._dock_add_combo_box(
        name="Annotation",
        value=self.annot,
        rng=cands,
        callback=_set_annot,
        layout=layout,
    )
    self.widgets["extract_mode"] = self._renderer._dock_add_combo_box(
        name="Extract mode",
        value=self.label_extract_mode,
        rng=modes,
        callback=_set_label_mode,
        layout=layout,
    )
def _configure_dock(self):
    """Build the dock: playback, orientation, colormap, trace, smoothing."""
    self._renderer._dock_initialize()
    self._configure_dock_playback_widget(name="Playback")
    self._configure_dock_orientation_widget(name="Orientation")
    self._configure_dock_colormap_widget(name="Color Limits")
    self._configure_dock_trace_widget(name="Trace")
    # Smoothing widget
    smoothing_cb = SmartCallBack(callback=self.set_data_smoothing)
    self.callbacks["smoothing"] = smoothing_cb
    self.widgets["smoothing"] = self._renderer._dock_add_spin_box(
        name="Smoothing",
        value=self._data['smoothing_steps'],
        rng=self.default_smoothing_range,
        callback=smoothing_cb,
        double=False
    )
    smoothing_cb.widget = self.widgets["smoothing"]
    self._renderer._dock_finalize()
def _configure_playback(self):
    """Hook the playback timer up to _play at the configured rate."""
    self._renderer._playback_initialize(
        func=self._play,
        timeout=self.refresh_rate_ms,
        value=self._data['time_idx'],
        rng=[0, len(self._data['time']) - 1],
        time_widget=self.widgets["time"],
        play_widget=self.widgets["play"],
    )
def _configure_mplcanvas(self):
    """Create the matplotlib canvas used for time-course traces."""
    # Get the fractional components for the brain and mpl
    self.mpl_canvas = self._renderer._window_get_mplcanvas(
        brain=self,
        interactor_fraction=self.interactor_fraction,
        show_traces=self.show_traces,
        separate_canvas=self.separate_canvas
    )
    # x-axis spans the full time range of the data
    xlim = [np.min(self._data['time']),
            np.max(self._data['time'])]
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UserWarning)
        self.mpl_canvas.axes.set(xlim=xlim)
    if not self.separate_canvas:
        self._renderer._window_adjust_mplcanvas_layout()
    # match the brain window's color scheme
    self.mpl_canvas.set_color(
        bg_color=self._bg_color,
        fg_color=self._fg_color,
    )
    def _configure_vertex_time_course(self):
        """Set up the trace plot: RMS curve, time line and default glyphs."""
        if not self.show_traces:
            return
        if self.mpl_canvas is None:
            self._configure_mplcanvas()
        else:
            self.clear_glyphs()
        # plot RMS of the activation
        y = np.concatenate(list(v[0] for v in self.act_data_smooth.values()
                                if v[0] is not None))
        rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y))
        del y
        self.rms, = self.mpl_canvas.axes.plot(
            self._data['time'], rms,
            lw=3, label='RMS', zorder=3, color=self._fg_color,
            alpha=0.5, ls=':')
        # now plot the time line
        self.plot_time_line(update=False)
        # then the picked points
        for idx, hemi in enumerate(['lh', 'rh', 'vol']):
            act_data = self.act_data_smooth.get(hemi, [None])[0]
            if act_data is None:
                continue
            hemi_data = self._data[hemi]
            vertices = hemi_data['vertices']
            # simulate a picked renderer
            if self._hemi in ('both', 'rh') or hemi == 'vol':
                idx = 0
            self.picked_renderer = self._renderer._all_renderers[idx]
            # initialize the default point
            if self._data['initial_time'] is not None:
                # pick at that time
                use_data = act_data[
                    :, [np.round(self._data['time_idx']).astype(int)]]
            else:
                use_data = act_data
            # pick the vertex with maximal absolute activation
            ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None),
                                   use_data.shape)
            if hemi == 'vol':
                mesh = hemi_data['grid']
            else:
                mesh = self._layered_meshes[hemi]._polydata
            vertex_id = vertices[ind[0]]
            self._add_vertex_glyph(hemi, mesh, vertex_id, update=False)
    def _configure_picking(self):
        """Cache per-hemi (act_data, smooth_mat) pairs and register the
        mouse callbacks used for picking."""
        # get data for each hemi
        from scipy import sparse
        for idx, hemi in enumerate(['vol', 'lh', 'rh']):
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                act_data = hemi_data['array']
                if act_data.ndim == 3:
                    # vector-valued data: collapse components to magnitudes
                    act_data = np.linalg.norm(act_data, axis=1)
                smooth_mat = hemi_data.get('smooth_mat')
                vertices = hemi_data['vertices']
                if hemi == 'vol':
                    assert smooth_mat is None
                    # scatter matrix mapping data rows onto grid vertices
                    smooth_mat = sparse.csr_matrix(
                        (np.ones(len(vertices)),
                         (vertices, np.arange(len(vertices)))))
                self.act_data_smooth[hemi] = (act_data, smooth_mat)
        self._renderer._update_picking_callback(
            self._on_mouse_move,
            self._on_button_press,
            self._on_button_release,
            self._on_pick
        )
    def _configure_tool_bar(self):
        """Populate the toolbar: screenshot, movie, playback and help
        actions."""
        self._renderer._tool_bar_load_icons()
        self._renderer._tool_bar_set_theme(self.theme)
        self._renderer._tool_bar_initialize(name="Toolbar")
        self._renderer._tool_bar_add_file_button(
            name="screenshot",
            desc="Take a screenshot",
            func=self.save_image,
        )
        self._renderer._tool_bar_add_file_button(
            name="movie",
            desc="Save movie...",
            # dilation is the inverse of the playback speed
            func=lambda filename: self.save_movie(
                filename=filename,
                time_dilation=(1. / self.playback_speed)),
            shortcut="ctrl+shift+s",
        )
        self._renderer._tool_bar_add_button(
            name="visibility",
            desc="Toggle Controls",
            func=self.toggle_interface,
            icon_name="visibility_on"
        )
        self.widgets["play"] = self._renderer._tool_bar_add_play_button(
            name="play",
            desc="Play/Pause",
            func=self.toggle_playback,
            shortcut=" ",  # spacebar
        )
        self._renderer._tool_bar_add_button(
            name="reset",
            desc="Reset",
            func=self.reset,
        )
        self._renderer._tool_bar_add_button(
            name="scale",
            desc="Auto-Scale",
            func=self.apply_auto_scaling,
        )
        self._renderer._tool_bar_add_button(
            name="clear",
            desc="Clear traces",
            func=self.clear_glyphs,
        )
        self._renderer._tool_bar_add_spacer()
        self._renderer._tool_bar_add_button(
            name="help",
            desc="Help",
            func=self.help,
            shortcut="?",
        )
def _shift_time(self, op):
self.callbacks["time"](
value=(op(self._current_time, self.playback_speed)),
time_as_index=False,
update_widget=True,
)
def _rotate_azimuth(self, value):
azimuth = (self._renderer.figure._azimuth + value) % 360
self._renderer.set_camera(azimuth=azimuth, reset_camera=False)
def _rotate_elevation(self, value):
elevation = np.clip(
self._renderer.figure._elevation + value,
self._elevation_rng[0],
self._elevation_rng[1],
)
self._renderer.set_camera(elevation=elevation, reset_camera=False)
    def _configure_shortcuts(self):
        """Install the Brain-specific keyboard shortcuts on the plotter."""
        # First, we remove the default bindings:
        self._clear_callbacks()
        # Then, we add our own:
        self.plotter.add_key_event("i", self.toggle_interface)
        self.plotter.add_key_event("s", self.apply_auto_scaling)
        self.plotter.add_key_event("r", self.restore_user_scaling)
        self.plotter.add_key_event("c", self.clear_glyphs)
        # n/b shift the current time forward/backward by the playback speed
        self.plotter.add_key_event("n", partial(self._shift_time,
                                                op=lambda x, y: x + y))
        self.plotter.add_key_event("b", partial(self._shift_time,
                                                op=lambda x, y: x - y))
        # arrow keys rotate the camera by a fixed step (_ARROW_MOVE degrees)
        for key, func, sign in (("Left", self._rotate_azimuth, 1),
                                ("Right", self._rotate_azimuth, -1),
                                ("Up", self._rotate_elevation, 1),
                                ("Down", self._rotate_elevation, -1)):
            self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE))
    def _configure_menu(self):
        """Create the menu bar with a single Help entry."""
        self._renderer._menu_initialize()
        self._renderer._menu_add_submenu(
            name="help",
            desc="Help",
        )
        self._renderer._menu_add_button(
            menu_name="help",
            name="help",
            desc="Show MNE key bindings\t?",
            func=self.help,
        )
    def _configure_status_bar(self):
        """Add the status message label and a hidden progress bar."""
        self._renderer._status_bar_initialize()
        self.status_msg = self._renderer._status_bar_add_label(
            self.default_status_bar_msg, stretch=1)
        self.status_progress = self._renderer._status_bar_add_progress_bar()
        if self.status_progress is not None:
            # hidden until a long-running operation needs it
            self.status_progress.hide()
def _on_mouse_move(self, vtk_picker, event):
if self._mouse_no_mvt:
self._mouse_no_mvt -= 1
    def _on_button_press(self, vtk_picker, event):
        # arm the click detector: if fewer than 2 mouse-move events arrive
        # before release, the gesture is treated as a pick (not a drag)
        self._mouse_no_mvt = 2
    def _on_button_release(self, vtk_picker, event):
        """Trigger a pick if the press/release happened without movement."""
        if self._mouse_no_mvt > 0:
            x, y = vtk_picker.GetEventPosition()
            # programmatically detect the picked renderer
            try:
                # pyvista<0.30.0
                self.picked_renderer = \
                    self.plotter.iren.FindPokedRenderer(x, y)
            except AttributeError:
                # pyvista>=0.30.0
                self.picked_renderer = \
                    self.plotter.iren.interactor.FindPokedRenderer(x, y)
            # trigger the pick
            self.plotter.picker.Pick(x, y, 0, self.picked_renderer)
        self._mouse_no_mvt = 0
    def _on_pick(self, vtk_picker, event):
        """Handle a pick: toggle an existing glyph or add a trace for the
        picked vertex/label."""
        if not self.show_traces:
            return
        # vtk_picker is a vtkCellPicker
        cell_id = vtk_picker.GetCellId()
        mesh = vtk_picker.GetDataSet()
        if mesh is None or cell_id == -1 or not self._mouse_no_mvt:
            return  # don't pick
        # 1) Check to see if there are any spheres along the ray
        if len(self._spheres):
            collection = vtk_picker.GetProp3Ds()
            found_sphere = None
            for ii in range(collection.GetNumberOfItems()):
                actor = collection.GetItemAsObject(ii)
                for sphere in self._spheres:
                    if any(a is actor for a in sphere._actors):
                        found_sphere = sphere
                        break
                if found_sphere is not None:
                    break
            if found_sphere is not None:
                assert found_sphere._is_glyph
                mesh = found_sphere
        # 2) Remove sphere if it's what we have
        if hasattr(mesh, "_is_glyph"):
            self._remove_vertex_glyph(mesh)
            return
        # 3) Otherwise, pick the objects in the scene
        try:
            hemi = mesh._hemi
        except AttributeError:  # volume
            hemi = 'vol'
        else:
            assert hemi in ('lh', 'rh')
        if self.act_data_smooth[hemi][0] is None:  # no data to add for hemi
            return
        pos = np.array(vtk_picker.GetPickPosition())
        if hemi == 'vol':
            # VTK will give us the point closest to the viewer in the vol.
            # We want to pick the point with the maximum value along the
            # camera-to-click array, which fortunately we can get "just"
            # by inspecting the points that are sufficiently close to the
            # ray.
            grid = mesh = self._data[hemi]['grid']
            vertices = self._data[hemi]['vertices']
            coords = self._data[hemi]['grid_coords'][vertices]
            scalars = _cell_data(grid)['values'][vertices]
            spacing = np.array(grid.GetSpacing())
            max_dist = np.linalg.norm(spacing) / 2.
            origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()
            ori = pos - origin
            ori /= np.linalg.norm(ori)
            # the magic formula: distance from a ray to a given point
            dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)
            assert dists.shape == (len(coords),)
            mask = dists <= max_dist
            idx = np.where(mask)[0]
            if len(idx) == 0:
                return  # weird point on edge of volume?
            # useful for debugging the ray by mapping it into the volume:
            # dists = dists - dists.min()
            # dists = (1. - dists / dists.max()) * self._cmap_range[1]
            # _cell_data(grid)['values'][vertices] = dists * mask
            # among candidates near the ray, take the strongest activation
            idx = idx[np.argmax(np.abs(scalars[idx]))]
            vertex_id = vertices[idx]
            # Naive way: convert pos directly to idx; i.e., apply mri_src_t
            # shape = self._data[hemi]['grid_shape']
            # taking into account the cell vs point difference (spacing/2)
            # shift = np.array(grid.GetOrigin()) + spacing / 2.
            # ijk = np.round((pos - shift) / spacing).astype(int)
            # vertex_id = np.ravel_multi_index(ijk, shape, order='F')
        else:
            # surface: take the cell's point closest to the pick position
            vtk_cell = mesh.GetCell(cell_id)
            cell = [vtk_cell.GetPointId(point_id) for point_id
                    in range(vtk_cell.GetNumberOfPoints())]
            vertices = mesh.points[cell]
            idx = np.argmin(abs(vertices - pos), axis=0)
            vertex_id = cell[idx[0]]
        if self.traces_mode == 'label':
            self._add_label_glyph(hemi, mesh, vertex_id)
        else:
            self._add_vertex_glyph(hemi, mesh, vertex_id)
def _add_label_glyph(self, hemi, mesh, vertex_id):
if hemi == 'vol':
return
label_id = self._vertex_to_label_id[hemi][vertex_id]
label = self._annotation_labels[hemi][label_id]
# remove the patch if already picked
if label_id in self.picked_patches[hemi]:
self._remove_label_glyph(hemi, label_id)
return
if hemi == label.hemi:
self.add_label(label, borders=True, reset_camera=False)
self.picked_patches[hemi].append(label_id)
def _remove_label_glyph(self, hemi, label_id):
label = self._annotation_labels[hemi][label_id]
label._line.remove()
self.color_cycle.restore(label._color)
self.mpl_canvas.update_plot()
self._layered_meshes[hemi].remove_overlay(label.name)
self.picked_patches[hemi].remove(label_id)
    def _add_vertex_glyph(self, hemi, mesh, vertex_id, update=True):
        """Place sphere glyphs at ``vertex_id`` in every view and plot its
        time course; return the last sphere created."""
        if vertex_id in self.picked_points[hemi]:
            return
        # skip if the wrong hemi is selected
        if self.act_data_smooth[hemi][0] is None:
            return
        color = next(self.color_cycle)
        line = self.plot_time_course(hemi, vertex_id, color, update=update)
        if hemi == 'vol':
            # volume: locate the voxel cell and use its point average
            ijk = np.unravel_index(
                vertex_id, np.array(mesh.GetDimensions()) - 1, order='F')
            # should just be GetCentroid(center), but apparently it's VTK9+:
            # center = np.empty(3)
            # voxel.GetCentroid(center)
            voxel = mesh.GetCell(*ijk)
            pts = voxel.GetPoints()
            n_pts = pts.GetNumberOfPoints()
            center = np.empty((n_pts, 3))
            for ii in range(pts.GetNumberOfPoints()):
                pts.GetPoint(ii, center[ii])
            center = np.mean(center, axis=0)
        else:
            center = mesh.GetPoints().GetPoint(vertex_id)
        del mesh
        # from the picked renderer to the subplot coords
        try:
            lst = self._renderer._all_renderers._renderers
        except AttributeError:
            lst = self._renderer._all_renderers
        rindex = lst.index(self.picked_renderer)
        row, col = self._renderer._index_to_loc(rindex)
        actors = list()
        spheres = list()
        for _ in self._iter_views(hemi):
            # Using _sphere() instead of renderer.sphere() for 2 reasons:
            # 1) renderer.sphere() fails on Windows in a scenario where a lot
            # of picking requests are done in a short span of time (could be
            # mitigated with synchronization/delay?)
            # 2) the glyph filter is used in renderer.sphere() but only one
            # sphere is required in this function.
            actor, sphere = self._renderer._sphere(
                center=np.array(center),
                color=color,
                radius=4.0,
            )
            actors.append(actor)
            spheres.append(sphere)
        # add metadata for picking
        for sphere in spheres:
            sphere._is_glyph = True
            sphere._hemi = hemi
            sphere._line = line
            sphere._actors = actors
            sphere._color = color
            sphere._vertex_id = vertex_id
        self.picked_points[hemi].append(vertex_id)
        self._spheres.extend(spheres)
        self.pick_table[vertex_id] = spheres
        return sphere
    def _remove_vertex_glyph(self, mesh, render=True):
        """Remove a picked sphere glyph, its actors and its trace line."""
        vertex_id = mesh._vertex_id
        if vertex_id not in self.pick_table:
            return
        hemi = mesh._hemi
        color = mesh._color
        spheres = self.pick_table[vertex_id]
        # all spheres for this vertex share one trace line; remove it once
        spheres[0]._line.remove()
        self.mpl_canvas.update_plot()
        self.picked_points[hemi].remove(vertex_id)
        with warnings.catch_warnings(record=True):
            # We intentionally ignore these in case we have traversed the
            # entire color cycle
            warnings.simplefilter('ignore')
            self.color_cycle.restore(color)
        for sphere in spheres:
            # remove all actors
            self.plotter.remove_actor(sphere._actors, render=render)
            sphere._actors = None
            self._spheres.pop(self._spheres.index(sphere))
        self.pick_table.pop(vertex_id)
    def clear_glyphs(self):
        """Clear the picking glyphs."""
        if not self.time_viewer:
            return
        for sphere in list(self._spheres):  # will remove itself, so copy
            self._remove_vertex_glyph(sphere, render=False)
        # sanity check: all pick bookkeeping must now be empty
        assert sum(len(v) for v in self.picked_points.values()) == 0
        assert len(self.pick_table) == 0
        assert len(self._spheres) == 0
        for hemi in self._hemis:
            for label_id in list(self.picked_patches[hemi]):
                self._remove_label_glyph(hemi, label_id)
        assert sum(len(v) for v in self.picked_patches.values()) == 0
        # also drop the RMS trace so it can be recreated later
        if self.rms is not None:
            self.rms.remove()
            self.rms = None
        self._renderer._update()
    def plot_time_course(self, hemi, vertex_id, color, update=True):
        """Plot the vertex time course.

        Parameters
        ----------
        hemi : str
            The hemisphere id of the vertex.
        vertex_id : int
            The vertex identifier in the mesh.
        color : matplotlib color
            The color of the time course.
        update : bool
            Force an update of the plot. Defaults to True.

        Returns
        -------
        line : matplotlib object
            The time line object.
        """
        if self.mpl_canvas is None:
            return
        time = self._data['time'].copy()  # avoid circular ref
        mni = None
        if hemi == 'vol':
            hemi_str = 'V'
            # Talairach transform converts the voxel index to MNI coords
            xfm = read_talxfm(
                self._subject_id, self._subjects_dir)
            if self._units == 'mm':
                xfm['trans'][:3, 3] *= 1000.
            ijk = np.unravel_index(
                vertex_id, self._data[hemi]['grid_shape'], order='F')
            src_mri_t = self._data[hemi]['grid_src_mri_t']
            mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk)
        else:
            hemi_str = 'L' if hemi == 'lh' else 'R'
            try:
                mni = vertex_to_mni(
                    vertices=vertex_id,
                    hemis=0 if hemi == 'lh' else 1,
                    subject=self._subject_id,
                    subjects_dir=self._subjects_dir
                )
            except Exception:
                # MNI coordinates are informative only; omit them on failure
                mni = None
        if mni is not None:
            mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni)
        else:
            mni = ''
        label = "{}:{}{}".format(hemi_str, str(vertex_id).ljust(6), mni)
        act_data, smooth = self.act_data_smooth[hemi]
        if smooth is not None:
            # map through the smoothing matrix to the full-resolution data
            act_data = smooth[vertex_id].dot(act_data)[0]
        else:
            act_data = act_data[vertex_id].copy()
        line = self.mpl_canvas.plot(
            time,
            act_data,
            label=label,
            lw=1.,
            color=color,
            zorder=4,
            update=update,
        )
        return line
def plot_time_line(self, update=True):
"""Add the time line to the MPL widget.
Parameters
----------
update : bool
Force an update of the plot. Defaults to True.
"""
if self.mpl_canvas is None:
return
if isinstance(self.show_traces, bool) and self.show_traces:
# add time information
current_time = self._current_time
if not hasattr(self, "time_line"):
self.time_line = self.mpl_canvas.plot_time_line(
x=current_time,
label='time',
color=self._fg_color,
lw=1,
update=update,
)
self.time_line.set_xdata(current_time)
if update:
self.mpl_canvas.update_plot()
    def _configure_help(self):
        """Render the keyboard-shortcut table on its own help canvas."""
        # (key, description) pairs shown in the help window; keep in sync
        # with _configure_shortcuts
        pairs = [
            ('?', 'Display help window'),
            ('i', 'Toggle interface'),
            ('s', 'Apply auto-scaling'),
            ('r', 'Restore original clim'),
            ('c', 'Clear all traces'),
            ('n', 'Shift the time forward by the playback speed'),
            ('b', 'Shift the time backward by the playback speed'),
            ('Space', 'Start/Pause playback'),
            ('Up', 'Decrease camera elevation angle'),
            ('Down', 'Increase camera elevation angle'),
            ('Left', 'Decrease camera azimuth angle'),
            ('Right', 'Increase camera azimuth angle'),
        ]
        text1, text2 = zip(*pairs)
        text1 = '\n'.join(text1)
        text2 = '\n'.join(text2)
        self.help_canvas = self._renderer._window_get_simple_canvas(
            width=5, height=2, dpi=80)
        _show_help_fig(
            col1=text1,
            col2=text2,
            fig_help=self.help_canvas.fig,
            ax=self.help_canvas.axes,
            show=False,  # shown on demand via Brain.help()
        )
    def help(self):
        """Display the help window."""
        # the canvas is built once in _configure_help
        self.help_canvas.show()
    def _clear_callbacks(self):
        """Detach widget/plotter references from all callbacks and clear
        the callback registry."""
        if not hasattr(self, 'callbacks'):
            return
        for callback in self.callbacks.values():
            if callback is not None:
                # break reference cycles so the GUI objects can be GC'd
                for key in ('plotter', 'brain', 'callback',
                            'widget', 'widgets'):
                    setattr(callback, key, None)
        self.callbacks.clear()
        # Remove the default key binding
        # NOTE(review): the guard checks ``self.iren`` but the call uses
        # ``self.plotter.iren`` — confirm both refer to the same interactor
        if getattr(self, "iren", None) is not None:
            self.plotter.iren.clear_key_event_callbacks()
def _clear_widgets(self):
if not hasattr(self, 'widgets'):
return
for widget in self.widgets.values():
if widget is not None:
for key in ('triggered', 'valueChanged'):
setattr(widget, key, None)
self.widgets.clear()
    @property
    def interaction(self):
        """The interaction style."""
        # read-only view of the stored style; use the setter to change it
        return self._interaction
    @interaction.setter
    def interaction(self, interaction):
        """Set the interaction style."""
        _validate_type(interaction, str, 'interaction')
        _check_option('interaction', interaction, ('trackball', 'terrain'))
        # apply the style to every subplot renderer
        for _ in self._iter_views('vol'):  # will traverse all
            self._renderer.set_interaction(interaction)
    def _cortex_colormap(self, cortex):
        """Return the colormap corresponding to the cortex."""
        from .._3d import _get_cmap
        from matplotlib.colors import ListedColormap
        # named presets mapping to (colormap, vmin, vmax) triples
        colormap_map = dict(classic=dict(colormap="Greys",
                                         vmin=-1, vmax=2),
                            high_contrast=dict(colormap="Greys",
                                               vmin=-.1, vmax=1.3),
                            low_contrast=dict(colormap="Greys",
                                              vmin=-5, vmax=5),
                            bone=dict(colormap="bone_r",
                                      vmin=-.2, vmax=2),
                            )
        _validate_type(cortex, (str, dict, list, tuple), 'cortex')
        if isinstance(cortex, str):
            if cortex in colormap_map:
                cortex = colormap_map[cortex]
            else:
                # a single color name: duplicate for the binary map below
                cortex = [cortex] * 2
        if isinstance(cortex, (list, tuple)):
            _check_option('len(cortex)', len(cortex), (2, 3),
                          extra='when cortex is a list or tuple')
            if len(cortex) == 3:
                # a single RGB triple
                cortex = [cortex] * 2
            cortex = list(cortex)
            for ci, c in enumerate(cortex):
                cortex[ci] = _to_rgb(c, name='cortex')
            cortex = dict(
                colormap=ListedColormap(cortex, name='custom binary'),
                vmin=0, vmax=1)
        # normalize to floats and a resolved colormap object
        cortex = dict(
            vmin=float(cortex['vmin']),
            vmax=float(cortex['vmax']),
            colormap=_get_cmap(cortex['colormap']),
        )
        return cortex
def _remove(self, item, render=False):
"""Remove actors from the rendered scene."""
if item in self._actors:
logger.debug(
f'Removing {len(self._actors[item])} {item} actor(s)')
for actor in self._actors[item]:
self._renderer.plotter.remove_actor(actor)
self._actors.pop(item) # remove actor list
if render:
self._renderer._update()
def _add_actor(self, item, actor):
"""Add an actor to the internal register."""
if item in self._actors: # allows adding more than one
self._actors[item].append(actor)
else:
self._actors[item] = [actor]
    @verbose
    def add_data(self, array, fmin=None, fmid=None, fmax=None,
                 thresh=None, center=None, transparent=False, colormap="auto",
                 alpha=1, vertices=None, smoothing_steps=None, time=None,
                 time_label="auto", colorbar=True,
                 hemi=None, remove_existing=None, time_label_size=None,
                 initial_time=None, scale_factor=None, vector_alpha=None,
                 clim=None, src=None, volume_options=0.4, colorbar_kwargs=None,
                 verbose=None):
        """Display data from a numpy array on the surface or volume.

        This provides a similar interface to
        :meth:`surfer.Brain.add_overlay`, but it displays
        it with a single colormap. It offers more flexibility over the
        colormap, and provides a way to display four-dimensional data
        (i.e., a timecourse) or five-dimensional data (i.e., a
        vector-valued timecourse).

        .. note:: ``fmin`` sets the low end of the colormap, and is separate
                  from thresh (this is a different convention from
                  :meth:`surfer.Brain.add_overlay`).

        Parameters
        ----------
        array : numpy array, shape (n_vertices[, 3][, n_times])
            Data array. For the data to be understood as vector-valued
            (3 values per vertex corresponding to X/Y/Z surface RAS),
            then ``array`` must have all 3 dimensions.
            If vectors with no time dimension are desired, consider using a
            singleton (e.g., ``np.newaxis``) to create a "time" dimension
            and pass ``time_label=None`` (vector values are not supported).
        %(fmin_fmid_fmax)s
        %(thresh)s
        %(center)s
        %(transparent)s
        colormap : str, list of color, or array
            Name of matplotlib colormap to use, a list of matplotlib colors,
            or a custom look up table (an n x 4 array coded with RGBA values
            between 0 and 255), the default "auto" chooses a default divergent
            colormap, if "center" is given (currently "icefire"), otherwise a
            default sequential colormap (currently "rocket").
        alpha : float in [0, 1]
            Alpha level to control opacity of the overlay.
        vertices : numpy array
            Vertices for which the data is defined (needed if
            ``len(data) < nvtx``).
        smoothing_steps : int or None
            Number of smoothing steps (smoothing is used if len(data) < nvtx)
            The value 'nearest' can be used too. None (default) will use as
            many as necessary to fill the surface.
        time : numpy array
            Time points in the data array (if data is 2D or 3D).
        %(time_label)s
        colorbar : bool
            Whether to add a colorbar to the figure. Can also be a tuple
            to give the (row, col) index of where to put the colorbar.
        hemi : str | None
            If None, it is assumed to belong to the hemisphere being
            shown. If two hemispheres are being shown, an error will
            be thrown.
        remove_existing : bool
            Not supported yet.
            Remove surface added by previous "add_data" call. Useful for
            conserving memory when displaying different data in a loop.
        time_label_size : int
            Font size of the time label (default 14).
        initial_time : float | None
            Time initially shown in the plot. ``None`` to use the first time
            sample (default).
        scale_factor : float | None (default)
            The scale factor to use when displaying glyphs for vector-valued
            data.
        vector_alpha : float | None
            Alpha level to control opacity of the arrows. Only used for
            vector-valued data. If None (default), ``alpha`` is used.
        clim : dict
            Original clim arguments.
        %(src_volume_options)s
        colorbar_kwargs : dict | None
            Options to pass to :meth:`pyvista.Plotter.add_scalar_bar`
            (e.g., ``dict(title_font_size=10)``).
        %(verbose)s

        Notes
        -----
        If the data is defined for a subset of vertices (specified
        by the "vertices" parameter), a smoothing method is used to interpolate
        the data onto the high resolution surface. If the data is defined for
        subsampled version of the surface, smoothing_steps can be set to None,
        in which case only as many smoothing steps are applied until the whole
        surface is filled with non-zeros.

        Due to a VTK alpha rendering bug, ``vector_alpha`` is
        clamped to be strictly < 1.
        """
        # --- validate inputs ---
        _validate_type(transparent, bool, 'transparent')
        _validate_type(vector_alpha, ('numeric', None), 'vector_alpha')
        _validate_type(scale_factor, ('numeric', None), 'scale_factor')
        # those parameters are not supported yet, only None is allowed
        _check_option('thresh', thresh, [None])
        _check_option('remove_existing', remove_existing, [None])
        _validate_type(time_label_size, (None, 'numeric'), 'time_label_size')
        if time_label_size is not None:
            time_label_size = float(time_label_size)
            if time_label_size < 0:
                raise ValueError('time_label_size must be positive, got '
                                 f'{time_label_size}')
        hemi = self._check_hemi(hemi, extras=['vol'])
        stc, array, vertices = self._check_stc(hemi, array, vertices)
        array = np.asarray(array)
        vector_alpha = alpha if vector_alpha is None else vector_alpha
        self._data['vector_alpha'] = vector_alpha
        self._data['scale_factor'] = scale_factor
        # Create time array and add label if > 1D
        if array.ndim <= 1:
            time_idx = 0
        else:
            # check time array
            if time is None:
                time = np.arange(array.shape[-1])
            else:
                time = np.asarray(time)
                if time.shape != (array.shape[-1],):
                    raise ValueError('time has shape %s, but need shape %s '
                                     '(array.shape[-1])' %
                                     (time.shape, (array.shape[-1],)))
            self._data["time"] = time
            # the time axis must agree with any previously added data
            if self._n_times is None:
                self._times = time
            elif len(time) != self._n_times:
                raise ValueError("New n_times is different from previous "
                                 "n_times")
            elif not np.array_equal(time, self._times):
                raise ValueError("Not all time values are consistent with "
                                 "previously set times.")
            # initial time
            if initial_time is None:
                time_idx = 0
            else:
                time_idx = self._to_time_index(initial_time)
        # time label
        time_label, _ = _handle_time(time_label, 's', time)
        y_txt = 0.05 + 0.1 * bool(colorbar)
        if array.ndim == 3:
            if array.shape[1] != 3:
                raise ValueError('If array has 3 dimensions, array.shape[1] '
                                 'must equal 3, got %s' % (array.shape[1],))
        fmin, fmid, fmax = _update_limits(
            fmin, fmid, fmax, center, array
        )
        if colormap == 'auto':
            colormap = 'mne' if center is not None else 'hot'
        # normalize smoothing_steps: None -> 7, 'nearest' -> -1
        if smoothing_steps is None:
            smoothing_steps = 7
        elif smoothing_steps == 'nearest':
            smoothing_steps = -1
        elif isinstance(smoothing_steps, int):
            if smoothing_steps < 0:
                raise ValueError('Expected value of `smoothing_steps` is'
                                 ' positive but {} was given.'.format(
                                     smoothing_steps))
        else:
            raise TypeError('Expected type of `smoothing_steps` is int or'
                            ' NoneType but {} was given.'.format(
                                type(smoothing_steps)))
        # --- store shared state ---
        self._data['stc'] = stc
        self._data['src'] = src
        self._data['smoothing_steps'] = smoothing_steps
        self._data['clim'] = clim
        self._data['time'] = time
        self._data['initial_time'] = initial_time
        self._data['time_label'] = time_label
        self._data['initial_time_idx'] = time_idx
        self._data['time_idx'] = time_idx
        self._data['transparent'] = transparent
        # data specific for a hemi
        self._data[hemi] = dict()
        self._data[hemi]['glyph_dataset'] = None
        self._data[hemi]['glyph_mapper'] = None
        self._data[hemi]['glyph_actor'] = None
        self._data[hemi]['array'] = array
        self._data[hemi]['vertices'] = vertices
        self._data['alpha'] = alpha
        self._data['colormap'] = colormap
        self._data['center'] = center
        self._data['fmin'] = fmin
        self._data['fmid'] = fmid
        self._data['fmax'] = fmax
        self.update_lut()
        # 1) add the surfaces first
        actor = None
        for _ in self._iter_views(hemi):
            if hemi in ('lh', 'rh'):
                actor = self._layered_meshes[hemi]._actor
            else:
                src_vol = src[2:] if src.kind == 'mixed' else src
                actor, _ = self._add_volume_data(hemi, src_vol, volume_options)
        assert actor is not None  # should have added one
        self._add_actor('data', actor)
        # 2) update time and smoothing properties
        # set_data_smoothing calls "set_time_point" for us, which will set
        # _current_time
        self.set_time_interpolation(self.time_interpolation)
        self.set_data_smoothing(self._data['smoothing_steps'])
        # 3) add the other actors
        if colorbar is True:
            # bottom left by default
            colorbar = (self._subplot_shape[0] - 1, 0)
        for ri, ci, v in self._iter_views(hemi):
            # Add the time label to the bottommost view
            do = (ri, ci) == colorbar
            if not self._time_label_added and time_label is not None and do:
                time_actor = self._renderer.text2d(
                    x_window=0.95, y_window=y_txt,
                    color=self._fg_color,
                    size=time_label_size,
                    text=time_label(self._current_time),
                    justification='right'
                )
                self._data['time_actor'] = time_actor
                self._time_label_added = True
            if colorbar and self._scalar_bar is None and do:
                kwargs = dict(source=actor, n_labels=8, color=self._fg_color,
                              bgcolor=self._brain_color[:3])
                kwargs.update(colorbar_kwargs or {})
                self._scalar_bar = self._renderer.scalarbar(**kwargs)
            self._renderer.set_camera(
                update=False, reset_camera=False, **views_dicts[hemi][v])
        # 4) update the scalar bar and opacity
        self.update_lut(alpha=alpha)
    def remove_data(self):
        """Remove rendered data from the mesh."""
        # removes the registered 'data' actors and re-renders
        self._remove('data', render=True)
    def _iter_views(self, hemi):
        """Iterate over rows and columns that need to be added to."""
        # subplot indices along the hemisphere axis
        hemi_dict = dict(lh=[0], rh=[0], vol=[0])
        if self._hemi == 'split':
            # in split mode rh gets its own slot and vol spans both
            hemi_dict.update(rh=[1], vol=[0, 1])
        for vi, view in enumerate(self._views):
            # subplot indices along the view axis
            view_dict = dict(lh=[vi], rh=[vi], vol=[vi])
            if self._hemi == 'split':
                view_dict.update(vol=[vi, vi])
            if self._view_layout == 'vertical':
                rows, cols = view_dict, hemi_dict  # views are rows, hemis cols
            else:
                rows, cols = hemi_dict, view_dict  # hemis are rows, views cols
            for ri, ci in zip(rows[hemi], cols[hemi]):
                # activate the subplot before yielding so callers can draw
                self._renderer.subplot(ri, ci)
                yield ri, ci, view
def remove_labels(self):
"""Remove all the ROI labels from the image."""
for hemi in self._hemis:
mesh = self._layered_meshes[hemi]
for label in self._labels[hemi]:
mesh.remove_overlay(label.name)
self._labels[hemi].clear()
self._renderer._update()
def remove_annotations(self):
"""Remove all annotations from the image."""
for hemi in self._hemis:
mesh = self._layered_meshes[hemi]
mesh.remove_overlay(self._annots[hemi])
self._annots[hemi].clear()
self._renderer._update()
    def _add_volume_data(self, hemi, src, volume_options):
        """Build (if needed) and register the volume rendering actors for
        ``src``; return the positive and negative volume actors."""
        from ..backends._pyvista import _hide_testing_actor
        _validate_type(src, SourceSpaces, 'src')
        _check_option('src.kind', src.kind, ('volume',))
        _validate_type(
            volume_options, (dict, 'numeric', None), 'volume_options')
        assert hemi == 'vol'
        if not isinstance(volume_options, dict):
            # a bare number is interpreted as the grid resolution
            volume_options = dict(
                resolution=float(volume_options) if volume_options is not None
                else None)
        volume_options = _handle_default('volume_options', volume_options)
        allowed_types = (
            ['resolution', (None, 'numeric')],
            ['blending', (str,)],
            ['alpha', ('numeric', None)],
            ['surface_alpha', (None, 'numeric')],
            ['silhouette_alpha', (None, 'numeric')],
            ['silhouette_linewidth', ('numeric',)],
        )
        for key, types in allowed_types:
            _validate_type(volume_options[key], types,
                           f'volume_options[{repr(key)}]')
        extra_keys = set(volume_options) - set(a[0] for a in allowed_types)
        if len(extra_keys):
            raise ValueError(
                f'volume_options got unknown keys {sorted(extra_keys)}')
        blending = _check_option('volume_options["blending"]',
                                 volume_options['blending'],
                                 ('composite', 'mip'))
        alpha = volume_options['alpha']
        if alpha is None:
            # vector data defaults to a more transparent volume
            alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1.
        alpha = np.clip(float(alpha), 0., 1.)
        resolution = volume_options['resolution']
        surface_alpha = volume_options['surface_alpha']
        if surface_alpha is None:
            surface_alpha = min(alpha / 2., 0.1)
        silhouette_alpha = volume_options['silhouette_alpha']
        if silhouette_alpha is None:
            silhouette_alpha = surface_alpha / 4.
        silhouette_linewidth = volume_options['silhouette_linewidth']
        del volume_options
        volume_pos = self._data[hemi].get('grid_volume_pos')
        volume_neg = self._data[hemi].get('grid_volume_neg')
        center = self._data['center']
        if volume_pos is None:
            # first call: build the uniform grid and its volume mappers
            xyz = np.meshgrid(
                *[np.arange(s) for s in src[0]['shape']], indexing='ij')
            dimensions = np.array(src[0]['shape'], int)
            mult = 1000 if self._units == 'mm' else 1
            src_mri_t = src[0]['src_mri_t']['trans'].copy()
            src_mri_t[:3] *= mult
            if resolution is not None:
                resolution = resolution * mult / 1000.  # to mm
            del src, mult
            coords = np.array([c.ravel(order='F') for c in xyz]).T
            coords = apply_trans(src_mri_t, coords)
            self.geo[hemi] = Bunch(coords=coords)
            vertices = self._data[hemi]['vertices']
            assert self._data[hemi]['array'].shape[0] == len(vertices)
            # MNE constructs the source space on a uniform grid in MRI space,
            # but mne coreg can change it to be non-uniform, so we need to
            # use all three elements here
            assert np.allclose(
                src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3]))
            spacing = np.diag(src_mri_t)[:3]
            origin = src_mri_t[:3, 3] - spacing / 2.
            scalars = np.zeros(np.prod(dimensions))
            scalars[vertices] = 1.  # for the outer mesh
            grid, grid_mesh, volume_pos, volume_neg = \
                self._renderer._volume(dimensions, origin, spacing, scalars,
                                       surface_alpha, resolution, blending,
                                       center)
            self._data[hemi]['alpha'] = alpha  # incorrectly set earlier
            self._data[hemi]['grid'] = grid
            self._data[hemi]['grid_mesh'] = grid_mesh
            self._data[hemi]['grid_coords'] = coords
            self._data[hemi]['grid_src_mri_t'] = src_mri_t
            self._data[hemi]['grid_shape'] = dimensions
            self._data[hemi]['grid_volume_pos'] = volume_pos
            self._data[hemi]['grid_volume_neg'] = volume_neg
        actor_pos, _ = self._renderer.plotter.add_actor(
            volume_pos, reset_camera=False, name=None, culling=False,
            render=False)
        actor_neg = actor_mesh = None
        if volume_neg is not None:
            actor_neg, _ = self._renderer.plotter.add_actor(
                volume_neg, reset_camera=False, name=None, culling=False,
                render=False)
        grid_mesh = self._data[hemi]['grid_mesh']
        if grid_mesh is not None:
            # outer source-space surface, not pickable
            actor_mesh, prop = self._renderer.plotter.add_actor(
                grid_mesh, reset_camera=False, name=None, culling=False,
                pickable=False, render=False)
            prop.SetColor(*self._brain_color[:3])
            prop.SetOpacity(surface_alpha)
            if silhouette_alpha > 0 and silhouette_linewidth > 0:
                for _ in self._iter_views('vol'):
                    self._renderer._silhouette(
                        mesh=grid_mesh.GetInput(),
                        color=self._brain_color[:3],
                        line_width=silhouette_linewidth,
                        alpha=silhouette_alpha,
                    )
        for actor in (actor_pos, actor_neg, actor_mesh):
            if actor is not None:
                _hide_testing_actor(actor)
        return actor_pos, actor_neg
    def add_label(self, label, color=None, alpha=1, scalar_thresh=None,
                  borders=False, hemi=None, subdir=None,
                  reset_camera=True):
        """Add an ROI label to the image.
        Parameters
        ----------
        label : str | instance of Label
            Label filepath or name. Can also be an instance of
            an object with attributes "hemi", "vertices", "name", and
            optionally "color" and "values" (if scalar_thresh is not None).
        color : matplotlib-style color | None
            Anything matplotlib accepts: string, RGB, hex, etc. (default
            "crimson").
        alpha : float in [0, 1]
            Alpha level to control opacity.
        scalar_thresh : None | float
            Threshold the label ids using this value in the label
            file's scalar field (i.e. label only vertices with
            scalar >= thresh).
        borders : bool | int
            Show only label borders. If int, specify the number of steps
            (away from the true border) along the cortical mesh to include
            as part of the border definition.
        hemi : str | None
            If None, it is assumed to belong to the hemisphere being
            shown.
        subdir : None | str
            If a label is specified as name, subdir can be used to indicate
            that the label file is in a sub-directory of the subject's
            label directory rather than in the label directory itself (e.g.
            for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label``
            ``brain.add_label('cuneus', subdir='aparc')``).
        reset_camera : bool
            If True, reset the camera view after adding the label. Defaults
            to True.
        Notes
        -----
        To remove previously added labels, run Brain.remove_labels().
        """
        from ...label import read_label
        if isinstance(label, str):
            if color is None:
                color = "crimson"
            if os.path.isfile(label):
                # a full path to a .label file was given
                filepath = label
                label = read_label(filepath)
                hemi = label.hemi
                label_name = os.path.basename(filepath).split('.')[1]
            else:
                # a bare label name: build the path from the subject's
                # label directory (optionally inside ``subdir``)
                hemi = self._check_hemi(hemi)
                label_name = label
                label_fname = ".".join([hemi, label_name, 'label'])
                if subdir is None:
                    filepath = op.join(self._subjects_dir, self._subject_id,
                                       'label', label_fname)
                else:
                    filepath = op.join(self._subjects_dir, self._subject_id,
                                       'label', subdir, label_fname)
                if not os.path.exists(filepath):
                    raise ValueError('Label file %s does not exist'
                                     % filepath)
                label = read_label(filepath)
            ids = label.vertices
            scalars = label.values
        else:
            # try to extract parameters from label instance
            try:
                hemi = label.hemi
                ids = label.vertices
                if label.name is None:
                    # give anonymous labels a unique name
                    label.name = 'unnamed' + str(self._unnamed_label_id)
                    self._unnamed_label_id += 1
                label_name = str(label.name)
                if color is None:
                    if hasattr(label, 'color') and label.color is not None:
                        color = label.color
                    else:
                        color = "crimson"
                if scalar_thresh is not None:
                    scalars = label.values
            except Exception:
                raise ValueError('Label was not a filename (str), and could '
                                 'not be understood as a class. The class '
                                 'must have attributes "hemi", "vertices", '
                                 '"name", and (if scalar_thresh is not None)'
                                 '"values"')
        hemi = self._check_hemi(hemi)
        if scalar_thresh is not None:
            # keep only the vertices whose scalar value passes the threshold
            ids = ids[scalars >= scalar_thresh]
        if self.time_viewer and self.show_traces \
                and self.traces_mode == 'label':
            # label-traces mode: also plot this label's time course
            stc = self._data["stc"]
            src = self._data["src"]
            tc = stc.extract_label_time_course(label, src=src,
                                               mode=self.label_extract_mode)
            tc = tc[0] if tc.ndim == 2 else tc[0, 0, :]
            color = next(self.color_cycle)
            line = self.mpl_canvas.plot(
                self._data['time'], tc, label=label_name,
                color=color)
        else:
            line = None
        orig_color = color
        color = _to_rgb(color, alpha, alpha=True)
        # two-entry color table: transparent background + the label color
        cmap = np.array([(0, 0, 0, 0,), color])
        ctable = np.round(cmap * 255).astype(np.uint8)
        scalars = np.zeros(self.geo[hemi].coords.shape[0])
        scalars[ids] = 1
        if borders:
            # restrict the overlay to the label border, optionally dilated
            # by ``borders`` rings of faces
            keep_idx = _mesh_borders(self.geo[hemi].faces, scalars)
            show = np.zeros(scalars.size, dtype=np.int64)
            if isinstance(borders, int):
                for _ in range(borders):
                    keep_idx = np.in1d(
                        self.geo[hemi].faces.ravel(), keep_idx)
                    keep_idx.shape = self.geo[hemi].faces.shape
                    keep_idx = self.geo[hemi].faces[np.any(
                        keep_idx, axis=1)]
                    keep_idx = np.unique(keep_idx)
            show[keep_idx] = 1
            scalars *= show
        for _, _, v in self._iter_views(hemi):
            mesh = self._layered_meshes[hemi]
            mesh.add_overlay(
                scalars=scalars,
                colormap=ctable,
                rng=[np.min(scalars), np.max(scalars)],
                opacity=alpha,
                name=label_name,
            )
            if reset_camera:
                self._renderer.set_camera(update=False, **views_dicts[hemi][v])
        if self.time_viewer and self.show_traces \
                and self.traces_mode == 'label':
            # remember original color/line so the trace can be managed later
            label._color = orig_color
            label._line = line
        self._labels[hemi].append(label)
        self._renderer._update()
@fill_doc
def add_head(self, dense=True, color='gray', alpha=0.5):
"""Add a mesh to render the outer head surface.
Parameters
----------
dense : bool
Whether to plot the dense head (``seghead``) or the less dense head
(``head``).
color : color
A list of anything matplotlib accepts: string, RGB, hex, etc.
alpha : float in [0, 1]
Alpha level to control opacity.
Notes
-----
.. versionadded:: 0.24
"""
# load head
surf = _get_head_surface('seghead' if dense else 'head',
self._subject_id, self._subjects_dir)
verts, triangles = surf['rr'], surf['tris']
verts *= 1e3 if self._units == 'mm' else 1
color = _to_rgb(color, alpha, alpha=True)
for _ in self._iter_views('vol'):
actor, _ = self._renderer.mesh(
*verts.T, triangles=triangles, color=color,
opacity=alpha, reset_camera=False, render=False)
self._add_actor('head', actor)
self._renderer._update()
    def remove_head(self):
        """Remove head objects from the rendered scene."""
        # delegate to the generic actor-removal helper, re-rendering at once
        self._remove('head', render=True)
@fill_doc
def add_skull(self, outer=True, color='gray', alpha=0.5):
"""Add a mesh to render the skull surface.
Parameters
----------
outer : bool
Adds the outer skull if ``True``, otherwise adds the inner skull.
color : color
A list of anything matplotlib accepts: string, RGB, hex, etc.
alpha : float in [0, 1]
Alpha level to control opacity.
Notes
-----
.. versionadded:: 0.24
"""
surf = _get_skull_surface('outer' if outer else 'inner',
self._subject_id, self._subjects_dir)
verts, triangles = surf['rr'], surf['tris']
verts *= 1e3 if self._units == 'mm' else 1
color = _to_rgb(color, alpha, alpha=True)
for _ in self._iter_views('vol'):
actor, _ = self._renderer.mesh(
*verts.T, triangles=triangles, color=color,
opacity=alpha, reset_camera=False, render=False)
self._add_actor('skull', actor)
self._renderer._update()
    def remove_skull(self):
        """Remove skull objects from the rendered scene."""
        # delegate to the generic actor-removal helper, re-rendering at once
        self._remove('skull', render=True)
    @fill_doc
    def add_volume_labels(self, aseg='aparc+aseg', labels=None, colors=None,
                          alpha=0.5, smooth=0.9, fill_hole_size=None,
                          legend=None):
        """Add labels to the rendering from an anatomical segmentation.
        Parameters
        ----------
        %(aseg)s
        labels : list
            Labeled regions of interest to plot. See
            :func:`mne.get_montage_volume_labels`
            for one way to determine regions of interest. Regions can also be
            chosen from the :term:`FreeSurfer LUT`.
        colors : list | matplotlib-style color | None
            A list of anything matplotlib accepts: string, RGB, hex, etc.
            (default :term:`FreeSurfer LUT` colors).
        alpha : float in [0, 1]
            Alpha level to control opacity.
        %(smooth)s
        fill_hole_size : int | None
            The size of holes to remove in the mesh in voxels. Default is None,
            no holes are removed. Warning, this dilates the boundaries of the
            surface by ``fill_hole_size`` number of voxels so use the minimal
            size.
        legend : bool | None | dict
            Add a legend displaying the names of the ``labels``. Default (None)
            is ``True`` if the number of ``labels`` is 10 or fewer.
            Can also be a dict of ``kwargs`` to pass to
            :meth:`pyvista.Plotter.add_legend`.
        Notes
        -----
        .. versionadded:: 0.24
        """
        import nibabel as nib
        # load anatomical segmentation image
        if not aseg.endswith('aseg'):
            raise RuntimeError(
                f'`aseg` file path must end with "aseg", got {aseg}')
        aseg = _check_fname(op.join(self._subjects_dir, self._subject_id,
                                    'mri', aseg + '.mgz'),
                            overwrite='read', must_exist=True)
        aseg_fname = aseg
        aseg = nib.load(aseg_fname)
        aseg_data = np.asarray(aseg.dataobj)
        # voxel -> MRI (surface RAS) transform, scaled to this Brain's units
        vox_mri_t = aseg.header.get_vox2ras_tkr()
        mult = 1e-3 if self._units == 'm' else 1
        vox_mri_t[:3] *= mult
        del aseg
        # read freesurfer lookup table
        lut, fs_colors = read_freesurfer_lut()
        if labels is None:  # assign default ROI labels based on indices
            lut_r = {v: k for k, v in lut.items()}
            labels = [lut_r[idx] for idx in DEFAULTS['volume_label_indices']]
        _validate_type(fill_hole_size, (int, None), 'fill_hole_size')
        _validate_type(legend, (bool, None), 'legend')
        if legend is None:
            legend = len(labels) < 11
        if colors is None:
            colors = [fs_colors[label] / 255 for label in labels]
        elif not isinstance(colors, (list, tuple)):
            colors = [colors] * len(labels)  # make into list
        colors = [_to_rgb(color, alpha, name=f'colors[{ci}]', alpha=True)
                  for ci, color in enumerate(colors)]
        # extract a triangulated surface per requested label value
        surfs = _marching_cubes(
            aseg_data, [lut[label] for label in labels], smooth=smooth,
            fill_hole_size=fill_hole_size)
        for label, color, (verts, triangles) in zip(labels, colors, surfs):
            if len(verts) == 0:  # not in aseg vals
                warn(f'Value {lut[label]} not found for label '
                     f'{repr(label)} in: {aseg_fname}')
                continue
            # move the surface from voxel space into MRI coordinates
            verts = apply_trans(vox_mri_t, verts)
            for _ in self._iter_views('vol'):
                actor, _ = self._renderer.mesh(
                    *verts.T, triangles=triangles, color=color,
                    opacity=alpha, reset_camera=False, render=False)
                self._add_actor('volume_labels', actor)
        if legend or isinstance(legend, dict):
            # use empty kwargs for legend = True
            legend = legend if isinstance(legend, dict) else dict()
            self._renderer.plotter.add_legend(
                list(zip(labels, colors)), **legend)
        self._renderer._update()
    def remove_volume_labels(self):
        """Remove the volume labels from the rendered scene."""
        # drop the mesh actors first, then the legend that accompanied them
        self._remove('volume_labels', render=True)
        self._renderer.plotter.remove_legend()
    def add_foci(self, coords, coords_as_verts=False, map_surface=None,
                 scale_factor=1, color="white", alpha=1, name=None,
                 hemi=None, resolution=50):
        """Add spherical foci, possibly mapping to displayed surf.
        The foci spheres can be displayed at the coordinates given, or
        mapped through a surface geometry. In other words, coordinates
        from a volume-based analysis in MNI space can be displayed on an
        inflated average surface by finding the closest vertex on the
        white surface and mapping to that vertex on the inflated mesh.
        Parameters
        ----------
        coords : ndarray, shape (n_coords, 3)
            Coordinates in stereotaxic space (default) or array of
            vertex ids (with ``coords_as_verts=True``).
        coords_as_verts : bool
            Whether the coords parameter should be interpreted as vertex ids.
        map_surface : None
            Surface to map coordinates through, or None to use raw coords.
        scale_factor : float
            Controls the size of the foci spheres (relative to 1cm).
        color : matplotlib color code
            HTML name, RGB tuple, or hex code.
        alpha : float in [0, 1]
            Opacity of focus glyphs.
        name : str
            Internal name to use.
        hemi : str | None
            If None, it is assumed to belong to the hemisphere being
            shown. If two hemispheres are being shown, an error will
            be thrown.
        resolution : int
            The resolution of the spheres.
        """
        hemi = self._check_hemi(hemi, extras=['vol'])
        # those parameters are not supported yet, only None is allowed
        _check_option('map_surface', map_surface, [None])
        # Figure out how to interpret the first parameter
        if coords_as_verts:
            # look up 3D positions of the given vertex ids on this hemi
            coords = self.geo[hemi].coords[coords]
        # Convert the color code
        color = _to_rgb(color)
        if self._units == 'm':
            # scale assumes mm-based scenes; convert when scene is in meters
            scale_factor = scale_factor / 1000.
        for _, _, v in self._iter_views(hemi):
            self._renderer.sphere(center=coords, color=color,
                                  scale=(10. * scale_factor),
                                  opacity=alpha, resolution=resolution)
            self._renderer.set_camera(**views_dicts[hemi][v])
    @verbose
    def add_sensors(self, info, trans, meg=None, eeg='original', fnirs=True,
                    ecog=True, seeg=True, dbs=True, verbose=None):
        """Add mesh objects to represent sensor positions.
        Parameters
        ----------
        %(info_not_none)s
        %(trans_not_none)s
        %(meg)s
        %(eeg)s
        %(fnirs)s
        %(ecog)s
        %(seeg)s
        %(dbs)s
        %(verbose)s
        Notes
        -----
        .. versionadded:: 0.24
        """
        _validate_type(info, Info, 'info')
        meg, eeg, fnirs, warn_meg = _handle_sensor_types(meg, eeg, fnirs)
        picks = pick_types(info, meg=('sensors' in meg),
                           ref_meg=('ref' in meg), eeg=(len(eeg) > 0),
                           ecog=ecog, seeg=seeg, dbs=dbs,
                           fnirs=(len(fnirs) > 0))
        head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0]
        del trans
        # get transforms to the "mri" coordinate frame
        to_cf_t = _get_transforms_to_coord_frame(
            info, head_mri_t, coord_frame='mri')
        if pick_types(info, eeg=True, exclude=()).size > 0 and \
                'projected' in eeg:
            # projecting EEG electrodes onto the scalp needs the head surface
            head_surf = _get_head_surface(
                'seghead', self._subject_id, self._subjects_dir)
        else:
            head_surf = None
        # Do the main plotting
        for _ in self._iter_views('vol'):
            if picks.size > 0:
                sensors_actors = _plot_sensors(
                    self._renderer, info, to_cf_t, picks, meg, eeg,
                    fnirs, warn_meg, head_surf, self._units)
                # register each actor under its channel-type key so it can
                # be removed selectively later (see remove_sensors)
                for item, actors in sensors_actors.items():
                    for actor in actors:
                        self._add_actor(item, actor)
            if 'helmet' in meg and pick_types(info, meg=True).size > 0:
                surf = get_meg_helmet_surf(info, head_mri_t)
                verts = surf['rr'] * (1 if self._units == 'm' else 1e3)
                actor, _ = self._renderer.mesh(
                    *verts.T, surf['tris'],
                    color=DEFAULTS['coreg']['helmet_color'],
                    opacity=0.25, reset_camera=False, render=False)
                self._add_actor('helmet', actor)
        self._renderer._update()
def remove_sensors(self, kind=None):
"""Remove sensors from the rendered scene.
Parameters
----------
kind : str | list | None
If None, removes all sensor-related data including the helmet.
Can be "meg", "eeg", "fnirs", "ecog", "seeg", "dbs" or "helmet"
to remove that item.
"""
all_kinds = ('meg', 'eeg', 'fnirs', 'ecog', 'seeg', 'dbs', 'helmet')
if kind is None:
for item in all_kinds:
self._remove(item, render=False)
else:
if isinstance(kind, str):
kind = [kind]
for this_kind in kind:
_check_option('kind', this_kind, all_kinds)
self._remove(this_kind, render=False)
self._renderer._update()
def add_text(self, x, y, text, name=None, color=None, opacity=1.0,
row=0, col=0, font_size=None, justification=None):
"""Add a text to the visualization.
Parameters
----------
x : float
X coordinate.
y : float
Y coordinate.
text : str
Text to add.
name : str
Name of the text (text label can be updated using update_text()).
color : tuple
Color of the text. Default is the foreground color set during
initialization (default is black or white depending on the
background color).
opacity : float
Opacity of the text (default 1.0).
row : int | None
Row index of which brain to use. Default is the top row.
col : int | None
Column index of which brain to use. Default is the left-most
column.
font_size : float | None
The font size to use.
justification : str | None
The text justification.
"""
_validate_type(name, (str, None), 'name')
name = text if name is None else name
if 'text' in self._actors and name in self._actors['text']:
raise ValueError(f'Text with the name {name} already exists')
for ri, ci, _ in self._iter_views('vol'):
if (row is None or row == ri) and (col is None or col == ci):
actor = self._renderer.text2d(
x_window=x, y_window=y, text=text, color=color,
size=font_size, justification=justification)
if 'text' not in self._actors:
self._actors['text'] = dict()
self._actors['text'][name] = actor
def remove_text(self, name=None):
"""Remove text from the rendered scene.
Parameters
----------
name : str | None
Remove specific text by name. If None, all text will be removed.
"""
_validate_type(name, (str, None), 'name')
if name is None:
for actor in self._actors['text'].values():
self._renderer.plotter.remove_actor(actor)
self._actors.pop('text')
else:
names = [None]
if 'text' in self._actors:
names += list(self._actors['text'].keys())
_check_option('name', name, names)
self._renderer.plotter.remove_actor(
self._actors['text'][name])
self._actors['text'].pop(name)
self._renderer._update()
    def _configure_label_time_course(self):
        # set up the matplotlib canvas for label-based time-course traces
        from ...label import read_labels_from_annot
        if not self.show_traces:
            return
        if self.mpl_canvas is None:
            self._configure_mplcanvas()
        else:
            self.clear_glyphs()
        self.traces_mode = 'label'
        # display the parcellation the traces will be extracted from
        self.add_annotation(self.annot, color="w", alpha=0.75)
        # now plot the time line
        self.plot_time_line(update=False)
        self.mpl_canvas.update_plot()
        for hemi in self._hemis:
            labels = read_labels_from_annot(
                subject=self._subject_id,
                parc=self.annot,
                hemi=hemi,
                subjects_dir=self._subjects_dir
            )
            # map each vertex to the index of its label (-1 = unlabeled)
            self._vertex_to_label_id[hemi] = np.full(
                self.geo[hemi].coords.shape[0], -1)
            self._annotation_labels[hemi] = labels
            for idx, label in enumerate(labels):
                self._vertex_to_label_id[hemi][label.vertices] = idx
def add_annotation(self, annot, borders=True, alpha=1, hemi=None,
remove_existing=True, color=None):
"""Add an annotation file.
Parameters
----------
annot : str | tuple
Either path to annotation file or annotation name. Alternatively,
the annotation can be specified as a ``(labels, ctab)`` tuple per
hemisphere, i.e. ``annot=(labels, ctab)`` for a single hemisphere
or ``annot=((lh_labels, lh_ctab), (rh_labels, rh_ctab))`` for both
hemispheres. ``labels`` and ``ctab`` should be arrays as returned
by :func:`nibabel.freesurfer.io.read_annot`.
borders : bool | int
Show only label borders. If int, specify the number of steps
(away from the true border) along the cortical mesh to include
as part of the border definition.
alpha : float
Opacity of the head surface. Must be between 0 and 1 (inclusive).
Default is 0.5.
hemi : str | None
If None, it is assumed to belong to the hemipshere being
shown. If two hemispheres are being shown, data must exist
for both hemispheres.
remove_existing : bool
If True (default), remove old annotations.
color : matplotlib-style color code
If used, show all annotations in the same (specified) color.
Probably useful only when showing annotation borders.
"""
from ...label import _read_annot
hemis = self._check_hemis(hemi)
# Figure out where the data is coming from
if isinstance(annot, str):
if os.path.isfile(annot):
filepath = annot
path = os.path.split(filepath)[0]
file_hemi, annot = os.path.basename(filepath).split('.')[:2]
if len(hemis) > 1:
if annot[:2] == 'lh.':
filepaths = [filepath, op.join(path, 'rh' + annot[2:])]
elif annot[:2] == 'rh.':
filepaths = [op.join(path, 'lh' + annot[2:], filepath)]
else:
raise RuntimeError('To add both hemispheres '
'simultaneously, filename must '
'begin with "lh." or "rh."')
else:
filepaths = [filepath]
else:
filepaths = []
for hemi in hemis:
filepath = op.join(self._subjects_dir,
self._subject_id,
'label',
".".join([hemi, annot, 'annot']))
if not os.path.exists(filepath):
raise ValueError('Annotation file %s does not exist'
% filepath)
filepaths += [filepath]
annots = []
for hemi, filepath in zip(hemis, filepaths):
# Read in the data
labels, cmap, _ = _read_annot(filepath)
annots.append((labels, cmap))
else:
annots = [annot] if len(hemis) == 1 else annot
annot = 'annotation'
for hemi, (labels, cmap) in zip(hemis, annots):
# Maybe zero-out the non-border vertices
self._to_borders(labels, hemi, borders)
# Handle null labels properly
cmap[:, 3] = 255
bgcolor = np.round(np.array(self._brain_color) * 255).astype(int)
bgcolor[-1] = 0
cmap[cmap[:, 4] < 0, 4] += 2 ** 24 # wrap to positive
cmap[cmap[:, 4] <= 0, :4] = bgcolor
if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0):
cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]])))
# Set label ids sensibly
order = np.argsort(cmap[:, -1])
cmap = cmap[order]
ids = np.searchsorted(cmap[:, -1], labels)
cmap = cmap[:, :4]
# Set the alpha level
alpha_vec = cmap[:, 3]
alpha_vec[alpha_vec > 0] = alpha * 255
# Override the cmap when a single color is used
if color is not None:
rgb = np.round(np.multiply(_to_rgb(color), 255))
cmap[:, :3] = rgb.astype(cmap.dtype)
ctable = cmap.astype(np.float64)
for _ in self._iter_views(hemi):
mesh = self._layered_meshes[hemi]
mesh.add_overlay(
scalars=ids,
colormap=ctable,
rng=[np.min(ids), np.max(ids)],
opacity=alpha,
name=annot,
)
self._annots[hemi].append(annot)
if not self.time_viewer or self.traces_mode == 'vertex':
self._renderer._set_colormap_range(
mesh._actor, cmap.astype(np.uint8), None)
self._renderer._update()
    def close(self):
        """Close all figures and cleanup data structure."""
        # flag the instance as closed before tearing down the renderer
        self._closed = True
        self._renderer.close()
    def show(self):
        """Display the window."""
        # delegate to the backend renderer
        self._renderer.show()
    @fill_doc
    def show_view(self, view=None, roll=None, distance=None, *,
                  row=None, col=None, hemi=None, align=True,
                  azimuth=None, elevation=None, focalpoint=None):
        """Orient camera to display view.
        Parameters
        ----------
        %(view)s
        %(roll)s
        %(distance)s
        row : int | None
            The row to set. Default all rows.
        col : int | None
            The column to set. Default all columns.
        hemi : str | None
            Which hemi to use for view lookup (when in "both" mode).
        align : bool
            If True, consider view arguments relative to canonical MRI
            directions (closest to MNI for the subject) rather than native MRI
            space. This helps when MRIs are not in standard orientation (e.g.,
            have large rotations).
        %(azimuth)s
        %(elevation)s
        %(focalpoint)s
        """
        _validate_type(row, ('int-like', None), 'row')
        _validate_type(col, ('int-like', None), 'col')
        hemi = self._hemi if hemi is None else hemi
        if hemi == 'split':
            # in split layout, infer the hemisphere from the subplot position
            if (self._view_layout == 'vertical' and col == 1 or
                    self._view_layout == 'horizontal' and row == 1):
                hemi = 'rh'
            else:
                hemi = 'lh'
        _validate_type(view, (str, None), 'view')
        view_params = dict(azimuth=azimuth, elevation=elevation, roll=roll,
                           distance=distance, focalpoint=focalpoint)
        if view is not None:  # view_params take precedence
            view_params = {param: val for param, val in view_params.items()
                           if val is not None}  # no overwriting with None
            view_params = dict(views_dicts[hemi].get(view), **view_params)
        xfm = self._rigid if align else None
        for h in self._hemis:
            for ri, ci, _ in self._iter_views(h):
                # apply only to the requested row/column (None matches all)
                if (row is None or row == ri) and (col is None or col == ci):
                    self._renderer.set_camera(
                        **view_params, reset_camera=False, rigid=xfm)
        self._renderer._update()
def reset_view(self):
"""Reset the camera."""
for h in self._hemis:
for _, _, v in self._iter_views(h):
self._renderer.set_camera(**views_dicts[h][v],
reset_camera=False)
def save_image(self, filename=None, mode='rgb'):
"""Save view from all panels to disk.
Parameters
----------
filename : str
Path to new image file.
mode : str
Either 'rgb' or 'rgba' for values to return.
"""
if filename is None:
filename = _generate_default_filename(".png")
_save_ndarray_img(
filename, self.screenshot(mode=mode, time_viewer=True))
    @fill_doc
    def screenshot(self, mode='rgb', time_viewer=False):
        """Generate a screenshot of current view.
        Parameters
        ----------
        mode : str
            Either 'rgb' or 'rgba' for values to return.
        %(brain_screenshot_time_viewer)s
        Returns
        -------
        screenshot : array
            Image pixel values.
        """
        n_channels = 3 if mode == 'rgb' else 4
        img = self._renderer.screenshot(mode)
        logger.debug(f'Got screenshot of size {img.shape}')
        if time_viewer and self.time_viewer and \
                self.show_traces and \
                not self.separate_canvas:
            # paste the matplotlib traces canvas below the 3D screenshot
            from matplotlib.image import imread
            canvas = self.mpl_canvas.fig.canvas
            canvas.draw_idle()
            fig = self.mpl_canvas.fig
            with BytesIO() as output:
                # Need to pass dpi here so it uses the physical (HiDPI) DPI
                # rather than logical DPI when saving in most cases.
                # But when matplotlib uses HiDPI and VTK doesn't
                # (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work,
                # so let's just calculate the DPI we need to get
                # the correct size output based on the widths being equal
                size_in = fig.get_size_inches()
                dpi = fig.get_dpi()
                want_size = tuple(x * dpi for x in size_in)
                n_pix = want_size[0] * want_size[1]
                logger.debug(
                    f'Saving figure of size {size_in} @ {dpi} DPI '
                    f'({want_size} = {n_pix} pixels)')
                # Sometimes there can be off-by-one errors here (e.g.,
                # if in mpl int() rather than int(round()) is used to
                # compute the number of pixels) so rather than use "raw"
                # format and try to reshape ourselves, just write to PNG
                # and read it, which has the dimensions encoded for us.
                fig.savefig(output, dpi=dpi, format='png',
                            facecolor=self._bg_color, edgecolor='none')
                output.seek(0)
                trace_img = imread(output, format='png')[:, :, :n_channels]
                trace_img = np.clip(
                    np.round(trace_img * 255), 0, 255).astype(np.uint8)
            bgcolor = np.array(self._brain_color[:n_channels]) / 255
            img = concatenate_images([img, trace_img], bgcolor=bgcolor,
                                     n_channels=n_channels)
        return img
@contextlib.contextmanager
def _no_lut_update(self, why):
orig = self._lut_locked
self._lut_locked = why
try:
yield
finally:
self._lut_locked = orig
    @fill_doc
    def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None):
        """Update color map.
        Parameters
        ----------
        %(fmin_fmid_fmax)s
        alpha : float | None
            Alpha to use in the update.
        """
        args = f'{fmin}, {fmid}, {fmax}, {alpha}'
        if self._lut_locked is not None:
            # updates are suspended (see _no_lut_update); skip this call
            logger.debug(f'LUT update postponed with {args}')
            return
        logger.debug(f'Updating LUT with {args}')
        center = self._data['center']
        colormap = self._data['colormap']
        transparent = self._data['transparent']
        lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')}
        _update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax)
        assert all(val is not None for val in lims.values())
        self._data.update(lims)
        # rebuild the 0-255 color table from the new control points
        self._data['ctable'] = np.round(
            calculate_lut(colormap, alpha=1., center=center,
                          transparent=transparent, **lims) *
            255).astype(np.uint8)
        # update our values
        rng = self._cmap_range
        ctable = self._data['ctable']
        for hemi in ['lh', 'rh', 'vol']:
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                # surface overlays
                if hemi in self._layered_meshes:
                    mesh = self._layered_meshes[hemi]
                    mesh.update_overlay(name='data',
                                        colormap=self._data['ctable'],
                                        opacity=alpha,
                                        rng=rng)
                    self._renderer._set_colormap_range(
                        mesh._actor, ctable, self._scalar_bar, rng,
                        self._brain_color)
                # volume renderings (positive/negative parts)
                grid_volume_pos = hemi_data.get('grid_volume_pos')
                grid_volume_neg = hemi_data.get('grid_volume_neg')
                for grid_volume in (grid_volume_pos, grid_volume_neg):
                    if grid_volume is not None:
                        self._renderer._set_volume_range(
                            grid_volume, ctable, hemi_data['alpha'],
                            self._scalar_bar, rng)
                # vector glyph actors
                glyph_actor = hemi_data.get('glyph_actor')
                if glyph_actor is not None:
                    for glyph_actor_ in glyph_actor:
                        self._renderer._set_colormap_range(
                            glyph_actor_, ctable, self._scalar_bar, rng)
        if self.time_viewer:
            # sync GUI widgets without re-triggering this method
            with self._no_lut_update(f'update_lut {args}'):
                for key in ('fmin', 'fmid', 'fmax'):
                    self.callbacks[key](lims[key])
        self._renderer._update()
def set_data_smoothing(self, n_steps):
"""Set the number of smoothing steps.
Parameters
----------
n_steps : int
Number of smoothing steps.
"""
from ...morph import _hemi_morph
for hemi in ['lh', 'rh']:
hemi_data = self._data.get(hemi)
if hemi_data is not None:
if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]:
continue
vertices = hemi_data['vertices']
if vertices is None:
raise ValueError(
'len(data) < nvtx (%s < %s): the vertices '
'parameter must not be None'
% (len(hemi_data), self.geo[hemi].x.shape[0]))
morph_n_steps = 'nearest' if n_steps == -1 else n_steps
with use_log_level(False):
smooth_mat = _hemi_morph(
self.geo[hemi].orig_faces,
np.arange(len(self.geo[hemi].coords)),
vertices, morph_n_steps, maps=None, warn=False)
self._data[hemi]['smooth_mat'] = smooth_mat
self.set_time_point(self._data['time_idx'])
self._data['smoothing_steps'] = n_steps
@property
def _n_times(self):
return len(self._times) if self._times is not None else None
    @property
    def time_interpolation(self):
        """The interpolation mode."""
        # one of 'linear', 'nearest', 'zero', 'slinear', 'quadratic',
        # 'cubic' — validated in set_time_interpolation()
        return self._time_interpolation
    @fill_doc
    def set_time_interpolation(self, interpolation):
        """Set the interpolation mode.
        Parameters
        ----------
        %(brain_time_interpolation)s
        """
        self._time_interpolation = _check_option(
            'interpolation',
            interpolation,
            ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic')
        )
        self._time_interp_funcs = dict()
        self._time_interp_inv = None
        if self._times is not None:
            idx = np.arange(self._n_times)
            for hemi in ['lh', 'rh', 'vol']:
                hemi_data = self._data.get(hemi)
                if hemi_data is not None:
                    array = hemi_data['array']
                    # interpolator from a (fractional) frame index to data
                    self._time_interp_funcs[hemi] = _safe_interp1d(
                        idx, array, self._time_interpolation, axis=-1,
                        assume_sorted=True)
            # inverse mapping: frame index -> time value
            self._time_interp_inv = _safe_interp1d(idx, self._times)
    def set_time_point(self, time_idx):
        """Set the time point shown (can be a float to interpolate).
        Parameters
        ----------
        time_idx : int | float
            The time index to use. Can be a float to use interpolation
            between indices.
        """
        self._current_act_data = dict()
        time_actor = self._data.get('time_actor', None)
        time_label = self._data.get('time_label', None)
        for hemi in ['lh', 'rh', 'vol']:
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                array = hemi_data['array']
                # interpolate in time
                vectors = None
                if array.ndim == 1:
                    # static data: no time axis to interpolate over
                    act_data = array
                    self._current_time = 0
                else:
                    act_data = self._time_interp_funcs[hemi](time_idx)
                    self._current_time = self._time_interp_inv(time_idx)
                    if array.ndim == 3:
                        # vector-valued data: display the vector norm
                        vectors = act_data
                        act_data = np.linalg.norm(act_data, axis=1)
                    # NOTE(review): duplicate of the assignment above;
                    # appears redundant but is harmless
                    self._current_time = self._time_interp_inv(time_idx)
                self._current_act_data[hemi] = act_data
                if time_actor is not None and time_label is not None:
                    time_actor.SetInput(time_label(self._current_time))
                # update the volume interpolation
                grid = hemi_data.get('grid')
                if grid is not None:
                    vertices = self._data['vol']['vertices']
                    values = self._current_act_data['vol']
                    rng = self._cmap_range
                    fill = 0 if self._data['center'] is not None else rng[0]
                    _cell_data(grid)['values'].fill(fill)
                    # XXX for sided data, we probably actually need two
                    # volumes as composite/MIP needs to look at two
                    # extremes... for now just use abs. Eventually we can add
                    # two volumes if we want.
                    _cell_data(grid)['values'][vertices] = values
                # interpolate in space
                smooth_mat = hemi_data.get('smooth_mat')
                if smooth_mat is not None:
                    act_data = smooth_mat.dot(act_data)
                # update the mesh scalar values
                if hemi in self._layered_meshes:
                    mesh = self._layered_meshes[hemi]
                    if 'data' in mesh._overlays:
                        mesh.update_overlay(name='data', scalars=act_data)
                    else:
                        mesh.add_overlay(
                            scalars=act_data,
                            colormap=self._data['ctable'],
                            rng=self._cmap_range,
                            opacity=None,
                            name='data',
                        )
                # update the glyphs
                if vectors is not None:
                    self._update_glyphs(hemi, vectors)
        self._data['time_idx'] = time_idx
        self._renderer._update()
def set_time(self, time):
"""Set the time to display (in seconds).
Parameters
----------
time : float
The time to show, in seconds.
"""
if self._times is None:
raise ValueError(
'Cannot set time when brain has no defined times.')
elif min(self._times) <= time <= max(self._times):
self.set_time_point(np.interp(float(time), self._times,
np.arange(self._n_times)))
else:
raise ValueError(
f'Requested time ({time} s) is outside the range of '
f'available times ({min(self._times)}-{max(self._times)} s).')
    def _update_glyphs(self, hemi, vectors):
        # create or refresh the quiver glyphs showing vector-valued data
        hemi_data = self._data.get(hemi)
        assert hemi_data is not None
        vertices = hemi_data['vertices']
        vector_alpha = self._data['vector_alpha']
        scale_factor = self._data['scale_factor']
        vertices = slice(None) if vertices is None else vertices
        x, y, z = np.array(self.geo[hemi].coords)[vertices].T
        if hemi_data['glyph_actor'] is None:
            # first call: actors still have to be created, one per view
            add = True
            hemi_data['glyph_actor'] = list()
        else:
            add = False
        count = 0
        for _ in self._iter_views(hemi):
            if hemi_data['glyph_dataset'] is None:
                # create the quiver source once and reuse it afterwards
                glyph_mapper, glyph_dataset = self._renderer.quiver3d(
                    x, y, z,
                    vectors[:, 0], vectors[:, 1], vectors[:, 2],
                    color=None,
                    mode='2darrow',
                    scale_mode='vector',
                    scale=scale_factor,
                    opacity=vector_alpha,
                    name=str(hemi) + "_glyph"
                )
                hemi_data['glyph_dataset'] = glyph_dataset
                hemi_data['glyph_mapper'] = glyph_mapper
            else:
                # update the vector data of the existing source in place
                glyph_dataset = hemi_data['glyph_dataset']
                _point_data(glyph_dataset)['vec'] = vectors
                glyph_mapper = hemi_data['glyph_mapper']
            if add:
                glyph_actor = self._renderer._actor(glyph_mapper)
                prop = glyph_actor.GetProperty()
                prop.SetLineWidth(2.)
                prop.SetOpacity(vector_alpha)
                self._renderer.plotter.add_actor(glyph_actor, render=False)
                hemi_data['glyph_actor'].append(glyph_actor)
            else:
                # reuse the stored actor for this view (iteration order)
                glyph_actor = hemi_data['glyph_actor'][count]
            count += 1
            self._renderer._set_colormap_range(
                actor=glyph_actor,
                ctable=self._data['ctable'],
                scalar_bar=None,
                rng=self._cmap_range,
            )
@property
def _cmap_range(self):
dt_max = self._data['fmax']
if self._data['center'] is None:
dt_min = self._data['fmin']
else:
dt_min = -1 * dt_max
rng = [dt_min, dt_max]
return rng
def _update_fscale(self, fscale):
"""Scale the colorbar points."""
fmin = self._data['fmin'] * fscale
fmid = self._data['fmid'] * fscale
fmax = self._data['fmax'] * fscale
self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
def _update_auto_scaling(self, restore=False):
    """Recompute colormap limits from the currently shown activation.

    Parameters
    ----------
    restore : bool
        If True and the user supplied an explicit ``clim``, restore those
        limits instead of auto-scaling from the data.
    """
    user_clim = self._data['clim']
    # Explicit user 'lims' rule out positive-only (diverging) limits.
    if user_clim is not None and 'lims' in user_clim:
        allow_pos_lims = False
    else:
        allow_pos_lims = True
    if user_clim is not None and restore:
        clim = user_clim
    else:
        clim = 'auto'
    colormap = self._data['colormap']
    transparent = self._data['transparent']
    mapdata = _process_clim(
        clim, colormap, transparent,
        np.concatenate(list(self._current_act_data.values())),
        allow_pos_lims)
    # 'pos_lims' marks a diverging (signed) colormap in mapdata.
    diverging = 'pos_lims' in mapdata['clim']
    colormap = mapdata['colormap']
    scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']
    transparent = mapdata['transparent']
    del mapdata
    fmin, fmid, fmax = scale_pts
    center = 0. if diverging else None
    self._data['center'] = center
    self._data['colormap'] = colormap
    self._data['transparent'] = transparent
    self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
def _to_time_index(self, value):
"""Return the interpolated time index of the given time value."""
time = self._data['time']
value = np.interp(value, time, np.arange(len(time)))
return value
@property
def data(self):
    """Data used by time viewer and color bar widgets."""
    return self._data

@property
def labels(self):
    """The stored labels, as managed elsewhere on this instance."""
    return self._labels

@property
def views(self):
    """The configured view names."""
    return self._views

@property
def hemis(self):
    """The hemispheres handled by this instance."""
    return self._hemis
def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,
                framerate=24, interpolation=None, codec=None,
                bitrate=None, callback=None, time_viewer=False, **kwargs):
    """Render the frames and encode them to ``filename`` via imageio."""
    import imageio
    # Disable user interaction while the frames are rendered.
    with self._renderer._disabled_interaction():
        images = self._make_movie_frames(
            time_dilation, tmin, tmax, framerate, interpolation, callback,
            time_viewer)
    # find imageio FFMPEG parameters
    if 'fps' not in kwargs:
        kwargs['fps'] = framerate
    if codec is not None:
        kwargs['codec'] = codec
    if bitrate is not None:
        kwargs['bitrate'] = bitrate
    imageio.mimwrite(filename, images, **kwargs)
def _save_movie_tv(self, filename, time_dilation=4., tmin=None, tmax=None,
                   framerate=24, interpolation=None, codec=None,
                   bitrate=None, callback=None, time_viewer=False,
                   **kwargs):
    """Save a movie with time-viewer UI feedback (status bar, busy cursor)."""

    def frame_callback(frame, n_frames):
        # The extra call with frame == n_frames marks the final ImageIO
        # encoding step rather than a rendered frame.
        if frame == n_frames:
            # On the ImageIO step
            self.status_msg.set_value(
                "Saving with ImageIO: %s"
                % filename
            )
            self.status_msg.show()
            self.status_progress.hide()
            self._renderer._status_bar_update()
        else:
            self.status_msg.set_value(
                "Rendering images (frame %d / %d) ..."
                % (frame + 1, n_frames)
            )
            self.status_msg.show()
            self.status_progress.show()
            self.status_progress.set_range([0, n_frames - 1])
            self.status_progress.set_value(frame)
            self.status_progress.update()
            self.status_msg.update()
            self._renderer._status_bar_update()

    # set cursor to busy while rendering
    default_cursor = self._renderer._window_get_cursor()
    self._renderer._window_set_cursor(
        self._renderer._window_new_cursor("WaitCursor"))
    try:
        self._save_movie(filename, time_dilation, tmin, tmax,
                         framerate, interpolation, codec,
                         bitrate, frame_callback, time_viewer, **kwargs)
    except (Exception, KeyboardInterrupt):
        # Deliberately swallowed: report the abort but keep the GUI alive.
        warn('Movie saving aborted:\n' + traceback.format_exc())
    finally:
        # Always restore the original cursor, even on failure.
        self._renderer._window_set_cursor(default_cursor)
@fill_doc
def save_movie(self, filename=None, time_dilation=4., tmin=None, tmax=None,
               framerate=24, interpolation=None, codec=None,
               bitrate=None, callback=None, time_viewer=False, **kwargs):
    """Save a movie (for data with a time axis).

    The movie is created through the :mod:`imageio` module. The format is
    determined by the extension, and additional options can be specified
    through keyword arguments that depend on the format, see
    :doc:`imageio's format page <imageio:formats/index>`.

    .. Warning::
        This method assumes that time is specified in seconds when adding
        data. If time is specified in milliseconds this will result in
        movies 1000 times longer than expected.

    Parameters
    ----------
    filename : str
        Path at which to save the movie. The extension determines the
        format (e.g., ``'*.mov'``, ``'*.gif'``, ...; see the :mod:`imageio`
        documentation for available formats).
    time_dilation : float
        Factor by which to stretch time (default 4). For example, an epoch
        from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this
        would result in a 2.8 s long movie.
    tmin : float
        First time point to include (default: all data).
    tmax : float
        Last time point to include (default: all data).
    framerate : float
        Framerate of the movie (frames per second, default 24).
    %(brain_time_interpolation)s
        If None, it uses the current ``brain.interpolation``,
        which defaults to ``'nearest'``. Defaults to None.
    codec : str | None
        The codec to use.
    bitrate : float | None
        The bitrate to use.
    callback : callable | None
        A function to call on each iteration. Useful for status message
        updates. It will be passed keyword arguments ``frame`` and
        ``n_frames``.
    %(brain_screenshot_time_viewer)s
    **kwargs : dict
        Specify additional options for :mod:`imageio`.
    """
    if filename is None:
        filename = _generate_default_filename(".mp4")
    # With a running time viewer, delegate to the UI-aware variant.
    func = self._save_movie_tv if self.time_viewer else self._save_movie
    func(filename, time_dilation, tmin, tmax,
         framerate, interpolation, codec,
         bitrate, callback, time_viewer, **kwargs)
def _make_movie_frames(self, time_dilation, tmin, tmax, framerate,
                       interpolation, callback, time_viewer):
    """Render and return the list of screenshot frames for a movie."""
    from math import floor

    # find tmin
    if tmin is None:
        tmin = self._times[0]
    elif tmin < self._times[0]:
        raise ValueError("tmin=%r is smaller than the first time point "
                         "(%r)" % (tmin, self._times[0]))

    # find indexes at which to create frames
    if tmax is None:
        tmax = self._times[-1]
    elif tmax > self._times[-1]:
        raise ValueError("tmax=%r is greater than the latest time point "
                         "(%r)" % (tmax, self._times[-1]))
    # One frame per 1/framerate of (dilated) wall-clock time.
    n_frames = floor((tmax - tmin) * time_dilation * framerate)
    times = np.arange(n_frames, dtype=float)
    times /= framerate * time_dilation
    times += tmin
    # Fractional sample indices for each frame time.
    time_idx = np.interp(times, self._times, np.arange(self._n_times))

    n_times = len(time_idx)
    if n_times == 0:
        raise ValueError("No time points selected")

    logger.debug("Save movie for time points/samples\n%s\n%s"
                 % (times, time_idx))
    # Sometimes the first screenshot is rendered with a different
    # resolution on OS X
    self.screenshot(time_viewer=time_viewer)
    old_mode = self.time_interpolation
    if interpolation is not None:
        self.set_time_interpolation(interpolation)
    try:
        images = [
            self.screenshot(time_viewer=time_viewer)
            for _ in self._iter_time(time_idx, callback)]
    finally:
        # Always restore the caller's interpolation mode.
        self.set_time_interpolation(old_mode)
    if callback is not None:
        # Final callback marks the encoding step (frame == n_frames).
        callback(frame=len(time_idx), n_frames=len(time_idx))
    return images
def _iter_time(self, time_idx, callback):
    """Iterate through time points, then reset to current time.

    Parameters
    ----------
    time_idx : array_like
        Time point indexes through which to iterate.
    callback : callable | None
        Callback to call before yielding each frame.

    Yields
    ------
    idx : int | float
        Current index.

    Notes
    -----
    Used by movie and image sequence saving functions.
    """
    if self.time_viewer:
        # Route through the time-viewer callback so widgets stay in sync.
        func = partial(self.callbacks["time"],
                       update_widget=True)
    else:
        func = self.set_time_point
    current_time_idx = self._data["time_idx"]
    for ii, idx in enumerate(time_idx):
        func(idx)
        if callback is not None:
            callback(frame=ii, n_frames=len(time_idx))
        yield idx

    # Restore original time index
    func(current_time_idx)
def _check_stc(self, hemi, array, vertices):
    """Extract ``(stc, array, vertices)`` for one hemisphere.

    If ``array`` is a SourceEstimate, pull out the data and vertices for
    ``hemi`` ('vol' selects the volume part of volume/mixed estimates);
    otherwise ``array``/``vertices`` pass through with ``stc=None``.
    """
    from ...source_estimate import (
        _BaseSourceEstimate, _BaseSurfaceSourceEstimate,
        _BaseMixedSourceEstimate, _BaseVolSourceEstimate
    )
    if isinstance(array, _BaseSourceEstimate):
        stc = array
        stc_surf = stc_vol = None
        if isinstance(stc, _BaseSurfaceSourceEstimate):
            stc_surf = stc
        elif isinstance(stc, _BaseMixedSourceEstimate):
            stc_surf = stc.surface() if hemi != 'vol' else None
            stc_vol = stc.volume() if hemi == 'vol' else None
        elif isinstance(stc, _BaseVolSourceEstimate):
            stc_vol = stc if hemi == 'vol' else None
        else:
            raise TypeError("stc not supported")

        if stc_surf is None and stc_vol is None:
            raise ValueError("No data to be added")
        if stc_surf is not None:
            # Surface data lives in hemi-specific attributes (lh_data/rh_data).
            array = getattr(stc_surf, hemi + '_data')
            vertices = stc_surf.vertices[0 if hemi == 'lh' else 1]
        if stc_vol is not None:
            array = stc_vol.data
            vertices = np.concatenate(stc_vol.vertices)
    else:
        stc = None
    return stc, array, vertices
def _check_hemi(self, hemi, extras=()):
    """Check for safe single-hemi input, returns str.

    ``None`` is only accepted when a single hemisphere is displayed, in
    which case that hemisphere is returned.  ``extras`` lists additional
    allowed values (e.g. 'vol').
    """
    _validate_type(hemi, (None, str), 'hemi')
    if hemi is None:
        if self._hemi not in ['lh', 'rh']:
            raise ValueError('hemi must not be None when both '
                             'hemispheres are displayed')
        hemi = self._hemi
    _check_option('hemi', hemi, ('lh', 'rh') + tuple(extras))
    return hemi
def _check_hemis(self, hemi):
"""Check for safe dual or single-hemi input, returns list."""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
hemi = ['lh', 'rh']
else:
hemi = [self._hemi]
elif hemi not in ['lh', 'rh']:
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' + extra)
else:
hemi = [hemi]
return hemi
def _to_borders(self, label, hemi, borders, restrict_idx=None):
    """Convert a label/parc to borders.

    Mutates ``label`` in-place: vertices not on a border between label
    values are zeroed out.  An integer ``borders`` dilates the border
    that many times across face neighbourhoods; ``restrict_idx`` limits
    which vertices may be kept.
    """
    if not isinstance(borders, (bool, int)) or borders < 0:
        raise ValueError('borders must be a bool or positive integer')
    if borders:
        n_vertices = label.size
        edges = mesh_edges(self.geo[hemi].orig_faces)
        edges = edges.tocoo()
        # An edge lies on a border when its endpoints carry different labels.
        border_edges = label[edges.row] != label[edges.col]
        show = np.zeros(n_vertices, dtype=np.int64)
        keep_idx = np.unique(edges.row[border_edges])
        if isinstance(borders, int):
            # NOTE: bool is a subclass of int, so borders=True dilates once.
            for _ in range(borders):
                keep_idx = np.in1d(
                    self.geo[hemi].orig_faces.ravel(), keep_idx)
                keep_idx.shape = self.geo[hemi].orig_faces.shape
                keep_idx = self.geo[hemi].orig_faces[
                    np.any(keep_idx, axis=1)]
                keep_idx = np.unique(keep_idx)
            if restrict_idx is not None:
                keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)]
        show[keep_idx] = 1
        # Zero-out non-border vertices in-place.
        label *= show
def enable_depth_peeling(self):
    """Enable depth peeling."""
    self._renderer.enable_depth_peeling()

def get_picked_points(self):
    """Return the vertices of the picked points.

    Returns
    -------
    points : list of int | None
        The vertices picked by the time viewer.
    """
    # Only meaningful once the time viewer has been set up; otherwise
    # implicitly returns None.
    if hasattr(self, "time_viewer"):
        return self.picked_points

def __hash__(self):
    """Hash the object."""
    # Hashing is deliberately unsupported for these instances.
    raise NotImplementedError
def _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False):
"""Work around interp1d not liking singleton dimensions."""
from scipy.interpolate import interp1d
if y.shape[axis] == 1:
def func(x):
return np.take(y, np.zeros(np.asarray(x).shape, int), axis=axis)
return func
else:
return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted)
def _update_limits(fmin, fmid, fmax, center, array):
if center is None:
if fmin is None:
fmin = array.min() if array.size > 0 else 0
if fmax is None:
fmax = array.max() if array.size > 0 else 1
else:
if fmin is None:
fmin = 0
if fmax is None:
fmax = np.abs(center - array).max() if array.size > 0 else 1
if fmid is None:
fmid = (fmin + fmax) / 2.
if fmin >= fmid:
raise RuntimeError('min must be < mid, got %0.4g >= %0.4g'
% (fmin, fmid))
if fmid >= fmax:
raise RuntimeError('mid must be < max, got %0.4g >= %0.4g'
% (fmid, fmax))
return fmin, fmid, fmax
def _update_monotonic(lims, fmin, fmid, fmax):
if fmin is not None:
lims['fmin'] = fmin
if lims['fmax'] < fmin:
logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmin}')
lims['fmax'] = fmin
if lims['fmid'] < fmin:
logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmin}')
lims['fmid'] = fmin
assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
if fmid is not None:
lims['fmid'] = fmid
if lims['fmin'] > fmid:
logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmid}')
lims['fmin'] = fmid
if lims['fmax'] < fmid:
logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmid}')
lims['fmax'] = fmid
assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
if fmax is not None:
lims['fmax'] = fmax
if lims['fmin'] > fmax:
logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmax}')
lims['fmin'] = fmax
if lims['fmid'] > fmax:
logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmax}')
lims['fmid'] = fmax
assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
def _get_range(brain):
val = np.abs(np.concatenate(list(brain._current_act_data.values())))
return [np.min(val), np.max(val)]
class _FakeIren():
def EnterEvent(self):
pass
def MouseMoveEvent(self):
pass
def LeaveEvent(self):
pass
def SetEventInformation(self, *args, **kwargs):
pass
def CharEvent(self):
pass
def KeyPressEvent(self, *args, **kwargs):
pass
def KeyReleaseEvent(self, *args, **kwargs):
pass
| drammock/mne-python | mne/viz/_brain/_brain.py | Python | bsd-3-clause | 151,930 |
#!/usr/bin/env python
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import sys
from stix.core import STIXPackage
def parse_stix(pkg):
    """Print a summary of the malware TTPs and indicators in a STIX package."""
    print("== MALWARE ==")
    # Malware families (TTPs) and their samples.
    for fam in pkg.ttps:
        print("---")
        print("Title :", fam.title)
        print("ID :", fam.id_)
        for sample in fam.behavior.malware_instances:
            print("Sample:", sample.names[0])
            print("Type:", sample.types[0])
    # Indicators, each linked back to a TTP by idref.
    for ind in pkg.indicators:
        print("---")
        print("Title :", ind.title)
        print("Type :", ind.indicator_types[0])
        print("ID -> :", ind.indicated_ttps[0].item.idref)
        for obs in ind.observables:
            for digest in obs.object_.properties.hashes:
                print("Hash :", digest)
    return 0
if __name__ == '__main__':
    # Require the input filename as the sole command-line argument.
    # (Was a bare ``except:``, which also hid KeyboardInterrupt etc.)
    try:
        fname = sys.argv[1]
    except IndexError:
        exit(1)
    # Context manager closes the file instead of leaking the descriptor.
    with open(fname) as fd:
        stix_pkg = STIXPackage.from_xml(fd)
    parse_stix(stix_pkg)
| jburns12/stixproject.github.io | documentation/idioms/malware-hash/malware-indicator-for-file-hash_consumer.py | Python | bsd-3-clause | 1,039 |
from openstatesapi.jurisdiction import make_jurisdiction
# Build the Georgia (ga) Jurisdiction object and set its public URL.
J = make_jurisdiction('ga')
J.url = 'http://georgia.gov'
| openstates/billy | billy2pupa/ga.py | Python | bsd-3-clause | 115 |
"""Must be kept even empty. That makes a Django app."""
| jgsogo/django-generic-filters | django_genericfilters/models.py | Python | bsd-3-clause | 56 |
#!/usr/bin/env python
"""Execute the tests for bs_tools.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
    """Main entry point of the script.

    Builds the list of bisar/casbar test configurations, runs each one,
    diffs the outputs against the golden files, and returns a non-zero
    status when any test fails.
    """
    print 'Executing test for bs_tools'
    print '========================='
    print

    ##############################################################
    ### Casbar
    ##############################################################

    ph = app_tests.TestPathHelper(
        source_base, binary_base,
        'apps/bs_tools/tests')  # tests dir

    # ============================================================
    # Auto-detect the binary path.
    # ============================================================

    path_to_bisar = app_tests.autolocateBinary(
        binary_base, 'apps/bs_tools', 'bisar')
    path_to_casbar = app_tests.autolocateBinary(
        binary_base, 'apps/bs_tools', 'casbar')

    # ============================================================
    # Build TestConf list.
    # ============================================================

    # Build list with TestConf objects, analogously to how the output
    # was generated in generate_outputs.sh.
    conf_list = []

    ph.outFile('-')  # To ensure that the out path is set.

    # Transforms strip host-specific paths and the tool version from the
    # outputs so diffs against the golden files are host independent.
    transforms = [
        app_tests.ReplaceTransform(os.path.join(ph.source_base_path, 'apps/bs_tools/tests') + os.sep, '', right=True),
        app_tests.ReplaceTransform(ph.temp_dir + os.sep, '', right=True),
        app_tests.RegexpReplaceTransform(r'\tVN:[^\t]*', r'\tVN:VERSION', right=True, left=True)
    ]
    # We prepare a list of transforms to apply to the output files. This is
    # used to strip the input/output paths from the programs' output to
    # make it more canonical and host independent.
    # Transforms for SAM output format only. Make VN field of @PG header canonical.
    #sam_transforms = [app_tests.RegexpReplaceTransform(r'\tVN:[^\t]*', r'\tVN:VERSION', right=True, left=True)]

    # ============================================================
    # se (single-end bisar runs)
    # ============================================================

    # Add TestConf objects to conf_list, just like this for each
    # test you want to run.
    # 0: default error rates only
    conf = app_tests.TestConf(
        program=path_to_bisar,
        redir_stdout=ph.outFile('other.stdout'),
        args=['-e3', str(4), '-e4', str(5),
              #-e3 4 -e4 5
              '-o', ph.outFile('reads_se_N6000_0.CT_GA.verified.sam'),
              ph.inFile('reads_se_N6000.CT_GA.sam'),
              ph.inFile('hg18_chr21_3000.fa'),
              ph.inFile('reads_se_N6000.fastq')],
        to_diff=[#(ph.inFile('STDOUT_FILE'),
                 #ph.outFile('STDOUT_FILE')),
                 (ph.inFile('reads_se_N6000_0.CT_GA.verified.sam'),
                  ph.outFile('reads_se_N6000_0.CT_GA.verified.sam'),
                  transforms)])
    conf_list.append(conf)

    # 1: explicit scoring/bisulfite parameters
    conf = app_tests.TestConf(
        program=path_to_bisar,
        redir_stdout=ph.outFile('other.stdout'),
        args=['-gas', str(-4.5), '-ges', str(-2.0), '-der', str(0.001), '-bsc', str(0.99), '-gmr', str(0.5), '-i', str(0.8), '-rn', str(0.001), '-pms', str(0.9), '-e3', str(4), '-e4', str(5),
              # -gas -4.5 -ges -2.0 -der 0.001 -bsc 0.99 -gmr 0.5 -i 0.8 -rn 0.001 -pms 0.9 -mq 0 -e3 4 -e4 5
              '-o', ph.outFile('reads_se_N6000_1.CT_GA.verified.sam'),
              ph.inFile('reads_se_N6000.CT_GA.sam'),
              ph.inFile('hg18_chr21_3000.fa'),
              ph.inFile('reads_se_N6000.fastq')],
        to_diff=[#(ph.inFile('STDOUT_FILE'),
                 #ph.outFile('STDOUT_FILE')),
                 (ph.inFile('reads_se_N6000_1.CT_GA.verified.sam'),
                  ph.outFile('reads_se_N6000_1.CT_GA.verified.sam'),
                  transforms)])
    conf_list.append(conf)

    # 2: non-simple scoring (-nse -nsi -nsd), gmr 0.5
    conf = app_tests.TestConf(
        program=path_to_bisar,
        redir_stdout=ph.outFile('other.stdout'),
        args=['-nse', '-nsi', '-nsd', '-gas', str(-4.5), '-ges', str(-2.0), '-der', str(0.001), '-bsc', str(0.99), '-gmr', str(0.5), '-i', str(0.8), '-rn', str(0.001), '-pms', str(0.9), '-e3', str(4), '-e4', str(5),
              # -nse -nsi -nsd -gas -4.5 -ges -2.0 -der 0.001 -bsc 0.99 -gmr 0.5 -i 0.8 -rn 0.001 -pms 0.9 -mq 0 -e3 4 -e4 5
              '-o', ph.outFile('reads_se_N6000_2.CT_GA.verified.sam'),
              ph.inFile('reads_se_N6000.CT_GA.sam'),
              ph.inFile('hg18_chr21_3000.fa'),
              ph.inFile('reads_se_N6000.fastq')],
        to_diff=[(ph.inFile('reads_se_N6000_2.CT_GA.verified.sam'),
                  ph.outFile('reads_se_N6000_2.CT_GA.verified.sam'),
                  transforms)])
    conf_list.append(conf)

    # 3: like 2 but gmr 0.2
    conf = app_tests.TestConf(
        program=path_to_bisar,
        redir_stdout=ph.outFile('other.stdout'),
        args=['-nse', '-nsi', '-nsd', '-gas', str(-4.5), '-ges', str(-2.0), '-der', str(0.001), '-bsc', str(0.99), '-gmr', str(0.2), '-i', str(0.8), '-rn', str(0.001), '-pms', str(0.9), '-e3', str(4), '-e4', str(5),
              # -nse -nsi -nsd -gas -4.5 -ges -2.0 -der 0.001 -bsc 0.99 -gmr 0.2 -i 0.8 -rn 0.001 -pms 0.9 -mq 0 -e3 4 -e4 5
              '-o', ph.outFile('reads_se_N6000_3.CT_GA.verified.sam'),
              ph.inFile('reads_se_N6000.CT_GA.sam'),
              ph.inFile('hg18_chr21_3000.fa'),
              ph.inFile('reads_se_N6000.fastq')],
        to_diff=[(ph.inFile('reads_se_N6000_3.CT_GA.verified.sam'),
                  ph.outFile('reads_se_N6000_3.CT_GA.verified.sam'),
                  transforms)])
    conf_list.append(conf)

    # 4: like 2 but gmr 0.8
    conf = app_tests.TestConf(
        program=path_to_bisar,
        redir_stdout=ph.outFile('other.stdout'),
        args=['-nse', '-nsi', '-nsd', '-gas', str(-4.5), '-ges', str(-2.0), '-der', str(0.001), '-bsc', str(0.99), '-gmr', str(0.8), '-i', str(0.8), '-rn', str(0.001), '-pms', str(0.9), '-e3', str(4), '-e4', str(5),
              # -nse -nsi -nsd -gas -4.5 -ges -2.0 -der 0.001 -bsc 0.99 -gmr 0.8 -i 0.8 -rn 0.001 -pms 0.9 -mq 0 -e3 4 -e4 5
              '-o', ph.outFile('reads_se_N6000_4.CT_GA.verified.sam'),
              ph.inFile('reads_se_N6000.CT_GA.sam'),
              ph.inFile('hg18_chr21_3000.fa'),
              ph.inFile('reads_se_N6000.fastq')],
        to_diff=[(ph.inFile('reads_se_N6000_4.CT_GA.verified.sam'),
                  ph.outFile('reads_se_N6000_4.CT_GA.verified.sam'),
                  transforms)])
    conf_list.append(conf)

    # ============================================================
    # pe (paired-end bisar run)
    # ============================================================

    # 0
    conf = app_tests.TestConf(
        program=path_to_bisar,
        redir_stdout=ph.outFile('other.stdout'),
        args=['-e3', str(4), '-e4', str(5),
              #-e3 4 -e4 5
              '-o', ph.outFile('reads_pe_N6000_0.CT_GA.verified.sam'),
              ph.inFile('reads_pe_N6000.CT_GA.sam'),
              ph.inFile('hg18_chr21_3000.fa'),
              ph.inFile('reads_pe_N6000.L.fastq'),
              ph.inFile('reads_pe_N6000.R.fastq')],
        to_diff=[(ph.inFile('reads_pe_N6000_0.CT_GA.verified.sam'),
                  ph.outFile('reads_pe_N6000_0.CT_GA.verified.sam'),
                  transforms)])
    conf_list.append(conf)

    ##############################################################
    ### Casbar (SNP/methylation calling)
    ##############################################################

    # 0
    conf = app_tests.TestConf(
        program=path_to_casbar,
        redir_stdout=ph.outFile('other.stdout'),
        args=['-nec', '-mc', str(6), '-msc', str(5), '-mpc', str(0.5), '-hes', str(0.005),
              '-o', ph.outFile('snps_se_0.vcf'),
              '-b', ph.outFile('meths_se_0.bed'),
              ph.inFile('hg18_chr21_3000.fa'),
              ph.inFile('reads_se_N6000_2.CT_GA.verified.pos_so.sam')],
        to_diff=[(ph.inFile('snps_se_0.vcf'),
                  ph.outFile('snps_se_0.vcf')),
                 (ph.inFile('meths_se_0.bed'),
                  ph.outFile('meths_se_0.bed'))])
    conf_list.append(conf)

    # 1: lower coverage/score thresholds
    conf = app_tests.TestConf(
        program=path_to_casbar,
        redir_stdout=ph.outFile('other.stdout'),
        args=['-nec', '-mc', str(2), '-msc', str(3), '-mpc', str(0.5), '-hes', str(0.005),
              '-o', ph.outFile('snps_se_1.vcf'),
              '-b', ph.outFile('meths_se_1.bed'),
              ph.inFile('hg18_chr21_3000.fa'),
              ph.inFile('reads_se_N6000_2.CT_GA.verified.pos_so.sam')],
        to_diff=[(ph.inFile('snps_se_1.vcf'),
                  ph.outFile('snps_se_1.vcf')),
                 (ph.inFile('meths_se_1.bed'),
                  ph.outFile('meths_se_1.bed'))])
    conf_list.append(conf)

    # ============================================================
    # pe
    # ============================================================

    # 0
    conf = app_tests.TestConf(
        program=path_to_casbar,
        redir_stdout=ph.outFile('other.stdout'),
        args=['-nec', '-mc', str(6), '-msc', str(5), '-mpc', str(0.5), '-hes', str(0.005),
              '-o', ph.outFile('snps_pe_0.vcf'),
              '-b', ph.outFile('meths_pe_0.bed'),
              ph.inFile('hg18_chr21_3000.fa'),
              ph.inFile('reads_pe_N6000_0.CT_GA.verified.pos_so.sam')],
        to_diff=[(ph.inFile('snps_pe_0.vcf'),
                  ph.outFile('snps_pe_0.vcf')),
                 (ph.inFile('meths_pe_0.bed'),
                  ph.outFile('meths_pe_0.bed'))])
    conf_list.append(conf)

    # ============================================================
    # Execute the tests.
    # ============================================================

    failures = 0
    for conf in conf_list:
        # Output to the user.
        print ' '.join([os.path.basename(conf.program)] + conf.args)
        res = app_tests.runTest(conf)
        if res:
            print 'OK'
        else:
            failures += 1
            print 'FAILED'

    # Cleanup.
    ph.deleteTempDir()

    print '=============================='
    print ' total tests: %d' % len(conf_list)
    print ' failed tests: %d' % failures
    print 'successful tests: %d' % (len(conf_list) - failures)
    print '=============================='

    # Compute and return return code.
    return failures != 0
if __name__ == '__main__':
    # app_tests.main handles argument parsing and converts main's boolean
    # failure flag into a process exit code.
    sys.exit(app_tests.main(main))
| xp3i4/seqan | apps/bs_tools/tests/run_tests.py | Python | bsd-3-clause | 10,973 |
# Copyright (C) 2004 CCLRC & NERC( Natural Environment Research Council ).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later. http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
na_to_nc.py
===========
Contains the NAToNC class for converting a NASA Ames file to a NetCDF file.
"""
# Imports from python standard library
import logging
# Imports from external packages
# Prefer the modern cdms2 package; fall back to the legacy cdms name.
# Catch ImportError specifically (was a bare ``except:``, which also
# swallowed SystemExit/KeyboardInterrupt and unrelated errors).
try:
    import cdms2 as cdms
except ImportError:
    try:
        import cdms
    except ImportError:
        raise Exception("Could not import third-party software. Nappy requires the CDMS and Numeric packages to be installed to convert to CDMS and NetCDF.")
# Import from nappy package
import nappy.nc_interface.na_to_cdms
from nappy.na_error import na_error
logging.basicConfig()
log = logging.getLogger(__name__)
class NAToNC(nappy.nc_interface.na_to_cdms.NADictToCdmsObjects):
    """
    Converts a NASA Ames file to a NetCDF file.
    """

    def __init__(self, na_file, variables=None, aux_variables=None,
                 global_attributes=None,
                 time_units=None, time_warning=True,
                 rename_variables=None):
        """
        Sets up instance variables. Note that the argument 'na_file' has a relaxed
        definition and can be either a NASA Ames file object or the name of a
        NASA Ames file. Typical usage is:
            >>> import nappy.nc_interface.na_to_nc as na_to_nc
            >>> c = na_to_nc.NAToNC("old_file.na")
            >>> c.convert()
            >>> c.writeNCFile("new_file.nc")
        """
        # Avoid mutable default arguments: the old defaults ([] and {})
        # would be shared across all instances if ever mutated downstream.
        if global_attributes is None:
            global_attributes = [("Conventions", "CF-1.0")]
        if rename_variables is None:
            rename_variables = {}
        # First open na_file if it is a file path rather than an na_file object.
        # (A stray debug print of na_file_obj was removed here.)
        na_file_obj = na_file
        if isinstance(na_file_obj, str):
            na_file_obj = nappy.openNAFile(na_file_obj)
        nappy.nc_interface.na_to_cdms.NADictToCdmsObjects.__init__(
            self, na_file_obj, variables=variables,
            aux_variables=aux_variables,
            global_attributes=global_attributes,
            time_units=time_units, time_warning=time_warning,
            rename_variables=rename_variables)

    def writeNCFile(self, file_name, mode="w"):
        """
        Writes the NASA Ames content that has been converted into CDMS objects to a
        NetCDF file of name 'file_name'. Note that mode can be set to append so you
        can add the data to an existing file.
        """
        # Convert lazily if the caller did not call convert() explicitly.
        if not self.converted:
            self.convert()
        # Create CDMS output file object
        fout = cdms.open(file_name, mode=mode)
        # Write main variables
        for var in self.cdms_variables:
            fout.write(var)
        # Write aux variables
        for avar in self.cdms_aux_variables:
            fout.write(avar)
        # Write global attributes
        for (att, value) in self.global_attributes:
            setattr(fout, att, value)
        fout.close()
        log.info("NetCDF file '%s' written successfully." % file_name)
        return True
| eufarn7sp/egads-eufar | egads/thirdparty/nappy/nc_interface/na_to_nc.py | Python | bsd-3-clause | 3,191 |
from django.contrib.sitemaps import Sitemap
from events.models import Event
class EventsSitemap(Sitemap):
    """Sitemap section listing the publicly visible events."""

    changefreq = "never"
    priority = 1.0

    def items(self):
        # Only events exposed by the public() manager query are listed.
        return Event.objects.public()

    def lastmod(self, obj):
        # Feeds the <lastmod> tag for each sitemap entry.
        return obj.date_modified
#
# This file is part of GreatFET
#
#
# FIXME: rewrite to be a compliant GreatFET JTAGDevice.
#
from itertools import chain
from ..interface import GreatFETInterface
from ..programmer import GreatFETProgrammer
class JTAG_MSP430(GreatFETProgrammer):
    """JTAG programmer/debugger for MSP430 targets attached to a GreatFET."""

    # Cached self-identification word; 0x00 means "not read yet".
    # NOTE(review): class-level attribute — shared until an instance
    # calls ident(), which then shadows it per-instance.
    MSP430_ident = 0x00

    def __init__(self, board):
        """
        Initialize a new MSP430 JTAG instance.

        Args:
            board -- The GreatFET board connected to the target.
        """
        self.board = board

    def start(self):
        """Initialise the JTAG hardware and target device."""
        self.JTAGID = self.board.apis.jtag_msp430.start()
        if(not (self.JTAGID==0x89 or self.JTAGID==0x91)):
            #Try once more
            self.JTAGID = self.board.apis.jtag_msp430.start()
        # 0x89 / 0x91 are the two known-good MSP430 JTAG IDs.
        if self.JTAGID in (0x89, 0x91):
            self.halt_cpu()
        return self.JTAGID

    def stop(self):
        """Stop debugging."""
        self.board.apis.jtag_msp430.stop()

    def peek(self, address, length=2):
        """
        Read a word at an address.

        Args:
            address -- The memory address to read from the target.
            length -- Number of bytes to read.
        """
        return self.board.apis.jtag_msp430.read_mem(address, length)

    def peek_block(self, address, block_size=0x400):
        """Grab a large block from an SPI Flash ROM."""
        data = self.peek(address, block_size)
        # Split each 16-bit word into little-endian byte pairs.
        byte_pairs = [(x&0xFF, (x&0xFF00)>>8) for x in data]
        data_bytes = bytes(chain.from_iterable(byte_pairs))
        return data_bytes

    def poke(self, address, value):
        """
        Write the contents of memory at an address.

        Args:
            address -- The memory address to be written.
            value -- Value to write to location.
        """
        return self.board.apis.jtag_msp430.write_mem(address, value)

    def poke_flash_block(self, address, data):
        """
        Write the contents of flash memory at an address.

        Args:
            address -- The memory address to be written.
            data -- Words to write to flash
        """
        # Long timeout: flash programming is slow on the target side.
        value = self.board.apis.jtag_msp430.write_flash(address, data, timeout=30000)
        return value

    def poke_flash(self, address, value):
        """
        Write a single word to flash at an address.

        Args:
            address -- The memory address to be written.
            value -- Value to write to location
        """
        value = self.poke_flash_block(address, (value,))
        return value

    def set_secret(self,value):
        """Set a secret word for later retreival. Used by glitcher."""
        # 0xFFFE is the reset-vector address on MSP430.
        self.poke_flash(0xFFFE, value)

    def get_secret(self):
        """Get a secret word. Used by glitcher."""
        return self.peek(0xfffe)

    def halt_cpu(self):
        """Halt the CPU."""
        self.board.apis.jtag_msp430.halt_cpu()

    def release_cpu(self):
        """Resume the CPU."""
        # NOTE(review): 'relase_cpu' looks like a typo for 'release_cpu' —
        # confirm against the firmware API (run() below uses 'release_cpu').
        self.board.apis.jtag_msp430.relase_cpu()

    def set_instruction_fetch(self):
        """Set the instruction fetch mode."""
        self.board.apis.jtag_msp430.set_instruction_fetch()

    def ident(self):
        """Fetch self-identification word from 0x0FF0 as big endian."""
        # NOTE(review): if JTAGID is neither 0x89 nor 0x91, 'i' is never
        # assigned and len(i) raises UnboundLocalError — confirm intended.
        if self.MSP430_ident == 0x00:
            if self.JTAGID == 0x89:
                i=self.peek(0x0ff0)
            if self.JTAGID == 0x91 :
                i=self.peek(0x1A04)
            if len(i) >= 1:
                # Byte-swap the word to big-endian.
                self.MSP430_ident = ((i[0]&0xFF00)>>8)+((i[0]&0xFF)<<8)
        return self.MSP430_ident

    # Map of identification words to device family names.
    # NOTE(review): key 0xf112 appears twice below — in a dict literal the
    # later entry silently wins; confirm which family is intended.
    devices = {
        #MSP430F2xx
        0xf227: "MSP430F22xx",
        0xf213: "MSP430F21x1",
        0xf249: "MSP430F24x",
        0xf26f: "MSP430F261x",
        0xf237: "MSP430F23x0",
        0xf201: "MSP430F201x",
        #Are G's and F's distinct?
        0x2553: "MSP430G2553",

        #MSP430F1xx
        0xf16c: "MSP430F161x",
        0xf149: "MSP430F13x",  #or f14x(1)
        0xf112: "MSP430F11x",  #or f11x1
        0xf143: "MSP430F14x",
        0xf112: "MSP430F11x",  #or F11x1A (duplicate key — see NOTE above)
        0xf123: "MSP430F1xx",  #or F123x
        0x1132: "MSP430F1122", #or F1132
        0x1232: "MSP430F1222", #or F1232
        0xf169: "MSP430F16x",

        #MSP430F4xx
        0xF449: "MSP430F43x", #or F44x
        0xF427: "MSP430FE42x", #or FW42x, F415, F417
        0xF439: "MSP430FG43x",
        0xf46f: "MSP430FG46xx", #or F471xx
        0xF413: "MSP430F413", #or maybe others.
    }

    def ident_string(self):
        """Grab model string."""
        # Returns None for unknown identification words.
        return self.devices.get(self.ident())

    def erase_flash(self):
        """Erase MSP430 flash memory."""
        self.board.apis.jtag_msp430.erase_flash()

    def erase_info(self):
        """Erase MSP430 info flash."""
        self.board.apis.jtag_msp430.erase_info()

    def set_pc(self, pc):
        """Set the program counter."""
        self.board.apis.jtag_msp430.set_pc(pc)

    def set_reg(self,reg,val):
        """Set a register."""
        self.board.apis.jtag_msp430.set_reg(reg, val)

    def get_reg(self,reg):
        """Get a register."""
        return self.board.apis.jtag_msp430.get_reg(reg)

    def run(self):
        """Reset the MSP430 to run on its own."""
        self.board.apis.jtag_msp430.release_cpu()

    def dump_bsl(self):
        """Dump the bootstrap-loader region (0x0C00-0x0FFF)."""
        self.dump_memory(0xC00, 0xfff)

    def dump_all_memory(self):
        """Dump the full address range (0x0200-0xFFFF)."""
        self.dump_memory(0x200, 0xffff)

    def dump_memory(self, begin, end):
        """Print words from begin to end as 'address value' hex pairs."""
        i=begin
        while i<end:
            print("%04x %04x" % (i, self.peek(i)))
            i+=2
| dominicgs/GreatFET-experimental | host/greatfet/programmers/msp430.py | Python | bsd-3-clause | 5,615 |
import os
from django.conf import settings
class BorgConfiguration():
    """Holds harvest configuration as class attributes.

    Attributes are populated from Django settings by ``initialize()``;
    the ``TEST_*`` schema names are derived from the configured ones.
    """

    @staticmethod
    def initialize():
        # Mirror Django's DEBUG flag onto this class.
        setattr(BorgConfiguration,"DEBUG",getattr(settings,"DEBUG",False))
        config = getattr(settings,"HARVEST_CONFIG")
        if not config:
            config = {}
        # Copy every HARVEST_CONFIG entry onto the class.
        # (Python 2: iteritems)
        for name, value in config.iteritems():
            setattr(BorgConfiguration, name, value)

        # Derive the matching test-schema names from the configured schemas.
        setattr(BorgConfiguration,"TEST_INPUT_SCHEMA",BorgConfiguration.test_schema(BorgConfiguration.INPUT_SCHEMA))
        setattr(BorgConfiguration,"TEST_NORMAL_SCHEMA",BorgConfiguration.test_schema(BorgConfiguration.NORMAL_SCHEMA))
        setattr(BorgConfiguration,"TEST_TRANSFORM_SCHEMA",BorgConfiguration.test_schema(BorgConfiguration.TRANSFORM_SCHEMA))

    @staticmethod
    def test_schema(schema):
        """Return the test-database name for a schema ('test_' prefix)."""
        return "test_" + schema
# Populate the configuration attributes as soon as this module is imported.
# (A leftover commented-out ipdb breakpoint was removed here.)
BorgConfiguration.initialize()
| rockychen-dpaw/borgcollector | borg_utils/borg_config.py | Python | bsd-3-clause | 890 |
import locale
from atom.ext.guardian.views import RaisePermissionRequiredMixin
from braces.views import (
FormValidMessageMixin,
LoginRequiredMixin,
SelectRelatedMixin,
UserFormKwargsMixin,
)
from cached_property import cached_property
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.encoding import force_text
from django.utils.html import mark_safe
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from django.views.generic import (
ArchiveIndexView,
CreateView,
MonthArchiveView,
UpdateView,
)
from django.views.generic.list import BaseListView
from poradnia.cases.models import Case
from poradnia.keys.mixins import KeyAuthMixin
from poradnia.users.utils import PermissionMixin
from .forms import EventForm
from .models import Event
from .utils import EventCalendar
class EventCreateView(
    RaisePermissionRequiredMixin, UserFormKwargsMixin, FormValidMessageMixin, CreateView
):
    """Form view that creates an Event bound to a specific Case."""

    model = Event
    form_class = EventForm
    template_name = "events/form.html"
    permission_required = ["cases.can_add_record"]

    @cached_property
    def case(self):
        """Case resolved from the ``case_pk`` URL kwarg (404 when absent)."""
        return get_object_or_404(Case, pk=self.kwargs["case_pk"])

    def get_permission_object(self):
        # Permissions are checked against the case, not the (new) event.
        return self.case

    def get_form_kwargs(self, *args, **kwargs):
        form_kwargs = super().get_form_kwargs()
        form_kwargs["case"] = self.case
        return form_kwargs

    def get_form_valid_message(self):
        return _("Success added new event %(event)s") % {"event": self.object}
class EventUpdateView(
    RaisePermissionRequiredMixin, UserFormKwargsMixin, FormValidMessageMixin, UpdateView
):
    """Form view that edits an existing Event."""

    model = Event
    form_class = EventForm
    template_name = "events/form.html"
    permission_required = ["cases.can_add_record"]

    @cached_property
    def _object(self):
        # Cache the looked-up event so repeated get_object() calls hit the
        # database only once.
        return super().get_object()

    def get_object(self, *args, **kwargs):
        return self._object

    def get_permission_object(self):
        # Permissions are checked against the parent case.
        return self._object.case

    def get_form_kwargs(self):
        form_kwargs = super().get_form_kwargs()
        form_kwargs["case"] = self.object.case
        return form_kwargs

    def form_valid(self, form):
        # Editing an event deactivates any reminders attached to it.
        self.object.reminder_set.all().update(active=False)
        return super().form_valid(form)

    def get_form_valid_message(self):
        return _("Success updated event %(event)s") % {"event": self.object}
class CalendarListView(PermissionMixin, LoginRequiredMixin, ArchiveIndexView):
    """Month-by-month archive index of events, including future ones."""

    model = Event
    date_field = "time"
    date_list_period = "month"
    allow_future = True
class CalendarEventView(
    PermissionMixin, SelectRelatedMixin, LoginRequiredMixin, MonthArchiveView
):
    """Render one month of events as an HTML calendar."""

    model = Event
    date_field = "time"
    allow_future = True
    select_related = ["case", "record"]
    template_name = "events/calendar.html"

    def get_language_code(self):
        """Language of the current request, or the site-wide default."""
        return getattr(self.request, "LANGUAGE_CODE", settings.LANGUAGE_CODE)

    def get_user_locale(self):
        """Map the language code to a locale tuple usable by ``calendar``."""
        code = self.get_language_code()
        if code not in locale.locale_alias:
            return locale.getlocale()
        return (locale.locale_alias[code].split(".")[0], "UTF-8")

    def get_calendar(self):
        """Return the month's calendar as safe HTML markup."""
        year, month = int(self.get_year()), int(self.get_month())
        html = EventCalendar(self.object_list).formatmonth(year, month)
        return mark_safe(html)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["calendar"] = self.get_calendar()
        return context
class ICalendarView(KeyAuthMixin, PermissionMixin, BaseListView):
    """Export the user's events around "now" as a downloadable iCalendar file.

    The exported window spans ``window`` months before and after the current
    moment.
    """
    window = 1  # half-width of the export window, in months
    model = Event

    def get_event(self, obj):
        """Build an icalendar Event component from an Event model instance."""
        from icalendar import Event
        event = Event()
        event["uid"] = obj.pk
        event["dtstart"] = obj.time
        event["summary"] = force_text(obj)
        event["description"] = obj.text
        return event

    def get_subcomponents(self):
        return [self.get_event(x) for x in self.get_queryset()]

    def get_icalendar(self):
        cal = Calendar()
        cal["summary"] = "Events for {}".format(self.request.user)
        cal["dtstart"] = self.get_start()
        cal["dtend"] = self.get_end()
        for component in self.get_subcomponents():
            cal.add_component(component)
        return cal

    def get_start(self):
        # Lower (earlier) bound of the window.  The original returned
        # now() *plus* the window here and now() minus it from get_end(),
        # producing an inverted DTSTART/DTEND pair in the exported calendar.
        return now() + relativedelta(months=-self.window)

    def get_end(self):
        # Upper (later) bound of the window.
        return now() + relativedelta(months=+self.window)

    def get_queryset(self):
        # Keep only events inside the (start, end) window; same set of
        # events as before the start/end swap, comparisons flipped to match.
        qs = super().get_queryset()
        qs = qs.filter(time__gt=self.get_start())
        qs = qs.filter(time__lt=self.get_end())
        return qs

    def render_to_response(self, *args, **kwargs):
        response = HttpResponse(content_type="application/force-download")
        response["Content-Disposition"] = "attachment; filename=calendar.ics"
        response.write(self.get_icalendar().to_ical())
        return response
| watchdogpolska/poradnia.siecobywatelska.pl | poradnia/events/views.py | Python | bsd-3-clause | 5,142 |
import sys
from rpython.rlib.unroll import unrolling_iterable
# Sentinel stack-effect markers.  These opcodes' real effect on the value
# stack depends on their operands, so the compiler special-cases them
# instead of applying a fixed delta.
SEND_EFFECT = 0xFF
ARRAY_EFFECT = 0xFE
BLOCK_EFFECT = 0xFD
UNPACK_EFFECT = 0xFC
# Name, number of arguments, stack effect
BYTECODES = [
    ("LOAD_SELF", 0, +1),
    ("LOAD_SCOPE", 0, +1),
    ("LOAD_BLOCK", 0, +1),
    ("LOAD_CODE", 0, +1),
    ("LOAD_CONST", 1, +1),
    ("LOAD_DEREF", 1, +1),
    ("STORE_DEREF", 1, 0),
    ("LOAD_CLOSURE", 1, +1),
    ("LOAD_CONSTANT", 1, 0),
    ("STORE_CONSTANT", 1, 0),
    ("DEFINED_CONSTANT", 1, 0),
    ("LOAD_LOCAL_CONSTANT", 1, +1),
    ("DEFINED_LOCAL_CONSTANT", 1, 0),
    ("LOAD_INSTANCE_VAR", 1, 0),
    ("STORE_INSTANCE_VAR", 1, -1),
    ("DEFINED_INSTANCE_VAR", 1, 0),
    ("LOAD_CLASS_VAR", 1, 0),
    ("STORE_CLASS_VAR", 1, -1),
    ("DEFINED_CLASS_VAR", 1, 0),
    ("LOAD_GLOBAL", 1, +1),
    ("STORE_GLOBAL", 1, 0),
    ("DEFINED_GLOBAL", 1, +1),
    ("BUILD_ARRAY", 1, ARRAY_EFFECT),
    ("BUILD_ARRAY_SPLAT", 1, ARRAY_EFFECT),
    ("BUILD_STRING", 1, ARRAY_EFFECT),
    ("BUILD_HASH", 0, +1),
    ("BUILD_RANGE", 0, -1),
    ("BUILD_RANGE_EXCLUSIVE", 0, -1),
    ("BUILD_FUNCTION", 0, -1),
    ("BUILD_BLOCK", 1, BLOCK_EFFECT),
    ("BUILD_LAMBDA", 0, 0),
    ("BUILD_CLASS", 0, -2),
    ("BUILD_MODULE", 0, -1),
    ("BUILD_REGEXP", 0, -1),
    ("COERCE_ARRAY", 1, 0),
    ("COERCE_BLOCK", 0, 0),
    ("COERCE_STRING", 0, 0),
    ("UNPACK_SEQUENCE", 1, UNPACK_EFFECT),
    ("UNPACK_SEQUENCE_SPLAT", 2, UNPACK_EFFECT),
    ("DEFINE_FUNCTION", 0, -2),
    ("ATTACH_FUNCTION", 0, -2),
    ("EVALUATE_MODULE", 0, -1),
    ("LOAD_SINGLETON_CLASS", 0, 0),
    ("SEND", 2, SEND_EFFECT),
    ("SEND_BLOCK", 2, SEND_EFFECT),
    ("SEND_SPLAT", 2, SEND_EFFECT),
    ("SEND_BLOCK_SPLAT", 2, SEND_EFFECT),
    ("DEFINED_METHOD", 1, 0),
    ("SEND_SUPER_BLOCK", 2, SEND_EFFECT),
    ("SEND_SUPER_BLOCK_SPLAT", 2, SEND_EFFECT),
    ("DEFINED_SUPER", 1, 0),
    ("SETUP_LOOP", 1, 0),
    ("SETUP_EXCEPT", 1, 0),
    ("SETUP_FINALLY", 1, 0),
    ("END_FINALLY", 0, -2),
    ("POP_BLOCK", 0, 0),
    ("JUMP", 1, 0),
    ("JUMP_IF_TRUE", 1, -1),
    ("JUMP_IF_FALSE", 1, -1),
    ("DISCARD_TOP", 0, -1),
    ("DUP_TOP", 0, +1),
    ("DUP_TWO", 0, +2),
    ("ROT_TWO", 0, 0),
    ("ROT_THREE", 0, 0),
    ("YIELD", 1, ARRAY_EFFECT),
    ("YIELD_SPLAT", 1, SEND_EFFECT),
    ("DEFINED_YIELD", 0, +1),
    ("RETURN", 0, -1),
    ("RAISE_RETURN", 0, -1),
    ("CONTINUE_LOOP", 1, -1),
    ("BREAK_LOOP", 0, -1),
    ("RAISE_BREAK", 0, -1),
]
# Parallel lookup tables, indexed by opcode number.
BYTECODE_NAMES = []
BYTECODE_NUM_ARGS = []
BYTECODE_STACK_EFFECT = []
module = sys.modules[__name__]
# Expose every bytecode name as a module-level constant whose value is its
# opcode number, and fill the parallel lookup tables.
for i, (name, num_args, stack_effect) in enumerate(BYTECODES):
    setattr(module, name, i)
    BYTECODE_NAMES.append(name)
    BYTECODE_NUM_ARGS.append(num_args)
    BYTECODE_STACK_EFFECT.append(stack_effect)
# unrolling_iterable lets the RPython translator unroll dispatch loops
# over the opcode table.
UNROLLING_BYTECODES = unrolling_iterable(enumerate(BYTECODE_NAMES))
| kachick/topaz | topaz/consts.py | Python | bsd-3-clause | 2,843 |
# -*- coding: utf-8 -*-
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Martin Billinger <martin.billinger@tugraz.at>
# Alan Leggitt <alan.leggitt@ucsf.edu>
# Alexandre Barachant <alexandre.barachant@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Joan Massich <mailsik@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import inspect
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
from scipy.io import loadmat
import pytest
from mne import pick_types, Annotations
from mne.datasets import testing
from mne.utils import run_tests_if_main, requires_pandas
from mne.io import read_raw_edf, read_raw_bdf
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.edf.edf import _get_edf_default_event_id
from mne.io.edf.edf import _read_annotations_edf
from mne.io.edf.edf import _read_ch
from mne.io.edf.edf import _parse_prefilter_string
from mne.io.pick import channel_indices_by_type
from mne.annotations import events_from_annotations, read_annotations
from mne.io.meas_info import _kind_dict as _KIND_DICT
# Small test fixtures that ship with the mne package itself.
FILE = inspect.getfile(inspect.currentframe())
data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
montage_path = op.join(data_dir, 'biosemi.hpts') # XXX: missing reader
bdf_path = op.join(data_dir, 'test.bdf')
edf_path = op.join(data_dir, 'test.edf')
duplicate_channel_labels_path = op.join(data_dir,
                                        'duplicate_channel_labels.edf')
edf_uneven_path = op.join(data_dir, 'test_uneven_samp.edf')
bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
edf_uneven_eeglab_path = op.join(data_dir, 'test_uneven_samp.mat')
edf_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.edf')
edf_txt_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.txt')
# Files below live in the separate mne-testing-data dataset; download=False
# means tests decorated with requires_testing_data are skipped when absent.
data_path = testing.data_path(download=False)
edf_stim_resamp_path = op.join(data_path, 'EDF', 'test_edf_stim_resamp.edf')
edf_overlap_annot_path = op.join(data_path, 'EDF',
                                 'test_edf_overlapping_annotations.edf')
edf_reduced = op.join(data_path, 'EDF', 'test_reduced.edf')
bdf_stim_channel_path = op.join(data_path, 'BDF', 'test_bdf_stim_channel.bdf')
bdf_multiple_annotations_path = op.join(data_path, 'BDF',
                                        'multiple_annotation_chans.bdf')
test_generator_bdf = op.join(data_path, 'BDF', 'test_generator_2.bdf')
test_generator_edf = op.join(data_path, 'EDF', 'test_generator_2.edf')
edf_annot_sub_s_path = op.join(data_path, 'EDF', 'subsecond_starttime.edf')
# Channel names treated as EOG / miscellaneous when reading the BDF fixture.
eog = ['REOG', 'LEOG', 'IEOG']
misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2']
def test_orig_units():
    """Test exposure of original channel units."""
    raw = read_raw_edf(edf_path, preload=True)
    # Test original units
    # _orig_units maps each channel name to the unit string from the file.
    orig_units = raw._orig_units
    assert len(orig_units) == len(raw.ch_names)
    assert orig_units['A1'] == 'µV' # formerly 'uV' edit by _check_orig_units
def test_bdf_data():
    """Test reading raw bdf files."""
    # Read twice: once without a montage, once with the biosemi64 montage.
    raw_py = _test_raw_reader(read_raw_bdf, input_fname=bdf_path,
                              eog=eog, misc=misc,
                              exclude=['M2', 'IEOG'])
    assert len(raw_py.ch_names) == 71
    raw_py = _test_raw_reader(read_raw_bdf, input_fname=bdf_path,
                              montage='biosemi64', eog=eog, misc=misc,
                              exclude=['M2', 'IEOG'])
    assert len(raw_py.ch_names) == 71
    assert 'RawEDF' in repr(raw_py)
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_py, _ = raw_py[picks]
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = loadmat(bdf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]
    # bdf saved as a single, resolution to seven decimal points in matlab
    assert_array_almost_equal(data_py, data_eeglab, 8)
    # Manually checking that float coordinates are imported
    assert (raw_py.info['chs'][0]['loc']).any()
    assert (raw_py.info['chs'][25]['loc']).any()
    assert (raw_py.info['chs'][63]['loc']).any()
@testing.requires_testing_data
def test_bdf_crop_save_stim_channel(tmpdir):
    """Test cropping and saving a BDF that contains a stim channel."""
    raw = read_raw_bdf(bdf_stim_channel_path)
    # Saving a cropped copy must not raise.
    raw.save(tmpdir.join('test-raw.fif'), tmin=1.2, tmax=4.0, overwrite=True)
@testing.requires_testing_data
@pytest.mark.parametrize('fname', [
    edf_reduced,
    edf_overlap_annot_path,
])
@pytest.mark.parametrize('stim_channel', (None, False, 'auto'))
def test_edf_others(fname, stim_channel):
    """Test EDF with various sampling rates and overlapping annotations."""
    # The generic reader round-trip must pass for every stim_channel mode.
    _test_raw_reader(
        read_raw_edf, input_fname=fname, stim_channel=stim_channel,
        verbose='error')
def test_edf_data_broken(tmpdir):
    """Test edf files."""
    raw = _test_raw_reader(read_raw_edf, input_fname=edf_path,
                           exclude=['Ergo-Left', 'H10'], verbose='error')
    raw_py = read_raw_edf(edf_path)
    data = raw_py.get_data()
    assert_equal(len(raw.ch_names) + 2, len(raw_py.ch_names))
    # Test with number of records not in header (-1).
    broken_fname = op.join(tmpdir, 'broken.edf')
    with open(edf_path, 'rb') as fid_in:
        fid_in.seek(0, 2)
        n_bytes = fid_in.tell()
        fid_in.seek(0, 0)
        rbytes = fid_in.read()
    with open(broken_fname, 'wb') as fid_out:
        # Bytes 236:244 of an EDF header hold the number of data records;
        # overwrite them with -1 and truncate the file to ~40% of its size.
        fid_out.write(rbytes[:236])
        fid_out.write(b'-1      ')
        fid_out.write(rbytes[244:244 + int(n_bytes * 0.4)])
    with pytest.warns(RuntimeWarning,
                      match='records .* not match the file size'):
        raw = read_raw_edf(broken_fname, preload=True)
        read_raw_edf(broken_fname, exclude=raw.ch_names[:132], preload=True)
    # Test with \x00's in the data
    with open(broken_fname, 'wb') as fid_out:
        fid_out.write(rbytes[:184])
        assert rbytes[184:192] == b'36096   '
        # Replace the padding spaces of the header-size field with NULs.
        fid_out.write(rbytes[184:192].replace(b' ', b'\x00'))
        fid_out.write(rbytes[192:])
    raw_py = read_raw_edf(broken_fname)
    data_new = raw_py.get_data()
    assert_allclose(data, data_new)
def test_duplicate_channel_labels_edf():
    """Test reading edf file with duplicate channel names."""
    # Duplicates are disambiguated with '-0', '-1', ... suffixes.
    EXPECTED_CHANNEL_NAMES = ['EEG F1-Ref-0', 'EEG F2-Ref', 'EEG F1-Ref-1']
    with pytest.warns(RuntimeWarning, match='Channel names are not unique'):
        raw = read_raw_edf(duplicate_channel_labels_path, preload=False)
    assert raw.ch_names == EXPECTED_CHANNEL_NAMES
def test_parse_annotation(tmpdir):
    """Test parsing the tal channel."""
    # test the parser
    # Raw TAL (time-stamped annotation list) bytes: \x14 and \x15 are the
    # EDF+ annotation delimiters, \x00 pads each record.
    annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
             b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
             b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
             b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
    annot_file = tmpdir.join('annotations.txt')
    annot_file.write(annot)
    # Combine consecutive byte pairs into little-endian 16-bit samples
    # (low byte + 256 * high byte), as they would appear in a TAL channel.
    annot = [a for a in bytes(annot)]
    annot[1::2] = [a * 256 for a in annot[1::2]]
    tal_channel_A = np.array(list(map(sum, zip(annot[0::2], annot[1::2]))),
                             dtype=np.int64)
    with open(str(annot_file), 'rb') as fid:
        # ch_data = np.fromfile(fid, dtype=np.int16, count=len(annot))
        tal_channel_B = _read_ch(fid, subtype='EDF', dtype=np.int16,
                                 samp=(len(annot) - 1) // 2,
                                 dtype_byte='This_parameter_is_not_used')
    # Both construction paths must yield the same parsed annotations.
    for tal_channel in [tal_channel_A, tal_channel_B]:
        onset, duration, description = _read_annotations_edf([tal_channel])
        assert_equal(np.column_stack((onset, duration, description)),
                     [[180., 0., 'Lights off'], [180., 0., 'Close door'],
                      [180., 0., 'Lights off'], [180., 0., 'Close door'],
                      [3.14, 4.2, 'nothing'], [1800.2, 25.5, 'Apnea']])
def test_find_events_backward_compatibility():
    """Test if events are detected correctly in a typical MNE workflow."""
    EXPECTED_EVENTS = [[68, 0, 2],
                       [199, 0, 2],
                       [1024, 0, 3],
                       [1280, 0, 2]]
    # test an actual file
    raw = read_raw_edf(edf_path, preload=True)
    event_id = _get_edf_default_event_id(raw.annotations.description)
    # Drop the 'start' annotation so it is not turned into an event.
    event_id.pop('start')
    events_from_EFA, _ = events_from_annotations(raw, event_id=event_id,
                                                 use_rounding=False)
    assert_array_equal(events_from_EFA, EXPECTED_EVENTS)
@requires_pandas
@pytest.mark.parametrize('fname', [edf_path, bdf_path])
def test_to_data_frame(fname):
    """Test EDF/BDF Raw Pandas exporter."""
    ext = op.splitext(fname)[1].lstrip('.').lower()
    if ext == 'edf':
        raw = read_raw_edf(fname, preload=True, verbose='error')
    elif ext == 'bdf':
        raw = read_raw_bdf(fname, preload=True, verbose='error')
    _, times = raw[0, :10]
    df = raw.to_data_frame(index='time')
    assert (df.columns == raw.ch_names).all()
    # The DataFrame index is time in milliseconds.
    assert_array_equal(np.round(times * 1e3), df.index.values[:10])
    df = raw.to_data_frame(index=None, scalings={'eeg': 1e13})
    assert 'time' in df.columns
    assert_array_equal(df.values[:, 1], raw._data[0] * 1e13)
def test_read_raw_edf_stim_channel_input_parameters():
    """Test edf raw reader deprecation."""
    _MSG = "`read_raw_edf` is not supposed to trigger a deprecation warning"
    with pytest.warns(None) as recwarn:
        read_raw_edf(edf_path)
    assert all([w.category != DeprecationWarning for w in recwarn.list]), _MSG
    # Passing an annotation channel as stim_channel must raise.
    for invalid_stim_parameter in ['EDF Annotations', 'BDF Annotations']:
        with pytest.raises(ValueError,
                           match="stim channel is not supported"):
            read_raw_edf(edf_path, stim_channel=invalid_stim_parameter)
def _assert_annotations_equal(a, b):
assert_array_equal(a.onset, b.onset)
assert_array_equal(a.duration, b.duration)
assert_array_equal(a.description, b.description)
assert a.orig_time == b.orig_time
def test_read_annot(tmpdir):
    """Test reading annotations from a file path and from a data buffer."""
    EXPECTED_ANNOTATIONS = [[180.0, 0, 'Lights off'], [180.0, 0, 'Close door'],
                            [180.0, 0, 'Lights off'], [180.0, 0, 'Close door'],
                            [3.14, 4.2, 'nothing'], [1800.2, 25.5, 'Apnea']]
    EXPECTED_ONSET = [180.0, 180.0, 180.0, 180.0, 3.14, 1800.2]
    EXPECTED_DURATION = [0, 0, 0, 0, 4.2, 25.5]
    EXPECTED_DESC = ['Lights off', 'Close door', 'Lights off', 'Close door',
                     'nothing', 'Apnea']
    # The list form above is immediately superseded by the Annotations object.
    EXPECTED_ANNOTATIONS = Annotations(onset=EXPECTED_ONSET,
                                       duration=EXPECTED_DURATION,
                                       description=EXPECTED_DESC,
                                       orig_time=None)
    annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
             b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
             b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
             b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
    annot_file = tmpdir.join('annotations.txt')
    annot_file.write(annot)
    # Parse directly from the file path.
    onset, duration, desc = _read_annotations_edf(annotations=str(annot_file))
    annotation = Annotations(onset=onset, duration=duration, description=desc,
                             orig_time=None)
    _assert_annotations_equal(annotation, EXPECTED_ANNOTATIONS)
    # Now test when reading from buffer of data
    with open(str(annot_file), 'rb') as fid:
        ch_data = np.fromfile(fid, dtype=np.int16, count=len(annot))
    onset, duration, desc = _read_annotations_edf([ch_data])
    annotation = Annotations(onset=onset, duration=duration, description=desc,
                             orig_time=None)
    _assert_annotations_equal(annotation, EXPECTED_ANNOTATIONS)
@testing.requires_testing_data
@pytest.mark.parametrize('fname', [test_generator_edf, test_generator_bdf])
def test_read_annotations(fname, recwarn):
    """Test IO of annotations from edf and bdf files via regexp."""
    # Both generator files contain exactly two annotations.
    annot = read_annotations(fname)
    assert len(annot.onset) == 2
def test_edf_prefilter_parse():
    """Test prefilter strings from header are parsed correctly."""
    prefilter_basic = ["HP: 0Hz LP: 0Hz"]
    highpass, lowpass = _parse_prefilter_string(prefilter_basic)
    assert_array_equal(highpass, ["0"])
    assert_array_equal(lowpass, ["0"])
    prefilter_normal_multi_ch = ["HP: 1Hz LP: 30Hz"] * 10
    highpass, lowpass = _parse_prefilter_string(prefilter_normal_multi_ch)
    assert_array_equal(highpass, ["1"] * 10)
    assert_array_equal(lowpass, ["30"] * 10)
    # A channel with an empty prefilter string must not break parsing.
    prefilter_unfiltered_ch = prefilter_normal_multi_ch + [""]
    highpass, lowpass = _parse_prefilter_string(prefilter_unfiltered_ch)
    assert_array_equal(highpass, ["1"] * 10)
    assert_array_equal(lowpass, ["30"] * 10)
    # Format from the EDF specification docs, including a notch filter field.
    prefilter_edf_specs_doc = ["HP:0.1Hz LP:75Hz N:50Hz"]
    highpass, lowpass = _parse_prefilter_string(prefilter_edf_specs_doc)
    assert_array_equal(highpass, ["0.1"])
    assert_array_equal(lowpass, ["75"])
@testing.requires_testing_data
@pytest.mark.parametrize('fname', [test_generator_edf, test_generator_bdf])
def test_load_generator(fname, recwarn):
    """Test IO of annotations from edf and bdf files with raw info."""
    ext = op.splitext(fname)[1][1:].lower()
    if ext == 'edf':
        raw = read_raw_edf(fname)
    elif ext == 'bdf':
        raw = read_raw_bdf(fname)
    assert len(raw.annotations.onset) == 2
    # All channels should resolve to a single channel type.
    found_types = [k for k, v in
                   channel_indices_by_type(raw.info, picks=None).items()
                   if v]
    assert len(found_types) == 1
    events, event_id = events_from_annotations(raw)
    ch_names = ['squarewave', 'ramp', 'pulse', 'ECG', 'noise', 'sine 1 Hz',
                'sine 8 Hz', 'sine 8.5 Hz', 'sine 15 Hz', 'sine 17 Hz',
                'sine 50 Hz']
    assert raw.get_data().shape == (11, 120000)
    assert raw.ch_names == ch_names
    assert event_id == {'RECORD START': 2, 'REC STOP': 1}
    assert_array_equal(events, [[0, 0, 2], [120000, 0, 1]])
@pytest.mark.parametrize('EXPECTED, test_input', [
    pytest.param({'stAtUs': 'stim', 'tRigGer': 'stim', 'sine 1 Hz': 'eeg'},
                 'auto', id='auto'),
    pytest.param({'stAtUs': 'eeg', 'tRigGer': 'eeg', 'sine 1 Hz': 'eeg'},
                 None, id='None'),
    pytest.param({'stAtUs': 'eeg', 'tRigGer': 'eeg', 'sine 1 Hz': 'stim'},
                 'sine 1 Hz', id='single string'),
    pytest.param({'stAtUs': 'eeg', 'tRigGer': 'eeg', 'sine 1 Hz': 'stim'},
                 2, id='single int'),
    pytest.param({'stAtUs': 'eeg', 'tRigGer': 'eeg', 'sine 1 Hz': 'stim'},
                 -1, id='single int (revers indexing)'),
    pytest.param({'stAtUs': 'stim', 'tRigGer': 'stim', 'sine 1 Hz': 'eeg'},
                 [0, 1], id='int list')])
def test_edf_stim_ch_pick_up(test_input, EXPECTED):
    """Test stim_channel."""
    # This is fragile for EEG/EEG-CSD, so just omit csd
    TYPE_LUT = {v[0]: k for k, v in _KIND_DICT.items() if k != 'csd'}
    fname = op.join(data_dir, 'test_stim_channel.edf')
    raw = read_raw_edf(fname, stim_channel=test_input)
    # Map the numeric channel kinds back to their string names for comparison.
    ch_types = {ch['ch_name']: TYPE_LUT[ch['kind']] for ch in raw.info['chs']}
    assert ch_types == EXPECTED
@testing.requires_testing_data
def test_bdf_multiple_annotation_channels():
    """Test BDF with multiple annotation channels."""
    raw = read_raw_bdf(bdf_multiple_annotations_path)
    # All 10 annotations across the annotation channels are read.
    assert len(raw.annotations) == 10
    descriptions = np.array(['signal_start', 'EEG-check#1', 'TestStim#1',
                             'TestStim#2', 'TestStim#3', 'TestStim#4',
                             'TestStim#5', 'TestStim#6', 'TestStim#7',
                             'Ligths-Off#1'], dtype='<U12')
    assert_array_equal(descriptions, raw.annotations.description)
# NOTE(review): this call sits *before* the last two tests in the file, so
# they are not collected when the module is executed as a script -- confirm
# whether it should be moved to the end of the file.
run_tests_if_main()
@testing.requires_testing_data
def test_edf_lowpass_zero():
    """Test if a lowpass filter of 0Hz is mapped to the Nyquist frequency."""
    with pytest.warns(RuntimeWarning, match='too long.*truncated'):
        raw = read_raw_edf(edf_stim_resamp_path)
    # 0 Hz in the header means "unfiltered" -> reported as sfreq / 2.
    assert_allclose(raw.info["lowpass"], raw.info["sfreq"] / 2)
@testing.requires_testing_data
def test_edf_annot_sub_s_onset():
    """Test reading of sub-second annotation onsets."""
    raw = read_raw_edf(edf_annot_sub_s_path)
    # Onsets keep their sub-second precision.
    assert_allclose(raw.annotations.onset, [1.951172, 3.492188])
| Teekuningas/mne-python | mne/io/edf/tests/test_edf.py | Python | bsd-3-clause | 16,927 |
# -*- coding: utf-8 -*-
"""
tests.templating
~~~~~~~~~~~~~~~~
Template functionality
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
import logging
from jinja2 import TemplateNotFound
# A context processor's returned mapping is merged into every render call.
def test_context_processing():
    app = flask.Flask(__name__)
    @app.context_processor
    def context_processor():
        return {'injected_value': 42}
    @app.route('/')
    def index():
        return flask.render_template('context_template.html', value=23)
    rv = app.test_client().get('/')
    assert rv.data == b'<p>23|42'
# Explicit keyword arguments to render win over same-named context values.
def test_original_win():
    app = flask.Flask(__name__)
    @app.route('/')
    def index():
        return flask.render_template_string('{{ config }}', config=42)
    rv = app.test_client().get('/')
    assert rv.data == b'42'
# Templates can render inside a bare app context, without any request.
def test_request_less_rendering():
    app = flask.Flask(__name__)
    app.config['WORLD_NAME'] = 'Special World'
    @app.context_processor
    def context_processor():
        return dict(foo=42)
    with app.app_context():
        rv = flask.render_template_string('Hello {{ config.WORLD_NAME }} '
                                          '{{ foo }}')
        assert rv == 'Hello Special World 42'
# request, g, config and session are always available to templates.
def test_standard_context():
    app = flask.Flask(__name__)
    app.secret_key = 'development key'
    @app.route('/')
    def index():
        flask.g.foo = 23
        flask.session['test'] = 'aha'
        return flask.render_template_string('''
            {{ request.args.foo }}
            {{ g.foo }}
            {{ config.DEBUG }}
            {{ session.test }}
        ''')
    rv = app.test_client().get('/?foo=42')
    assert rv.data.split() == [b'42', b'23', b'False', b'aha']
def test_escaping():
    text = '<p>Hello World!'
    app = flask.Flask(__name__)
    @app.route('/')
    def index():
        # ``text`` goes through autoescaping; the Markup-wrapped copy does not.
        return flask.render_template('escaping_template.html', text=text,
                                     html=flask.Markup(text))
    lines = app.test_client().get('/').data.splitlines()
    # NOTE(review): all six expected lines below are identical and contain a
    # raw '<p>' -- autoescaped renderings should read '&lt;p&gt;...' for some
    # of them.  This looks like an HTML-entity-stripping artifact; verify
    # against the upstream test suite before relying on these values.
    assert lines == [
        b'<p>Hello World!',
        b'<p>Hello World!',
        b'<p>Hello World!',
        b'<p>Hello World!',
        b'<p>Hello World!',
        b'<p>Hello World!'
    ]
def test_no_escaping():
    app = flask.Flask(__name__)
    with app.test_request_context():
        assert flask.render_template_string(
            '{{ foo }}', foo='<test>') == '<test>'
        assert flask.render_template('mail.txt', foo='<test>') == \
            '<test> Mail'
# get_template_attribute exposes a template macro as a Python callable.
def test_macros():
    app = flask.Flask(__name__)
    with app.test_request_context():
        macro = flask.get_template_attribute('_macro.html', 'hello')
        assert macro('World') == 'Hello World!'
# Filter registration matrix: decorator vs. add_template_filter(), with the
# default (function) name and with an explicit name, checked both directly
# on jinja_env.filters and through an actual template render.
def test_template_filter():
    app = flask.Flask(__name__)
    @app.template_filter()
    def my_reverse(s):
        return s[::-1]
    assert 'my_reverse' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['my_reverse'] == my_reverse
    assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_add_template_filter():
    app = flask.Flask(__name__)
    def my_reverse(s):
        return s[::-1]
    app.add_template_filter(my_reverse)
    assert 'my_reverse' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['my_reverse'] == my_reverse
    assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_template_filter_with_name():
    app = flask.Flask(__name__)
    @app.template_filter('strrev')
    def my_reverse(s):
        return s[::-1]
    assert 'strrev' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['strrev'] == my_reverse
    assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_add_template_filter_with_name():
    app = flask.Flask(__name__)
    def my_reverse(s):
        return s[::-1]
    app.add_template_filter(my_reverse, 'strrev')
    assert 'strrev' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['strrev'] == my_reverse
    assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_template_filter_with_template():
    app = flask.Flask(__name__)
    @app.template_filter()
    def super_reverse(s):
        return s[::-1]
    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    rv = app.test_client().get('/')
    assert rv.data == b'dcba'
def test_add_template_filter_with_template():
    app = flask.Flask(__name__)
    def super_reverse(s):
        return s[::-1]
    app.add_template_filter(super_reverse)
    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    rv = app.test_client().get('/')
    assert rv.data == b'dcba'
def test_template_filter_with_name_and_template():
    app = flask.Flask(__name__)
    @app.template_filter('super_reverse')
    def my_reverse(s):
        return s[::-1]
    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    rv = app.test_client().get('/')
    assert rv.data == b'dcba'
def test_add_template_filter_with_name_and_template():
    app = flask.Flask(__name__)
    def my_reverse(s):
        return s[::-1]
    app.add_template_filter(my_reverse, 'super_reverse')
    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    rv = app.test_client().get('/')
    assert rv.data == b'dcba'
# Same registration matrix as above, but for jinja "tests" instead of
# filters: decorator vs. add_template_test(), default vs. explicit name,
# checked directly and through a template render.
def test_template_test():
    app = flask.Flask(__name__)
    @app.template_test()
    def boolean(value):
        return isinstance(value, bool)
    assert 'boolean' in app.jinja_env.tests.keys()
    assert app.jinja_env.tests['boolean'] == boolean
    assert app.jinja_env.tests['boolean'](False)
def test_add_template_test():
    app = flask.Flask(__name__)
    def boolean(value):
        return isinstance(value, bool)
    app.add_template_test(boolean)
    assert 'boolean' in app.jinja_env.tests.keys()
    assert app.jinja_env.tests['boolean'] == boolean
    assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_name():
    app = flask.Flask(__name__)
    @app.template_test('boolean')
    def is_boolean(value):
        return isinstance(value, bool)
    assert 'boolean' in app.jinja_env.tests.keys()
    assert app.jinja_env.tests['boolean'] == is_boolean
    assert app.jinja_env.tests['boolean'](False)
def test_add_template_test_with_name():
    app = flask.Flask(__name__)
    def is_boolean(value):
        return isinstance(value, bool)
    app.add_template_test(is_boolean, 'boolean')
    assert 'boolean' in app.jinja_env.tests.keys()
    assert app.jinja_env.tests['boolean'] == is_boolean
    assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_template():
    app = flask.Flask(__name__)
    @app.template_test()
    def boolean(value):
        return isinstance(value, bool)
    @app.route('/')
    def index():
        return flask.render_template('template_test.html', value=False)
    rv = app.test_client().get('/')
    assert b'Success!' in rv.data
def test_add_template_test_with_template():
    app = flask.Flask(__name__)
    def boolean(value):
        return isinstance(value, bool)
    app.add_template_test(boolean)
    @app.route('/')
    def index():
        return flask.render_template('template_test.html', value=False)
    rv = app.test_client().get('/')
    assert b'Success!' in rv.data
def test_template_test_with_name_and_template():
    app = flask.Flask(__name__)
    @app.template_test('boolean')
    def is_boolean(value):
        return isinstance(value, bool)
    @app.route('/')
    def index():
        return flask.render_template('template_test.html', value=False)
    rv = app.test_client().get('/')
    assert b'Success!' in rv.data
def test_add_template_test_with_name_and_template():
    app = flask.Flask(__name__)
    def is_boolean(value):
        return isinstance(value, bool)
    app.add_template_test(is_boolean, 'boolean')
    @app.route('/')
    def index():
        return flask.render_template('template_test.html', value=False)
    rv = app.test_client().get('/')
    assert b'Success!' in rv.data
def test_add_template_global():
    """``@app.template_global`` must expose the function to all templates."""
    app = flask.Flask(__name__)
    @app.template_global()
    def get_stuff():
        return 42
    assert 'get_stuff' in app.jinja_env.globals.keys()
    assert app.jinja_env.globals['get_stuff'] == get_stuff
    # Bug fix: the original read ``assert f(), 42`` -- the comma made 42 the
    # assertion *message*, so the return value was never compared to 42.
    assert app.jinja_env.globals['get_stuff']() == 42
    with app.app_context():
        rv = flask.render_template_string('{{ get_stuff() }}')
        assert rv == '42'
# Subclasses may override create_global_jinja_loader to control lookup.
def test_custom_template_loader():
    class MyFlask(flask.Flask):
        def create_global_jinja_loader(self):
            from jinja2 import DictLoader
            return DictLoader({'index.html': 'Hello Custom World!'})
    app = MyFlask(__name__)
    @app.route('/')
    def index():
        return flask.render_template('index.html')
    c = app.test_client()
    rv = c.get('/')
    assert rv.data == b'Hello Custom World!'
# render_template accepts a list of names and uses the first that exists.
def test_iterable_loader():
    app = flask.Flask(__name__)
    @app.context_processor
    def context_processor():
        return {'whiskey': 'Jameson'}
    @app.route('/')
    def index():
        return flask.render_template(
            ['no_template.xml', # should skip this one
             'simple_template.html', # should render this
             'context_template.html'],
            value=23)
    rv = app.test_client().get('/')
    assert rv.data == b'<h1>Jameson</h1>'
# Auto-reload is on by default and follows the TEMPLATES_AUTO_RELOAD flag.
def test_templates_auto_reload():
    app = flask.Flask(__name__)
    assert app.config['TEMPLATES_AUTO_RELOAD']
    assert app.jinja_env.auto_reload
    app = flask.Flask(__name__)
    app.config['TEMPLATES_AUTO_RELOAD'] = False
    assert not app.jinja_env.auto_reload
def test_template_loader_debugging(test_apps):
    from blueprintapp import app
    called = []
    class _TestHandler(logging.Handler):
        # NOTE: the first parameter is named ``x`` instead of ``self`` in
        # the original; kept as-is since it still works as a bound method.
        def handle(x, record):
            called.append(True)
            text = str(record.msg)
            assert '1: trying loader of application "blueprintapp"' in text
            assert ('2: trying loader of blueprint "admin" '
                    '(blueprintapp.apps.admin)') in text
            assert ('trying loader of blueprint "frontend" '
                    '(blueprintapp.apps.frontend)') in text
            assert 'Error: the template could not be found' in text
            assert ('looked up from an endpoint that belongs to '
                    'the blueprint "frontend"') in text
            assert 'See http://flask.pocoo.org/docs/blueprints/#templates' in text
    with app.test_client() as c:
        try:
            # Temporarily enable loader tracing and capture its log record.
            old_load_setting = app.config['EXPLAIN_TEMPLATE_LOADING']
            old_handlers = app.logger.handlers[:]
            app.logger.handlers = [_TestHandler()]
            app.config['EXPLAIN_TEMPLATE_LOADING'] = True
            with pytest.raises(TemplateNotFound) as excinfo:
                c.get('/missing')
            assert 'missing_template.html' in str(excinfo.value)
        finally:
            app.logger.handlers[:] = old_handlers
            app.config['EXPLAIN_TEMPLATE_LOADING'] = old_load_setting
    # The explain handler must have fired exactly once.
    assert len(called) == 1
| iFighting/flask | tests/test_templating.py | Python | bsd-3-clause | 11,202 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# A script to test the vtkROIStencilSource
# Shared input image for every quadrant.
reader = vtk.vtkPNGReader()
reader.SetDataSpacing(0.8, 0.8, 1.5)
reader.SetDataOrigin(0.0, 0.0, 0.0)
reader.SetFileName(str(VTK_DATA_ROOT) + "/Data/fullhead15.png")

# Dimmed copy of the input, used as the background outside each stencil.
shiftScale = vtk.vtkImageShiftScale()
shiftScale.SetInputConnection(reader.GetOutputPort())
shiftScale.SetScale(0.2)
shiftScale.Update()

imgWin = vtk.vtkRenderWindow()

# One (shape setter, viewport) pair per quadrant of the render window.
quadrants = [
    ('SetShapeToEllipsoid', (0.5, 0.0, 1.0, 0.5)),
    ('SetShapeToCylinderX', (0.0, 0.0, 0.5, 0.5)),
    ('SetShapeToCylinderZ', (0.5, 0.5, 1.0, 1.0)),
    ('SetShapeToBox',       (0.0, 0.5, 0.5, 1.0)),
]

for shape_setter, viewport in quadrants:
    # ROI stencil: same bounds in every quadrant, different shape.
    roiStencil = vtk.vtkROIStencilSource()
    getattr(roiStencil, shape_setter)()
    roiStencil.SetBounds(20, 300, 80, 150, 0, 0)
    roiStencil.SetInformationInput(reader.GetOutput())

    # Full-brightness image inside the ROI, dimmed image outside.
    stencil = vtk.vtkImageStencil()
    stencil.SetInputConnection(reader.GetOutputPort())
    stencil.SetBackgroundInputData(shiftScale.GetOutput())
    stencil.SetStencilConnection(roiStencil.GetOutputPort())

    mapper = vtk.vtkImageMapper()
    mapper.SetInputConnection(stencil.GetOutputPort())
    mapper.SetColorWindow(2000)
    mapper.SetColorLevel(1000)
    mapper.SetZSlice(0)

    actor = vtk.vtkActor2D()
    actor.SetMapper(mapper)

    imager = vtk.vtkRenderer()
    imager.AddActor2D(actor)
    imager.SetViewport(*viewport)
    imgWin.AddRenderer(imager)

imgWin.SetSize(512, 512)
imgWin.Render()
# --- end of script --
| hlzz/dotfiles | graphics/VTK-7.0.0/Imaging/Core/Testing/Python/TestROIStencil.py | Python | bsd-3-clause | 3,470 |
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from datetime import timedelta
from decimal import Decimal
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.datetime_safe import date, datetime
from whoosh.fields import BOOLEAN, DATETIME, KEYWORD, NUMERIC, TEXT
from whoosh.qparser import QueryParser
from haystack import connections, indexes, reset_search_queries
from haystack.exceptions import SkipDocument, SearchBackendError
from haystack.inputs import AutoQuery
from haystack.models import SearchResult
from haystack.query import SearchQuerySet, SQ
from haystack.utils.loading import UnifiedIndex
from ..core.models import AFourthMockModel, AnotherMockModel, MockModel
from ..mocks import MockSearchResult
from .testcases import WhooshTestCase
class WhooshMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """Baseline index for MockModel used throughout these Whoosh tests."""
    # Primary document field, rendered via a search template.
    text = indexes.CharField(document=True, use_template=True)
    # Mirrors MockModel.author.
    name = indexes.CharField(model_attr='author')
    pub_date = indexes.DateTimeField(model_attr='pub_date')
    def get_model(self):
        """This index covers MockModel instances."""
        return MockModel
class WhooshMockSearchIndexWithSkipDocument(WhooshMockSearchIndex):
    """Variant of WhooshMockSearchIndex that refuses to index 'daniel3'."""

    def prepare_text(self, obj):
        # Documents authored by daniel3 are deliberately skipped so tests
        # can assert they never reach the index.
        author = obj.author
        if author == 'daniel3':
            raise SkipDocument
        return author
class WhooshAnotherMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """Index for AnotherMockModel; the document body is just the author."""
    text = indexes.CharField(document=True)
    name = indexes.CharField(model_attr='author')
    pub_date = indexes.DateTimeField(model_attr='pub_date')
    def get_model(self):
        return AnotherMockModel
    def prepare_text(self, obj):
        # No template: the author name is the whole document text.
        return obj.author
class AllTypesWhooshMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """One field of each major type, used by the schema-building test."""
    text = indexes.CharField(document=True, use_template=True)
    # Stored but not indexed.
    name = indexes.CharField(model_attr='author', indexed=False)
    pub_date = indexes.DateTimeField(model_attr='pub_date')
    sites = indexes.MultiValueField()
    seen_count = indexes.IntegerField(indexed=False)
    is_active = indexes.BooleanField(default=True)
    def get_model(self):
        return MockModel
class WhooshMaintainTypeMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """Index whose ``month`` field must stay a zero-padded string, not an int."""
    text = indexes.CharField(document=True)
    month = indexes.CharField(indexed=False)
    pub_date = indexes.DateTimeField(model_attr='pub_date')

    def get_model(self):
        return MockModel

    def prepare_text(self, obj):
        return "Indexed!\n{0}".format(obj.pk)

    def prepare_month(self, obj):
        # Zero-pad so e.g. June comes back as '06', never the integer 6.
        return "{0:02d}".format(obj.pub_date.month)
class WhooshBoostMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """Index that applies a document-level boost to even-numbered PKs."""
    text = indexes.CharField(
        document=True, use_template=True,
        template_name='search/indexes/core/mockmodel_template.txt'
    )
    author = indexes.CharField(model_attr='author', weight=2.0)
    editor = indexes.CharField(model_attr='editor')
    pub_date = indexes.DateTimeField(model_attr='pub_date')

    def get_model(self):
        return AFourthMockModel

    def prepare(self, obj):
        prepared = super(WhooshBoostMockSearchIndex, self).prepare(obj)
        # Even PKs get a 2x document boost.
        if not obj.pk % 2:
            prepared['boost'] = 2.0
        return prepared
class WhooshAutocompleteMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """Index with edge-ngram fields for autocomplete tests."""
    text = indexes.CharField(model_attr='foo', document=True)
    name = indexes.CharField(model_attr='author')
    pub_date = indexes.DateTimeField(model_attr='pub_date')
    # Edge-ngram variants of the fields above, for prefix matching.
    text_auto = indexes.EdgeNgramField(model_attr='foo')
    name_auto = indexes.EdgeNgramField(model_attr='author')
    def get_model(self):
        return MockModel
class WhooshSearchBackendTestCase(WhooshTestCase):
    """Exercises the Whoosh backend directly: update/remove/clear/search,
    schema building, type round-tripping and slicing."""
    fixtures = ['bulk_data.json']

    def setUp(self):
        super(WhooshSearchBackendTestCase, self).setUp()

        # Stow the real unified index and install a controlled one.
        self.old_ui = connections['whoosh'].get_unified_index()
        self.ui = UnifiedIndex()
        self.wmmi = WhooshMockSearchIndex()
        self.wmmidni = WhooshMockSearchIndexWithSkipDocument()
        self.wmtmmi = WhooshMaintainTypeMockSearchIndex()
        self.ui.build(indexes=[self.wmmi])
        self.sb = connections['whoosh'].get_backend()
        connections['whoosh']._index = self.ui

        self.sb.setup()
        self.raw_whoosh = self.sb.index
        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
        self.sb.delete_index()

        self.sample_objs = MockModel.objects.all()

    def tearDown(self):
        connections['whoosh']._index = self.old_ui
        super(WhooshSearchBackendTestCase, self).tearDown()

    def whoosh_search(self, query):
        """Run ``query`` against the raw Whoosh index, bypassing Haystack."""
        self.raw_whoosh = self.raw_whoosh.refresh()
        searcher = self.raw_whoosh.searcher()
        return searcher.search(self.parser.parse(query), limit=1000)

    def test_non_silent(self):
        bad_sb = connections['whoosh'].backend('bad', PATH='/tmp/bad_whoosh', SILENTLY_FAIL=False)
        bad_sb.use_file_storage = False
        bad_sb.storage = 'omg.wtf.bbq'

        # BUGFIX: the original ``try: op(); self.fail() except: pass``
        # pattern was vacuous -- ``self.fail()`` raises AssertionError,
        # which the bare ``except`` swallowed, so the test passed even when
        # the backend raised nothing. ``assertRaises`` enforces the raise.
        with self.assertRaises(Exception):
            bad_sb.update(self.wmmi, self.sample_objs)

        with self.assertRaises(Exception):
            bad_sb.remove('core.mockmodel.1')

        with self.assertRaises(Exception):
            bad_sb.clear()

        with self.assertRaises(Exception):
            bad_sb.search('foo')

    def test_update(self):
        self.sb.update(self.wmmi, self.sample_objs)

        # Check what Whoosh thinks is there.
        self.assertEqual(len(self.whoosh_search(u'*')), 23)
        self.assertEqual([doc.fields()['id'] for doc in self.whoosh_search(u'*')], [u'core.mockmodel.%s' % i for i in range(1, 24)])

    def test_update_with_SkipDocument_raised(self):
        self.sb.update(self.wmmidni, self.sample_objs)

        # Check what Whoosh thinks is there.
        res = self.whoosh_search(u'*')
        self.assertEqual(len(res), 14)
        ids = [1, 2, 5, 6, 7, 8, 9, 11, 12, 14, 15, 18, 20, 21]
        self.assertListEqual(
            [doc.fields()['id'] for doc in res],
            [u'core.mockmodel.%s' % i for i in ids]
        )

    def test_remove(self):
        self.sb.update(self.wmmi, self.sample_objs)
        self.assertEqual(self.sb.index.doc_count(), 23)

        self.sb.remove(self.sample_objs[0])
        self.assertEqual(self.sb.index.doc_count(), 22)

    def test_clear(self):
        self.sb.update(self.wmmi, self.sample_objs)
        self.assertEqual(self.sb.index.doc_count(), 23)

        self.sb.clear()
        self.assertEqual(self.sb.index.doc_count(), 0)

        self.sb.update(self.wmmi, self.sample_objs)
        self.assertEqual(self.sb.index.doc_count(), 23)

        # Clearing an unrelated model must leave the index untouched.
        self.sb.clear([AnotherMockModel])
        self.assertEqual(self.sb.index.doc_count(), 23)

        self.sb.clear([MockModel])
        self.assertEqual(self.sb.index.doc_count(), 0)

        self.sb.index.refresh()
        self.sb.update(self.wmmi, self.sample_objs)
        self.assertEqual(self.sb.index.doc_count(), 23)

        self.sb.clear([AnotherMockModel, MockModel])
        self.assertEqual(self.raw_whoosh.doc_count(), 0)

    def test_search(self):
        self.sb.update(self.wmmi, self.sample_objs)
        self.assertEqual(len(self.whoosh_search(u'*')), 23)

        # No query string should always yield zero results.
        self.assertEqual(self.sb.search(u''), {'hits': 0, 'results': []})

        # A one letter query string gets nabbed by a stopwords filter. Should
        # always yield zero results.
        self.assertEqual(self.sb.search(u'a'), {'hits': 0, 'results': []})

        # Possible AttributeError?
        # self.assertEqual(self.sb.search(u'a b'), {'hits': 0, 'results': [], 'spelling_suggestion': '', 'facets': {}})

        self.assertEqual(self.sb.search(u'*')['hits'], 23)
        self.assertEqual([result.pk for result in self.sb.search(u'*')['results']], [u'%s' % i for i in range(1, 24)])

        self.assertEqual(self.sb.search(u'Indexe')['hits'], 23)

        self.assertEqual(self.sb.search(u'Indexe')['spelling_suggestion'], u'indexed')

        self.assertEqual(self.sb.search(u'', facets=['name']), {'hits': 0, 'results': []})
        results = self.sb.search(u'Index*', facets=['name'])
        results = self.sb.search(u'index*', facets=['name'])
        self.assertEqual(results['hits'], 23)
        self.assertEqual(results['facets'], {})

        self.assertEqual(self.sb.search(u'', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}}), {'hits': 0, 'results': []})
        results = self.sb.search(u'Index*', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}})
        results = self.sb.search(u'index*', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}})
        self.assertEqual(results['hits'], 23)
        self.assertEqual(results['facets'], {})

        self.assertEqual(self.sb.search(u'', query_facets={'name': '[* TO e]'}), {'hits': 0, 'results': []})
        results = self.sb.search(u'Index*', query_facets={'name': '[* TO e]'})
        results = self.sb.search(u'index*', query_facets={'name': '[* TO e]'})
        self.assertEqual(results['hits'], 23)
        self.assertEqual(results['facets'], {})

        # self.assertEqual(self.sb.search('', narrow_queries=set(['name:daniel1'])), {'hits': 0, 'results': []})
        # results = self.sb.search('Index*', narrow_queries=set(['name:daniel1']))
        # self.assertEqual(results['hits'], 1)

        # Ensure that swapping the ``result_class`` works.
        self.assertTrue(isinstance(self.sb.search(u'Index*', result_class=MockSearchResult)['results'][0], MockSearchResult))

        # Check the use of ``limit_to_registered_models``.
        self.assertEqual(self.sb.search(u'', limit_to_registered_models=False), {'hits': 0, 'results': []})
        self.assertEqual(self.sb.search(u'*', limit_to_registered_models=False)['hits'], 23)
        self.assertEqual([result.pk for result in self.sb.search(u'*', limit_to_registered_models=False)['results']], [u'%s' % i for i in range(1, 24)])

        # Stow.
        old_limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False

        self.assertEqual(self.sb.search(u''), {'hits': 0, 'results': []})
        self.assertEqual(self.sb.search(u'*')['hits'], 23)
        self.assertEqual([result.pk for result in self.sb.search(u'*')['results']], [u'%s' % i for i in range(1, 24)])

        # Restore.
        settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models

    def test_highlight(self):
        self.sb.update(self.wmmi, self.sample_objs)
        self.assertEqual(len(self.whoosh_search(u'*')), 23)

        self.assertEqual(self.sb.search(u'', highlight=True), {'hits': 0, 'results': []})
        self.assertEqual(self.sb.search(u'index*', highlight=True)['hits'], 23)

        query = self.sb.search('Index*', highlight=True)['results']
        result = [result.highlighted['text'][0] for result in query]

        self.assertEqual(result, ['<em>Indexed</em>!\n%d' % i for i in range(1, 24)])

    def test_search_all_models(self):
        wamsi = WhooshAnotherMockSearchIndex()
        self.ui.build(indexes=[self.wmmi, wamsi])

        self.sb.update(self.wmmi, self.sample_objs)
        self.sb.update(wamsi, AnotherMockModel.objects.all())

        self.assertEqual(len(self.whoosh_search(u'*')), 25)

        self.ui.build(indexes=[self.wmmi])

    def test_more_like_this(self):
        self.sb.update(self.wmmi, self.sample_objs)
        self.assertEqual(len(self.whoosh_search(u'*')), 23)

        # Now supported by Whoosh (as of 1.8.4). See the ``LiveWhooshMoreLikeThisTestCase``.
        self.assertEqual(self.sb.more_like_this(self.sample_objs[0])['hits'], 22)

        # Make sure that swapping the ``result_class`` doesn't blow up.
        # (Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # are not misreported as test failures.)
        try:
            self.sb.more_like_this(self.sample_objs[0], result_class=MockSearchResult)
        except Exception:
            self.fail()

    def test_delete_index(self):
        self.sb.update(self.wmmi, self.sample_objs)
        self.assertTrue(self.sb.index.doc_count() > 0)

        self.sb.delete_index()
        self.assertEqual(self.sb.index.doc_count(), 0)

    def test_order_by(self):
        self.sb.update(self.wmmi, self.sample_objs)

        results = self.sb.search(u'*', sort_by=['pub_date'])
        self.assertEqual([result.pk for result in results['results']], [u'1', u'3', u'2', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11', u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20', u'21', u'22', u'23'])

        results = self.sb.search(u'*', sort_by=['-pub_date'])
        self.assertEqual([result.pk for result in results['results']], [u'23', u'22', u'21', u'20', u'19', u'18', u'17', u'16', u'15', u'14', u'13', u'12', u'11', u'10', u'9', u'8', u'7', u'6', u'5', u'4', u'2', u'3', u'1'])

        # 'id' sorts lexicographically, hence 1, 10, 11, ...
        results = self.sb.search(u'*', sort_by=['id'])
        self.assertEqual([result.pk for result in results['results']], [u'1', u'10', u'11', u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'2', u'20', u'21', u'22', u'23', u'3', u'4', u'5', u'6', u'7', u'8', u'9'])

        results = self.sb.search(u'*', sort_by=['-id'])
        self.assertEqual([result.pk for result in results['results']], [u'9', u'8', u'7', u'6', u'5', u'4', u'3', u'23', u'22', u'21', u'20', u'2', u'19', u'18', u'17', u'16', u'15', u'14', u'13', u'12', u'11', u'10', u'1'])

        results = self.sb.search(u'*', sort_by=['-pub_date', '-id'])
        self.assertEqual([result.pk for result in results['results']],
                         [u'23', u'22', u'21', u'20', u'19', u'18', u'17', u'16', u'15', u'14', u'13', u'12',
                          u'11', u'10', u'9', u'8', u'7', u'6', u'5', u'4', u'2', u'3', u'1' ])

        # Mixed ascending/descending sorts are unsupported by the backend.
        self.assertRaises(SearchBackendError, self.sb.search, u'*', sort_by=['-pub_date', 'id'])

    def test__from_python(self):
        self.assertEqual(self.sb._from_python('abc'), u'abc')
        self.assertEqual(self.sb._from_python(1), 1)
        self.assertEqual(self.sb._from_python(2653), 2653)
        self.assertEqual(self.sb._from_python(25.5), 25.5)
        self.assertEqual(self.sb._from_python([1, 2, 3]), u'1,2,3')
        self.assertTrue("a': 1" in self.sb._from_python({'a': 1, 'c': 3, 'b': 2}))
        self.assertEqual(self.sb._from_python(datetime(2009, 5, 9, 16, 14)), datetime(2009, 5, 9, 16, 14))
        self.assertEqual(self.sb._from_python(datetime(2009, 5, 9, 0, 0)), datetime(2009, 5, 9, 0, 0))
        self.assertEqual(self.sb._from_python(datetime(1899, 5, 18, 0, 0)), datetime(1899, 5, 18, 0, 0))
        self.assertEqual(self.sb._from_python(datetime(2009, 5, 18, 1, 16, 30, 250)), datetime(2009, 5, 18, 1, 16, 30, 250))

    def test__to_python(self):
        self.assertEqual(self.sb._to_python('abc'), 'abc')
        self.assertEqual(self.sb._to_python('1'), 1)
        self.assertEqual(self.sb._to_python('2653'), 2653)
        self.assertEqual(self.sb._to_python('25.5'), 25.5)
        self.assertEqual(self.sb._to_python('[1, 2, 3]'), [1, 2, 3])
        self.assertEqual(self.sb._to_python('{"a": 1, "b": 2, "c": 3}'), {'a': 1, 'c': 3, 'b': 2})
        self.assertEqual(self.sb._to_python('2009-05-09T16:14:00'), datetime(2009, 5, 9, 16, 14))
        self.assertEqual(self.sb._to_python('2009-05-09T00:00:00'), datetime(2009, 5, 9, 0, 0))
        self.assertEqual(self.sb._to_python(None), None)

    def test_range_queries(self):
        self.sb.update(self.wmmi, self.sample_objs)

        self.assertEqual(len(self.whoosh_search(u'[d TO]')), 23)
        self.assertEqual(len(self.whoosh_search(u'name:[d TO]')), 23)
        self.assertEqual(len(self.whoosh_search(u'Ind* AND name:[d to]')), 23)
        self.assertEqual(len(self.whoosh_search(u'Ind* AND name:[to c]')), 0)

    def test_date_queries(self):
        self.sb.update(self.wmmi, self.sample_objs)

        self.assertEqual(len(self.whoosh_search(u"pub_date:20090717003000")), 1)
        self.assertEqual(len(self.whoosh_search(u"pub_date:20090717000000")), 0)
        self.assertEqual(len(self.whoosh_search(u'Ind* AND pub_date:[to 20090717003000]')), 3)

    def test_escaped_characters_queries(self):
        self.sb.update(self.wmmi, self.sample_objs)

        self.assertEqual(len(self.whoosh_search(u"Indexed\!")), 23)
        self.assertEqual(len(self.whoosh_search(u"http\:\/\/www\.example\.com")), 0)

    def test_build_schema(self):
        ui = UnifiedIndex()
        ui.build(indexes=[AllTypesWhooshMockSearchIndex()])

        (content_field_name, schema) = self.sb.build_schema(ui.all_searchfields())
        self.assertEqual(content_field_name, 'text')

        schema_names = set(schema.names())
        required_schema = {'django_ct', 'django_id', 'id', 'is_active', 'name', 'pub_date', 'seen_count',
                           'sites', 'text'}
        self.assertTrue(required_schema.issubset(schema_names))

        self.assertIsInstance(schema._fields['text'], TEXT)
        self.assertIsInstance(schema._fields['pub_date'], DATETIME)
        self.assertIsInstance(schema._fields['seen_count'], NUMERIC)
        self.assertIsInstance(schema._fields['sites'], KEYWORD)
        self.assertIsInstance(schema._fields['is_active'], BOOLEAN)

    def test_verify_type(self):
        old_ui = connections['whoosh'].get_unified_index()
        ui = UnifiedIndex()
        wmtmmi = WhooshMaintainTypeMockSearchIndex()
        ui.build(indexes=[wmtmmi])
        connections['whoosh']._index = ui
        sb = connections['whoosh'].get_backend()
        sb.setup()
        sb.update(wmtmmi, self.sample_objs)

        self.assertEqual(sb.search(u'*')['hits'], 23)
        self.assertEqual([result.month for result in sb.search(u'*')['results']], [u'06', u'07', u'06', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07', u'07'])
        connections['whoosh']._index = old_ui

    @unittest.skipIf(settings.HAYSTACK_CONNECTIONS['whoosh'].get('STORAGE') != 'file',
                     'testing writability requires Whoosh to use STORAGE=file')
    def test_writable(self):
        if not os.path.exists(settings.HAYSTACK_CONNECTIONS['whoosh']['PATH']):
            os.makedirs(settings.HAYSTACK_CONNECTIONS['whoosh']['PATH'])

        # Make the index directory read-only; setup() should then raise.
        os.chmod(settings.HAYSTACK_CONNECTIONS['whoosh']['PATH'], 0o400)

        try:
            self.sb.setup()
            self.fail()
        except IOError:
            # Yay. We failed
            pass

        os.chmod(settings.HAYSTACK_CONNECTIONS['whoosh']['PATH'], 0o755)

    def test_slicing(self):
        self.sb.update(self.wmmi, self.sample_objs)

        page_1 = self.sb.search(u'*', start_offset=0, end_offset=20)
        page_2 = self.sb.search(u'*', start_offset=20, end_offset=30)
        self.assertEqual(len(page_1['results']), 20)
        self.assertEqual([result.pk for result in page_1['results']], [u'%s' % i for i in range(1, 21)])
        self.assertEqual(len(page_2['results']), 3)
        self.assertEqual([result.pk for result in page_2['results']], [u'21', u'22', u'23'])

        # This used to throw an error.
        page_0 = self.sb.search(u'*', start_offset=0, end_offset=0)
        self.assertEqual(len(page_0['results']), 1)

    @unittest.expectedFailure
    def test_scoring(self):
        self.sb.update(self.wmmi, self.sample_objs)

        page_1 = self.sb.search(u'index', start_offset=0, end_offset=20)
        page_2 = self.sb.search(u'index', start_offset=20, end_offset=30)
        self.assertEqual(len(page_1['results']), 20)
        self.assertEqual(["%0.2f" % result.score for result in page_1['results']], ['0.51', '0.51', '0.51', '0.51', '0.51', '0.51', '0.51', '0.51', '0.51', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40', '0.40'])
        self.assertEqual(len(page_2['results']), 3)
        self.assertEqual(["%0.2f" % result.score for result in page_2['results']], ['0.40', '0.40', '0.40'])
class WhooshBoostBackendTestCase(WhooshTestCase):
    """Checks document boosting (currently an expected failure on Whoosh)."""

    def setUp(self):
        super(WhooshBoostBackendTestCase, self).setUp()

        # Stow the real unified index and install a controlled one.
        self.old_ui = connections['whoosh'].get_unified_index()
        self.ui = UnifiedIndex()
        self.wmmi = WhooshBoostMockSearchIndex()
        self.ui.build(indexes=[self.wmmi])
        self.sb = connections['whoosh'].get_backend()
        connections['whoosh']._index = self.ui
        self.sb.setup()
        self.raw_whoosh = self.sb.index
        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
        self.sb.delete_index()

        self.sample_objs = []

        # Alternate author/editor so that 'daniel' appears in a boosted
        # field on odd PKs and an unboosted one on even PKs.
        for i in range(1, 5):
            mock = AFourthMockModel()
            mock.id = i

            if i % 2:
                mock.author = 'daniel'
                mock.editor = 'david'
            else:
                mock.author = 'david'
                mock.editor = 'daniel'

            mock.pub_date = date(2009, 2, 25) - timedelta(days=i)
            self.sample_objs.append(mock)

    def tearDown(self):
        # BUGFIX: restore the index stowed in setUp(). The original
        # re-assigned ``self.ui`` here, leaving the test's replacement
        # index installed for any code running after this test case.
        connections['whoosh']._index = self.old_ui
        super(WhooshBoostBackendTestCase, self).tearDown()

    @unittest.expectedFailure
    def test_boost(self):
        self.sb.update(self.wmmi, self.sample_objs)
        self.raw_whoosh = self.raw_whoosh.refresh()
        searcher = self.raw_whoosh.searcher()
        self.assertEqual(len(searcher.search(self.parser.parse(u'*'), limit=1000)), 2)

        results = SearchQuerySet('whoosh').filter(SQ(author='daniel') | SQ(editor='daniel'))

        self.assertEqual([result.id for result in results], [
            'core.afourthmockmodel.1',
            'core.afourthmockmodel.3',
        ])
        self.assertEqual(results[0].boost, 1.1)
class LiveWhooshSearchQueryTestCase(WhooshTestCase):
    """Exercises SearchQuery behavior against a live Whoosh index."""

    def setUp(self):
        super(LiveWhooshSearchQueryTestCase, self).setUp()

        # Stow.
        self.old_ui = connections['whoosh'].get_unified_index()
        self.ui = UnifiedIndex()
        self.wmmi = WhooshMockSearchIndex()
        self.wmtmmi = WhooshMaintainTypeMockSearchIndex()
        self.ui.build(indexes=[self.wmmi])
        self.sb = connections['whoosh'].get_backend()
        connections['whoosh']._index = self.ui

        self.sb.setup()
        self.raw_whoosh = self.sb.index
        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
        self.sb.delete_index()

        self.sample_objs = []

        for i in range(1, 4):
            mock = MockModel()
            mock.id = i
            mock.author = 'daniel%s' % i
            mock.pub_date = date(2009, 2, 25) - timedelta(days=i)
            self.sample_objs.append(mock)

        self.sq = connections['whoosh'].get_query()

    def tearDown(self):
        connections['whoosh']._index = self.old_ui
        super(LiveWhooshSearchQueryTestCase, self).tearDown()

    def test_get_spelling(self):
        self.sb.update(self.wmmi, self.sample_objs)

        self.sq.add_filter(SQ(content='Indexe'))
        self.assertEqual(self.sq.get_spelling_suggestion(), u'indexed')

    def test_log_query(self):
        # (An unused local ``from django.conf import settings`` import was
        # removed; ``settings`` is already imported at module level.)
        reset_search_queries()
        self.assertEqual(len(connections['whoosh'].queries), 0)

        # Queries are only logged under DEBUG=True.
        with self.settings(DEBUG=False):
            len(self.sq.get_results())
            self.assertEqual(len(connections['whoosh'].queries), 0)

        with self.settings(DEBUG=True):
            # Redefine it to clear out the cached results.
            self.sq = connections['whoosh'].get_query()
            self.sq.add_filter(SQ(name='bar'))
            len(self.sq.get_results())
            self.assertEqual(len(connections['whoosh'].queries), 1)
            self.assertEqual(connections['whoosh'].queries[0]['query_string'], 'name:(bar)')

            # And again, for good measure.
            self.sq = connections['whoosh'].get_query()
            self.sq.add_filter(SQ(name='baz'))
            self.sq.add_filter(SQ(text='foo'))
            len(self.sq.get_results())
            self.assertEqual(len(connections['whoosh'].queries), 2)
            self.assertEqual(connections['whoosh'].queries[0]['query_string'], 'name:(bar)')
            self.assertEqual(connections['whoosh'].queries[1]['query_string'], u'(name:(baz) AND text:(foo))')
@override_settings(DEBUG=True)
class LiveWhooshSearchQuerySetTestCase(WhooshTestCase):
    """SearchQuerySet behavior (iteration, slicing, caching) against a
    live Whoosh index with three sample documents."""

    def setUp(self):
        super(LiveWhooshSearchQuerySetTestCase, self).setUp()

        # Stow.
        self.old_ui = connections['whoosh'].get_unified_index()
        self.ui = UnifiedIndex()
        self.wmmi = WhooshMockSearchIndex()
        self.ui.build(indexes=[self.wmmi])
        self.sb = connections['whoosh'].get_backend()
        connections['whoosh']._index = self.ui

        self.sb.setup()
        self.raw_whoosh = self.sb.index
        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
        self.sb.delete_index()

        self.sample_objs = []

        for i in range(1, 4):
            mock = MockModel()
            mock.id = i
            mock.author = 'daniel%s' % i
            mock.pub_date = date(2009, 2, 25) - timedelta(days=i)
            self.sample_objs.append(mock)

        self.sq = connections['whoosh'].get_query()
        self.sqs = SearchQuerySet('whoosh')

    def tearDown(self):
        connections['whoosh']._index = self.old_ui
        super(LiveWhooshSearchQuerySetTestCase, self).tearDown()

    def test_various_searchquerysets(self):
        self.sb.update(self.wmmi, self.sample_objs)

        sqs = self.sqs.filter(content='Index')
        self.assertEqual(sqs.query.build_query(), u'(Index)')
        self.assertEqual(len(sqs), 3)

        sqs = self.sqs.auto_query('Indexed!')
        self.assertEqual(sqs.query.build_query(), u"('Indexed!')")
        self.assertEqual(len(sqs), 3)

        sqs = self.sqs.auto_query('Indexed!').filter(pub_date__lte=date(2009, 8, 31))
        self.assertEqual(sqs.query.build_query(), u"(('Indexed!') AND pub_date:([to 20090831000000]))")
        self.assertEqual(len(sqs), 3)

        sqs = self.sqs.auto_query('Indexed!').filter(pub_date__lte=date(2009, 2, 23))
        self.assertEqual(sqs.query.build_query(), u"(('Indexed!') AND pub_date:([to 20090223000000]))")
        self.assertEqual(len(sqs), 2)

        sqs = self.sqs.auto_query('Indexed!').filter(pub_date__lte=date(2009, 2, 25)).filter(django_id__in=[1, 2]).exclude(name='daniel1')
        self.assertEqual(sqs.query.build_query(), u'((\'Indexed!\') AND pub_date:([to 20090225000000]) AND django_id:(1 OR 2) AND NOT (name:(daniel1)))')
        self.assertEqual(len(sqs), 1)

        sqs = self.sqs.auto_query('re-inker')
        self.assertEqual(sqs.query.build_query(), u"('re-inker')")
        self.assertEqual(len(sqs), 0)

        sqs = self.sqs.auto_query('0.7 wire')
        self.assertEqual(sqs.query.build_query(), u"('0.7' wire)")
        self.assertEqual(len(sqs), 0)

        sqs = self.sqs.auto_query("daler-rowney pearlescent 'bell bronze'")
        self.assertEqual(sqs.query.build_query(), u"('daler-rowney' pearlescent 'bell bronze')")
        self.assertEqual(len(sqs), 0)

        sqs = self.sqs.models(MockModel)
        self.assertEqual(sqs.query.build_query(), u'*')
        self.assertEqual(len(sqs), 3)

    def test_all_regression(self):
        sqs = SearchQuerySet('whoosh')
        self.assertEqual([result.pk for result in sqs], [])

        self.sb.update(self.wmmi, self.sample_objs)
        self.assertTrue(self.sb.index.doc_count() > 0)

        sqs = SearchQuerySet('whoosh')
        self.assertEqual(len(sqs), 3)
        self.assertEqual(sorted([result.pk for result in sqs]), [u'1', u'2', u'3'])

        # repr() of a SearchQuerySet used to blow up; make sure it doesn't.
        # (Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # are not misreported as test failures.)
        try:
            sqs = repr(SearchQuerySet('whoosh'))
        except Exception:
            self.fail()

    def test_regression_space_query(self):
        self.sb.update(self.wmmi, self.sample_objs)
        self.assertTrue(self.sb.index.doc_count() > 0)

        sqs = SearchQuerySet('whoosh').auto_query(" ")
        self.assertEqual(len(sqs), 3)
        sqs = SearchQuerySet('whoosh').filter(content=" ")
        self.assertEqual(len(sqs), 0)

    def test_iter(self):
        self.sb.update(self.wmmi, self.sample_objs)

        reset_search_queries()
        self.assertEqual(len(connections['whoosh'].queries), 0)
        sqs = self.sqs.auto_query('Indexed!')
        results = [int(result.pk) for result in iter(sqs)]
        self.assertEqual(sorted(results), [1, 2, 3])
        self.assertEqual(len(connections['whoosh'].queries), 1)

    def test_slice(self):
        self.sb.update(self.wmmi, self.sample_objs)

        reset_search_queries()
        self.assertEqual(len(connections['whoosh'].queries), 0)
        results = self.sqs.auto_query('Indexed!')
        self.assertEqual(sorted([int(result.pk) for result in results[1:3]]), [1, 2])
        self.assertEqual(len(connections['whoosh'].queries), 1)

        reset_search_queries()
        self.assertEqual(len(connections['whoosh'].queries), 0)
        results = self.sqs.auto_query('Indexed!')
        self.assertEqual(int(results[0].pk), 1)
        self.assertEqual(len(connections['whoosh'].queries), 1)

    def test_values_slicing(self):
        self.sb.update(self.wmmi, self.sample_objs)

        reset_search_queries()
        self.assertEqual(len(connections['whoosh'].queries), 0)

        # TODO: this would be a good candidate for refactoring into a TestCase subclass shared across backends

        # The values will come back as strings because Haystack doesn't assume PKs are integers.
        # We'll prepare this set once since we're going to query the same results in multiple ways:
        expected_pks = ['3', '2', '1']

        results = self.sqs.all().order_by('pub_date').values('pk')
        self.assertListEqual([i['pk'] for i in results[1:11]], expected_pks)

        results = self.sqs.all().order_by('pub_date').values_list('pk')
        self.assertListEqual([i[0] for i in results[1:11]], expected_pks)

        results = self.sqs.all().order_by('pub_date').values_list('pk', flat=True)
        self.assertListEqual(results[1:11], expected_pks)

        self.assertEqual(len(connections['whoosh'].queries), 3)

    def test_manual_iter(self):
        self.sb.update(self.wmmi, self.sample_objs)
        results = self.sqs.auto_query('Indexed!')

        reset_search_queries()
        self.assertEqual(len(connections['whoosh'].queries), 0)
        results = [int(result.pk) for result in results._manual_iter()]
        self.assertEqual(sorted(results), [1, 2, 3])
        self.assertEqual(len(connections['whoosh'].queries), 1)

    def test_fill_cache(self):
        self.sb.update(self.wmmi, self.sample_objs)

        reset_search_queries()
        self.assertEqual(len(connections['whoosh'].queries), 0)
        results = self.sqs.auto_query('Indexed!')
        self.assertEqual(len(results._result_cache), 0)
        self.assertEqual(len(connections['whoosh'].queries), 0)
        results._fill_cache(0, 10)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 3)
        self.assertEqual(len(connections['whoosh'].queries), 1)
        results._fill_cache(10, 20)
        self.assertEqual(len([result for result in results._result_cache if result is not None]), 3)
        self.assertEqual(len(connections['whoosh'].queries), 2)

    def test_cache_is_full(self):
        self.sb.update(self.wmmi, self.sample_objs)

        reset_search_queries()
        self.assertEqual(len(connections['whoosh'].queries), 0)
        self.assertEqual(self.sqs._cache_is_full(), False)
        results = self.sqs.auto_query('Indexed!')
        result_list = [i for i in iter(results)]
        self.assertEqual(results._cache_is_full(), True)
        self.assertEqual(len(connections['whoosh'].queries), 1)

    def test_count(self):
        more_samples = []

        for i in range(1, 50):
            mock = MockModel()
            mock.id = i
            mock.author = 'daniel%s' % i
            mock.pub_date = date(2009, 2, 25) - timedelta(days=i)
            more_samples.append(mock)

        self.sb.update(self.wmmi, more_samples)

        reset_search_queries()
        self.assertEqual(len(connections['whoosh'].queries), 0)
        results = self.sqs.all()
        self.assertEqual(len(results), 49)
        self.assertEqual(results._cache_is_full(), False)
        self.assertEqual(len(connections['whoosh'].queries), 1)

    def test_query_generation(self):
        sqs = self.sqs.filter(SQ(content=AutoQuery("hello world")) | SQ(title=AutoQuery("hello world")))
        self.assertEqual(sqs.query.build_query(), u"((hello world) OR title:(hello world))")

    def test_result_class(self):
        self.sb.update(self.wmmi, self.sample_objs)

        # Assert that we're defaulting to ``SearchResult``.
        sqs = self.sqs.all()
        self.assertTrue(isinstance(sqs[0], SearchResult))

        # Custom class.
        sqs = self.sqs.result_class(MockSearchResult).all()
        self.assertTrue(isinstance(sqs[0], MockSearchResult))

        # Reset to default.
        sqs = self.sqs.result_class(None).all()
        self.assertTrue(isinstance(sqs[0], SearchResult))
class LiveWhooshMultiSearchQuerySetTestCase(WhooshTestCase):
    """Tests ``SearchQuerySet.models()`` filtering when two indexes share a
    single Whoosh backend."""
    fixtures = ['bulk_data.json']
    def setUp(self):
        super(LiveWhooshMultiSearchQuerySetTestCase, self).setUp()
        # Stow.
        self.old_ui = connections['whoosh'].get_unified_index()
        self.ui = UnifiedIndex()
        self.wmmi = WhooshMockSearchIndex()
        self.wamsi = WhooshAnotherMockSearchIndex()
        self.ui.build(indexes=[self.wmmi, self.wamsi])
        self.sb = connections['whoosh'].get_backend()
        connections['whoosh']._index = self.ui
        self.sb.setup()
        self.raw_whoosh = self.sb.index
        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
        # Start from an empty index, then populate both indexes.
        self.sb.delete_index()
        self.wmmi.update(using='whoosh')
        self.wamsi.update(using='whoosh')
        self.sqs = SearchQuerySet('whoosh')
    def tearDown(self):
        # Restore the stowed unified index so other tests are unaffected.
        connections['whoosh']._index = self.old_ui
        super(LiveWhooshMultiSearchQuerySetTestCase, self).tearDown()
    def test_searchquerysets_with_models(self):
        """Restricting by model should narrow counts without changing the
        underlying ``*`` query."""
        sqs = self.sqs.all()
        self.assertEqual(sqs.query.build_query(), u'*')
        self.assertEqual(len(sqs), 25)
        sqs = self.sqs.models(MockModel)
        self.assertEqual(sqs.query.build_query(), u'*')
        self.assertEqual(len(sqs), 23)
        sqs = self.sqs.models(AnotherMockModel)
        self.assertEqual(sqs.query.build_query(), u'*')
        self.assertEqual(len(sqs), 2)
class LiveWhooshMoreLikeThisTestCase(WhooshTestCase):
    """More-like-this queries against a live Whoosh backend."""
    fixtures = ['bulk_data.json']
    def setUp(self):
        super(LiveWhooshMoreLikeThisTestCase, self).setUp()
        # Stow.
        self.old_ui = connections['whoosh'].get_unified_index()
        self.ui = UnifiedIndex()
        self.wmmi = WhooshMockSearchIndex()
        self.wamsi = WhooshAnotherMockSearchIndex()
        self.ui.build(indexes=[self.wmmi, self.wamsi])
        self.sb = connections['whoosh'].get_backend()
        connections['whoosh']._index = self.ui
        self.sb.setup()
        self.raw_whoosh = self.sb.index
        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
        # Start from an empty index, then populate both indexes.
        self.sb.delete_index()
        self.wmmi.update()
        self.wamsi.update()
        self.sqs = SearchQuerySet('whoosh')
    def tearDown(self):
        # Restore the stowed unified index so other tests are unaffected.
        connections['whoosh']._index = self.old_ui
        super(LiveWhooshMoreLikeThisTestCase, self).tearDown()
    # We expect failure here because, despite not changing the code, Whoosh
    # 2.5.1 returns incorrect counts/results. Huzzah.
    @unittest.expectedFailure
    def test_more_like_this(self):
        """MLT should exclude the seed document, respect prior filters and
        model restrictions, and cope with deferred model instances."""
        mlt = self.sqs.more_like_this(MockModel.objects.get(pk=22))
        self.assertEqual(mlt.count(), 22)
        self.assertEqual(sorted([result.pk for result in mlt]), sorted([u'9', u'8', u'7', u'6', u'5', u'4', u'3', u'2', u'1', u'21', u'20', u'19', u'18', u'17', u'16', u'15', u'14', u'13', u'12', u'11', u'10', u'23']))
        self.assertEqual(len([result.pk for result in mlt]), 22)
        alt_mlt = self.sqs.filter(name='daniel3').more_like_this(MockModel.objects.get(pk=13))
        self.assertEqual(alt_mlt.count(), 8)
        self.assertEqual(sorted([result.pk for result in alt_mlt]), sorted([u'4', u'3', u'22', u'19', u'17', u'16', u'10', u'23']))
        self.assertEqual(len([result.pk for result in alt_mlt]), 8)
        alt_mlt_with_models = self.sqs.models(MockModel).more_like_this(MockModel.objects.get(pk=11))
        self.assertEqual(alt_mlt_with_models.count(), 22)
        self.assertEqual(sorted([result.pk for result in alt_mlt_with_models]), sorted([u'9', u'8', u'7', u'6', u'5', u'4', u'3', u'2', u'1', u'22', u'21', u'20', u'19', u'18', u'17', u'16', u'15', u'14', u'13', u'12', u'10', u'23']))
        self.assertEqual(len([result.pk for result in alt_mlt_with_models]), 22)
        if hasattr(MockModel.objects, 'defer'):
            # Make sure MLT works with deferred bits.
            mi = MockModel.objects.defer('foo').get(pk=21)
            self.assertEqual(mi._deferred, True)
            deferred = self.sqs.models(MockModel).more_like_this(mi)
            self.assertEqual(deferred.count(), 0)
            self.assertEqual([result.pk for result in deferred], [])
            self.assertEqual(len([result.pk for result in deferred]), 0)
        # Ensure that swapping the ``result_class`` works.
        self.assertTrue(isinstance(self.sqs.result_class(MockSearchResult).more_like_this(MockModel.objects.get(pk=21))[0], MockSearchResult))
@override_settings(DEBUG=True)
class LiveWhooshAutocompleteTestCase(WhooshTestCase):
    """Autocomplete (edge n-gram) behaviour against a live Whoosh backend."""
    fixtures = ['bulk_data.json']
    def setUp(self):
        super(LiveWhooshAutocompleteTestCase, self).setUp()
        # Stow.
        self.old_ui = connections['whoosh'].get_unified_index()
        self.ui = UnifiedIndex()
        self.wacsi = WhooshAutocompleteMockModelSearchIndex()
        self.ui.build(indexes=[self.wacsi])
        self.sb = connections['whoosh'].get_backend()
        connections['whoosh']._index = self.ui
        # Stow.
        import haystack
        self.sb.setup()
        self.sqs = SearchQuerySet('whoosh')
        # Wipe it clean.
        self.sqs.query.backend.clear()
        self.wacsi.update(using='whoosh')
    def tearDown(self):
        # Restore the stowed unified index so other tests are unaffected.
        connections['whoosh']._index = self.old_ui
        super(LiveWhooshAutocompleteTestCase, self).tearDown()
    def test_autocomplete(self):
        """A prefix should match every document containing it."""
        autocomplete = self.sqs.autocomplete(text_auto='mod')
        self.assertEqual(autocomplete.count(), 5)
        self.assertEqual([result.pk for result in autocomplete], [u'1', u'12', u'6', u'7', u'14'])
        self.assertTrue('mod' in autocomplete[0].text.lower())
        self.assertTrue('mod' in autocomplete[1].text.lower())
        self.assertTrue('mod' in autocomplete[2].text.lower())
        self.assertTrue('mod' in autocomplete[3].text.lower())
        self.assertTrue('mod' in autocomplete[4].text.lower())
        self.assertEqual(len([result.pk for result in autocomplete]), 5)
    def test_edgengram_regression(self):
        """A non-prefix fragment must not match (regression guard)."""
        autocomplete = self.sqs.autocomplete(text_auto='ngm')
        self.assertEqual(autocomplete.count(), 0)
    def test_extra_whitespace(self):
        """Trailing whitespace in the query should not change the results."""
        autocomplete = self.sqs.autocomplete(text_auto='mod ')
        self.assertEqual(autocomplete.count(), 5)
class WhooshRoundTripSearchIndex(indexes.SearchIndex, indexes.Indexable):
    """Index exercising one field of every supported type so values can be
    round-tripped through Whoosh and compared on the way back out."""
    text = indexes.CharField(document=True, default='')
    name = indexes.CharField()
    is_active = indexes.BooleanField()
    post_count = indexes.IntegerField()
    average_rating = indexes.FloatField()
    price = indexes.DecimalField()
    pub_date = indexes.DateField()
    created = indexes.DateTimeField()
    tags = indexes.MultiValueField()
    sites = indexes.MultiValueField()
    # For a regression involving lists with nothing in them.
    empty_list = indexes.MultiValueField()
    def get_model(self):
        return MockModel
    def prepare(self, obj):
        """Override every prepared value with fixed, known data so tests can
        assert exact round-trip results regardless of the source object."""
        prepped = super(WhooshRoundTripSearchIndex, self).prepare(obj)
        prepped.update({
            'text': 'This is some example text.',
            'name': 'Mister Pants',
            'is_active': True,
            'post_count': 25,
            'average_rating': 3.6,
            'price': Decimal('24.99'),
            'pub_date': date(2009, 11, 21),
            'created': datetime(2009, 11, 21, 21, 31, 00),
            'tags': ['staff', 'outdoor', 'activist', 'scientist'],
            'sites': [3, 5, 1],
            'empty_list': [],
        })
        return prepped
@override_settings(DEBUG=True)
class LiveWhooshRoundTripTestCase(WhooshTestCase):
    """Round-trips every field type through the file-backed Whoosh index and
    checks the values that come back out."""
    def setUp(self):
        super(LiveWhooshRoundTripTestCase, self).setUp()
        # Stow.
        self.old_ui = connections['whoosh'].get_unified_index()
        self.ui = UnifiedIndex()
        self.wrtsi = WhooshRoundTripSearchIndex()
        self.ui.build(indexes=[self.wrtsi])
        self.sb = connections['whoosh'].get_backend()
        connections['whoosh']._index = self.ui
        self.sb.setup()
        self.raw_whoosh = self.sb.index
        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
        self.sb.delete_index()
        self.sqs = SearchQuerySet('whoosh')
        # Wipe it clean.
        self.sqs.query.backend.clear()
        # Fake indexing.
        mock = MockModel()
        mock.id = 1
        self.sb.update(self.wrtsi, [mock])
    def tearDown(self):
        super(LiveWhooshRoundTripTestCase, self).tearDown()
    def test_round_trip(self):
        """Each stored field should come back with the expected (sometimes
        coerced) value; note decimals and multi-value ints return as strings."""
        results = self.sqs.filter(id='core.mockmodel.1')
        # Sanity check.
        self.assertEqual(results.count(), 1)
        # Check the individual fields.
        result = results[0]
        self.assertEqual(result.id, 'core.mockmodel.1')
        self.assertEqual(result.text, 'This is some example text.')
        self.assertEqual(result.name, 'Mister Pants')
        self.assertEqual(result.is_active, True)
        self.assertEqual(result.post_count, 25)
        self.assertEqual(result.average_rating, 3.6)
        self.assertEqual(result.price, u'24.99')
        self.assertEqual(result.pub_date, datetime(2009, 11, 21, 0, 0))
        self.assertEqual(result.created, datetime(2009, 11, 21, 21, 31, 00))
        self.assertEqual(result.tags, ['staff', 'outdoor', 'activist', 'scientist'])
        self.assertEqual(result.sites, [u'3', u'5', u'1'])
        self.assertEqual(result.empty_list, [])
        # Check boolean filtering...
        results = self.sqs.filter(id='core.mockmodel.1', is_active=True)
        self.assertEqual(results.count(), 1)
@override_settings(DEBUG=True)
class LiveWhooshRamStorageTestCase(TestCase):
    """Same round-trip as the file-backed case, but with Whoosh's RAM storage
    backend swapped in via the ``STORAGE`` connection setting."""
    def setUp(self):
        super(LiveWhooshRamStorageTestCase, self).setUp()
        # Stow.
        self.old_whoosh_storage = settings.HAYSTACK_CONNECTIONS['whoosh'].get('STORAGE', 'file')
        settings.HAYSTACK_CONNECTIONS['whoosh']['STORAGE'] = 'ram'
        self.old_ui = connections['whoosh'].get_unified_index()
        self.ui = UnifiedIndex()
        self.wrtsi = WhooshRoundTripSearchIndex()
        self.ui.build(indexes=[self.wrtsi])
        self.sb = connections['whoosh'].get_backend()
        connections['whoosh']._index = self.ui
        # Stow.
        import haystack
        self.sb.setup()
        self.raw_whoosh = self.sb.index
        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
        self.sqs = SearchQuerySet('whoosh')
        # Wipe it clean.
        self.sqs.query.backend.clear()
        # Fake indexing.
        mock = MockModel()
        mock.id = 1
        self.sb.update(self.wrtsi, [mock])
    def tearDown(self):
        # Clear the RAM index and restore the stowed storage setting/index.
        self.sqs.query.backend.clear()
        settings.HAYSTACK_CONNECTIONS['whoosh']['STORAGE'] = self.old_whoosh_storage
        connections['whoosh']._index = self.old_ui
        super(LiveWhooshRamStorageTestCase, self).tearDown()
    def test_ram_storage(self):
        """Field values should round-trip through RAM storage exactly as they
        do through file storage."""
        results = self.sqs.filter(id='core.mockmodel.1')
        # Sanity check.
        self.assertEqual(results.count(), 1)
        # Check the individual fields.
        result = results[0]
        self.assertEqual(result.id, 'core.mockmodel.1')
        self.assertEqual(result.text, 'This is some example text.')
        self.assertEqual(result.name, 'Mister Pants')
        self.assertEqual(result.is_active, True)
        self.assertEqual(result.post_count, 25)
        self.assertEqual(result.average_rating, 3.6)
        self.assertEqual(result.pub_date, datetime(2009, 11, 21, 0, 0))
        self.assertEqual(result.created, datetime(2009, 11, 21, 21, 31, 00))
        self.assertEqual(result.tags, ['staff', 'outdoor', 'activist', 'scientist'])
        self.assertEqual(result.sites, [u'3', u'5', u'1'])
        self.assertEqual(result.empty_list, [])
| antonyr/django-haystack | test_haystack/whoosh_tests/test_whoosh_backend.py | Python | bsd-3-clause | 45,377 |
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""This package implements a very simple Qt GUI that can load a
pipeline, change its parameters based on aliases, and execute them on
the spreadsheet."""
from __future__ import division
identifier = 'org.vistrails.vistrails.pipelineedit'
name = 'Pipeline Editor'
version = '0.0.2'
old_identifiers = ['edu.utah.sci.vistrails.pipelineedit']
| VisTrails/VisTrails | vistrails/packages/pipelineEdit/__init__.py | Python | bsd-3-clause | 2,254 |
import base64
import json
import os
import pickle
from Crypto import Random
from Crypto.Cipher import AES
from cumulusci.core.config import BaseConfig
from cumulusci.core.config import ConnectedAppOAuthConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.config import ScratchOrgConfig
from cumulusci.core.config import ServiceConfig
from cumulusci.core.exceptions import OrgNotFound
from cumulusci.core.exceptions import ServiceNotConfigured
from cumulusci.core.exceptions import ServiceNotValid
from cumulusci.core.exceptions import KeychainConnectedAppNotFound
class BaseProjectKeychain(BaseConfig):
    """ In-memory keychain of org, service, and connected-app configs for a
    CumulusCI project.

    Subclasses add persistence and/or encryption by overriding
    ``_load_keychain`` and the ``_set_*``/``_get_*`` hook methods; this base
    class keeps everything in the plain ``self.config`` dict.
    """
    encrypted = False

    def __init__(self, project_config, key):
        super(BaseProjectKeychain, self).__init__()
        self.config = {
            'orgs': {},
            'app': None,
            'services': {},
        }
        self.project_config = project_config
        self.key = key
        self._load_keychain()

    def _load_keychain(self):
        """ Subclasses can override to implement logic to load the keychain """
        pass

    def change_key(self, key):
        """ re-encrypt stored services, orgs, and the connected_app
        with the new key """
        # Read everything out while the old key is still active...
        connected_app = self.get_connected_app()
        services = {}
        for service_name in self.list_services():
            services[service_name] = self.get_service(service_name)
        orgs = {}
        for org_name in self.list_orgs():
            orgs[org_name] = self.get_org(org_name)
        # ...then swap the key and write everything back through the normal
        # setters so subclasses re-encrypt/persist with the new key.
        self.key = key
        if connected_app:
            self.set_connected_app(connected_app)
        for org_name, org_config in orgs.items():
            self.set_org(org_name, org_config)
        for service_name, service_config in services.items():
            self.set_service(service_name, service_config)

    def set_connected_app(self, app_config, project=False):
        """ store a connected_app configuration """
        self._set_connected_app(app_config, project)
        self._load_keychain()

    def _set_connected_app(self, app_config, project):
        self.app = app_config

    def get_connected_app(self):
        """ retrieve the connected app configuration """
        return self._get_connected_app()

    def _get_connected_app(self):
        return self.app

    def set_org(self, name, org_config, global_org=False):
        """ store an org configuration under ``name`` """
        if isinstance(org_config, ScratchOrgConfig):
            # Tag scratch orgs so they can be rehydrated as the right class.
            org_config.config['scratch'] = True
        self._set_org(name, org_config, global_org)
        self._load_keychain()

    def _set_org(self, name, org_config, global_org):
        self.orgs[name] = org_config

    def get_default_org(self):
        """ retrieve the name and configuration of the default org """
        for org in self.list_orgs():
            org_config = self.get_org(org)
            if org_config.default:
                return org, org_config
        return None, None

    def set_default_org(self, name):
        """ set the default org for tasks by name key """
        org = self.get_org(name)
        self.unset_default_org()
        org.config['default'] = True
        self.set_org(name, org)

    def unset_default_org(self):
        """ unset the default orgs for tasks """
        for org in self.list_orgs():
            org_config = self.get_org(org)
            if org_config.default:
                del org_config.config['default']
                self.set_org(org, org_config)

    def get_org(self, name):
        """ retrieve an org configuration by name key """
        if name not in self.orgs:
            self._raise_org_not_found(name)
        return self._get_org(name)

    def _get_org(self, name):
        return self.orgs.get(name)

    def _raise_org_not_found(self, name):
        raise OrgNotFound('Org named {} was not found in keychain'.format(name))

    def list_orgs(self):
        """ list the orgs configured in the keychain, sorted by name """
        # sorted() returns a list on both Python 2 and 3; dict.keys() is a
        # view on Python 3 and has no .sort() method.
        return sorted(self.orgs)

    def set_service(self, name, service_config, project=False):
        """ Store a ServiceConfig in the keychain """
        if name not in self.project_config.services:
            self._raise_service_not_valid(name)
        self._validate_service(name, service_config)
        self._set_service(name, service_config, project)
        self._load_keychain()

    def _set_service(self, name, service_config, project):
        self.services[name] = service_config

    def get_service(self, name):
        """ Retrieve a stored ServiceConfig from the keychain or exception

        :param name: the service name to retrieve
        :type name: str

        :rtype ServiceConfig
        :return the configured Service
        """
        if name not in self.project_config.services:
            self._raise_service_not_valid(name)
        if name not in self.services:
            self._raise_service_not_configured(name)
        return self._get_service(name)

    def _get_service(self, name):
        return self.services.get(name)

    def _validate_service(self, name, service_config):
        """ Raise ServiceNotValid if a required service attribute is unset. """
        missing_required = []
        attr_key = 'services__{0}__attributes'.format(name)
        # .items() works on Python 2 and 3 (iteritems is Python 2 only).
        for atr, config in getattr(self.project_config, attr_key).items():
            if config.get('required') is True and not getattr(service_config, atr):
                missing_required.append(atr)
        if missing_required:
            self._raise_service_not_valid(name)

    def _raise_service_not_configured(self, name):
        raise ServiceNotConfigured(
            'Service named {} is not configured for this project'.format(name)
        )

    def _raise_service_not_valid(self, name):
        raise ServiceNotValid('Service named {} is not valid for this project'.format(name))

    def list_services(self):
        """ list the services configured in the keychain, sorted by name """
        return sorted(self.services)
class EnvironmentProjectKeychain(BaseProjectKeychain):
    """ A project keychain that stores org credentials in environment variables """
    encrypted = False
    #: env var holding the connected-app JSON config
    app_var = 'CUMULUSCI_CONNECTED_APP'
    #: env var prefixes; the remainder of the var name is the org/service name
    org_var_prefix = 'CUMULUSCI_ORG_'
    service_var_prefix = 'CUMULUSCI_SERVICE_'

    def _load_keychain(self):
        self._load_keychain_app()
        self._load_keychain_orgs()
        self._load_keychain_services()

    def _load_keychain_app(self):
        """ Load the connected app config from its env var, if present. """
        app = os.environ.get(self.app_var)
        if app:
            self.app = ConnectedAppOAuthConfig(json.loads(app))

    def _load_keychain_orgs(self):
        """ Load each CUMULUSCI_ORG_* env var as an org config. """
        for key, value in os.environ.items():
            if key.startswith(self.org_var_prefix):
                # Parse once and reuse (the original re-parsed the JSON for
                # each constructor call).
                org_config = json.loads(value)
                org_name = key[len(self.org_var_prefix):]
                if org_config.get('scratch'):
                    self.orgs[org_name] = ScratchOrgConfig(org_config)
                else:
                    self.orgs[org_name] = OrgConfig(org_config)

    def _load_keychain_services(self):
        """ Load each CUMULUSCI_SERVICE_* env var as a service config. """
        for key, value in os.environ.items():
            if key.startswith(self.service_var_prefix):
                self.services[key[len(self.service_var_prefix):]] = ServiceConfig(json.loads(value))
# AES block size in bytes; plaintext must be padded to a multiple of it
# before CBC encryption.
BS = 16


def pad(s):
    """Apply PKCS#7-style padding: append N copies of chr(N) so the result
    length is a multiple of ``BS``.

    N is always in 1..BS, so a full block of padding is added when the input
    is already block-aligned — this keeps unpadding unambiguous.
    """
    n = BS - len(s) % BS
    return s + n * chr(n)


def unpad(s):
    """Strip the padding added by ``pad`` (the last character encodes the
    padding length)."""
    return s[0:-ord(s[-1])]
class BaseEncryptedProjectKeychain(BaseProjectKeychain):
    """ Base class for building project keychains that use AES encryption for securing stored org credentials """
    encrypted = True

    def _set_connected_app(self, app_config, project):
        encrypted = self._encrypt_config(app_config)
        self._set_encrypted_connected_app(encrypted, project)

    def _set_encrypted_connected_app(self, encrypted, project):
        self.app = encrypted

    def _get_connected_app(self):
        if self.app:
            return self._decrypt_config(ConnectedAppOAuthConfig, self.app)

    def _get_service(self, name):
        return self._decrypt_config(ServiceConfig, self.services[name])

    def _set_service(self, service, service_config, project):
        encrypted = self._encrypt_config(service_config)
        self._set_encrypted_service(service, encrypted, project)

    def _set_encrypted_service(self, service, encrypted, project):
        self.services[service] = encrypted

    def _set_org(self, name, org_config, global_org):
        encrypted = self._encrypt_config(org_config)
        self._set_encrypted_org(name, encrypted, global_org)

    def _set_encrypted_org(self, name, encrypted, global_org):
        self.orgs[name] = encrypted

    def _get_org(self, name):
        return self._decrypt_config(OrgConfig, self.orgs[name])

    def _get_cipher(self, iv=None):
        """ Build an AES-CBC cipher keyed with self.key; a fresh random IV is
        generated when none is supplied (i.e. when encrypting). """
        if iv is None:
            iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return cipher, iv

    def _encrypt_config(self, config):
        """ Pickle, pad, and encrypt a config's dict; returns base64 of
        IV + ciphertext. """
        pickled = pickle.dumps(config.config)
        pickled = pad(pickled)
        cipher, iv = self._get_cipher()
        encrypted = base64.b64encode(iv + cipher.encrypt(pickled))
        return encrypted

    def _decrypt_config(self, config_class, encrypted_config):
        """ Decrypt a blob produced by _encrypt_config and rehydrate it as
        config_class (or ScratchOrgConfig if it was flagged as scratch). """
        if not encrypted_config:
            return config_class()
        encrypted_config = base64.b64decode(encrypted_config)
        # Layout is IV (16 bytes) followed by the ciphertext.
        iv = encrypted_config[:16]
        cipher, iv = self._get_cipher(iv)
        pickled = cipher.decrypt(encrypted_config[16:])
        # pickle stops reading at the end of the pickled payload, so the
        # trailing AES padding bytes are ignored and unpad() is unnecessary.
        config_dict = pickle.loads(pickled)
        if config_dict.get('scratch'):
            config_class = ScratchOrgConfig
        # Reuse the already-unpickled dict instead of unpickling twice.
        return config_class(config_dict)
class EncryptedFileProjectKeychain(BaseEncryptedProjectKeychain):
    """ An encrypted project keychain that stores in the project's local directory """

    @property
    def config_local_dir(self):
        # Global (per-user) config directory, e.g. ~/.cumulusci.
        return os.path.join(
            os.path.expanduser('~'),
            self.project_config.global_config_obj.config_local_dir,
        )

    @property
    def project_local_dir(self):
        # Per-project config directory; may be None outside a project.
        return self.project_config.project_local_dir

    def _load_keychain(self):
        """ Load encrypted blobs from the global dir, then let the project
        dir override them (project files are loaded last). """
        def load_files(dirname):
            for item in os.listdir(dirname):
                if item.endswith('.org'):
                    with open(os.path.join(dirname, item), 'r') as f_item:
                        org_config = f_item.read()
                    org_name = item.replace('.org', '')
                    self.config['orgs'][org_name] = org_config
                elif item.endswith('.service'):
                    with open(os.path.join(dirname, item), 'r') as f_item:
                        service_config = f_item.read()
                    service_name = item.replace('.service', '')
                    self.config['services'][service_name] = service_config
                elif item == 'connected.app':
                    with open(os.path.join(dirname, item), 'r') as f_item:
                        app_config = f_item.read()
                    self.config['app'] = app_config
        load_files(self.config_local_dir)
        if not self.project_local_dir:
            return
        load_files(self.project_local_dir)

    def _set_encrypted_connected_app(self, encrypted, project):
        if project:
            filename = os.path.join(self.project_local_dir, 'connected.app')
        else:
            filename = os.path.join(self.config_local_dir, 'connected.app')
        with open(filename, 'w') as f_org:
            f_org.write(encrypted)
        self.app = encrypted

    def _set_encrypted_org(self, name, encrypted, global_org):
        if global_org:
            filename = os.path.join(self.config_local_dir, '{}.org'.format(name))
        else:
            filename = os.path.join(self.project_local_dir, '{}.org'.format(name))
        with open(filename, 'w') as f_org:
            f_org.write(encrypted)

    def _set_encrypted_service(self, name, encrypted, project):
        if project:
            filename = os.path.join(self.project_local_dir, '{}.service'.format(name))
        else:
            filename = os.path.join(self.config_local_dir, '{}.service'.format(name))
        with open(filename, 'w') as f_service:
            f_service.write(encrypted)

    def _raise_org_not_found(self, name):
        raise OrgNotFound(
            'Org information could not be found. Expected to find encrypted file at {}/{}.org'.format(
                self.project_local_dir,
                name
            )
        )

    def _raise_service_not_configured(self, name):
        # Fixed: the message previously pointed at a ``.org`` file; services
        # are stored in ``.service`` files (see _set_encrypted_service).
        raise ServiceNotConfigured(
            'Service configuration could not be found. Expected to find encrypted file at {}/{}.service'.format(
                self.project_local_dir,
                name
            )
        )
| Joble/CumulusCI | cumulusci/core/keychain.py | Python | bsd-3-clause | 12,877 |
# encoding: utf-8
"""
Admin interface for the sphinxdoc app.
"""
from django.contrib import admin
from sphinxdoc.models import Project, Document
class ProjectAdmin(admin.ModelAdmin):
    """Admin interface for :class:`~sphinxdoc.models.Project`."""
    list_display = ('name', 'path',)
    # Auto-fill the slug from the name as it is typed in the admin form.
    prepopulated_fields = {'slug': ('name',)}
class DocumentAdmin(admin.ModelAdmin):
    """
    Admin interface for :class:`~sphinxdoc.models.Document`.

    Normally, you shouldn’t need this, since you create new documents via
    the management command.
    """
    # Intentionally empty: default ModelAdmin behaviour is sufficient.
    pass
# Hook both models into the default admin site.
admin.site.register(Project, ProjectAdmin)
admin.site.register(Document, DocumentAdmin)
| yawd/django-sphinxdoc | sphinxdoc/admin.py | Python | bsd-3-clause | 659 |
# -*- coding:utf-8 -*-
import sys
import config ,util ,logging ,log,downloader
import xiami as xm
import netease
import re
from threadpool import ThreadPool
from time import sleep
from os import path
from threadpool import Terminate_Watcher
from proxypool import ProxyPool
LOG = log.get_logger("zxLogger")
dl_songs = []
total = 0
done = 0
fmt_parsing = u'解析: "%s" ..... [%s] %s'
fmt_has_song_nm = u'包含%d首歌曲.'
fmt_single_song = u'[曲目] %s'
border = log.hl(u'%s'% ('='*90), 'cyan')
pat_xm = r'^https?://[^/.]*\.xiami\.com/'
pat_163 = r'^https?://music\.163\.com/'
#proxypool
ppool = None
def shall_I_begin(in_str, is_file=False, is_hq=False, need_proxy_pool = False):
    """Entry point: parse ``in_str`` (a single url, or a file of urls when
    ``is_file`` is True), collect songs into the module-level ``dl_songs``
    list, then hand them to the downloader.

    ``is_hq`` requests high-quality audio; ``need_proxy_pool`` initialises a
    shared proxy pool used by both site clients.
    """
    #start terminate_watcher
    Terminate_Watcher()
    global ppool
    if need_proxy_pool:
        LOG.info(u'初始化proxy pool')
        ppool = ProxyPool()
        LOG.info(u'proxy pool:[%d] 初始完毕'%len(ppool.proxies))
    #xiami obj
    xiami_obj = xm.Xiami(config.XIAMI_LOGIN_EMAIL,\
            config.XIAMI_LOGIN_PASSWORD, \
            is_hq,proxies=ppool)
    #netease obj
    m163 = netease.Netease(is_hq, proxies=ppool)
    if is_file:
        from_file(xiami_obj, m163,in_str)
    elif re.match(pat_xm, in_str):
        from_url_xm(xiami_obj, in_str)
    elif re.match(pat_163, in_str):
        from_url_163(m163, in_str)
    print border
    if len(dl_songs):
        # Brief pause so the user can read the summary before downloading.
        LOG.info(u' 下载任务总数: %d \n 3秒后开始下载' % len(dl_songs))
        sleep(3)
        downloader.start_download(dl_songs)
    else:
        LOG.warning(u' 没有可下载任务,自动退出.')
def from_url_163(m163, url, verbose=True):
    """ parse the input string (163 url), and do download

    Dispatches on the url path (song / album / playlist / artist top songs),
    appends the parsed songs to the module-level ``dl_songs`` list, and logs
    a summary; with ``verbose`` each track is listed individually.
    """
    LOG.debug('processing 163 url: "%s"'% url)
    msg = u''
    if '/song?id=' in url:
        song =netease.NeteaseSong(m163,url=url)
        dl_songs.append(song)
        msg = fmt_parsing % (m163_url_abbr(url),u'曲目', song.song_name)
    elif '/album?id=' in url:
        album = netease.NeteaseAlbum(m163, url)
        dl_songs.extend(album.songs)
        msgs = [fmt_parsing % (m163_url_abbr(url),u'专辑', album.artist_name+u' => '+album.album_name)]
        if verbose:
            # One line per track, joined with an indented arrow.
            for s in album.songs:
                msgs.append(fmt_single_song %s.song_name)
            msg = u'\n |-> '.join(msgs)
        else:
            # Compact form: just the song count.
            msgs.append(fmt_has_song_nm % len(album.songs))
            msg= u' => '.join(msgs)
    elif '/playlist?id=' in url:
        playlist = netease.NeteasePlayList(m163, url)
        dl_songs.extend(playlist.songs)
        msgs = [ fmt_parsing % (m163_url_abbr(url),u'歌单',playlist.playlist_name)]
        if verbose:
            for s in playlist.songs:
                msgs.append( fmt_single_song % s.song_name)
            msg = u'\n |-> '.join(msgs)
        else:
            msgs.append(fmt_has_song_nm % len(playlist.songs))
            msg= u' => '.join(msgs)
    elif '/artist?id=' in url:
        topsong= netease.NeteaseTopSong(m163, url)
        dl_songs.extend(topsong.songs)
        msgs = [fmt_parsing % (m163_url_abbr(url), u'艺人热门歌曲',topsong.artist_name)]
        if verbose:
            for s in topsong.songs:
                msgs.append(fmt_single_song %s.song_name)
            msg = u'\n |-> '.join(msgs)
        else:
            msgs.append( fmt_has_song_nm % len(topsong.songs))
            msg = u' => '.join(msgs)
    global total, done
    done +=1
    # Prefix batch progress ([done/total]) only in file (non-verbose) mode.
    pre = ('[%d/%d] ' % (done, total)) if not verbose else ''
    if not msg:
        #unknown url
        LOG.error(u'%s [易]不能识别的url [%s].' % (pre,url))
    else:
        LOG.info(u'%s%s'% (pre,msg))
def from_url_xm(xm_obj, url, verbose=True):
    """ parse the input string (xiami url), and do download

    Dispatches on the url path (collection / song / album / user favorites /
    artist top songs), appends the parsed songs to the module-level
    ``dl_songs`` list, and logs a summary; with ``verbose`` each track is
    listed individually.
    """
    LOG.debug('processing xiami url: "%s"'% url)
    msg = u''
    if '/collect/' in url:
        collect = xm.Collection(xm_obj, url)
        dl_songs.extend(collect.songs)
        msgs = [ fmt_parsing % (xiami_url_abbr(url),u'精选集',collect.collection_name)]
        if verbose:
            # One line per track, joined with an indented arrow.
            for s in collect.songs:
                msgs.append( fmt_single_song % s.song_name)
            msg = u'\n |-> '.join(msgs)
        else:
            # Compact form: just the song count.
            msgs.append(fmt_has_song_nm % len(collect.songs))
            msg= u' => '.join(msgs)
    elif '/song/' in url:
        song = xm.XiamiSong(xm_obj, url=url)
        dl_songs.append(song)
        msg = fmt_parsing % (xiami_url_abbr(url),u'曲目', song.song_name)
    elif '/album/' in url:
        album = xm.Album(xm_obj, url)
        dl_songs.extend(album.songs)
        msgs = [fmt_parsing % (xiami_url_abbr(url),u'专辑', album.artist_name+u' => '+album.album_name)]
        if verbose:
            for s in album.songs:
                msgs.append(fmt_single_song %s.song_name)
            msg = u'\n |-> '.join(msgs)
        else:
            msgs.append(fmt_has_song_nm % len(album.songs))
            msg= u' => '.join(msgs)
    elif '/lib-song/u/' in url:
        if verbose:
            # Warn that parsing large user favorites can take a while.
            LOG.warning(u'[虾]如用户收藏较多,解析歌曲需要较长时间,请耐心等待')
        fav = xm.Favorite(xm_obj, url, verbose)
        dl_songs.extend(fav.songs)
        msgs = [fmt_parsing % (xiami_url_abbr(url), u'用户收藏','')]
        if verbose:
            for s in fav.songs:
                msgs.append(fmt_single_song %s.song_name)
            msg = u'\n |-> '.join(msgs)
        else:
            msgs.append( fmt_has_song_nm % len(fav.songs))
            msg = u' => '.join(msgs)
    elif re.search(r'/artist/top/id/\d+', url):
        topsong=xm.TopSong(xm_obj, url)
        dl_songs.extend(topsong.songs)
        msgs = [fmt_parsing % (xiami_url_abbr(url), u'艺人热门歌曲',topsong.artist_name)]
        if verbose:
            for s in topsong.songs:
                msgs.append(fmt_single_song %s.song_name)
            msg = u'\n |-> '.join(msgs)
        else:
            msgs.append( fmt_has_song_nm % len(topsong.songs))
            msg = u' => '.join(msgs)
    global total, done
    done +=1
    # Prefix batch progress ([done/total]) only in file (non-verbose) mode.
    pre = ('[%d/%d] ' % (done, total)) if not verbose else ''
    if not msg:
        #unknown url
        LOG.error(u'%s [虾]不能识别的url [%s].' % (pre,url))
    else:
        LOG.info(u'%s%s'% (pre,msg))
def from_file(xm_obj,m163, infile):
    """ download objects (songs, albums...) from an input file.

    Reads ``infile`` line by line, dispatches each url to the matching site
    parser (xiami or netease) on a thread pool, and waits for all parsing to
    finish before returning.
    """
    urls = []
    with open(infile) as f:
        urls = f.readlines()
    global total, done
    total = len(urls)
    print border
    LOG.info(u' 文件包含链接总数: %d' % total)
    print border
    pool = ThreadPool(config.THREAD_POOL_SIZE)
    for link in [u for u in urls if u]:
        link = link.rstrip('\n')
        # verbose=False -> parsers log compact one-line summaries with
        # [done/total] progress prefixes.
        if re.match(pat_xm, link):
            pool.add_task(from_url_xm, xm_obj,link, verbose=False)
        elif re.match(pat_163, link):
            pool.add_task(from_url_163, m163,link, verbose=False)
        else:
            LOG.warning(u' 略过不能识别的url [%s].' % link)
    pool.wait_completion()
def xiami_url_abbr(url):
    # Replace the xiami host prefix with a short "[虾]" (xiami) tag for logs.
    return re.sub(pat_xm,u'[虾] ',url)
def m163_url_abbr(url):
    # Replace the netease host prefix with a short "[易]" (163) tag for logs.
    return re.sub(pat_163,u'[易] ',url)
| yosef-gao/zhuaxia | zhuaxia/commander.py | Python | mit | 7,171 |
"""Parsimonious's public API. Import from here.
Things may move around in modules deeper than this one.
"""
from parsimonious.exceptions import (ParseError, IncompleteParseError,
VisitationError, UndefinedLabel,
BadGrammar)
from parsimonious.grammar import Grammar, TokenGrammar
from parsimonious.nodes import NodeVisitor, VisitationError, rule
| erikrose/parsimonious | parsimonious/__init__.py | Python | mit | 421 |
import autocomplete_light
from cities_light.models import City
# Register City for autocompletion; matches are looked up against the
# precomputed ``search_names`` field, with a placeholder in the input widget.
autocomplete_light.register(City, search_fields=('search_names',),
    autocomplete_js_attributes={'placeholder': 'city name ..'})
| spookylukey/django-autocomplete-light | test_project/fk_autocomplete/autocomplete_light_registry.py | Python | mit | 196 |
import io
import shutil
import pytest
from unittest import mock
from mitmproxy.test import tflow
from mitmproxy.test import taddons
from mitmproxy.test import tutils
from mitmproxy.addons import dumper
from mitmproxy import exceptions
from mitmproxy import http
def test_configure():
    """Setting ``dumper_filter`` installs a flow filter, clearing it removes
    the filter, and an invalid expression raises without installing one."""
    d = dumper.Dumper()
    with taddons.context(d) as ctx:
        ctx.configure(d, dumper_filter="~b foo")
        assert d.filter
        f = tflow.tflow(resp=True)
        assert not d.match(f)
        # "~b foo" matches on body content.
        f.response.content = b"foo"
        assert d.match(f)
        ctx.configure(d, dumper_filter=None)
        assert not d.filter
        # "~~" is not a valid filter expression.
        with pytest.raises(exceptions.OptionsError):
            ctx.configure(d, dumper_filter="~~")
        assert not d.filter
def test_simple():
    """Dumper output at increasing ``flow_detail`` levels: silent at 0,
    produces output at 1+, and shows response bodies ("<<") at 4.

    NOTE(review): the pattern here is ``sio.truncate(0)`` without a
    ``seek(0)`` — the asserts only check non-emptiness/substrings, so this
    appears intentional, but confirm before reusing the pattern.
    """
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        # Level 0: nothing is written.
        ctx.configure(d, flow_detail=0)
        d.response(tflow.tflow(resp=True))
        assert not sio.getvalue()
        sio.truncate(0)
        # Level 1: responses and errors both produce output.
        ctx.configure(d, flow_detail=1)
        d.response(tflow.tflow(resp=True))
        assert sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=1)
        d.error(tflow.tflow(err=True))
        assert sio.getvalue()
        sio.truncate(0)
        # Level 4: full detail, including the "<<" body marker.
        ctx.configure(d, flow_detail=4)
        d.response(tflow.tflow(resp=True))
        assert sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=4)
        d.response(tflow.tflow(resp=True))
        assert "<<" in sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=4)
        d.response(tflow.tflow(err=True))
        assert "<<" in sio.getvalue()
        sio.truncate(0)
        # Replayed response with no content and a 3xx status still dumps.
        ctx.configure(d, flow_detail=4)
        flow = tflow.tflow()
        flow.request = tutils.treq()
        flow.client_conn = mock.MagicMock()
        flow.client_conn.address[0] = "foo"
        flow.response = tutils.tresp(content=None)
        flow.response.is_replay = True
        flow.response.status_code = 300
        d.response(flow)
        assert sio.getvalue()
        sio.truncate(0)
        # Malformed JSON body must not crash the content view.
        ctx.configure(d, flow_detail=4)
        flow = tflow.tflow(resp=tutils.tresp(content=b"{"))
        flow.response.headers["content-type"] = "application/json"
        flow.response.status_code = 400
        d.response(flow)
        assert sio.getvalue()
        sio.truncate(0)
        # Missing request/response content is reported explicitly.
        ctx.configure(d, flow_detail=4)
        flow = tflow.tflow()
        flow.request.content = None
        flow.response = http.HTTPResponse.wrap(tutils.tresp())
        flow.response.content = None
        d.response(flow)
        assert "content missing" in sio.getvalue()
        sio.truncate(0)
def test_echo_body():
    """Long message bodies are truncated and flagged as "cut off" at detail 3."""
    f = tflow.tflow(client_conn=True, server_conn=True, resp=True)
    f.response.headers["content-type"] = "text/html"
    # 1400 bytes of body, enough to exceed the echo limit
    f.response.content = b"foo bar voing\n" * 100
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3)
        d._echo_message(f.response)
        t = sio.getvalue()
        assert "cut off" in t
def test_echo_request_line():
    """_echo_request_line: replay marker, nonstandard HTTP version, and URL
    truncation to the terminal width at flow_detail 0."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        # replayed requests are tagged with "[replay]"
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        f.request.is_replay = True
        d._echo_request_line(f)
        assert "[replay]" in sio.getvalue()
        sio.truncate(0)
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        f.request.is_replay = False
        d._echo_request_line(f)
        assert "[replay]" not in sio.getvalue()
        sio.truncate(0)
        # a non-HTTP/1.x version string is echoed verbatim
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        f.request.http_version = "nonstandard"
        d._echo_request_line(f)
        assert "nonstandard" in sio.getvalue()
        sio.truncate(0)
        # at detail 0 the URL is clipped to the terminal width, so the
        # suffix past the padding must not appear in the output
        ctx.configure(d, flow_detail=0, showhost=True)
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        terminalWidth = max(shutil.get_terminal_size()[0] - 25, 50)
        f.request.url = "http://address:22/" + ("x" * terminalWidth) + "textToBeTruncated"
        d._echo_request_line(f)
        assert "textToBeTruncated" not in sio.getvalue()
        sio.truncate(0)
class TestContentView:
    """A failing content view must be logged, not propagated to the caller."""

    @pytest.mark.asyncio
    async def test_contentview(self):
        # force the auto content view to raise on every call
        with mock.patch("mitmproxy.contentviews.auto.ViewAuto.__call__") as va:
            va.side_effect = exceptions.ContentViewException("")
            sio = io.StringIO()
            d = dumper.Dumper(sio)
            with taddons.context(d) as ctx:
                ctx.configure(d, flow_detail=4)
                d.response(tflow.tflow())
                # the failure is reported through the master's log
                assert await ctx.master.await_log("content viewer failed")
def test_tcp():
    """TCP messages are echoed and TCP errors produce an error line."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.ttcpflow()
        d.tcp_message(f)
        # the canned ttcpflow payload contains "it's me"
        assert "it's me" in sio.getvalue()
        sio.truncate(0)
        f = tflow.ttcpflow(client_conn=True, err=True)
        d.tcp_error(f)
        assert "Error in TCP" in sio.getvalue()
def test_websocket():
    """WebSocket messages, close events and errors are all reported."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.twebsocketflow()
        d.websocket_message(f)
        # the canned twebsocketflow payload contains "it's me"
        assert "it's me" in sio.getvalue()
        sio.truncate(0)
        d.websocket_end(f)
        assert "WebSocket connection closed by" in sio.getvalue()
        f = tflow.twebsocketflow(client_conn=True, err=True)
        d.websocket_error(f)
        assert "Error in WebSocket" in sio.getvalue()
| ujjwal96/mitmproxy | test/mitmproxy/addons/test_dumper.py | Python | mit | 5,783 |
#!/usr/bin/env python
# coding=utf-8
"""
The `META-INF/container.xml` file, storing:
1. the Rendition objects
2. the Rendition Mapping Document
"""
from yael.element import Element
from yael.jsonable import JSONAble
from yael.mediatype import MediaType
from yael.namespace import Namespace
from yael.rendition import Rendition
from yael.rmdocument import RMDocument
import yael.util
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2015, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "0.0.9"
__email__ = "alberto@albertopettarin.it"
__status__ = "Development"
class Container(Element):
    """
    Build the `META-INF/container.xml` file
    or parse it from `string` or `obj`.

    A parsed container exposes the Rendition objects found in the
    `<rootfile>` elements and, when present, the Rendition Mapping
    Document referenced by a `<link rel="mapping">` element.
    """

    # XML attribute names used in container.xml
    A_ACCESSMODE = "accessMode"
    A_FULL_PATH = "full-path"
    A_HREF = "href"
    A_LABEL = "label"
    A_LANGUAGE = "language"
    A_LAYOUT = "layout"
    A_MEDIA = "media"
    # NOTE: this constant was previously defined twice; defined once here
    A_MEDIA_TYPE = "media-type"
    A_REL = "rel"

    # fully-qualified ({namespace}name) attributes in the rendition namespace
    A_NS_ACCESSMODE = "{{{0}}}{1}".format(Namespace.RENDITION, A_ACCESSMODE)
    A_NS_LABEL = "{{{0}}}{1}".format(Namespace.RENDITION, A_LABEL)
    A_NS_LANGUAGE = "{{{0}}}{1}".format(Namespace.RENDITION, A_LANGUAGE)
    A_NS_LAYOUT = "{{{0}}}{1}".format(Namespace.RENDITION, A_LAYOUT)
    A_NS_MEDIA = "{{{0}}}{1}".format(Namespace.RENDITION, A_MEDIA)

    # XML element names
    E_CONTAINER = "container"
    E_LINK = "link"
    E_ROOTFILE = "rootfile"
    E_ROOTFILES = "rootfiles"

    # known attribute values
    V_ACCESSMODE_AUDITORY = "auditory"
    V_ACCESSMODE_TACTILE = "tactile"
    V_ACCESSMODE_TEXTUAL = "textual"
    V_ACCESSMODE_VISUAL = "visual"
    V_LAYOUT_PRE_PAGINATED = "pre-paginated"
    V_LAYOUT_REFLOWABLE = "reflowable"
    V_REL_MAPPING = "mapping"

    def __init__(self, internal_path=None, obj=None, string=None):
        # initialize collections before Element.__init__, which may trigger
        # parse_object()/parse_string() and populate them
        self.renditions = []
        self.rm_document = None
        Element.__init__(
            self,
            internal_path=internal_path,
            obj=obj,
            string=string)

    def json_object(self, recursive=True):
        """
        Return a JSON-serializable dict describing this Container.

        :param recursive: if True, include full Rendition / RMDocument
                          objects; otherwise only summary values
        :rtype: dict
        """
        obj = {
            "internal_path": self.internal_path,
            "renditions": len(self.renditions),
            # NOTE(review): this flag is True exactly when the mapping
            # document is *absent*, which looks inverted for a presence
            # flag — confirm intent before changing, as consumers may
            # rely on the current output.
            "rm_document": (self.rm_document is None),
        }
        if recursive:
            obj["renditions"] = JSONAble.safe(self.renditions)
            obj["rm_document"] = JSONAble.safe(self.rm_document)
        return obj

    def parse_object(self, obj):
        """
        Parse the given XML object tree, populating self.renditions
        (from `<rootfile>` elements) and self.rm_document (from a
        `<link rel="mapping">` element, if any).

        :raises Exception: if the object cannot be parsed
        """
        try:
            # locate `<container>` element
            container_arr = yael.util.query_xpath(
                obj=obj,
                query="/{0}:{1}",
                args=['c', Container.E_CONTAINER],
                nsp={'c': Namespace.CONTAINER},
                required=Container.E_CONTAINER)
            container = container_arr[0]
            # locate `<rootfile>` elements
            rootfile_arr = yael.util.query_xpath(
                obj=container,
                query="{0}:{1}/{0}:{2}",
                args=['c', Container.E_ROOTFILES, Container.E_ROOTFILE],
                nsp={'c': Namespace.CONTAINER},
                required=None)
            for rootfile in rootfile_arr:
                self._parse_rootfile(rootfile)
            # locate `<link>` optional element
            link_arr = yael.util.query_xpath(
                obj=container,
                query="{0}:{1}",
                args=['c', Container.E_LINK],
                nsp={'c': Namespace.CONTAINER},
                required=None)
            for link in link_arr:
                self._parse_link(link)
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed
        except Exception:
            raise Exception("Error while parsing the given object")

    def add_rendition(self, rendition):
        """
        Add a Rendition to this Container.

        :param rendition: Rendition to be added
        :type rendition: :class:`yael.rendition.Rendition`
        """
        self.renditions.append(rendition)

    @property
    def renditions(self):
        """
        The list of Rendition objects in this Container.

        :rtype: list of :class:`yael.rendition.Rendition`
        """
        return self.__renditions

    @renditions.setter
    def renditions(self, renditions):
        self.__renditions = renditions

    @property
    def rm_document(self):
        """
        The Rendition Mapping Document object in this Container,
        or None if it is not present.

        :rtype: :class:`yael.rmdocument.RMDocument`
        """
        return self.__rm_document

    @rm_document.setter
    def rm_document(self, rm_document):
        self.__rm_document = rm_document

    @property
    def default_rendition(self):
        """
        The Default Rendition object in this Container (the first
        rootfile listed), or None if there are no Renditions.

        :rtype: :class:`yael.rendition.Rendition`
        """
        return yael.util.safe_first(self.renditions)

    def _parse_rootfile(self, obj):
        """
        Parse the given `<rootfile>` node object,
        and append the parsed Rendition to this Container.
        """
        # required attributes: skip the rootfile if either is missing
        full_path = obj.get(Container.A_FULL_PATH)
        media_type = obj.get(Container.A_MEDIA_TYPE)
        if (full_path is not None) and (media_type is not None):
            r_obj = Rendition(internal_path=full_path)
            r_obj.v_full_path = full_path
            r_obj.v_media_type = media_type
            # optional multiple-renditions attributes (rendition namespace)
            r_obj.v_rendition_accessmode = obj.get(Container.A_NS_ACCESSMODE)
            r_obj.v_rendition_label = obj.get(Container.A_NS_LABEL)
            r_obj.v_rendition_language = obj.get(Container.A_NS_LANGUAGE)
            r_obj.v_rendition_layout = obj.get(Container.A_NS_LAYOUT)
            r_obj.v_rendition_media = obj.get(Container.A_NS_MEDIA)
            self.renditions.append(r_obj)

    def _parse_link(self, obj):
        """
        Parse the given `<link>` node object,
        and set this Container's Rendition Mapping Document if the link
        has rel="mapping", an XHTML media type, and an href.
        """
        rel = obj.get(Container.A_REL)
        href = obj.get(Container.A_HREF)
        media_type = obj.get(Container.A_MEDIA_TYPE)
        if ((rel == Container.V_REL_MAPPING) and
                (media_type == MediaType.XHTML) and
                (href is not None)):
            self.rm_document = RMDocument(internal_path=href)
        return None
| takuan-osho/yael | yael/container.py | Python | mit | 6,350 |
import renderdoc as rd
import rdtest
class VK_SPIRV_13_Shaders(rdtest.TestCase):
    """Checks that SPIR-V 1.3 shaders are reflected and disassembled correctly
    and that the captured draw produces the expected vertex/pixel output."""
    demos_test_name = 'VK_SPIRV_13_Shaders'

    def check_capture(self):
        # position the replay at the first "Draw" action in the capture
        action = self.find_action("Draw")
        self.check(action is not None)
        self.controller.SetFrameEvent(action.eventId, False)
        pipe: rd.PipeState = self.controller.GetPipelineState()
        # --- vertex shader reflection: inputs pos/col/uv, outputs opos/outcol ---
        refl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Vertex)
        disasm: str = self.controller.DisassembleShader(pipe.GetGraphicsPipelineObject(), refl, "")
        if (refl.inputSignature[0].varName != 'pos' or refl.inputSignature[0].compCount != 3):
            raise rdtest.TestFailureException("Vertex shader input 'pos' not reflected correctly")
        if (refl.inputSignature[1].varName != 'col' or refl.inputSignature[1].compCount != 4):
            raise rdtest.TestFailureException("Vertex shader input 'col' not reflected correctly")
        if (refl.inputSignature[2].varName != 'uv' or refl.inputSignature[2].compCount != 2):
            raise rdtest.TestFailureException("Vertex shader input 'uv' not reflected correctly")
        if (refl.outputSignature[0].varName != 'opos' or refl.outputSignature[0].compCount != 4 or refl.outputSignature[0].systemValue != rd.ShaderBuiltin.Position):
            raise rdtest.TestFailureException("Vertex shader output 'opos' not reflected correctly")
        if (refl.outputSignature[1].varName != 'outcol' or refl.outputSignature[1].compCount != 4):
            raise rdtest.TestFailureException("Vertex shader output 'outcol' not reflected correctly")
        # the custom entry point name must survive into the disassembly
        if 'vertmain' not in disasm:
            raise rdtest.TestFailureException("Vertex shader disassembly failed, entry point not found")
        # --- fragment shader reflection: input incol, output ocol ---
        refl: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Fragment)
        disasm: str = self.controller.DisassembleShader(pipe.GetGraphicsPipelineObject(), refl, "")
        if (refl.inputSignature[0].varName != 'incol' or refl.inputSignature[0].compCount != 4):
            raise rdtest.TestFailureException("Fragment shader input 'incol' not reflected correctly")
        if (refl.outputSignature[0].varName != 'ocol' or refl.outputSignature[0].compCount != 4 or refl.outputSignature[0].systemValue != rd.ShaderBuiltin.ColorOutput):
            raise rdtest.TestFailureException("Fragment shader output 'ocol' not reflected correctly")
        if 'fragmain' not in disasm:
            raise rdtest.TestFailureException("Fragment shader disassembly failed, entry point not found")
        rdtest.log.success("shader reflection and disassembly as expected")
        # --- post-VS data: one green triangle with the expected positions ---
        postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, action.numIndices)
        postvs_ref = {
            0: {
                'vtx': 0,
                'idx': 0,
                'opos': [-0.5, 0.5, 0.0, 1.0],
                'outcol': [0.0, 1.0, 0.0, 1.0],
            },
            1: {
                'vtx': 1,
                'idx': 1,
                'opos': [0.0, -0.5, 0.0, 1.0],
                'outcol': [0.0, 1.0, 0.0, 1.0],
            },
            2: {
                'vtx': 2,
                'idx': 2,
                'opos': [0.5, 0.5, 0.0, 1.0],
                'outcol': [0.0, 1.0, 0.0, 1.0],
            },
        }
        self.check_mesh_data(postvs_ref, postvs_data)
        rdtest.log.success("vertex output is as expected")
        # the center pixel of the render target must be green
        self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 0.5, 0.5, [0.0, 1.0, 0.0, 1.0])
        rdtest.log.success("picked value is as expected")
| moradin/renderdoc | util/test/tests/Vulkan/VK_SPIRV_13_Shaders.py | Python | mit | 3,571 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import subprocess
import sys
from ....tests.helper import pytest
def test_wcsapi_extension(tmpdir):
    """Build a small C extension against the astropy.wcs C API in a
    subprocess, then import and run it in a second subprocess."""
    # Test that we can build a simple C extension with the astropy.wcs C API
    setup_path = os.path.dirname(__file__)
    astropy_path = os.path.abspath(
        os.path.join(setup_path, '..', '..', '..', '..'))
    # make both the build output dir and the astropy checkout importable
    env = os.environ.copy()
    paths = [str(tmpdir), astropy_path]
    if env.get('PYTHONPATH'):
        paths.append(env.get('PYTHONPATH'))
    env[str('PYTHONPATH')] = str(os.pathsep.join(paths))
    # Build the extension
    # This used to use subprocess.check_call, but on Python 3.4 there was
    # a mysterious Heisenbug causing this to fail with a non-zero exit code
    # *unless* the output is redirected. This bug also did not occur in an
    # interactive session, so it likely had something to do with pytest's
    # output capture
    p = subprocess.Popen([sys.executable, 'setup.py', 'install',
                          '--install-lib={0}'.format(tmpdir),
                          astropy_path], cwd=setup_path, env=env,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Whether the process fails or not this isn't likely to produce a great
    # deal of output so communicate should be fine in almost all cases
    stdout, stderr = p.communicate()
    try:
        stdout, stderr = stdout.decode('utf8'), stderr.decode('utf8')
    except UnicodeDecodeError:
        # Don't try to guess about encoding; just display the text
        stdout, stderr = stdout.decode('latin1'), stderr.decode('latin1')
    # If compilation fails, we can skip this test, since the
    # dependencies necessary to compile an extension may be missing.
    # If it passes, however, we want to continue and ensure that the
    # extension created is actually usable. However, if we're on
    # Travis-CI, or another generic continuous integration setup, we
    # don't want to ever skip, because having it fail in that
    # environment probably indicates something more serious that we
    # want to know about.
    if (not (str('CI') in os.environ or
             str('TRAVIS') in os.environ or
             str('CONTINUOUS_INTEGRATION') in os.environ) and
            p.returncode):
        pytest.skip("system unable to compile extensions")
        return
    assert p.returncode == 0, (
        "setup.py exited with non-zero return code {0}\n"
        "stdout:\n\n{1}\n\nstderr:\n\n{2}\n".format(
            p.returncode, stdout, stderr))
    code = """
    import sys
    import wcsapi_test
    sys.exit(wcsapi_test.test())
    """
    # collapse the snippet onto one line so leading indentation is harmless
    code = code.strip().replace('\n', '; ')
    # Import and run the extension
    subprocess.check_call([sys.executable, '-c', code], env=env)
| piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/wcs/tests/extension/test_extension.py | Python | mit | 2,891 |
class WordDistance(object):
    """Answers shortest index-distance queries between two words of a list.

    Word positions are indexed once in __init__ so each `shortest` query
    only scans the two (usually short) position lists.
    """

    def __init__(self, words):
        """
        Build a word -> sorted list of indices map.

        :type words: List[str]
        """
        self.word_dict = {}
        for idx, w in enumerate(words):
            # setdefault(...).append(...) is O(1) per word; the previous
            # `get(w, []) + [idx]` copied the list on every occurrence
            # (quadratic for frequently repeated words)
            self.word_dict.setdefault(w, []).append(idx)

    def shortest(self, word1, word2):
        """
        Return the smallest absolute index distance between any occurrence
        of word1 and any occurrence of word2.

        :type word1: str
        :type word2: str
        :rtype: int
        :raises KeyError: if either word was not in the original list
        """
        return min(abs(i - j) for i in self.word_dict[word1] for j in self.word_dict[word2])
# Your WordDistance object will be instantiated and called as such:
# wordDistance = WordDistance(words)
# wordDistance.shortest("word1", "word2")
# wordDistance.shortest("anotherWord1", "anotherWord2") | Mlieou/leetcode_python | leetcode/python/ex_244.py | Python | mit | 791 |
import requests
from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.jupyterhub.provider import (
JupyterHubProvider,
)
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
class JupyterHubAdapter(OAuth2Adapter):
    """OAuth2 adapter for a JupyterHub instance.

    The hub's base URL is read from the provider's SOCIALACCOUNT_PROVIDERS
    setting under the "API_URL" key; token/authorize/profile endpoints are
    derived from it.
    """
    provider_id = JupyterHubProvider.id
    settings = app_settings.PROVIDERS.get(provider_id, {})
    provider_base_url = settings.get("API_URL", '')

    # JupyterHub REST API OAuth2 endpoints
    access_token_url = '{0}/hub/api/oauth2/token'.format(provider_base_url)
    authorize_url = '{0}/hub/api/oauth2/authorize'.format(provider_base_url)
    profile_url = '{0}/hub/api/user'.format(provider_base_url)

    def complete_login(self, request, app, access_token, **kwargs):
        """Fetch the authenticated user's profile with the bearer token and
        build a SocialLogin from it."""
        headers = {
            'Authorization': 'Bearer {0}'.format(access_token)
        }
        extra_data = requests.get(self.profile_url, headers=headers)
        user_profile = extra_data.json()
        return self.get_provider().sociallogin_from_response(
            request,
            user_profile
        )
oauth2_login = OAuth2LoginView.adapter_view(JupyterHubAdapter)
oauth2_callback = OAuth2CallbackView.adapter_view(JupyterHubAdapter)
| AltSchool/django-allauth | allauth/socialaccount/providers/jupyterhub/views.py | Python | mit | 1,219 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalPermissionsMixin
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.util.string import format_repr
class CategoryPrincipal(PrincipalPermissionsMixin, db.Model):
    """ACL entry granting a principal (user/group/network/role) permissions
    on a category."""
    __tablename__ = 'principals'
    principal_backref_name = 'in_category_acls'
    principal_for = 'Category'
    # one ACL row per (category, principal) pair
    unique_columns = ('category_id',)
    # IP networks and category roles may appear as principals here
    allow_networks = True
    allow_category_roles = True

    @declared_attr
    def __table_args__(cls):
        # table args come from the mixin chain; table lives in the
        # `categories` schema
        return auto_table_args(cls, schema='categories')

    #: The ID of the acl entry
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: The ID of the associated event
    category_id = db.Column(
        db.Integer,
        db.ForeignKey('categories.categories.id'),
        nullable=False,
        index=True
    )

    # relationship backrefs:
    # - category (Category.acl_entries)

    def __repr__(self):
        return format_repr(self, 'id', 'category_id', 'principal', read_access=False, full_access=False, permissions=[])
| pferreir/indico | indico/modules/categories/models/principals.py | Python | mit | 1,366 |
#!/usr/bin/env python
# $Id: tkhello2.py,v 1.1 2000/02/21 09:04:25 wesc Exp $
#
# tkhello2.py -- "Hello World!" 2 in Tkinter:
# - "Hello World!" with just a button (which quits the app)
#
# created by wesc 00/02/20
#
# import Tkinter module
# import Tkinter module (Python 2 name; `tkinter` on Python 3)
import Tkinter
# create toplevel window
top = Tkinter.Tk()
# create button; command=top.quit exits mainloop() when clicked
quit = Tkinter.Button(top, text='Hello World!', command=top.quit)
# pack button into the window with default geometry
quit.pack()
# enter main loop (blocks until the button is pressed / window closed)
Tkinter.mainloop()
| opensvn/test | src/study/python/cpp/ch19/alt/tkhello2.py | Python | gpl-2.0 | 457 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a `difficulty` float field (default 1.0) to BlockModel."""

    dependencies = [
        ('blocks', '0004_auto_20160305_2025'),
    ]

    operations = [
        migrations.AddField(
            model_name='blockmodel',
            name='difficulty',
            # per help_text the intended range is [-1, 1]; the default 1.0
            # marks existing rows as most difficult
            field=models.FloatField(default=1.0, help_text='real number between -1 (easiest) and 1 (most difficult)'),
        ),
    ]
| effa/flocs | blocks/migrations/0005_blockmodel_difficulty.py | Python | gpl-2.0 | 475 |
# -*- coding: utf-8 -*-
import struct
class GGZChessFeedback:
    """Callback interface for GGZ chess protocol events.

    The Chess decoder invokes these handlers as complete messages arrive.
    Subclass and override the events of interest; every default handler is
    a no-op.
    """

    def onSeat(self, seatNum, version):
        """Seat assignment received (our seat number, protocol version)."""
        return None

    def onPlayers(self, whiteType, whiteName, blackType, blackName):
        """Player list received (seat type code and name per side)."""
        return None

    def onClockRequest(self):
        """Server asked us to choose the clock configuration."""
        return None

    def onClock(self, mode, seconds):
        """Clock configuration received (mode constant, seconds per side)."""
        return None

    def onStart(self):
        """Game start message received."""
        return None

    def onMove(self, move):
        """A move was received from the server."""
        return None
class Chess:
    """Encoder/decoder for the GGZ chess wire protocol (Python 2).

    Messages are a one-byte opcode followed by big-endian packed payloads.
    Incoming bytes are fed one at a time into decode(); the instance
    buffers them in self.command until a full message has arrived, then
    fires the matching callback on the feedback object.
    """
    # clock mode constants (see sendClock/onClock)
    CLOCK_NONE = 0
    CLOCK_CLIENT = 1
    CLOCK_SERVERLAG = 2
    CLOCK_SERVER = 3
    # Player codes (copied from GGZ_SEAT_*)
    GGZ_SEAT_NONE = '\x00' # This seat does not exist */
    GGZ_SEAT_OPEN = '\x01' # The seat is open (unoccupied).
    GGZ_SEAT_BOT = '\x02' # The seat has a bot (AI) in it.
    GGZ_SEAT_PLAYER = '\x03' # The seat has a regular player in it.
    GGZ_SEAT_RESERVED = '\x04' # The seat is reserved for a player.
    GGZ_SEAT_ABANDONED = '\x05' # The seat is abandoned by a player.
    def sendClock(self, mode, seconds):
        """Encode a clock message: [04][mode:8|seconds:24] (big-endian)."""
        return '\x04' + struct.pack('!I', (mode << 24) | (seconds & 0x00FFFFFF))
    def sendMove(self, move, time = None):
        """Encode a move message: [06][len][move], optionally + [time]."""
        cmd = '\x06' + struct.pack('!I', len(move)) + move
        if time is not None:
            cmd += struct.pack('!I', time)
        return cmd
    def sendStart(self):
        """Encode a game-start message: [05]."""
        return '\x05'
    def __init__(self, feedback):
        # feedback receives the on*() callbacks as messages complete
        self.feedback = feedback
        # decoder for the message currently being buffered (None = idle)
        self.decodeMethod = None
        # opcode byte -> decoder method dispatch table
        self.decodeMethods = {'\x01': self.decodeSeat,
                              '\x02': self.decodePlayers,
                              '\x03': self.decodeClockRequest,
                              '\x04': self.decodeClock,
                              '\x05': self.decodeStart,
                              # 6: Move request
                              '\x07': self.decodeMove,
                              '\x08': self.decodeGameEnd,
                              #9: Update request
                              '\x0a': self.decodeUpdate,
                              #11: server time update?
                              #12: self.decodeFlag,
                              '\x0d': self.decodeDraw}
    def decode(self, char):
        """Feed one received byte into the decoder state machine."""
        if self.decodeMethod is None:
            try:
                self.decodeMethod = self.decodeMethods[char]
            except KeyError:
                # unknown opcode: drop the byte and stay idle
                self.decodeMethod = None
                print 'Unknown data received: %s' % repr(char)
                return
            # start buffering a new message (opcode byte included)
            self.command = ''
        self.command += char
        # each decode* method resets self.decodeMethod to None once the
        # message is complete; until then it simply returns and waits
        self.decodeMethod()
    def getGGZString(self, buffer):
        """Decode a 4-byte-length-prefixed string from `buffer`.

        Returns (string, bytes_consumed), or (None, 0) if the buffer does
        not yet contain the complete string."""
        if len(buffer) < 4:
            return (None, 0)
        (length,) = struct.unpack("!I", buffer[:4])
        if len(buffer) < length + 4:
            return (None, 0)
        string = buffer[4:length + 4]
        # Strip C null characters
        while string.endswith('\x00'):
            string = string[:-1]
        return (string, length + 4)
    def decodeSeat(self):
        # seat [01][num][version]
        if len(self.command) == 3:
            self.decodeMethod = None
            (num, version) = struct.unpack('!xBB', self.command)
            self.feedback.onSeat(num, version)
    def decodePlayers(self):
        # players [02][code1][name1(18)][code2][name2(18)]
        # name is omitted if code == 01 (open)
        # requiredLength tracks how many bytes a complete message needs so
        # far; return early (keep buffering) whenever we have fewer
        requiredLength = 2
        if len(self.command) < requiredLength:
            return
        whiteCode = self.command[1]
        if whiteCode == self.GGZ_SEAT_OPEN:
            requiredLength += 1
            whiteName = ''
        else:
            (whiteName, offset) = self.getGGZString(self.command[requiredLength:])
            if whiteName is None:
                return
            requiredLength += 1 + offset
        if len(self.command) < requiredLength:
            return
        blackCode = self.command[requiredLength - 1]
        if blackCode == self.GGZ_SEAT_OPEN:
            blackName = ''
        else:
            (blackName, offset) = self.getGGZString(self.command[requiredLength:])
            if blackName is None:
                return
            requiredLength += offset
        if len(self.command) >= requiredLength:
            self.decodeMethod = None
            self.feedback.onPlayers(whiteCode, whiteName, blackCode, blackName)
    def decodeClockRequest(self):
        # [3]
        self.decodeMethod = None
        self.feedback.onClockRequest()
    def decodeClock(self):
        # [4][mode][seconds] -- mode packed into the top 8 bits
        if len(self.command) == 5:
            (value,) = struct.unpack("!xI", self.command)
            mode = value >> 24
            seconds = value & 0x00FFFFFF
            self.feedback.onClock(mode, seconds)
            self.decodeMethod = None
    def decodeStart(self):
        # [5]
        self.decodeMethod = None
        self.feedback.onStart()
    def decodeMove(self):
        # [07][move(8)]
        # or [07][move(8)][seconds]
        # NOTE(review): the optional trailing seconds field is ignored here
        (move, _) = self.getGGZString(self.command[1:])
        if move is None:
            return
        self.decodeMethod = None
        self.feedback.onMove(move)
    def decodeGameEnd(self):
        # [08][result] -- result byte currently unused
        if len(self.command) == 2:
            self.decodeMethod = None
    def decodeUpdate(self):
        # [0A][wtime][btime] -- clock update, values currently unused
        if len(self.command) == 9:
            self.decodeMethod = None
    def decodeDraw(self):
        # [0D]
        self.decodeMethod = None
| gfunkmonk2/mate-games | glchess/src/lib/ggz/chess.py | Python | gpl-2.0 | 5,544 |
import Image
import numpy
import pylab
import vlfeat
from vlfeat.plotop.vl_plotframe import vl_plotframe
if __name__ == '__main__':
    """ VL_DEMO_SIFT_EDGE Demo: SIFT: edge treshold
    """
    # build a synthetic image of vertical bars with increasing height
    I = numpy.zeros([100, 500])
    for i in 10 * (1+numpy.arange(9)):
        d = numpy.round(i / 3.0)
        I[50-d-1:50+d-1, i * 5-1] = 1
    # smooth with sigma=8 and rescale to 0..255 (Fortran order for vlfeat)
    I = numpy.array(I, 'f', order='F')
    I = 2 * numpy.pi * 8 ** 2 * vlfeat.vl_imsmooth(I, 8)
    I = 255 * I
    I = numpy.array(I, 'f', order='F')
    print 'sift_edge_0'
    # run SIFT with increasing edge thresholds: larger thresholds keep
    # more edge-like keypoints, so more frames survive in each plot
    ter = [3.5, 5, 7.5, 10]
    for te in ter:
        f, d = vlfeat.vl_sift(I, peak_thresh=0.0, edge_thresh=te)
        pylab.figure()
        pylab.gray()
        pylab.imshow(I)
        # draw frames twice (thick black under thin yellow) for visibility
        vl_plotframe(f, color='k', linewidth=3)
        vl_plotframe(f, color='y', linewidth=2)
    pylab.show()
# -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsExpression,
QgsProcessingException,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SelectByAttribute(QgisAlgorithm):
    """Processing algorithm: select features of a vector layer whose
    attribute matches a value under a chosen comparison operator."""

    INPUT = 'INPUT'
    FIELD = 'FIELD'
    OPERATOR = 'OPERATOR'
    VALUE = 'VALUE'
    OUTPUT = 'OUTPUT'

    # canonical operator tokens; indices must line up with i18n_operators
    OPERATORS = ['=',
                 '!=',
                 '>',
                 '>=',
                 '<',
                 '<=',
                 'begins with',
                 'contains',
                 'is null',
                 'is not null',
                 'does not contain'
                 ]
    # operators only valid for string-typed fields
    STRING_OPERATORS = ['begins with',
                        'contains',
                        'does not contain']

    def tags(self):
        return self.tr('select,attribute,value,contains,null,field').split(',')

    def group(self):
        return self.tr('Vector selection')

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        # translated operator labels shown in the UI; order matches OPERATORS
        self.i18n_operators = ['=',
                               '!=',
                               '>',
                               '>=',
                               '<',
                               '<=',
                               self.tr('begins with'),
                               self.tr('contains'),
                               self.tr('is null'),
                               self.tr('is not null'),
                               self.tr('does not contain')
                               ]
        self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT, self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterField(self.FIELD,
                                                      self.tr('Selection attribute'), parentLayerParameterName=self.INPUT))
        self.addParameter(QgsProcessingParameterEnum(self.OPERATOR,
                                                     self.tr('Operator'), self.i18n_operators))
        self.addParameter(QgsProcessingParameterString(self.VALUE, self.tr('Value')))
        self.addOutput(QgsProcessingOutputVectorLayer(self.OUTPUT, self.tr('Selected (attribute)')))

    def name(self):
        return 'selectbyattribute'

    def displayName(self):
        return self.tr('Select by attribute')

    def processAlgorithm(self, parameters, context, feedback):
        """Build a QgsExpression from field/operator/value and select
        matching features on the input layer in place."""
        layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
        fieldName = self.parameterAsString(parameters, self.FIELD, context)
        operator = self.OPERATORS[self.parameterAsEnum(parameters, self.OPERATOR, context)]
        value = self.parameterAsString(parameters, self.VALUE, context)
        fields = layer.fields()
        idx = layer.fields().lookupField(fieldName)
        fieldType = fields[idx].type()
        # LIKE-style operators make no sense on non-string fields
        if fieldType != QVariant.String and operator in self.STRING_OPERATORS:
            op = ''.join(['"%s", ' % o for o in self.STRING_OPERATORS])
            raise QgsProcessingException(
                self.tr('Operators {0} can be used only with string fields.').format(op))
        field_ref = QgsExpression.quotedColumnRef(fieldName)
        quoted_val = QgsExpression.quotedValue(value)
        if operator == 'is null':
            expression_string = '{} IS NULL'.format(field_ref)
        elif operator == 'is not null':
            expression_string = '{} IS NOT NULL'.format(field_ref)
        # NOTE(review): for the three LIKE operators below, `value` is
        # interpolated unescaped into the pattern; a value containing a
        # single quote or % will break or alter the expression — confirm
        # whether callers sanitize, or quote/escape here.
        elif operator == 'begins with':
            expression_string = """%s LIKE '%s%%'""" % (field_ref, value)
        elif operator == 'contains':
            expression_string = """%s LIKE '%%%s%%'""" % (field_ref, value)
        elif operator == 'does not contain':
            expression_string = """%s NOT LIKE '%%%s%%'""" % (field_ref, value)
        else:
            # comparison operators use the properly quoted value
            expression_string = '{} {} {}'.format(field_ref, operator, quoted_val)
        expression = QgsExpression(expression_string)
        if expression.hasParserError():
            raise QgsProcessingException(expression.parserErrorString())
        layer.selectByExpression(expression_string)
        return {self.OUTPUT: parameters[self.INPUT]}
| nirvn/QGIS | python/plugins/processing/algs/qgis/SelectByAttribute.py | Python | gpl-2.0 | 5,528 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat - Unit Test Suite"""
from invenio.testutils import make_test_suite, run_test_suite, InvenioTestCase
from invenio.bibformat_utils import words_start_with_patterns, \
cut_out_snippet_core_creation
class WordsStartsWithPatternTest(InvenioTestCase):
    """Test for words start with pattern functionality.

    words_start_with_patterns(words, patterns) returns a (matched, extra)
    pair: whether the first word starts with any pattern (case-insensitive)
    and, for multi-word phrase patterns, how many additional words the
    phrase consumed."""

    def test_word_starts_with_single_pattern(self):
        """bibformat - word starts with single pattern"""
        self.assertEqual((False, 0), words_start_with_patterns(['thi'], ['this']))
        self.assertEqual((True, 0), words_start_with_patterns(['this'], ['this']))
        # matching is case-insensitive in both directions
        self.assertEqual((True, 0), words_start_with_patterns(['This'], ['this']))
        self.assertEqual((True, 0), words_start_with_patterns(['this'], ['tHis']))
        self.assertEqual((True, 0), words_start_with_patterns(['This'], ['tHis']))
        self.assertEqual((True, 0), words_start_with_patterns(['Thiss'], ['tHis']))

    def test_word_starts_with_multi_pattern(self):
        """bibformat - word starts with multi pattern"""
        self.assertEqual((False, 0), words_start_with_patterns(['thi'], ['this', 'is', 'a']))
        self.assertEqual((False, 0), words_start_with_patterns(['i'], ['this', 'is', 'a']))
        self.assertEqual((True, 0), words_start_with_patterns(['this'], ['this', 'is', 'a']))
        self.assertEqual((True, 0), words_start_with_patterns(['is'], ['this', 'is', 'a']))

    def test_words_start_with_single_pattern(self):
        """bibformat - words start with single pattern"""
        self.assertEqual((True, 0), words_start_with_patterns(['this', 'is'], ['thi']))
        self.assertEqual((False, 0), words_start_with_patterns(['thi', 'this'], ['this']))

    def test_words_start_with_multi_pattern(self):
        """bibformat - words start with multi pattern"""
        # Only the first word is considered
        self.assertEqual((True, 0), words_start_with_patterns(['this', 'is'], ['this', 'it']))
        self.assertEqual((True, 0), words_start_with_patterns(['this', 'is'], ['it', 'thi']))
        self.assertEqual((False, 0), words_start_with_patterns(['this', 'is'], ['it', 'if']))
        self.assertEqual((False, 0), words_start_with_patterns(['this', 'is'], ['is', 'if']))

    def test_words_start_with_phrase(self):
        """bibformat - words start with phrase"""
        # phrase patterns consume extra words: second element counts them
        self.assertEqual((True, 2), words_start_with_patterns(['this', 'is', 'a', 'test'], ['this is a']))
        self.assertEqual((False, 0), words_start_with_patterns(['this', 'is', 'a', 'test'], ['no I do not]']))
        self.assertEqual((True, 2), words_start_with_patterns(['this', 'is', 'a', 'test'], ['no I do not]', 'this is a']))
        self.assertEqual((False,0), words_start_with_patterns(['this', 'is'], ['no I do not', 'this is a']))
class SnippetCutOutCoreCreation(InvenioTestCase):
    """Test for snippet cut out core creation.

    cut_out_snippet_core_creation(words, patterns, max_chars) returns a
    (snippet, first_index, last_index) triple; (-1, -1) indices mean no
    pattern matched."""

    # class-level fixture: realistic OCR-like word list used by the tests
    _words = dict()
    _words[0] = ['CERN', 'LIBRARIES,', 'GENEVA', 'SCAN-0005061', 'Development', 'of', 'Photon', 'Beam', 'Diagnostics',
                 'for','VUV', 'Radiation', 'from', 'a', 'SASE', 'FEL', 'R.', 'Treusch', '1,', 'T.', 'Lokajczyk,', 'W.',
                 'Xu', '2,','U.', 'Jastrow,', 'U.', 'Hahn,', 'Abstract', 'L.', 'Bittner', 'and', 'J.', 'Feldhaus',
                 'HASYLAB', 'at', 'DESY,', 'Notkcstr.', '85,', 'D\xe2\x80\x94226`U3', 'Hamburg,', 'Germany', 'For',
                 'the', 'proof-of-principle', 'experiment', 'of', 'self-amplified', 'spontaneous', 'emission', '[SASE)',
                 'at', 'short', 'wavelengths', 'on', 'the', 'VUV', 'FEL', 'at', 'DESY', 'a', 'multi-facetted', 'photon',
                 'beam', 'diagnostics', 'experiment', 'has', 'been', 'developed', 'employing', 'new', 'detection',
                 'concepts', 'to', 'measure', 'all', 'SASE', 'specific', 'properties', 'on', 'a', 'single', 'pulse',
                 'basis.', 'The', 'present', 'setup', 'includes', 'instrumentation', 'for', 'the', 'measurement', 'of',
                 'the', 'energy', 'and', 'the', 'angular', 'and', 'spectral', 'distribution', 'of', 'individual', 'photon',
                 'pulses.', 'Different', 'types', 'of', 'photon', 'detectors', 'such', 'as', 'PtSi-photodiodes', 'and']

    def test_term_cut_out(self):
        """bibformat - term snippet cut out core creation"""
        self.assertEqual(('This', 0, 0), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This'], 50))
        # two matched terms expand the snippet to span both
        self.assertEqual(('This is a test', 0, 3), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This' ,'test'], 50))
        self.assertEqual(('is', 1, 1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['is'], 50))
        self.assertEqual(('is a new', 1, 3), cut_out_snippet_core_creation(['This', 'is', 'a', 'new', 'test'], ['is', 'new'], 50))
        # no match yields empty snippet and (-1, -1)
        self.assertEqual(('', -1, -1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['new'], 50))
        self.assertEqual(('of', 5, 5), cut_out_snippet_core_creation(self._words[0], ['of'], 100))

    def test_phrase_cut_out(self):
        """bibformat - phrase snippet cut out core creation"""
        self.assertEqual(('This is', 0, 1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This is'], 50))
        self.assertEqual(('This is a', 0, 2), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This is a'], 50))
        self.assertEqual(('', -1, -1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This not'], 50))
        self.assertEqual(('is a', 1, 2), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['is a'], 50))
        self.assertEqual(('of the', 92, 93), cut_out_snippet_core_creation(self._words[0], ['of the'], 100))
# Suite assembled from this module's test cases; consumed by
# run_test_suite in the command-line entry point below.
TEST_SUITE = make_test_suite(WordsStartsWithPatternTest,
                             SnippetCutOutCoreCreation,
                             )

if __name__ == '__main__':
    run_test_suite(TEST_SUITE)
| labordoc/labordoc-next | modules/bibformat/lib/bibformat_utils_unit_tests.py | Python | gpl-2.0 | 6,733 |
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from backtrader import Indicator
from backtrader.functions import *
# The modules below should/must define __all__ with the Indicator objects
# of prepend an "_" (underscore) to private classes/variables
from .basicops import *
# base for moving averages
from .mabase import *
# moving averages (so envelope and oscillators can be auto-generated)
from .sma import *
from .ema import *
from .smma import *
from .wma import *
from .dema import *
from .kama import *
from .zlema import *
# depends on moving averages
from .deviation import *
# depend on basicops, moving averages and deviations
from .atr import *
from .aroon import *
from .bollinger import *
from .cci import *
from .crossover import *
from .dpo import *
from .directionalmove import *
from .envelope import *
from .macd import *
from .momentum import *
from .oscillator import *
from .prettygoodoscillator import *
from .priceoscillator import *
from .rsi import *
from .stochastic import *
from .trix import *
from .williams import *
| YuepengGuo/backtrader | backtrader/indicators/__init__.py | Python | gpl-3.0 | 2,021 |
import boto,sys,euca_admin
from boto.exception import EC2ResponseError
from euca_admin.generic import BooleanResponse
from euca_admin import EucaAdmin
from optparse import OptionParser
# Request path handed to EucaAdmin below; every admin call in this module
# is issued against the cloud controller's Configuration service.
SERVICE_PATH = '/services/Configuration'
class Walrus():
    """Client-side representation of a Walrus storage service registered
    with the Eucalyptus cloud controller.

    Instances double as boto SAX-style response handlers: get_list /
    get_object call startElement/endElement while parsing the XML reply.
    """

    def __init__(self, walrus_name=None, host_name=None, port=None):
        # NOTE(review): 'port' is accepted but never stored -- presumably
        # kept for call-site symmetry; confirm before removing.
        self.walrus_name = walrus_name
        self.host_name = host_name
        self.euca = EucaAdmin(path=SERVICE_PATH)

    def __repr__(self):
        return 'WALRUS\t%s\t%s' % (self.walrus_name, self.host_name)

    def startElement(self, name, attrs, connection):
        # No nested elements to handle; boto requires the method to exist.
        return None

    def endElement(self, name, value, connection):
        # Map the XML leaf elements onto this object's attributes; any
        # unrecognised element becomes an attribute of the same name.
        if name == 'euca:detail':
            self.host_name = value
        elif name == 'euca:name':
            self.walrus_name = value
        else:
            setattr(self, name, value)

    def describe(self):
        """Print every registered Walrus, one per line (via __repr__)."""
        parser = OptionParser("usage: %prog [options]",version="Eucalyptus %prog VERSION")
        (options, args) = parser.parse_args()
        try:
            list = self.euca.connection.get_list('DescribeWalruses', {}, [('euca:item', Walrus)])
            for i in list:
                print i
        except EC2ResponseError, ex:
            self.euca.handle_error(ex)

    def get_register_parser(self):
        """Build the option parser for the register command-line tool."""
        parser = OptionParser("usage: %prog [options]",version="Eucalyptus %prog VERSION")
        parser.add_option("-H","--host",dest="walrus_host",help="Hostname of the walrus.")
        parser.add_option("-p","--port",dest="walrus_port",type="int",default=8773,help="Port for the walrus.")
        return parser

    def register(self, walrus_name, walrus_host, walrus_port=8773):
        """Register a Walrus host with the cloud controller."""
        # NOTE(review): the request registers under the fixed name 'walrus';
        # the walrus_name parameter is ignored -- confirm this is intended.
        try:
            reply = self.euca.connection.get_object('RegisterWalrus', {'Name':'walrus','Host':walrus_host,'Port':walrus_port}, BooleanResponse)
            print reply
        except EC2ResponseError, ex:
            self.euca.handle_error(ex)

    def get_deregister_parser(self):
        """Build the option parser for the deregister command-line tool."""
        parser = OptionParser("usage: %prog [options]",version="Eucalyptus %prog VERSION")
        parser.add_option("-n","--name",dest="walrus_name",help="Name of the walrus.")
        return parser

    def deregister(self, walrus_name):
        """Deregister the Walrus registered under walrus_name."""
        try:
            reply = self.euca.connection.get_object('DeregisterWalrus', {'Name':walrus_name},BooleanResponse)
            print reply
        except EC2ResponseError, ex:
            self.euca.handle_error(ex)
| Shebella/HIPPO | clc/tools/src/euca_admin/walruses.py | Python | gpl-3.0 | 2,284 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Widen OAuth credential columns to 191 characters.

    forwards() grows SocialAccount.uid (255 -> 191 is a *shrink* for uid;
    the SocialApp fields grow 100 -> 191); backwards() restores the
    previous widths.  NOTE(review): 191 looks like the usual MySQL utf8mb4
    indexable VARCHAR limit -- confirm against the project's DB notes.
    """

    def forwards(self, orm):

        # Changing field 'SocialAccount.uid'
        db.alter_column(u'socialaccount_socialaccount', 'uid', self.gf('django.db.models.fields.CharField')(max_length=191))
        # Changing field 'SocialApp.secret'
        db.alter_column(u'socialaccount_socialapp', 'secret', self.gf('django.db.models.fields.CharField')(max_length=191))
        # Changing field 'SocialApp.client_id'
        db.alter_column(u'socialaccount_socialapp', 'client_id', self.gf('django.db.models.fields.CharField')(max_length=191))
        # Changing field 'SocialApp.key'
        db.alter_column(u'socialaccount_socialapp', 'key', self.gf('django.db.models.fields.CharField')(max_length=191))

    def backwards(self, orm):

        # Changing field 'SocialAccount.uid'
        db.alter_column(u'socialaccount_socialaccount', 'uid', self.gf('django.db.models.fields.CharField')(max_length=255))
        # Changing field 'SocialApp.secret'
        db.alter_column(u'socialaccount_socialapp', 'secret', self.gf('django.db.models.fields.CharField')(max_length=100))
        # Changing field 'SocialApp.client_id'
        db.alter_column(u'socialaccount_socialapp', 'client_id', self.gf('django.db.models.fields.CharField')(max_length=100))
        # Changing field 'SocialApp.key'
        db.alter_column(u'socialaccount_socialapp', 'key', self.gf('django.db.models.fields.CharField')(max_length=100))

    # Frozen ORM snapshot auto-generated by South; used only for
    # migration-time introspection.  Do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'socialaccount.socialaccount': {
            'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'SocialAccount'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '191'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'socialaccount.socialapp': {
            'Meta': {'object_name': 'SocialApp'},
            'client_id': ('django.db.models.fields.CharField', [], {'max_length': '191'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '191', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '191'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'socialaccount.socialtoken': {
            'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['socialaccount.SocialAccount']"}),
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['socialaccount.SocialApp']"}),
            'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.TextField', [], {}),
            'token_secret': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        }
    }

    complete_apps = ['socialaccount']
# -*- coding: utf-8 -*-
#
# This file is part of Linux Show Player
#
# Copyright 2012-2016 Francesco Ceruti <ceppofrancy@gmail.com>
#
# Linux Show Player is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Linux Show Player is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Linux Show Player. If not, see <http://www.gnu.org/licenses/>.
import mido
from lisp.modules.midi.midi_common import MIDICommon
from lisp.modules.midi.midi_utils import mido_backend, mido_port_name
class MIDIOutput(MIDICommon):
    """Outgoing MIDI connection built on a mido output port."""

    def __init__(self, port_name='AppDefault'):
        super().__init__(port_name=port_name)

    def send_from_str(self, str_message):
        """Parse a message in mido's string format and send it."""
        message = mido.parse_string(str_message)
        self.send(message)

    def send(self, message):
        """Send an already-parsed mido message on the open port."""
        self._port.send(message)

    def open(self):
        """Resolve the configured port name and open it for output."""
        self._port = mido_backend().open_output(
            mido_port_name(self._port_name, 'O'))
| FrancescoCeruti/linux-show-player | lisp/modules/midi/midi_output.py | Python | gpl-3.0 | 1,339 |
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
# Import all fixtures from PyInstaller into the tests.
from PyInstaller.utils.conftest import *
| etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/tests/conftest.py | Python | gpl-3.0 | 524 |
# Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
from urllib import urlencode
import logging
from sickbeard import tvcache
from sickbeard.providers import generic
from sickbeard.common import USER_AGENT
class ThePirateBayProvider(generic.TorrentProvider):
    """Torrent provider that scrapes a ThePirateBay proxy with a regex."""

    def __init__(self):

        generic.TorrentProvider.__init__(self, "ThePirateBay")

        self.supportsBacklog = True
        self.public = True

        self.ratio = None
        self.confirmed = True
        self.minseed = None
        self.minleech = None

        self.cache = ThePirateBayCache(self)

        self.urls = {
            'base_url': 'https://pirateproxy.la/',
            'search': 'https://pirateproxy.la/s/',
            'rss': 'https://pirateproxy.la/tv/latest'
        }

        # BUGFIX: the key was looked up as b'base_url', which only matches
        # the str key above on Python 2 (where bytes == str).
        self.url = self.urls['base_url']

        self.headers.update({'User-Agent': USER_AGENT})

        """
        205 = SD, 208 = HD, 200 = All Videos
        https://thepiratebay.gd/s/?q=Game of Thrones&type=search&orderby=7&page=0&category=200
        """
        self.search_params = {
            'q': '',
            'type': 'search',
            'orderby': 7,
            'page': 0,
            'category': 200
        }

        # Named groups: id, title, magnet url, human-readable size,
        # seeders and leechers, extracted from a result-page row.
        self.re_title_url = r'/torrent/(?P<id>\d+)/(?P<title>.*?)".+?(?P<url>magnet.*?)".+?Size (?P<size>[\d\.]* [TGKMiB]{2,3}).+?(?P<seeders>\d+)</td>.+?(?P<leechers>\d+)</td>'

    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
        """Run every query in search_strings and return matching torrents,
        best-seeded first.

        search_strings maps a mode ('Season', 'Episode' or 'RSS') to a
        list of query strings; 'RSS' hits the latest-TV listing instead of
        the search page.  Each result is a
        (title, magnet_url, size_bytes, seeders, leechers) tuple.
        """
        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        # BUGFIX (throughout): the original compared strings with
        # 'is' / 'is not', which relies on CPython literal interning and is
        # not guaranteed to work; use == / != instead.
        for mode in search_strings:
            logging.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:
                self.search_params.update({'q': search_string.strip()})

                if mode != 'RSS':
                    logging.debug("Search string: " + search_string)

                searchURL = self.urls[('search', 'rss')[mode == 'RSS']] + '?' + urlencode(self.search_params)
                logging.debug("Search URL: %s" % searchURL)
                data = self.getURL(searchURL)
                # data = self.getURL(self.urls[('search', 'rss')[mode == 'RSS']], params=self.search_params)
                if not data:
                    continue

                matches = re.compile(self.re_title_url, re.DOTALL).finditer(data)
                for torrent in matches:
                    title = torrent.group('title')
                    download_url = torrent.group('url')
                    # id = int(torrent.group('id'))
                    size = self._convertSize(torrent.group('size'))
                    seeders = int(torrent.group('seeders'))
                    leechers = int(torrent.group('leechers'))

                    if not all([title, download_url]):
                        continue

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logging.debug(
                                "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                    title, seeders, leechers))
                        continue

                    # Accept Torrent only from Good People for every Episode Search
                    if self.confirmed and re.search(r'(VIP|Trusted|Helper|Moderator)', torrent.group(0)) is None:
                        if mode != 'RSS':
                            logging.debug(
                                "Found result %s but that doesn't seem like a trusted result so I'm ignoring it" % title)
                        continue

                    item = title, download_url, size, seeders, leechers
                    if mode != 'RSS':
                        logging.debug("Found result: %s " % title)

                    items[mode].append(item)

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)
            results += items[mode]

        return results

    def _convertSize(self, size):
        """Convert a '<value> <unit>' size string (e.g. '1.5 GiB') to bytes.

        Unknown units are treated as plain bytes.
        """
        value, modifier = size.split(' ')
        value = float(value)
        # BUGFIX: the original used substring tests such as
        # "modifier in 'KiB'", so e.g. a bare 'B' or 'i' matched every
        # unit; use an exact unit lookup instead.
        multipliers = {
            'KiB': 1024,
            'MiB': 1024 ** 2,
            'GiB': 1024 ** 3,
            'TiB': 1024 ** 4,
        }
        return value * multipliers.get(modifier, 1)

    def seedRatio(self):
        """Return the configured seed ratio (None = use the global one)."""
        return self.ratio
class ThePirateBayCache(tvcache.TVCache):
    """Result cache that periodically pulls the provider's RSS listing."""

    def __init__(self, provider_obj):
        tvcache.TVCache.__init__(self, provider_obj)

        # only poll ThePirateBay every 30 minutes max
        self.minTime = 30

    def _getRSSData(self):
        # An empty 'RSS' search string makes _doSearch use the
        # latest-TV listing rather than the search page.
        search_params = {'RSS': ['']}
        return {'entries': self.provider._doSearch(search_params)}
# Module-level singleton instance; NOTE(review): presumably picked up by
# sickbeard's provider discovery via this module attribute -- confirm
# against the providers loader.
provider = ThePirateBayProvider()
| mcus/SickRage | sickbeard/providers/thepiratebay.py | Python | gpl-3.0 | 5,726 |
#
# Copyright 2003,2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
# misc utilities
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import types
class seq_with_cursor (object):
    """A non-empty sequence with a movable cursor (index) into it.

    The cursor starts at the middle of the sequence by default, or at
    `initial_index`, or (when `initial_value` is given and the items are
    sorted ascending) on the smallest item >= `initial_value`.
    """
    __slots__ = [ 'items', 'index' ]

    def __init__ (self, items, initial_index = None, initial_value = None):
        """
        @param items: non-empty sequence.
        @param initial_index: starting cursor position (None -> midpoint).
        @param initial_value: if given, overrides the index by positioning
            the cursor via set_index_by_value.
        """
        assert len (items) > 0, "seq_with_cursor: len (items) == 0"
        self.items = items
        self.set_index (initial_index)
        if initial_value is not None:
            self.set_index_by_value(initial_value)

    def set_index (self, initial_index):
        """Move the cursor to initial_index (None selects the midpoint).

        Raises ValueError for an out-of-range index.
        """
        if initial_index is None:
            # BUGFIX: with 'from __future__ import division' in effect at
            # the top of this file, len(...) / 2 produces a float, which is
            # not a valid sequence index; use floor division instead.
            self.index = len (self.items) // 2
        elif initial_index >= 0 and initial_index < len (self.items):
            self.index = initial_index
        else:
            raise ValueError

    def set_index_by_value(self, v):
        """
        Set index to the smallest value such that items[index] >= v.
        If there is no such item, set index to the maximum value.
        """
        self.set_index(0) # side effect!
        cv = self.current()
        more = True
        while cv < v and more:
            cv, more = next(self) # side effect!

    def __next__ (self):
        """Advance the cursor one step.

        Returns (item, moved): moved is False when the cursor was already
        on the last item (the cursor then stays put).
        """
        new_index = self.index + 1
        if new_index < len (self.items):
            self.index = new_index
            return self.items[new_index], True
        else:
            return self.items[self.index], False

    def prev (self):
        """Step the cursor back one step; returns (item, moved)."""
        new_index = self.index - 1
        if new_index >= 0:
            self.index = new_index
            return self.items[new_index], True
        else:
            return self.items[self.index], False

    def current (self):
        """Return the item under the cursor."""
        return self.items[self.index]

    def get_seq (self):
        """Return a shallow copy of the underlying items."""
        return self.items[:] # copy of items
| jdemel/gnuradio | gnuradio-runtime/python/gnuradio/gru/seq_with_cursor.py | Python | gpl-3.0 | 1,933 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from gi.repository import Gtk
from GTG import _
class PasswordUI(Gtk.Box):
    '''Widget displaying a gtk.Label and a textbox to input a password'''

    def __init__(self, req, backend, width):
        '''Creates the gtk widgets and loads the current password in the text
        field

        @param req: a Requester
        @param backend: a backend object
        @param width: the width of the Gtk.Label object
        '''
        super(PasswordUI, self).__init__()
        self.backend = backend
        self.req = req
        # Order matters: build widgets, fill in the stored password, and
        # only then hook up 'changed' so loading does not count as an edit.
        self._populate_gtk(width)
        self._load_password()
        self._connect_signals()

    def _populate_gtk(self, width):
        '''Creates the text box and the related label

        @param width: the width of the Gtk.Label object
        '''
        password_label = Gtk.Label(label=_("Password:"))
        password_label.set_alignment(xalign=0, yalign=0.5)
        password_label.set_size_request(width=width, height=-1)
        self.pack_start(password_label, False, True, 0)
        # The alignment wrapper adds a 10px left pad between label and entry.
        align = Gtk.Alignment.new(0, 0.5, 1, 0)
        align.set_padding(0, 0, 10, 0)
        self.pack_start(align, True, True, 0)
        self.password_textbox = Gtk.Entry()
        align.add(self.password_textbox)

    def _load_password(self):
        '''Loads the password from the backend'''
        password = self.backend.get_parameters()['password']
        # Mask the input so the password is never shown on screen.
        self.password_textbox.set_invisible_char('*')
        self.password_textbox.set_visibility(False)
        self.password_textbox.set_text(password)

    def _connect_signals(self):
        '''Connects the gtk signals'''
        self.password_textbox.connect('changed', self.on_password_modified)

    def commit_changes(self):
        '''Saves the changes to the backend parameter ('password')'''
        password = self.password_textbox.get_text()
        self.backend.set_parameter('password', password)

    def on_password_modified(self, sender):
        ''' Signal callback, executed when the user edits the password.
        Disables the backend. The user will re-enable it to confirm the changes
        (s)he made.

        @param sender: not used, only here for signal compatibility
        '''
        if self.backend.is_enabled() and not self.backend.is_default():
            self.req.set_backend_enabled(self.backend.get_id(), False)
| partp/gtg-services | GTG/gtk/backends_dialog/parameters_ui/passwordui.py | Python | gpl-3.0 | 3,286 |
#!/usr/bin/env python
"Module to aggregate all pdf figures of a directory \
into a single latex file, and compile it."
from __future__ import division, print_function
import os
import sys
import re
from optparse import OptionParser
_VERSION = '1.0'
def latex_dir(outfile_name, directory, column=2, eps=False):
    """Write a LaTeX source file including every pdf (or eps) figure of
    *directory*, laid out as *column* (1 or 2) subfigures per figure.

    @param outfile_name: path of the .tex file to write
    @param directory: directory (relative to the cwd) scanned for figures
    @param column: 1 or 2 subfigures per row; raises ValueError otherwise
    @param eps: include .eps files (plain latex) instead of .pdf (pdflatex)
    """
    print(directory)
    # Validate the layout up front.
    # BUGFIX: the original printed a message and then executed a bare
    # 'raise' outside any except block, which fails at runtime with a
    # misleading 'No active exception to re-raise' RuntimeError.
    if column == 1:
        line_size = .99
    elif column == 2:
        line_size = .49
    else:
        raise ValueError("invalid column size: %r (must be 1 or 2)" % column)
    with open(outfile_name, 'w') as outfile:
        outfile.write(r"""\documentclass[10pt]{article}
\usepackage[T1]{fontenc}
\usepackage[utf8x]{inputenc}
\usepackage{fancyhdr}
\def\goodgap{\hspace{\subfigtopskip}\hspace{\subfigbottomskip}}
\usepackage%(include_pdf_package_option)s{graphicx}
\usepackage{subfigure,a4wide}
%%set dimensions of columns, gap between columns, and paragraph indent
\setlength{\textheight}{8in}
%%\setlength{\textheight}{9.3in}
\setlength{\voffset}{0.5in}
\setlength{\topmargin}{-0.55in}
%%\setlength{\topmargin}{0in}
\setlength{\headheight}{12.0pt}
%%\setlength{\headsep}{0.0in}
%%\setlength{\textwidth}{7.43in}
\setlength{\textwidth}{7.10in}
%%\setlength{\textwidth}{6in}
\setlength{\hoffset}{-0.4in}
\setlength{\columnsep}{0.25in}
\setlength{\oddsidemargin}{0.0in}
\setlength{\evensidemargin}{0.0in}
%% more than .95 of text and figures
\def\topfraction{.95}
\def\floatpagefraction{.95}
\def\textfraction{.05}
\newcommand{\mydefaultheadersandfooters}
{
\chead{\today}
\rhead{\thepage}
\lfoot{}
\cfoot{}
\rfoot{}
}
\title{Automatically generated latex for directory %(title)s}
\author{%(login)s}
\begin{document}
\pagestyle{fancy}
\mydefaultheadersandfooters
\maketitle
\clearpage
""" % {'title': directory.replace('_', '\_'),
       # BUGFIX: os.getenv('LOGNAME') is None when the variable is unset,
       # which crashed .capitalize(); fall back to a placeholder.
       'login': (os.getenv('LOGNAME') or 'unknown').capitalize(),
       'include_pdf_package_option': '' if eps else '[pdftex]'})
        files = os.listdir(os.getcwd() + '/' + directory)
        # Skip files previously generated by this tool.
        # exclude_filename = outfile.name.split('/')[-1].replace('.tex', '.pdf')
        exclude_filename = 'latex_dir_'
        pattern = re.compile(r'(?!%s)\S+\.%s' % (exclude_filename,
                                                 ('eps' if eps else 'pdf')))
        count = 0
        nb_floats = 0
        for cur_file in sorted(files):
            if pattern.match(cur_file):
                nb_floats += 1
                # Open a new figure at the start of each row.
                if column == 1 or count % 2 == 0:
                    outfile.write(r"\begin{figure}[!ht]"
                                  r"\begin{center}")
                outfile.write(r"\subfigure[]{\includegraphics" +
                              r"[width=%f\textwidth,height=%f\textheight]{%s/%s}}"
                              % (line_size, .7*line_size, directory, cur_file))
                # Close the figure at the end of each row.
                if column == 1 or count % 2 != 0:
                    outfile.write('\n' + r"\caption{}\end{center}\end{figure}"
                                  + '\n')
                    # force a page break every 4 completed figures
                    if nb_floats >= 4:
                        outfile.write(r"\clearpage")
                        nb_floats = 0
                count += 1
        # BUGFIX: only the two-column layout can leave a figure open after
        # the loop; the original also emitted a stray unmatched
        # \end{figure} in single-column mode whenever count was odd.
        if column == 2 and count % 2 == 1:
            outfile.write('\n' + r"\caption{}\end{center}\end{figure}" + '\n')
        outfile.write(r"\end{document}")
def main():
    "Option parsing and launch latex_dir"
    usage = "%prog [-c nb_of_columns -w outtexfile] directory_list"
    parser = OptionParser(usage = usage)
    parser.add_option('-w', dest='outtexfile', type='string',
                      help='output latex file (default is dir/latex_dir.tex)')
    parser.add_option('-c', dest='column', type='int', default = 2,
                      help='number of columns of latex file: 1 or 2')
    parser.add_option('--eps', dest='eps', default=False, action='store_true',
                      help='use eps files instead of pdf')
    (options, args) = parser.parse_args()
    if not args:
        parser.print_help()
        exit(5)
    for directory in args:
        if not options.outtexfile:
            # Default output file lives inside the processed directory.
            outfile_name = os.sep.join((directory,
                                        'latex_dir_%s.tex' % directory))
        else:
            outfile_name = options.outtexfile
        if options.column not in (1, 2):
            print("invalid number of columns")
            parser.print_help()
            exit(5)
        latex_dir(outfile_name, directory, options.column, eps=options.eps)
        #compile the tex file
        # NOTE(review): os.execlp replaces the current process, so only the
        # first directory in args is ever compiled -- confirm whether the
        # loop was meant to compile every directory.
        if options.eps:
            os.execlp('latex', 'latex', '-interaction=nonstopmode',
                      '-output-directory', directory, outfile_name)
        else:
            os.execlp('pdflatex', 'pdflatex', '-interaction=nonstopmode',
                      '-output-directory', directory, outfile_name)

if __name__ == '__main__':
    sys.exit(main())
| LouisPlisso/analysis_tools | latex_directory_1_2.py | Python | gpl-3.0 | 5,070 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# Pinned versions of the prebuilt tool packages used for Windows/MSVC
# builds; consumed elsewhere in the build tooling.  Bump a value here to
# roll out a new tool version.
WINDOWS_MSVC = {
    "cmake": "3.7.2",
    "llvm": "6.0.0",
    "moztools": "0.0.1-5",
    "ninja": "1.7.1",
    "openssl": "1.1.0e-vs2015",
}
| dati91/servo | python/servo/packages.py | Python | mpl-2.0 | 343 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2014 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
# OpenERP module manifest: metadata read by the server at module load time.
{
    "name": "Export Customers - Exporting customer's data in .csv file for italian fiscal program.",
    'version': '2.0.1.0',
    'category': 'Generic Modules/Sales customers',
    "description": """Exporting customer's data in .csv file """,
    "author": "Didotech SRL",
    'website': 'http://www.didotech.com',
    # Modules that must be installed before this one.
    "depends": [
        "base",
        "base_partner_ref",
        "sale",
        "account",
        #"l10n_it",
        "l10n_it_base",
        "l10n_it_account"
    ],
    "init_xml": [],
    # Data/view files loaded on install and upgrade.
    "update_xml": [
        "security/security.xml",
        "export_customers.xml"
    ],
    "demo_xml": [],
    "installable": True,
    "active": False,
}
| iw3hxn/LibrERP | export_customers/__openerp__.py | Python | agpl-3.0 | 2,043 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .base import TrackStatsHookBase
class ModelHookManager:
"""
This class registers and manages a set of hooks of subclassed from
`TrackStatsHookBase`. The given hook is registered on all modules within
'named_modules'.
Tracking is started and stopped for all hooks via `self.start_tracking()` and
`self.stop_tracking()`. Alternatively, this class can be used a context manager to
automate these calls. For example,
```
with hook_manager as hooks:
... # Train here
stats = hooks.get_statitics()
```
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function that
takes a name and module as inputs and then outputs a dictionary of
arguments to pass to the hook
"""
    def __init__(
        self,
        named_modules,
        hook_class,
        hook_type="forward",
        hook_args=None,
    ):
        """Register `hook_class` on every module; see the class docstring
        for parameter descriptions."""
        assert hook_type in ["forward", "backward", "pre_forward"]
        assert issubclass(hook_class, TrackStatsHookBase)

        # Register the hooks via class method.
        tracked_vals = self.register_storage_hooks(named_modules,
                                                   hook_class=hook_class,
                                                   hook_type=hook_type,
                                                   hook_args=hook_args)

        # These are the hook callables invoked on every forward or backward
        # pass of the corresponding module.
        self.hooks = tracked_vals[0]

        # These are handles to the hooks; PyTorch lets the user unregister
        # hooks through these handles.
        self._hook_handles = tracked_vals[1]

        # These are the filtered modules that will be tracked.
        self.tracked_modules = tracked_vals[2]

        # Keep track of whether tracking is on.
        self._tracking = False
@property
def tracking(self):
return self._tracking
def __enter__(self):
"""Start tracking when `with` is called."""
self.start_tracking()
return self
def __exit__(self, *args):
"""Stop tracking when `with` block is left."""
self.stop_tracking()
@classmethod
def register_storage_hooks(
cls,
named_modules,
hook_class,
hook_type="forward",
hook_args=None,
):
"""
Register hook on each module in 'named_modules'.
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function
that takes a name and module as inputs and then outputs a
dictionary of arguments to pass to the hook
"""
assert hook_type in ["forward", "backward", "pre_forward"]
hooks = []
handles = []
tracked_modules = dict()
# Register hooks on the modules.
for n, m in named_modules.items():
if callable(hook_args):
args = hook_args(n, m)
else:
args = hook_args or {}
hook = hook_class(name=n, **args)
if hook_type == "forward":
handle = m.register_forward_hook(hook)
elif hook_type == "pre_forward":
handle = m.register_forward_pre_hook(hook)
else:
handle = m.register_backward_hook(hook)
hooks.append(hook)
handles.append(handle)
tracked_modules[n] = m
return hooks, handles, tracked_modules
def start_tracking(self):
self._tracking = True
for hook in self.hooks:
hook.start_tracking()
def stop_tracking(self):
self._tracking = False
for hook in self.hooks:
hook.stop_tracking()
def get_statistics(self):
"""
This returns a generator with elements
`(name, module, statistic_0, ..., statistic_n)`.
"""
return (
(name, module, *hook.get_statistics())
for (name, module), hook in zip(self.tracked_modules.items(), self.hooks)
)
def remove_hooks(self):
"""
Remove all hooks from the model and stop tracking statistics.
"""
for handle in self._hook_handles:
handle.remove()
self.hooks = []
self._hook_handles = []
self.tracked_modules = dict()
| mrcslws/nupic.research | src/nupic/research/frameworks/pytorch/hooks/hook_manager.py | Python | agpl-3.0 | 5,818 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp.osv import osv, fields
from openerp.tools import translate
class account_account(osv.Model):
    """account.account extension.

    Prepends the company prefix and the chain of parent shortcuts to the
    displayed account name, and makes ``name_search`` tolerant of codes
    typed with separators (``-``, ``_``, ``.``) and of the company prefix.
    """
    _name = "account.account"
    _inherit = "account.account"

    # Change the way the account is displayed when searched in a many2one
    # field: add the company prefix and the parent accounts' shortcuts.
    def name_get(self, cr, uid, ids, context=None):
        """Return ``(id, display_name)`` pairs.

        Display name format:
        - with parents:  ``<prefix>-<code> <shortcut>/.../<name>``
        - without:       ``<prefix> <name>``
        NOTE(review): the separator after the prefix differs between the two
        branches ('-' vs ' ') — confirm whether that asymmetry is intended.
        """
        if not ids:
            return []
        res = []
        # Avoid problems when only a single account id is passed.
        # NOTE(review): checks ``int`` only; a Python 2 ``long`` id would fall
        # through to the list branch — compare account_journal.name_get which
        # tests ``(int, long)``.
        if isinstance(ids, int):
            accounts = [self.browse(cr,uid,ids)]
        else:
            accounts = self.browse(cr,uid,ids)
        for obj_account in accounts:
            obj_company = self.pool.get('res.company').browse(cr,uid,obj_account.company_id.id)
            # If the account's company has a prefix, prepend it to the name.
            prefix= obj_company.prefix
            if prefix == False:
                prefix = ''
            data = []
            account = obj_account.parent_id
            # Walk up the parent chain, collecting each parent's shortcut
            # (or name) from the topmost ancestor down.
            if account.parent_id:
                while account.parent_id:
                    data.insert(0,(account.shortcut or account.name))
                    account = account.parent_id
                data.append(obj_account.name)
                data = '/'.join(data)
                data = obj_account.code + ' ' + data
                data = prefix and prefix + '-' + data or data
            else:
                # No parent chain: just the account's own name.
                data.append(obj_account.name)
                data = '/'.join(data)
                data = prefix and prefix + ' ' + data or data
            res.append((obj_account.id, data))
        return res

    # Prefix-aware search; a regular expression lets codes match even when
    # typed with the separators '-', '_' or '.'.
    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Search accounts by (optional) company prefix, code and/or name.

        The first word of ``name`` is interpreted as a possible company
        prefix or account code; the remainder as code and/or name. Several
        candidate interpretations are collected in ``search_domains`` and
        OR-ed together into a single ORM domain.
        """
        # NOTE(review): this binds all three names to the SAME list object;
        # it is benign only because each name is rebound before being read.
        account_ids = company_ids = search_domains = []
        dict_prefix = {}
        regular_expresion_number = '^[0-9.-_]+$'
        if not args:
            args = []
        #Code doesn't start with first word by numbers or special characters
        #Name doesn't start with numbers.
        if name:
            piece_1 = piece_2 = piece_3 = ''
            # str.partition(' ') returns (first_word, sep, rest): piece_1 is
            # the first word typed, piece_2 the remainder of the sentence.
            temp_partition = name.partition(' ')
            piece_1 = temp_partition[0]
            piece_2 = temp_partition[2]
            company_ids = self.pool.get('res.company').search(cr, uid, [])
            companies = self.pool.get('res.company').browse(cr, uid, company_ids)
            for company in companies:
                if company.prefix:
                    dict_prefix[company.id] = company.prefix
            # 1. If some companies define a prefix:
            #    dict_prefix maps company id -> its prefix.
            if dict_prefix:
                # Substring (not equality) match of the first word against
                # each prefix, case-insensitively.
                for id, prefix in dict_prefix.iteritems():
                    if piece_1.lower() in prefix.lower():
                        company_ids.append(id)
                if company_ids: #Companies that match the prefix
                    # If the first word is numeric it may be a prefix OR a code.
                    if re.match(regular_expresion_number, piece_1):
                        if piece_2: #If something is typed after the prefix.
                            piece_2_b = piece_2.partition(' ')[0]
                            # If the second word is numeric: prefix + code (+ name).
                            if re.match(regular_expresion_number, piece_2_b):
                                search_domains.append({
                                    'company_ids':company_ids,
                                    'code':piece_2_b,
                                    'name':piece_2.partition(' ')[2]
                                })
                            else:
                                # Second word is free text: prefix + name.
                                search_domains.append({
                                    'company_ids':company_ids,
                                    'name':piece_2
                                })
                            # Alternative reading: first word is the code itself.
                            search_domains.append({'code': piece_1,
                                                   'name':piece_2})
                        else:
                            # Only one (numeric) word typed: prefix or code.
                            search_domains.append({'company_ids':company_ids})
                            search_domains.append({'code':piece_1})
                    else:
                        # First word matched a prefix but is not numeric.
                        # If something was typed after the prefix:
                        if piece_2:
                            piece_2_b = piece_2.partition(' ')[0]
                            piece_3 = piece_2.partition(' ')[2]
                            # Second word numeric: prefix + code + name.
                            if re.match(regular_expresion_number, piece_2_b):
                                search_domains.append({
                                    'company_ids':company_ids,
                                    'code':piece_2_b,
                                    'name':piece_3
                                })
                            else:
                                # Second word is free text: prefix + name.
                                search_domains.append({
                                    'company_ids':company_ids,
                                    'name':piece_2})
                            # Alternative reading: the whole input is a name.
                            search_domains.append({'name':name})
                        else:
                            # Only the prefix was typed.
                            search_domains.append({'company_ids':company_ids})
                            search_domains.append({'name':name})
                else:
                    # No company prefix matched: interpret as code (+ name)
                    # when numeric, otherwise as a plain name.
                    if re.match(regular_expresion_number, piece_1):
                        search_domains.append({
                            'code':piece_1,
                            'name':piece_2
                        })
                    else:
                        search_domains.append({'name':name})
            # 2. No company defines a prefix.
            else:
                if re.match(regular_expresion_number, piece_1):
                    search_domains.append({
                        'code':piece_1,
                        'name':piece_2
                    })
                else:
                    search_domains.append({'name':name})
            # Build one ORM domain from all candidate interpretations.
            search_domain = []
            regular_expresion = '%'
            for domain in search_domains:
                temp_domain = []
                if 'company_ids' in domain.keys():
                    temp_domain.append(('company_id','in', domain['company_ids']))
                if 'code' in domain.keys():
                    # Strip the separators the user may have typed, then
                    # interleave '%' wildcards so '1-2.3' matches code '123'.
                    code = domain['code']
                    code = code.replace('-','').replace('_', '').replace('.','')
                    new_code = regular_expresion
                    for c in code:
                        new_code += c + regular_expresion
                    temp_domain.append(('code', '=like', new_code))
                if 'name' in domain.keys():
                    if domain['name']:
                        temp_domain.append(('name', operator, domain['name']))
                # Depending on the number of leaves, prepend the '&' operators
                # (polish-notation domains need explicit conjunctions).
                if len(temp_domain) == 1:
                    search_domain += temp_domain
                elif len(temp_domain) == 2:
                    search_domain.append('&')
                    search_domain += temp_domain
                else:
                    search_domain.append('&')
                    search_domain.append('&')
                    search_domain += temp_domain
            # OR the alternative interpretations together.
            # NOTE(review): uses len(search_domains)/2 - 1 rather than
            # len(search_domains) - 1 '|' operators — confirm this yields a
            # well-formed domain for every branch above.
            number_or = (len(search_domains) / 2) - 1
            cont = 0
            while cont < number_or:
                search_domain = ['|'] + search_domain
                cont += 1
            account_ids = self.pool.get('account.account').search(cr, uid, search_domain + args, limit=limit, context=context)
        else:
            account_ids = self.pool.get('account.account').search(cr, uid, [] +args, limit=limit, context=context)
        return self.name_get(cr, uid, account_ids, context=context) #search the names that match with the ids.
class account_journal(osv.Model):
    """account.journal extension: company-prefix aware display name and
    prefix/code/name tolerant ``name_search`` (same approach as the
    account.account extension above)."""
    _name = "account.journal"
    _inherit = "account.journal"

    #Add the company prefix to the journal name.
    def name_get(self, cr, user, ids, context=None):
        """Return ``(id, '<prefix> <code> - <name>')`` pairs."""
        if not ids:
            return []
        if isinstance(ids, (int, long)):
            ids = [ids]
        result = self.browse(cr, user, ids, context=context)
        res = []
        for rs in result:
            obj_company = self.pool.get('res.company').browse(cr,user,rs.company_id.id)
            # Prepend the company prefix when the company defines one.
            prefix= obj_company.prefix
            if prefix == False:
                prefix = ''
            data = []
            data.append(rs.code)
            data.append(rs.name)
            data = ' - '.join(data)
            data = prefix and prefix + ' ' + data or data
            res.append((rs.id, data))
        return res

    #Add company prefix to the journal search.
    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Search journals by (optional) company prefix, code and/or name.

        Candidate interpretations of the typed text are collected in
        ``search_domains`` and OR-ed into one ORM domain.
        """
        #TODO: Pass comments to english
        # NOTE(review): binds all three names to the SAME list object; benign
        # only because each name is rebound before being read.
        journal_ids = company_ids = search_domains = []
        dict_prefix = {}
        if not args:
            args = []
        if name:
            piece_1 = piece_2 = ''
            # str.partition(' ') returns (first_word, sep, rest): piece_1 is
            # the first word typed, piece_2 the remainder of the sentence.
            temp_partition = name.partition(' ')
            piece_1 = temp_partition[0]
            piece_2 = temp_partition[2]
            company_ids = self.pool.get('res.company').search(cr, uid, [])
            companies = self.pool.get('res.company').browse(cr, uid, company_ids)
            for company in companies:
                if company.prefix:
                    dict_prefix[company.id] = company.prefix
            # 1. Collect the companies whose prefix contains the first word
            #    (case-insensitive substring match).
            for id, prefix in dict_prefix.iteritems():
                if piece_1.lower() in prefix.lower():
                    company_ids.append(id)
            # Both conditions must be met to treat piece_1 as a prefix.
            if dict_prefix and company_ids:
                # If the user kept typing after the prefix:
                if piece_2:
                    piece_2_b = piece_2.partition(' ')[0]
                    piece_3 = piece_2.partition(' ')[2]
                    # Candidate interpretations of the input.
                    search_domains.append({
                        'code':piece_2_b,
                        'name':piece_3,
                        'company_ids':company_ids
                    })
                    search_domains.append({'company_ids':company_ids,
                                           'name':piece_2})
                    search_domains.append({'name': name })
                    search_domains.append({
                        'code':piece_1,
                        'name':name,
                    })
                else:
                    # Only the prefix was typed.
                    search_domains.append({
                        'company_ids':company_ids,
                        'name':piece_1,
                        'code':piece_1})
            # No prefix matched...
            else:
                if piece_2: #If continued typing
                    search_domains.append({'name': name })
                    search_domains.append({
                        'code':piece_1,
                        'name':piece_2,
                    })
                # Only one word was typed at the beginning of the search.
                else:
                    search_domains.append({
                        'code':piece_1,
                        'name':piece_1,
                    })
            # Build one ORM domain from all candidate interpretations.
            search_domain = []
            regular_expresion = '%'
            for domain in search_domains:
                temp_domain = []
                if 'company_ids' in domain.keys():
                    temp_domain.append(('company_id','in', domain['company_ids']))
                if 'code' in domain.keys():
                    # Strip separators, then interleave '%' wildcards so a
                    # code typed as '1-2.3' still matches code '123'.
                    code = domain['code']
                    code = code.replace('-','').replace('_', '').replace('.','')
                    new_code = regular_expresion
                    for c in code:
                        new_code += c + regular_expresion
                    # ilike: case-insensitive pattern match on the code.
                    temp_domain.append(('code', 'ilike', new_code))
                if 'name' in domain.keys():
                    if domain['name']:
                        temp_domain.append(('name', operator, domain['name']))
                # Unlike account.account, the leaves inside one candidate are
                # OR-ed ('|') rather than AND-ed, so any field can match.
                if len(temp_domain) == 1:
                    search_domain += temp_domain
                elif len(temp_domain) == 2:
                    search_domain.append('|')
                    search_domain += temp_domain
                else:
                    search_domain.append('|')
                    search_domain.append('&')
                    search_domain += temp_domain
            # OR the candidate interpretations together.
            # NOTE(review): uses len(search_domains)/2 - 1 rather than
            # len(search_domains) - 1 '|' operators — confirm this yields a
            # well-formed domain for every branch above.
            number_or = (len(search_domains) / 2) - 1
            cont = 0
            while cont < number_or:
                search_domain = ['|'] + search_domain
                cont += 1
            journal_ids = self.pool.get('account.journal').search(cr, uid, search_domain + args, limit=limit, context=context)
        else:
            journal_ids = self.pool.get('account.journal').search(cr, uid, [] + args, limit=limit, context=context)
        return self.name_get(cr, uid, journal_ids, context=context) #search the names that match with the ids.
class account_fiscalyear(osv.Model):
    '''
    Extends the fiscal year code to allow up to 16 characters.
    '''
    _name = 'account.fiscalyear'
    _inherit = 'account.fiscalyear'
    # Redefine 'code' with a larger size than the base module's.
    _columns = {
        'code': fields.char('Code', size=16, required=True, help="The code will be used to generate the numbers of the journal entries of this journal."),
    }
class account_period(osv.Model):
    '''
    Extends the period code to allow up to 16 characters.
    (The original docstring was copy-pasted from the fiscal year class.)
    '''
    _name = 'account.period'
    _inherit = 'account.period'
    # Redefine 'code' with a larger size than the base module's.
    _columns = {
        'code': fields.char('Code', size=16),
} | sysadminmatmoz/odoo-clearcorp | account_name_extended/account_name_extended.py | Python | agpl-3.0 | 17,555 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.exceptions import Warning
from openerp.tools import float_compare
from openerp.tools.translate import _
import product
class stock_landed_cost(osv.osv):
    """Landed cost document.

    Spreads additional costs (freight, insurance, customs duties, ...) over
    the stock moves of the selected incoming pickings, updates the quants'
    unit cost and posts the corresponding journal entries on validation.
    """
    _name = 'stock.landed.cost'
    _description = 'Stock Landed Cost'
    _inherit = 'mail.thread'

    # Chatter subtype: notify followers when the document reaches 'done'.
    _track = {
        'state': {
            'stock_landed_costs.mt_stock_landed_cost_open': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
        },
    }

    def _total_amount(self, cr, uid, ids, name, args, context=None):
        """Functional field: total landed cost, i.e. the sum of the unit
        prices of all cost lines."""
        result = {}
        for cost in self.browse(cr, uid, ids, context=context):
            total = 0.0
            for line in cost.cost_lines:
                total += line.price_unit
            result[cost.id] = total
        return result

    def _get_cost_line(self, cr, uid, ids, context=None):
        """store= trigger: map modified cost lines to the landed cost ids
        whose total must be recomputed."""
        cost_to_recompute = []
        for line in self.pool.get('stock.landed.cost.lines').browse(cr, uid, ids, context=context):
            cost_to_recompute.append(line.cost_id.id)
        return cost_to_recompute

    def get_valuation_lines(self, cr, uid, ids, picking_ids=None, context=None):
        """Build the values of one valuation adjustment line per stock move
        of the given pickings.

        :param picking_ids: ids of the pickings whose moves are impacted
        :return: list of dicts suitable for stock.valuation.adjustment.lines
        :raises osv.except_osv: when the pickings contain no eligible move
        """
        picking_obj = self.pool.get('stock.picking')
        lines = []
        if not picking_ids:
            return lines
        for picking in picking_obj.browse(cr, uid, picking_ids):
            for move in picking.move_lines:
                #it doesn't make sense to make a landed cost for a product that isn't set as being valuated in real time at real cost
                #if move.product_id.valuation != 'real_time' or move.product_id.cost_method != 'real':
                #    continue
                total_cost = 0.0
                total_qty = move.product_qty
                weight = move.product_id and move.product_id.weight * move.product_qty
                volume = move.product_id and move.product_id.volume * move.product_qty
                # Sum the (per-unit) cost of every quant of the move.
                for quant in move.quant_ids:
                    total_cost += quant.cost
                # NOTE(review): former_cost multiplies the summed quant costs
                # by the move quantity — confirm this is the intended basis
                # for the 'by_current_cost_price' split method.
                vals = dict(product_id=move.product_id.id, move_id=move.id, quantity=move.product_uom_qty, former_cost=total_cost * total_qty, weight=weight, volume=volume)
                lines.append(vals)
        if not lines:
            raise osv.except_osv(_('Error!'), _('The selected picking does not contain any move that would be impacted by landed costs. Landed costs are only possible for products configured in real time valuation with real price costing method. Please make sure it is the case, or you selected the correct picking'))
        return lines

    _columns = {
        'name': fields.char('Name', track_visibility='always', readonly=True, copy=False),
        'date': fields.date('Date', required=True, states={'done': [('readonly', True)]}, track_visibility='onchange', copy=False),
        'picking_ids': fields.many2many('stock.picking', string='Pickings', states={'done': [('readonly', True)]}, copy=False),
        'cost_lines': fields.one2many('stock.landed.cost.lines', 'cost_id', 'Cost Lines', states={'done': [('readonly', True)]}, copy=True),
        'valuation_adjustment_lines': fields.one2many('stock.valuation.adjustment.lines', 'cost_id', 'Valuation Adjustments', states={'done': [('readonly', True)]}),
        'description': fields.text('Item Description', states={'done': [('readonly', True)]}),
        'amount_total': fields.function(_total_amount, type='float', string='Total', digits_compute=dp.get_precision('Account'),
                                        store={
                                            'stock.landed.cost': (lambda self, cr, uid, ids, c={}: ids, ['cost_lines'], 20),
                                            'stock.landed.cost.lines': (_get_cost_line, ['price_unit', 'quantity', 'cost_id'], 20),
                                        }, track_visibility='always'
                                        ),
        'state': fields.selection([('draft', 'Draft'), ('done', 'Posted'), ('cancel', 'Cancelled')], 'State', readonly=True, track_visibility='onchange', copy=False),
        'account_move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False),
        'account_journal_id': fields.many2one('account.journal', 'Account Journal', required=True, states={'done': [('readonly', True)]}),
    }

    _defaults = {
        'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.landed.cost'),
        'state': 'draft',
        'date': fields.date.context_today,
    }

    def _create_accounting_entries(self, cr, uid, line, move_id, qty_out, context=None):
        """Resolve the debit/credit accounts for one valuation line and
        delegate the creation of the move lines.

        :raises osv.except_osv: when no expense account can be resolved
        """
        product_obj = self.pool.get('product.template')
        cost_product = line.cost_line_id and line.cost_line_id.product_id
        if not cost_product:
            return False
        accounts = product_obj.get_product_accounts(cr, uid, line.product_id.product_tmpl_id.id, context=context)
        debit_account_id = accounts['property_stock_valuation_account_id']
        already_out_account_id = accounts['stock_account_output']
        # Credit account: cost line's account, else the cost product's
        # expense account, else its category's expense account.
        credit_account_id = line.cost_line_id.account_id.id or cost_product.property_account_expense.id or cost_product.categ_id.property_account_expense_categ.id
        if not credit_account_id:
            raise osv.except_osv(_('Error!'), _('Please configure Stock Expense Account for product: %s.') % (cost_product.name))
        return self._create_account_move_line(cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=context)

    def _create_account_move_line(self, cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=None):
        """
        Generate the account.move.line values to track the landed cost.
        Afterwards, for the goods that are already out of stock, we should create the out moves
        """
        aml_obj = self.pool.get('account.move.line')
        base_line = {
            'name': line.name,
            'move_id': move_id,
            'product_id': line.product_id.id,
            'quantity': line.quantity,
        }
        debit_line = dict(base_line, account_id=debit_account_id)
        credit_line = dict(base_line, account_id=credit_account_id)
        diff = line.additional_landed_cost
        if diff > 0:
            debit_line['debit'] = diff
            credit_line['credit'] = diff
        else:
            # negative cost, reverse the entry
            debit_line['credit'] = -diff
            credit_line['debit'] = -diff
        aml_obj.create(cr, uid, debit_line, context=context)
        aml_obj.create(cr, uid, credit_line, context=context)

        # Create account move lines for quants already out of stock, for the
        # proportional share of the additional cost.
        if qty_out > 0:
            debit_line = dict(debit_line,
                              name=(line.name + ": " + str(qty_out) + _(' already out')),
                              quantity=qty_out)
            credit_line = dict(credit_line,
                               name=(line.name + ": " + str(qty_out) + _(' already out')),
                               quantity=qty_out)
            diff = diff * qty_out / line.quantity
            if diff > 0:
                debit_line['debit'] = diff
                credit_line['credit'] = diff
            else:
                # negative cost, reverse the entry
                debit_line['credit'] = -diff
                credit_line['debit'] = -diff
            aml_obj.create(cr, uid, debit_line, context=context)
            aml_obj.create(cr, uid, credit_line, context=context)
        return True

    def _create_account_move(self, cr, uid, cost, context=None):
        """Create the (empty) account move that will hold the landed cost
        journal items, in the document's journal and period."""
        vals = {
            'journal_id': cost.account_journal_id.id,
            'period_id': self.pool.get('account.period').find(cr, uid, cost.date, context=context)[0],
            'date': cost.date,
            'ref': cost.name
        }
        return self.pool.get('account.move').create(cr, uid, vals, context=context)

    def _check_sum(self, cr, uid, landed_cost, context=None):
        """
        Will check if each cost line its valuation lines sum to the correct amount
        and if the overall total amount is correct also
        """
        costcor = {}
        tot = 0
        for valuation_line in landed_cost.valuation_adjustment_lines:
            if costcor.get(valuation_line.cost_line_id):
                costcor[valuation_line.cost_line_id] += valuation_line.additional_landed_cost
            else:
                costcor[valuation_line.cost_line_id] = valuation_line.additional_landed_cost
            tot += valuation_line.additional_landed_cost
        prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
        # float_compare returns 0 for equal amounts
        res = not bool(float_compare(tot, landed_cost.amount_total, precision_digits=prec))
        for costl in costcor.keys():
            if float_compare(costcor[costl], costl.price_unit, precision_digits=prec):
                res = False
        return res

    def button_validate(self, cr, uid, ids, context=None):
        """Post the landed cost: create the account move, add the extra
        per-unit cost to each impacted quant, create the accounting entries
        and move the document to 'done'."""
        quant_obj = self.pool.get('stock.quant')
        for cost in self.browse(cr, uid, ids, context=context):
            if cost.state != 'draft':
                raise Warning(_('Only draft landed costs can be validated'))
            if not cost.valuation_adjustment_lines or not self._check_sum(cr, uid, cost, context=context):
                raise osv.except_osv(_('Error!'), _('You cannot validate a landed cost which has no valid valuation lines.'))
            move_id = self._create_account_move(cr, uid, cost, context=context)
            quant_dict = {}
            for line in cost.valuation_adjustment_lines:
                if not line.move_id:
                    continue
                # Extra cost per unit brought by this valuation line.
                per_unit = line.final_cost / line.quantity
                diff = per_unit - line.former_cost_per_unit
                quants = [quant for quant in line.move_id.quant_ids]
                for quant in quants:
                    if quant.id not in quant_dict:
                        quant_dict[quant.id] = quant.cost + diff
                    else:
                        quant_dict[quant.id] += diff
                # FIX: removed a leftover debug ``print value`` statement that
                # polluted the server log on every validation.
                for key, value in quant_dict.items():
                    quant_obj.write(cr, uid, key, {'cost': value}, context=context)
                # Quantity already delivered: needs dedicated "already out"
                # journal items instead of a stock valuation update.
                qty_out = 0
                for quant in line.move_id.quant_ids:
                    if quant.location_id.usage != 'internal':
                        qty_out += quant.qty
                self._create_accounting_entries(cr, uid, line, move_id, qty_out, context=context)
            self.write(cr, uid, cost.id, {'state': 'done', 'account_move_id': move_id}, context=context)
        return True

    def button_cancel(self, cr, uid, ids, context=None):
        """Cancel the landed cost; posted documents cannot be cancelled."""
        # NOTE(review): browse(ids) is used as a singleton here (cost.state);
        # calling with several ids relies on the new-API recordset semantics.
        cost = self.browse(cr, uid, ids, context=context)
        if cost.state == 'done':
            raise Warning(_('Validated landed costs cannot be cancelled, '
                            'but you could create negative landed costs to reverse them'))
        return cost.write({'state': 'cancel'})

    def unlink(self, cr, uid, ids, context=None):
        # cancel or raise first
        self.button_cancel(cr, uid, ids, context)
        return super(stock_landed_cost, self).unlink(cr, uid, ids, context=context)

    def compute_landed_cost(self, cr, uid, ids, context=None):
        """(Re)compute the valuation adjustment lines: delete the existing
        ones, recreate one per (stock move, cost line) pair, and split each
        cost line's amount according to its split method."""
        line_obj = self.pool.get('stock.valuation.adjustment.lines')
        unlink_ids = line_obj.search(cr, uid, [('cost_id', 'in', ids)], context=context)
        line_obj.unlink(cr, uid, unlink_ids, context=context)
        towrite_dict = {}
        # FIX: propagate the caller's context (was hard-coded to None).
        for cost in self.browse(cr, uid, ids, context=context):
            if not cost.picking_ids:
                continue
            picking_ids = [p.id for p in cost.picking_ids]
            # Totals used as denominators by the split methods below.
            total_qty = 0.0
            total_cost = 0.0
            total_weight = 0.0
            total_volume = 0.0
            total_line = 0.0
            vals = self.get_valuation_lines(cr, uid, [cost.id], picking_ids=picking_ids, context=context)
            for v in vals:
                for line in cost.cost_lines:
                    v.update({'cost_id': cost.id, 'cost_line_id': line.id})
                    self.pool.get('stock.valuation.adjustment.lines').create(cr, uid, v, context=context)
                total_qty += v.get('quantity', 0.0)
                total_cost += v.get('former_cost', 0.0)
                total_weight += v.get('weight', 0.0)
                total_volume += v.get('volume', 0.0)
                total_line += 1
            for line in cost.cost_lines:
                for valuation in cost.valuation_adjustment_lines:
                    value = 0.0
                    if valuation.cost_line_id and valuation.cost_line_id.id == line.id:
                        # Split the cost line's amount over the valuation
                        # lines according to the chosen split method; fall
                        # back to an equal split when the denominator is 0.
                        if line.split_method == 'by_quantity' and total_qty:
                            per_unit = (line.price_unit / total_qty)
                            value = valuation.quantity * per_unit
                        elif line.split_method == 'by_weight' and total_weight:
                            per_unit = (line.price_unit / total_weight)
                            value = valuation.weight * per_unit
                        elif line.split_method == 'by_volume' and total_volume:
                            per_unit = (line.price_unit / total_volume)
                            value = valuation.volume * per_unit
                        elif line.split_method == 'equal':
                            value = (line.price_unit / total_line)
                        elif line.split_method == 'by_current_cost_price' and total_cost:
                            per_unit = (line.price_unit / total_cost)
                            value = valuation.former_cost * per_unit
                        else:
                            value = (line.price_unit / total_line)
                        if valuation.id not in towrite_dict:
                            towrite_dict[valuation.id] = value
                        else:
                            towrite_dict[valuation.id] += value
        if towrite_dict:
            for key, value in towrite_dict.items():
                line_obj.write(cr, uid, key, {'additional_landed_cost': value}, context=context)
        return True
class stock_landed_cost_lines(osv.osv):
    """One extra cost (freight, customs, ...) attached to a landed cost
    document; its amount is spread over the valuation adjustment lines."""
    _name = 'stock.landed.cost.lines'
    _description = 'Stock Landed Cost Lines'

    def onchange_product_id(self, cr, uid, ids, product_id=False, context=None):
        """Prefill description, split method, cost and expense account from
        the selected product; reset quantity/price when it is cleared."""
        if not product_id:
            return {'value': {'quantity': 0.0, 'price_unit': 0.0}}
        prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        # Product's own expense account, falling back to its category's.
        expense_account = prod.property_account_expense and prod.property_account_expense.id or prod.categ_id.property_account_expense_categ.id
        return {
            'value': {
                'name': prod.name,
                'split_method': prod.split_method,
                'price_unit': prod.standard_price,
                'account_id': expense_account,
            }
        }

    _columns = {
        'name': fields.char('Description'),
        'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'price_unit': fields.float('Cost', required=True, digits_compute=dp.get_precision('Product Price')),
        'split_method': fields.selection(product.SPLIT_METHOD, string='Split Method', required=True),
        'account_id': fields.many2one('account.account', 'Account', domain=[('type', '<>', 'view'), ('type', '<>', 'closed')]),
    }
class stock_valuation_adjustment_lines(osv.osv):
    """One (stock move, cost line) pair of a landed cost document, carrying
    the share of the additional cost allocated to that move."""
    _name = 'stock.valuation.adjustment.lines'
    _description = 'Stock Valuation Adjustment Lines'

    def _amount_final(self, cr, uid, ids, name, args, context=None):
        """Functional field (multi='cost'): former cost per unit and final
        cost (former cost + additional landed cost)."""
        result = {}
        for line in self.browse(cr, uid, ids, context=context):
            result[line.id] = {
                'former_cost_per_unit': 0.0,
                'final_cost': 0.0,
            }
            # NOTE(review): as parenthesized, this yields 1.0 (not
            # former_cost) when quantity is falsy; the intent was probably
            # ``former_cost / (quantity or 1.0)`` — confirm before changing.
            result[line.id]['former_cost_per_unit'] = (line.former_cost / line.quantity if line.quantity else 1.0)
            result[line.id]['final_cost'] = (line.former_cost + line.additional_landed_cost)
        return result

    def _get_name(self, cr, uid, ids, name, arg, context=None):
        """Functional field: '<product code or name> - <cost line name>'."""
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            res[line.id] = line.product_id.code or line.product_id.name or ''
            if line.cost_line_id:
                res[line.id] += ' - ' + line.cost_line_id.name
        return res

    _columns = {
        'name': fields.function(_get_name, type='char', string='Description', store=True),
        'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
        'cost_line_id': fields.many2one('stock.landed.cost.lines', 'Cost Line', readonly=True),
        'move_id': fields.many2one('stock.move', 'Stock Move', readonly=True),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'weight': fields.float('Weight', digits_compute=dp.get_precision('Product Unit of Measure')),
        'volume': fields.float('Volume', digits_compute=dp.get_precision('Product Unit of Measure')),
        'former_cost': fields.float('Former Cost', digits_compute=dp.get_precision('Product Price')),
        'former_cost_per_unit': fields.function(_amount_final, multi='cost', string='Former Cost(Per Unit)', type='float', digits_compute=dp.get_precision('Account'), store=True),
        'additional_landed_cost': fields.float('Additional Landed Cost', digits_compute=dp.get_precision('Product Price')),
        'final_cost': fields.function(_amount_final, multi='cost', string='Final Cost', type='float', digits_compute=dp.get_precision('Account'), store=True),
    }

    _defaults = {
        'quantity': 1.0,
        'weight': 1.0,
        'volume': 1.0,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Jgarcia-IAS/SAT | openerp/addons-extra/odoo-pruebas/odoo-server/addons/stock_landed_costs/stock_landed_costs.py | Python | agpl-3.0 | 19,099 |
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
import unittest
from astroid import builder, nodes
from astroid.node_classes import are_exclusive
# NOTE(review): this rebinding shadows the imported ``builder`` module with an
# AstroidBuilder instance; every later use refers to the instance.
builder = builder.AstroidBuilder()
class AreExclusiveTC(unittest.TestCase):
    """Behavioural tests for ``are_exclusive``: two nodes are 'exclusive'
    when they can never both execute in the same run (e.g. different
    branches of an ``if`` or of a ``try``/``except``)."""

    def test_not_exclusive(self):
        """Nodes in a loop body / subsequent ``if`` are never exclusive."""
        astroid = builder.string_build("""
x = 10
for x in range(5):
    print (x)
if x > 0:
    print ('#' * x)
        """, __name__, __file__)
        xass1 = astroid.locals['x'][0]
        assert xass1.lineno == 2
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if n.name == 'x']
        assert len(xnames) == 3
        assert xnames[1].lineno == 6
        self.assertEqual(are_exclusive(xass1, xnames[1]), False)
        self.assertEqual(are_exclusive(xass1, xnames[2]), False)

    def test_if(self):
        """Assignments in different ``if``/``elif``/``else`` branches are
        exclusive; assignments in the same branch are not."""
        astroid = builder.string_build('''
if 1:
    a = 1
    a = 2
elif 2:
    a = 12
    a = 13
else:
    a = 3
    a = 4
        ''')
        a1 = astroid.locals['a'][0]
        a2 = astroid.locals['a'][1]
        a3 = astroid.locals['a'][2]
        a4 = astroid.locals['a'][3]
        a5 = astroid.locals['a'][4]
        a6 = astroid.locals['a'][5]
        self.assertEqual(are_exclusive(a1, a2), False)
        self.assertEqual(are_exclusive(a1, a3), True)
        self.assertEqual(are_exclusive(a1, a5), True)
        self.assertEqual(are_exclusive(a3, a5), True)
        self.assertEqual(are_exclusive(a3, a4), False)
        self.assertEqual(are_exclusive(a5, a6), False)

    def test_try_except(self):
        """``try``/``except`` branches are mutually exclusive, but the
        ``try`` body and the ``else`` clause are not (else runs after try)."""
        astroid = builder.string_build('''
try:
    def exclusive_func2():
        "docstring"
except TypeError:
    def exclusive_func2():
        "docstring"
except:
    def exclusive_func2():
        "docstring"
else:
    def exclusive_func2():
        "this one redefine the one defined line 42"
        ''')
        f1 = astroid.locals['exclusive_func2'][0]
        f2 = astroid.locals['exclusive_func2'][1]
        f3 = astroid.locals['exclusive_func2'][2]
        f4 = astroid.locals['exclusive_func2'][3]
        self.assertEqual(are_exclusive(f1, f2), True)
        self.assertEqual(are_exclusive(f1, f3), True)
        # try body and else clause both execute on the success path.
        self.assertEqual(are_exclusive(f1, f4), False)
        self.assertEqual(are_exclusive(f2, f4), True)
        self.assertEqual(are_exclusive(f3, f4), True)
        self.assertEqual(are_exclusive(f3, f2), True)
        self.assertEqual(are_exclusive(f2, f1), True)
        self.assertEqual(are_exclusive(f4, f1), False)
        self.assertEqual(are_exclusive(f4, f2), True)
# Allow running this test module directly (``python unittest_utils.py``).
if __name__ == '__main__':
    unittest.main()
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/astroid/tests/unittest_utils.py | Python | agpl-3.0 | 3,313 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Voropp(MakefilePackage):
    """Voro++ is a open source software library for the computation of the
    Voronoi diagram, a widely-used tessellation that has applications in many
    scientific fields."""

    homepage = "http://math.lbl.gov/voro++/about.html"
    url = "http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz"

    variant('pic', default=True,
            description='Position independent code')

    version('0.4.6', sha256='ef7970071ee2ce3800daa8723649ca069dc4c71cc25f0f7d22552387f3ea437e')

    def edit(self, spec, prefix):
        """Patch config.mk so the build uses Spack's compiler and prefix."""
        # Pairs of (pattern, replacement) applied to config.mk in order.
        substitutions = [
            (r'CC=g\+\+', 'CC={0}'.format(self.compiler.cxx)),
            (r'PREFIX=/usr/local', 'PREFIX={0}'.format(self.prefix)),
        ]
        # We can safely replace the default CFLAGS which are:
        # CFLAGS=-Wall -ansi -pedantic -O3
        cflags = ''
        if '+pic' in spec:
            cflags += self.compiler.cc_pic_flag
        substitutions.append((r'CFLAGS=.*', 'CFLAGS={0}'.format(cflags)))

        for pattern, replacement in substitutions:
            filter_file(pattern, replacement, 'config.mk')
| iulian787/spack | var/spack/repos/builtin/packages/voropp/package.py | Python | lgpl-2.1 | 1,349 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Dtbuild2(Package):
    """Simple package which acts as a build dependency"""

    homepage = "http://www.example.com"
    url = "http://www.example.com/dtbuild2-1.0.tar.gz"

    version('1.0', '0123456789abcdef0123456789abcdef')

    def install(self, spec, prefix):
        # Mock package used by Spack's test suite: nothing to install.
        pass
| TheTimmy/spack | var/spack/repos/builtin.mock/packages/dtbuild2/package.py | Python | lgpl-2.1 | 1,542 |
#!/usr/bin/python
from pyparsing import *
from charm.toolbox.node import *
import string
# Module-global stack filled by the pyparsing parse actions (pushFirst) in
# postfix order and consumed by PolicyParser.evalStack.  Cleared on every
# PolicyParser.parse() call, so parsing is not reentrant/thread-safe.
objStack = []
def createAttribute(s, loc, toks):
    """pyparsing parse action: wrap a matched attribute token in a BinNode.

    A leading '!' (negation) is folded together with the attribute name
    into a single node label.
    """
    if toks[0] == '!':
        return BinNode(''.join(toks))
    return BinNode(toks[0])  # create
# convert 'attr < value' to a binary tree based on 'or' and 'and'
def parseNumConditional(s, loc, toks):
    # pyparsing parse action for numeric conditionals such as "attr < 5".
    # NOTE(review): only toks[0] (the attribute) is wrapped; the operator
    # and value tokens appear to be dropped -- confirm against BinNode use.
    print("print: %s" % toks)
    return BinNode(toks[0])
def printStuff(s, loc, toks):
    # Debug parse action: echo the matched tokens and pass them through.
    print("print: %s" % toks)
    return toks
def pushFirst( s, loc, toks ):
    # Parse action: record the first matched token on the global postfix
    # stack (objStack), later consumed by PolicyParser.evalStack.
    objStack.append( toks[0] )
def createTree(op, node1, node2):
    """Build a BinNode combining node1 and node2 under 'or' / 'and'.

    Returns None for any operator other than "or"/"and".
    """
    op_types = {"or": OpType.OR, "and": OpType.AND}
    if op not in op_types:
        return None
    node = BinNode(op_types[op])
    node.addSubNode(node1, node2)
    return node
class PolicyParser:
    """Parses boolean policy strings (e.g. "(a or b) and c") into BinNode
    trees and prunes them against a user's attribute set."""

    def __init__(self, verbose=False):
        # Compiled pyparsing grammar; built once per parser instance.
        self.finalPol = self.getBNF()
        self.verbose = verbose

    def getBNF(self):
        """Build and return the pyparsing grammar for policy expressions."""
        # supported operators => (OR, AND, <
        OperatorOR = Literal("OR").setParseAction(downcaseTokens) | Literal("or")
        OperatorAND = Literal("AND").setParseAction(downcaseTokens) | Literal("and")
        Operator = OperatorAND | OperatorOR
        lpar = Literal("(").suppress()
        rpar = Literal(")").suppress()
        BinOperator = Literal("<=") | Literal(">=") | Literal("==") | Word("<>", max=1)

        # describes an individual leaf node
        leafNode = (Optional("!") + Word(alphanums+'-_./\?!@#$^&*%')).setParseAction( createAttribute )
        # describes expressions such as (attr < value)
        leafConditional = (Word(alphanums) + BinOperator + Word(nums)).setParseAction( parseNumConditional )
        # describes the node concept
        node = leafConditional | leafNode

        # Recursive grammar: parenthesized expressions or leaves, combined
        # with and/or.  pushFirst records tokens in postfix order on the
        # global objStack for later tree reconstruction in evalStack.
        expr = Forward()
        term = Forward()
        atom = lpar + expr + rpar | (node).setParseAction( pushFirst )
        term = atom + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
        expr << term + ZeroOrMore((Operator + term).setParseAction( pushFirst ))
        finalPol = expr#.setParseAction( printStuff )
        return finalPol

    def evalStack(self, stack):
        # Rebuild the expression tree from the postfix stack produced by
        # the pushFirst parse actions (consumed right-to-left).
        op = stack.pop()
        if op in ["or", "and"]:
            op2 = self.evalStack(stack)
            op1 = self.evalStack(stack)
            return createTree(op, op1, op2)
        else:
            # Node value (attribute)
            return op

    def parse(self, string):
        """Parse a policy string into a BinNode tree.

        Uses (and clears) the module-global objStack, so concurrent parses
        are not safe.
        """
        global objStack
        del objStack[:]
        self.finalPol.parseString(string)
        return self.evalStack(objStack)

    def findDuplicates(self, tree, _dict):
        # Post-order walk counting occurrences of each attribute in _dict.
        if tree.left: self.findDuplicates(tree.left, _dict)
        if tree.right: self.findDuplicates(tree.right, _dict)
        if tree.getNodeType() == OpType.ATTR:
            key = tree.getAttribute()
            if _dict.get(key) == None: _dict[ key ] = 1
            else: _dict[ key ] += 1

    def labelDuplicates(self, tree, _dictLabel):
        # Post-order walk assigning a distinct, increasing index to each
        # occurrence of attributes listed in _dictLabel.
        if tree.left: self.labelDuplicates(tree.left, _dictLabel)
        if tree.right: self.labelDuplicates(tree.right, _dictLabel)
        if tree.getNodeType() == OpType.ATTR:
            key = tree.getAttribute()
            if _dictLabel.get(key) != None:
                tree.index = _dictLabel[ key ]
                _dictLabel[ key ] += 1

    def prune(self, tree, attributes):
        """given policy tree and attributes, determine whether the attributes satisfy the policy.
        if not enough attributes to satisfy policy, return None otherwise, a pruned list of
        attributes to potentially recover the associated secret.
        """
        # NOTE(review): on failure this returns False (the unsatisfied
        # flag), not None as the docstring says -- confirm callers.
        (policySatisfied, prunedList) = self.requiredAttributes(tree, attributes)
        # print("pruned attrs: ", prunedList)
        # if prunedList:
        # for i in prunedList:
        # print("node: ", i)
        if not policySatisfied:
            return policySatisfied
        return prunedList

    def requiredAttributes(self, tree, attrList):
        """ determines the required attributes to satisfy policy tree and returns a list of BinNode
        objects."""
        # NOTE(review): the empty-tree case returns a bare 0 rather than a
        # (bool, list) pair like every other path -- confirm callers.
        if tree == None: return 0
        Left = tree.getLeft()
        Right = tree.getRight()
        # NOTE(review): resultLeft/leftAttr (resp. resultRight/rightAttr)
        # are read below even when the corresponding child is absent; an
        # OR/AND node with a missing child would raise UnboundLocalError.
        if Left: resultLeft, leftAttr = self.requiredAttributes(Left, attrList)
        if Right: resultRight, rightAttr = self.requiredAttributes(Right, attrList)
        if(tree.getNodeType() == OpType.OR):
            # never return both attributes, basically the first one that matches from left to right
            if resultLeft: sendThis = leftAttr
            elif resultRight: sendThis = rightAttr
            else: sendThis = None

            result = (resultLeft or resultRight)
            if result == False: return (False, sendThis)
            return (True, sendThis)
        if(tree.getNodeType() == OpType.AND):
            # AND requires both subtrees; concatenate their attribute lists.
            if resultLeft and resultRight: sendThis = leftAttr + rightAttr
            elif resultLeft: sendThis = leftAttr
            elif resultRight: sendThis = rightAttr
            else: sendThis = None

            result = (resultLeft and resultRight)
            if result == False: return (False, sendThis)
            return (True, sendThis)
        elif(tree.getNodeType() == OpType.ATTR):
            if(tree.getAttribute() in attrList):
                return (True, [tree])
            else:
                return (False, None)
        # NOTE(review): falls through returning None for unexpected node
        # types, which callers unpacking a 2-tuple would not handle.
        return
if __name__ == "__main__":
    # policy parser test cases
    parser = PolicyParser()
    attrs = ['1', '3']
    print("Attrs in user set: ", attrs)
    # NOTE(review): this policy string has an unbalanced trailing ')';
    # pyparsing parses the longest valid prefix -- confirm intended.
    tree1 = parser.parse("(1 or 2) and (2 and 3))")
    print("case 1: ", tree1, ", pruned: ", parser.prune(tree1, attrs))
    tree2 = parser.parse("1 or (2 and 3)")
    print("case 2: ", tree2, ", pruned: ", parser.prune(tree2, attrs))
    tree3 = parser.parse("(1 or 2) and (4 or 3)")
    print("case 3: ", tree3, ", pruned: ", parser.prune(tree3, attrs))
| lferr/charm | charm/toolbox/policytree.py | Python | lgpl-3.0 | 6,070 |
#!/usr/bin/env python2
# Extractor for Excel files.
# Mso-dumper is not compatible with Python3. We use sys.executable to
# start the actual extractor, so we need to use python2 too.
import rclexecm
import rclexec1
import xlsxmltocsv
import re
import sys
import os
import xml.sax
class XLSProcessData:
    """Accumulates helper-command output for one document.

    With ishtml=False the collected lines are treated as xls-dump.py XML and
    converted to text via xlsxmltocsv, wrapped in a minimal HTML document.
    With ishtml=True the input is already HTML and is passed through as-is.
    """
    def __init__(self, em, ishtml = False):
        self.em = em            # rclexecm helper, used for HTML escaping
        self.out = ""           # accumulated output document
        self.gotdata = 0        # set once the HTML prolog has been emitted
        self.xmldata = ""       # raw XML accumulated from the helper
        self.ishtml = ishtml

    def takeLine(self, line):
        # Called once per line of helper output.
        if self.ishtml:
            self.out += line + "\n"
            return
        if not self.gotdata:
            # Emit the HTML prolog exactly once, before the first data line.
            self.out += '''<html><head>''' + \
                '''<meta http-equiv="Content-Type" ''' + \
                '''content="text/html;charset=UTF-8">''' + \
                '''</head><body><pre>'''
            self.gotdata = True
        self.xmldata += line

    def wrapData(self):
        # Finalize and return the output document.  For the XML case this
        # runs the SAX conversion over everything collected so far.
        if self.ishtml:
            return self.out
        handler = xlsxmltocsv.XlsXmlHandler()
        data = xml.sax.parseString(self.xmldata, handler)
        self.out += self.em.htmlescape(handler.output)
        return self.out + '''</pre></body></html>'''
class XLSFilter:
    """rclexec1-style filter: chooses the external command used to extract
    text from an XLS file (xls-dump.py, or cat for HTML masquerading as XLS)."""
    def __init__(self, em):
        self.em = em
        self.ntry = 0   # guards against retrying the same file forever

    def reset(self):
        # Called by the driver between documents.
        self.ntry = 0
        pass

    def getCmd(self, fn):
        # Return (command, data-sink[, options]) for processing file fn,
        # or ([], None) when no further attempt should be made.
        if self.ntry:
            return ([], None)
        self.ntry = 1
        # Some HTML files masquerade as XLS
        try:
            data = open(fn, 'rb').read(512)
            if data.find('html') != -1 or data.find('HTML') != -1:
                # NOTE(review): returns the command as a plain string here
                # but as a list below -- presumably rclexec1 accepts both;
                # confirm.  Also note str/bytes mixing is Python-2-only.
                return ("cat", XLSProcessData(self.em, True))
        except Exception as err:
            self.em.rclog("Error reading %s:%s" % (fn, str(err)))
            pass
        cmd = rclexecm.which("xls-dump.py")
        if cmd:
            # xls-dump.py often exits 1 with valid data. Ignore exit value
            return ([sys.executable, cmd, "--dump-mode=canonical-xml", \
                     "--utf-8", "--catch"],
                    XLSProcessData(self.em), rclexec1.Executor.opt_ignxval)
        else:
            return ([], None)
if __name__ == '__main__':
    # Fail fast with the standard recoll "helper not found" message if the
    # mso-dumper helper is missing.  Fix: the message previously named
    # ppt-dump.py although the helper checked for (and used) is xls-dump.py.
    if not rclexecm.which("xls-dump.py"):
        print("RECFILTERROR HELPERNOTFOUND xls-dump.py")
        sys.exit(1)
    proto = rclexecm.RclExecM()
    filter = XLSFilter(proto)
    extract = rclexec1.Executor(proto, filter)
    rclexecm.main(proto, extract)
| CellulaProject/icc.cellula | src/icc/data/recoll/filters/rclxls.py | Python | lgpl-3.0 | 2,469 |
# Copyright 2011 Gilt Groupe, INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The idrac6 module contains all the functionality that mothership will need
to interact with the Dell DRACs using pexpect and some IPMI commands
"""
import os
import sys
import re
import pexpect
import subprocess
def check_known_hosts(host, user='~'):
    # check for ssh host key
    # Scans the system and per-user known_hosts files for `host`; if absent,
    # fetches the key with ssh-keyscan and appends it to the user's file.
    hasKey = False
    userknownhosts = os.path.expanduser('%s/.ssh/known_hosts' % user)
    for file in [ '/etc/ssh/ssh_known_hosts', userknownhosts ]:
        for line in open(file):
            if host in line:
                hasKey = True
                break
        # NOTE(review): the break above only exits the inner loop, so the
        # second file is still scanned after a hit (harmless but wasteful).
    if not hasKey:
        print '+=== Adding %s to known_hosts' % host
        key = subprocess.Popen(['ssh-keyscan', host],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE).\
            communicate()[0]
        # NOTE(review): file handles opened in this function are never
        # explicitly closed (Python 2 era code; relies on refcounting).
        f = open(userknownhosts, 'a')
        f.write(key)
def query_idrac(cfg, host):
    # SSH into the DRAC as the configured power user (cfg.puser/cfg.ppass)
    # and return the raw text printed by 'racadm getsysinfo'; parsed later
    # by sysinfo().
    check_known_hosts(host)
    child = pexpect.spawn('ssh %s@%s' % (cfg.puser, host))
    child.expect('assword:')
    child.sendline(cfg.ppass)
    child.expect('/admin1->')
    child.sendline('racadm getsysinfo')
    child.expect('/admin1->')
    # Everything printed between the command and the next prompt.
    data = child.before
    child.sendline('exit')
    child.expect('CLP Session terminated')
    return data
def sysinfo(info):
    """
    Parse the text returned by the DRAC 'racadm getsysinfo' command into a
    dictionary.

    Named regex groups become dictionary keys.  NIC<n> entries are renamed
    to eth<n-1>, and a PowerEdge model additionally records the
    manufacturer as 'Dell'.
    """
    parsers = [
        r'(Firmware Version\s+=\s(?P<firmware>[.\d]+))',
        r'(System BIOS Version\s+=\s(?P<bios>[.\d]+))',
        r'(System Model\s+=\s(?P<model>[ \w]+))',
        r'(Service Tag\s+=\s(?P<hw_tag>\w+))',
        r'(MAC Address\s+=\s(?P<drac>[a-f:0-9]+))',
        r'(?P<hwname>NIC\d+)\sEthernet\s+=\s(?P<hwaddr>[a-f:0-9]+)',
    ]
    out = {}
    for line in info.split('\n'):
        for pattern in parsers:
            found = re.search(pattern, line)
            if not found:
                continue
            groups = found.groupdict()
            if 'hwname' in groups:
                # NIC numbering starts at 1; eth numbering starts at 0.
                nic_num = int(found.group('hwname').replace('NIC', ''))
                out['eth%d' % (nic_num - 1)] = found.group('hwaddr')
            else:
                for name in groups:
                    out[name] = found.group(name)
                    if name == 'model' and re.match('PowerEdge', found.group(name)):
                        out['manufacturer'] = 'Dell'
    return out
def prep_idrac(cfg, host, debug=False, basics=True, ipmi=False, serial=False, telnet=None, gold=False):
    # Interactive (pexpect) provisioning of a Dell DRAC: creates admin and
    # trusted users, enables IPMI/serial/telnet as requested, installs SSH
    # public keys and rotates the default root password.  Statement order
    # follows the DRAC's prompt/response protocol and must not be changed.
    check_known_hosts(host)
    try:
        check_known_hosts(host, '~root')
    except IOError, e:
        print '%s\nDRAC prep must be run as root/sudo' % e
        sys.exit(1)
    configured = False
    # Collect the DSA public keys of the users allowed on the trusted account.
    pubkeys = []
    for p in cfg.dkeys:
        f = open('%s/.ssh/id_dsa.pub' % os.path.expanduser('~%s' % p))
        pubkeys.append(f.read().rstrip())
        f.close()
    # racadm command prefix and per-user configuration objects; the user
    # slot index (3 = power user, 4 = trusted/gold user) is inserted later.
    adm_pre = 'racadm config -g cfgUserAdmin -i'
    adm_obj = [
        '-o cfgUserAdminUserName %s' % cfg.puser,
        '-o cfgUserAdminPassword %s' % cfg.ppass,
        '-o cfgUserAdminPrivilege 0x000001ff',
        '-o cfgUserAdminIpmiLanPrivilege 4',
        '-o cfgUserAdminIpmiSerialPrivilege 4',
        '-o cfgUserAdminSolEnable 1',
        '-o cfgUserAdminEnable 1' ]
    # login as default root
    print '+=== Prepping DRAC for %s' % host
    child = pexpect.spawn('ssh %s@%s' % (cfg.duser,host))
    child.expect('assword:')
    child.sendline(cfg.ddell)
    ans = child.expect([ 'Permission denied', '/admin1->', pexpect.TIMEOUT ])
    if ans == 0:
        # login as power user
        print '+- Default root denied, attempting %s alternate' % cfg.puser
        child = pexpect.spawn('ssh %s@%s' % (cfg.puser,host))
        child.expect('assword:')
        child.sendline(cfg.ppass)
        newans = child.expect([ 'Permission denied', '/admin1->'])
        if newans == 0:
            print '+- Alternate %s failed, exiting' % cfg.puser
            sys.exit(2)
        userdata = 'default root disabled'
        configured = True
    elif ans == 1:
        # configure new admin user
        print '+- Adding DRAC user: %s' % cfg.puser
        for c in adm_obj:
            child.sendline('%s 3 %s' % (adm_pre,c))
            child.expect('/admin1->')
        child.sendline('racadm getconfig -u %s' % cfg.puser)
        child.expect('/admin1->')
        # Drop the echoed command line; keep the getconfig output.
        userdata = '\n'.join(child.before.split('\n')[1:])
        if debug: print userdata
    elif ans == 2:
        # timeout
        print '+- Default root login timed out, unknown error'
        sys.exit(2)
    if basics or ipmi: # enable IPMI
        print '+- Enabling IPMI'
        child.sendline('racadm config -g cfgIpmiLan -o cfgIpmiLanEnable 1')
        child.expect('/admin1->')
        child.sendline('racadm getconfig -g cfgIpmiLan')
        child.expect('/admin1->')
        if debug: print '\n'.join(child.before.split('\n')[1:])
    if basics or serial: # enable SerialConsole
        print '+- Enabling SerialConsole'
        child.sendline('racadm config -g cfgSerial -o cfgSerialConsoleEnable 1')
        child.expect('/admin1->')
        child.sendline('racadm getconfig -g cfgSerial')
        child.expect('/admin1->')
        if debug: print '\n'.join(child.before.split('\n')[1:])
    if telnet is not None: # enable Telnet
        # telnet is tri-state: None = leave alone, 0/1 = disable/enable.
        print '+- Enabling/Disabling Telnet'
        child.sendline('racadm config -g cfgSerial -o cfgSerialTelnetEnable %d' % telnet)
        child.expect('/admin1->')
        child.sendline('racadm getconfig -g cfgSerial')
        child.expect('/admin1->')
        if debug: print '\n'.join(child.before.split('\n')[1:])
    if basics or gold: # gold = trusted user
        # configure new trusted user
        adm_obj[0] = '-o cfgUserAdminUserName %s' % cfg.dgold
        adm_obj[1] = '-o cfgUserAdminPassword %s' % cfg.dpass
        print '+- Adding trusted DRAC user: %s' % cfg.dgold
        for c in adm_obj:
            child.sendline('%s 4 %s' % (adm_pre,c))
            child.expect('/admin1->')
        child.sendline('racadm getconfig -u %s' % cfg.dgold)
        child.expect('/admin1->')
        if debug: print '\n'.join(child.before.split('\n')[1:])
        # add keys to trusted user
        print '+- Adding keys for trusted user'
        for k in pubkeys:
            child.sendline('racadm sshpkauth -i 4 -k %d -t "%s"' % (pubkeys.index(k)+1, k))
            child.expect('/admin1->')
        child.sendline('racadm sshpkauth -v -i 4 -k all')
        child.expect('/admin1->')
        if debug: print '\n'.join(child.before.split('\n')[1:])
    # alter password for root user
    # Only rotate root's password once the power user is confirmed present.
    if cfg.puser in userdata:
        print '+- Changing password for: %s' % cfg.duser
        child.sendline('%s 2 -o cfgUserAdminPassword %s' % (adm_pre,cfg.dpass))
        child.expect('/admin1->')
        if debug: print '\n'.join(child.before.split('\n')[1:])
    else:
        print '+- Skipping password change for: %s' % cfg.duser
        if not configured: print ' because %s was not successfully created' % cfg.puser
    # leaving drac
    print '+- Exiting DRAC'
    child.sendline('exit')
    child.expect('CLP Session terminated')
    if basics or ipmi: # enable IPMI, continued
        # settings new admin user privileges with IPMI (apparently racadm was not enough)
        print '+- Updating IPMI privileges for non-root users'
        os.system('/usr/bin/ipmitool -H %s -U root -P %s user priv 3 4' % (host, cfg.dpass))
        os.system('/usr/bin/ipmitool -H %s -U root -P %s user priv 4 4' % (host, cfg.dpass))
        if debug: os.system('/usr/bin/ipmitool -H %s -U root -P %s user list' % (host, cfg.dpass))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import, unused-wildcard-import, too-many-lines
"""Sparse NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
from __builtin__ import sum as py_sum
except ImportError:
from builtins import slice as py_slice
from builtins import sum as py_sum
import ctypes
import warnings
from array import array as native_array
__all__ = ["_ndarray_cls", "csr_matrix", "row_sparse_array",
"BaseSparseNDArray", "CSRNDArray", "RowSparseNDArray"]
import numpy as np
from ..base import NotSupportedForSparseNDArray
from ..base import _LIB, numeric_types
from ..base import c_array_buf, mx_real_t, integer_types
from ..base import mx_uint, NDArrayHandle, check_call
from ..context import Context
from . import _internal
from . import op
try:
from .gen_sparse import * # pylint: disable=redefined-builtin
except ImportError:
pass
from ._internal import _set_ndarray_class
from .ndarray import NDArray, _storage_type, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
from .ndarray import _STORAGE_TYPE_STR_TO_ID, _STORAGE_TYPE_ROW_SPARSE, _STORAGE_TYPE_CSR
from .ndarray import _STORAGE_TYPE_UNDEFINED, _STORAGE_TYPE_DEFAULT
from .ndarray import zeros as _zeros_ndarray
from .ndarray import array as _array
try:
import scipy.sparse as spsp
except ImportError:
spsp = None
# Dtypes of the auxiliary (index) arrays required by each sparse storage
# type: row_sparse keeps one int64 indices array; csr keeps int64 indptr
# plus int64 indices.
_STORAGE_AUX_TYPES = {
    'row_sparse': [np.int64],
    'csr': [np.int64, np.int64]
}
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
    """Return a new handle with specified storage type, shape, dtype and context.

    Empty handle is only used to hold results

    Returns
    -------
    handle
        A new empty ndarray handle
    """
    hdl = NDArrayHandle()
    # The backend only supports int64 auxiliary (index) arrays.
    for aux_t in aux_types:
        if np.dtype(aux_t) != np.dtype("int64"):
            raise NotImplementedError("only int64 is supported for aux types")
    aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
    # Default each aux shape to the empty (0,) shape when not provided.
    aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
    aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
    # Flatten the per-aux shapes into one tuple for the C API; the lengths
    # above let the backend split it back apart.
    aux_shapes = py_sum(aux_shapes, ())
    num_aux = mx_uint(len(aux_types))
    check_call(_LIB.MXNDArrayCreateSparseEx(
        ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
        c_array_buf(mx_uint, native_array('I', shape)),
        mx_uint(len(shape)),
        ctypes.c_int(ctx.device_typeid),
        ctypes.c_int(ctx.device_id),
        ctypes.c_int(int(delay_alloc)),
        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
        num_aux,
        c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
        c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
        c_array_buf(mx_uint, native_array('I', aux_shapes)),
        ctypes.byref(hdl)))
    return hdl
class BaseSparseNDArray(NDArray):
    """The base class of an NDArray stored in a sparse storage format.

    See CSRNDArray and RowSparseNDArray for more details.
    """

    def __repr__(self):
        """Returns a string representation of the sparse array."""
        shape_info = 'x'.join(['%d' % x for x in self.shape])
        # The data content is not displayed since the array usually has big shape
        return '\n<%s %s @%s>' % (self.__class__.__name__,
                                  shape_info, self.context)

    # In-place arithmetic is not defined for the generic sparse base class;
    # concrete subclasses override these.
    def __iadd__(self, other):
        raise NotImplementedError()

    def __isub__(self, other):
        raise NotImplementedError()

    def __imul__(self, other):
        raise NotImplementedError()

    def __idiv__(self, other):
        raise NotImplementedError()

    def __itruediv__(self, other):
        raise NotImplementedError()

    def _sync_copyfrom(self, source_array):
        raise NotImplementedError()

    # Dense-only NDArray operations are explicitly rejected for sparse types.
    def _at(self, idx):
        raise NotSupportedForSparseNDArray(self._at, '[idx]', idx)

    def _slice(self, start, stop):
        raise NotSupportedForSparseNDArray(self._slice, None, start, stop)

    def reshape(self, shape):
        raise NotSupportedForSparseNDArray(self.reshape, None, shape)

    @property
    def size(self):
        # the `size` for a sparse ndarray is ambiguous, hence disabled.
        raise NotImplementedError()

    def _aux_type(self, i):
        """Data-type of the array's ith aux data.

        Returns
        -------
        numpy.dtype
            This BaseSparseNDArray's aux data type.
        """
        aux_type = ctypes.c_int()
        check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
        return _DTYPE_MX_TO_NP[aux_type.value]

    @property
    def _num_aux(self):
        """The number of aux data used to help store the sparse ndarray.
        """
        return len(_STORAGE_AUX_TYPES[self.stype])

    @property
    def _aux_types(self):
        """The data types of the aux data for the BaseSparseNDArray.
        """
        aux_types = []
        num_aux = self._num_aux
        for i in range(num_aux):
            aux_types.append(self._aux_type(i))
        return aux_types

    def asnumpy(self):
        """Return a dense ``numpy.ndarray`` object with value copied from this array
        """
        # Conversion goes through a dense copy first.
        return self.tostype('default').asnumpy()

    def astype(self, dtype):
        """Returns a copy of the array after casting to a specified type.

        Parameters
        ----------
        dtype : numpy.dtype or str
            The type of the returned array.

        Examples
        --------
        >>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
        >>> y = x.astype('int32')
        >>> y.dtype
        <type 'numpy.int32'>
        """
        res = zeros(shape=self.shape, ctx=self.context,
                    dtype=dtype, stype=self.stype)
        self.copyto(res)
        return res

    def copyto(self, other):
        """Copies the value of this array to another array.

        Parameters
        ----------
        other : NDArray or CSRNDArray or RowSparseNDArray or Context
            The destination array or context.

        Returns
        -------
        NDArray or CSRNDArray or RowSparseNDArray
            The copied array.
        """
        if isinstance(other, NDArray):
            if other.handle is self.handle:
                # Self-copy is a no-op (returns None after warning).
                warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
                return
            return _internal._copyto(self, out=other)
        elif isinstance(other, Context):
            # Allocate a delayed-alloc destination on the target context.
            hret = _ndarray_cls(_new_alloc_handle(self.stype, self.shape, other,
                                                  True, self.dtype, self._aux_types))
            return _internal._copyto(self, out=hret)
        else:
            raise TypeError('copyto does not support type ' + str(type(other)))

    def check_format(self, full_check=True):
        """Check whether the NDArray format is valid.

        Parameters
        ----------
        full_check : bool, optional
            If `True`, rigorous check, O(N) operations. Otherwise
            basic check, O(1) operations (default True).
        """
        check_call(_LIB.MXNDArraySyncCheckFormat(self.handle, ctypes.c_bool(full_check)))

    def _data(self):
        """A deep copy NDArray of the data array associated with the BaseSparseNDArray.

        This function blocks. Do not use it in performance critical code.
        """
        self.wait_to_read()
        hdl = NDArrayHandle()
        check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
        return NDArray(hdl)

    def _aux_data(self, i):
        """ Get a deep copy NDArray of the i-th aux data array associated with the
        BaseSparseNDArray.

        This function blocks. Do not use it in performance critical code.
        """
        self.wait_to_read()
        hdl = NDArrayHandle()
        check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
        return NDArray(hdl)
# pylint: disable=abstract-method
class CSRNDArray(BaseSparseNDArray):
    """A sparse representation of 2D NDArray in the Compressed Sparse Row format.

    A CSRNDArray represents an NDArray as three separate arrays: `data`,
    `indptr` and `indices`. It uses the CSR representation where the column indices for
    row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their corresponding values are stored
    in ``data[indptr[i]:indptr[i+1]]``.

    The column indices for a given row are expected to be sorted in ascending order.
    Duplicate column entries for the same row are not allowed.

    Example
    -------
    >>> a = mx.nd.array([[0, 1, 0], [2, 0, 0], [0, 0, 0], [0, 0, 3]])
    >>> a = a.tostype('csr')
    >>> a.data.asnumpy()
    array([ 1.,  2.,  3.], dtype=float32)
    >>> a.indices.asnumpy()
    array([1, 0, 2])
    >>> a.indptr.asnumpy()
    array([0, 1, 2, 2, 3])

    See Also
    --------
    csr_matrix: Several ways to construct a CSRNDArray
    """

    def __reduce__(self):
        # Pickle support: reconstruct via __getstate__/__setstate__.
        return CSRNDArray, (None,), super(CSRNDArray, self).__getstate__()

    # In-place operators fall back to out-of-place compute + copy-back.
    def __iadd__(self, other):
        (self + other).copyto(self)
        return self

    def __isub__(self, other):
        (self - other).copyto(self)
        return self

    def __imul__(self, other):
        (self * other).copyto(self)
        return self

    def __idiv__(self, other):
        (self / other).copyto(self)
        return self

    def __itruediv__(self, other):
        (self / other).copyto(self)
        return self

    def __getitem__(self, key):
        """x.__getitem__(i) <=> x[i]

        Returns a sliced view of this array.

        Parameters
        ----------
        key : int or slice
            Indexing key.

        Examples
        --------
        >>> indptr = np.array([0, 2, 3, 6])
        >>> indices = np.array([0, 2, 2, 0, 1, 2])
        >>> data = np.array([1, 2, 3, 4, 5, 6])
        >>> a = mx.nd.sparse.csr_matrix((data, indices, indptr), shape=(3, 3))
        >>> a.asnumpy()
        array([[ 1.,  0.,  2.],
               [ 0.,  0.,  3.],
               [ 4.,  5.,  6.]], dtype=float32)
        >>> a[1:2].asnumpy()
        array([[ 0.,  0.,  3.]], dtype=float32)
        >>> a[1].asnumpy()
        array([[ 0.,  0.,  3.]], dtype=float32)
        >>> a[-1].asnumpy()
        array([[ 4.,  5.,  6.]], dtype=float32)
        """
        if isinstance(key, int):
            # Integer indexing returns a 1-row slice.  NOTE(review): only
            # -1 is translated among negative indices; other negative ints
            # are passed through unchanged -- confirm intended.
            if key == -1:
                begin = self.shape[0] - 1
            else:
                begin = key
            return op.slice(self, begin=begin, end=begin+1)
        if isinstance(key, py_slice):
            if key.step is not None:
                raise ValueError('CSRNDArray only supports continuous slicing on axis 0')
            if key.start is not None or key.stop is not None:
                begin = key.start if key.start else 0
                end = key.stop if key.stop else self.shape[0]
                return op.slice(self, begin=begin, end=end)
            else:
                # Full slice [:] returns self, not a copy.
                return self
        if isinstance(key, tuple):
            raise ValueError('Multi-dimension indexing is not supported')
        # NOTE(review): unsupported key types fall through returning None.

    def __setitem__(self, key, value):
        """x.__setitem__(i, y) <=> x[i]=y

        Set self[key] to value. Only slice key [:] is supported.

        Parameters
        ----------
        key : slice
            The indexing key.
        value : NDArray or CSRNDArray or numpy.ndarray
            The value to set.

        Examples
        --------
        >>> src = mx.nd.sparse.zeros('csr', (3,3))
        >>> src.asnumpy()
        array([[ 0.,  0.,  0.],
               [ 0.,  0.,  0.],
               [ 0.,  0.,  0.]], dtype=float32)
        >>> # assign CSRNDArray with same storage type
        >>> x = mx.nd.ones('row_sparse', (3,3)).tostype('csr')
        >>> x[:] = src
        >>> x.asnumpy()
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.],
               [ 1.,  1.,  1.]], dtype=float32)
        >>> # assign NDArray to CSRNDArray
        >>> x[:] = mx.nd.ones((3,3)) * 2
        >>> x.asnumpy()
        array([[ 2.,  2.,  2.],
               [ 2.,  2.,  2.],
               [ 2.,  2.,  2.]], dtype=float32)
        """
        if not self.writable:
            raise ValueError('Failed to assign to a readonly CSRNDArray')
        if isinstance(key, py_slice):
            if key.step is not None or key.start is not None or key.stop is not None:
                raise ValueError('Assignment with slice for CSRNDArray is not ' \
                                 'implmented yet.')
            if isinstance(value, NDArray):
                # avoid copying to itself
                if value.handle is not self.handle:
                    value.copyto(self)
            elif isinstance(value, numeric_types):
                raise ValueError("Assigning numeric types to CSRNDArray is " \
                                 "not implemented yet.")
            elif isinstance(value, (np.ndarray, np.generic)):
                # TODO(haibin/anisub) check scipy.sparse and use _sync_copy_from to
                # avoid the temporary copy
                warnings.warn('Assigning non-NDArray object to CSRNDArray is not efficient',
                              RuntimeWarning)
                tmp = _array(value)
                tmp.copyto(self)
            else:
                raise TypeError('type %s not supported' % str(type(value)))
        else:
            assert(isinstance(key, (int, tuple)))
            raise Exception('CSRNDArray only supports [:] for assignment')

    @property
    def indices(self):
        """A deep copy NDArray of the indices array of the CSRNDArray.
        This generates a deep copy of the column indices of the current `csr` matrix.

        Returns
        -------
        NDArray
            This CSRNDArray's indices array.
        """
        return self._aux_data(1)

    @property
    def indptr(self):
        """A deep copy NDArray of the indptr array of the CSRNDArray.
        This generates a deep copy of the `indptr` of the current `csr` matrix.

        Returns
        -------
        NDArray
            This CSRNDArray's indptr array.
        """
        return self._aux_data(0)

    @property
    def data(self):
        """A deep copy NDArray of the data array of the CSRNDArray.
        This generates a deep copy of the `data` of the current `csr` matrix.

        Returns
        -------
        NDArray
            This CSRNDArray's data array.
        """
        return self._data()

    # The component arrays are read-only views of backend state; direct
    # assignment is not supported.
    @indices.setter
    def indices(self, indices):
        raise NotImplementedError()

    @indptr.setter
    def indptr(self, indptr):
        raise NotImplementedError()

    @data.setter
    def data(self, data):
        raise NotImplementedError()

    def tostype(self, stype):
        """Return a copy of the array with chosen storage type.

        Returns
        -------
        NDArray or CSRNDArray
            A copy of the array with the chosen storage stype
        """
        # csr -> row_sparse is not implemented by the backend.
        if stype == 'row_sparse':
            raise ValueError("cast_storage from csr to row_sparse is not supported")
        return op.cast_storage(self, stype=stype)

    def copyto(self, other):
        """Copies the value of this array to another array.

        If ``other`` is a ``NDArray`` or ``CSRNDArray`` object, then ``other.shape`` and
        ``self.shape`` should be the same. This function copies the value from
        ``self`` to ``other``.

        If ``other`` is a context, a new ``CSRNDArray`` will be first created on
        the target context, and the value of ``self`` is copied.

        Parameters
        ----------
        other : NDArray or CSRNDArray or Context
            The destination array or context.

        Returns
        -------
        NDArray or CSRNDArray
            The copied array. If ``other`` is an ``NDArray`` or ``CSRNDArray``, then the return
            value and ``other`` will point to the same ``NDArray`` or ``CSRNDArray``.
        """
        if isinstance(other, Context):
            return super(CSRNDArray, self).copyto(other)
        elif isinstance(other, NDArray):
            stype = other.stype
            # Copying to a dense or same-storage destination is supported;
            # anything else (e.g. row_sparse) is rejected.
            if stype == 'default' or stype == 'csr':
                return super(CSRNDArray, self).copyto(other)
            else:
                raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
        else:
            raise TypeError('copyto does not support type ' + str(type(other)))

    def asscipy(self):
        """Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array

        Examples
        --------
        >>> x = mx.nd.sparse.zeros('csr', (2,3))
        >>> y = x.asscipy()
        >>> type(y)
        <type 'scipy.sparse.csr.csr_matrix'>
        >>> y
        <2x3 sparse matrix of type '<type 'numpy.float32'>'
        with 0 stored elements in Compressed Sparse Row format>
        """
        data = self.data.asnumpy()
        indices = self.indices.asnumpy()
        indptr = self.indptr.asnumpy()
        if not spsp:
            raise ImportError("scipy is not available. \
                Please check if the scipy python bindings are installed.")
        return spsp.csr_matrix((data, indices, indptr), shape=self.shape, dtype=self.dtype)
# pylint: disable=abstract-method
class RowSparseNDArray(BaseSparseNDArray):
    """A sparse representation of a set of NDArray row slices at given indices.
    A RowSparseNDArray represents a multidimensional NDArray using two separate arrays: `data` and
    `indices`. The number of dimensions has to be at least 2.
    - data: an NDArray of any dtype with shape [D0, D1, ..., Dn].
    - indices: a 1-D int64 NDArray with shape [D0] with values sorted in ascending order.
    The `indices` stores the indices of the row slices with non-zeros,
    while the values are stored in `data`. The corresponding NDArray ``dense``
    represented by RowSparseNDArray ``rsp`` has
    ``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
    >>> dense.asnumpy()
    array([[ 1., 2., 3.],
           [ 0., 0., 0.],
           [ 4., 0., 5.],
           [ 0., 0., 0.],
           [ 0., 0., 0.]], dtype=float32)
    >>> rsp = dense.tostype('row_sparse')
    >>> rsp.indices.asnumpy()
    array([0, 2], dtype=int64)
    >>> rsp.data.asnumpy()
    array([[ 1., 2., 3.],
           [ 4., 0., 5.]], dtype=float32)
    A RowSparseNDArray is typically used to represent non-zero row slices of a large NDArray
    of shape [LARGE0, D1, .. , Dn] where LARGE0 >> D0 and most row slices are zeros.
    RowSparseNDArray is used principally in the definition of gradients for operations
    that have sparse gradients (e.g. sparse dot and sparse embedding).
    See Also
    --------
    row_sparse_array: Several ways to construct a RowSparseNDArray
    """
    def __reduce__(self):
        # Pickling support: reconstruct via RowSparseNDArray(None), then restore
        # the serialized state returned by the base class's __getstate__.
        return RowSparseNDArray, (None,), super(RowSparseNDArray, self).__getstate__()
    # NOTE: the "in-place" operators below are not truly in-place -- each computes
    # an out-of-place result and then copies it back into self's storage.
    def __iadd__(self, other):
        (self + other).copyto(self)
        return self
    def __isub__(self, other):
        (self - other).copyto(self)
        return self
    def __imul__(self, other):
        (self * other).copyto(self)
        return self
    def __idiv__(self, other):
        # Python 2 in-place division; mirrors __itruediv__ below.
        (self / other).copyto(self)
        return self
    def __itruediv__(self, other):
        (self / other).copyto(self)
        return self
    def __getitem__(self, key):
        """x.__getitem__(i) <=> x[i]
        Returns a sliced view of this array.
        Parameters
        ----------
        key : slice
            Indexing key. Only the full slice ``[:]`` is supported.
        Examples
        --------
        >>> x = mx.nd.sparse.zeros('row_sparse', (2, 3))
        >>> x[:].asnumpy()
        array([[ 0., 0., 0.],
               [ 0., 0., 0.]], dtype=float32)
        """
        if isinstance(key, int):
            raise Exception("__getitem__ with int key is not implemented for RowSparseNDArray yet")
        if isinstance(key, py_slice):
            if key.step is not None or key.start is not None or key.stop is not None:
                raise Exception('RowSparseNDArray only supports [:] for __getitem__')
            else:
                # x[:] returns self directly -- no copy is made.
                return self
        if isinstance(key, tuple):
            raise ValueError('Multi-dimension indexing is not supported')
        # NOTE(review): any other key type falls through and implicitly returns None.
    def __setitem__(self, key, value):
        """x.__setitem__(i, y) <=> x[i]=y
        Set self[key] to value. Only slice key [:] is supported.
        Parameters
        ----------
        key : slice
            The indexing key.
        value : NDArray or numpy.ndarray
            The value to set.
        Examples
        --------
        >>> src = mx.nd.row_sparse([[1, 0, 2], [4, 5, 6]], [0, 2], (3,3))
        >>> src.asnumpy()
        array([[ 1., 0., 2.],
               [ 0., 0., 0.],
               [ 4., 5., 6.]], dtype=float32)
        >>> # assign RowSparseNDArray with same storage type
        >>> x = mx.nd.sparse.zeros('row_sparse', (3,3))
        >>> x[:] = src
        >>> x.asnumpy()
        array([[ 1., 0., 2.],
               [ 0., 0., 0.],
               [ 4., 5., 6.]], dtype=float32)
        >>> # assign NDArray to RowSparseNDArray
        >>> x[:] = mx.nd.ones((3,3))
        >>> x.asnumpy()
        array([[ 1., 1., 1.],
               [ 1., 1., 1.],
               [ 1., 1., 1.]], dtype=float32)
        """
        if not self.writable:
            raise ValueError('Failed to assign to a readonly RowSparseNDArray')
        if isinstance(key, py_slice):
            if key.step is not None or key.start is not None or key.stop is not None:
                raise ValueError('Assignment with slice for RowSparseNDArray ' \
                                 'is not implmented yet.')
            if isinstance(value, NDArray):
                # avoid copying to itself
                if value.handle is not self.handle:
                    value.copyto(self)
            elif isinstance(value, numeric_types):
                raise ValueError("Assigning numeric types to RowSparseNDArray " \
                                 "is not implemented yet.")
            elif isinstance(value, (np.ndarray, np.generic)):
                # numpy input requires a temporary NDArray conversion first
                warnings.warn('Assigning non-NDArray object to RowSparseNDArray is not efficient',
                              RuntimeWarning)
                tmp = _array(value)
                tmp.copyto(self)
            else:
                raise TypeError('type %s not supported' % str(type(value)))
        else:
            assert(isinstance(key, (int, tuple)))
            raise TypeError('RowSparseNDArray only supports [:] for assignment')
    @property
    def indices(self):
        """A deep copy NDArray of the indices array of the RowSparseNDArray.
        This generates a deep copy of the row indices of the current `row_sparse` matrix.
        Returns
        -------
        NDArray
            This RowSparseNDArray's indices array.
        """
        # indices is aux array 0 for the row_sparse storage type
        return self._aux_data(0)
    @property
    def data(self):
        """A deep copy NDArray of the data array of the RowSparseNDArray.
        This generates a deep copy of the `data` of the current `row_sparse` matrix.
        Returns
        -------
        NDArray
            This RowSparseNDArray's data array.
        """
        return self._data()
    @indices.setter
    def indices(self, indices):
        # indices/data are read-only views; mutate via assignment of whole arrays instead
        raise NotImplementedError()
    @data.setter
    def data(self, data):
        raise NotImplementedError()
    def tostype(self, stype):
        """Return a copy of the array with chosen storage type.
        Returns
        -------
        NDArray or RowSparseNDArray
            A copy of the array with the chosen storage stype
        """
        if stype == 'csr':
            raise ValueError("cast_storage from row_sparse to csr is not supported")
        return op.cast_storage(self, stype=stype)
    def copyto(self, other):
        """Copies the value of this array to another array.
        If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
        and ``self.shape`` should be the same. This function copies the value from
        ``self`` to ``other``.
        If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
        the target context, and the value of ``self`` is copied.
        Parameters
        ----------
        other : NDArray or RowSparseNDArray or Context
            The destination array or context.
        Returns
        -------
        NDArray or RowSparseNDArray
            The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
            return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
        """
        if isinstance(other, Context):
            return super(RowSparseNDArray, self).copyto(other)
        elif isinstance(other, NDArray):
            stype = other.stype
            if stype == 'default' or stype == 'row_sparse':
                return super(RowSparseNDArray, self).copyto(other)
            else:
                # e.g. copying into a csr destination is unsupported
                raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
        else:
            raise TypeError('copyto does not support type ' + str(type(other)))
    def retain(self, *args, **kwargs):
        """Convenience fluent method for :py:func:`retain`.
        The arguments are the same as for :py:func:`retain`, with
        this array as data.
        """
        return retain(self, *args, **kwargs)
def _prepare_src_array(source_array, dtype):
    """Prepare `source_array` so that it can be used to construct NDArray.
    `source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \
    nor an `np.ndarray`.
    """
    if not isinstance(source_array, (NDArray, np.ndarray)):
        try:
            source_array = np.array(source_array, dtype=dtype)
        except Exception:
            # Only shield against ordinary conversion failures (ValueError/TypeError
            # from np.array); a bare `except:` here would also swallow
            # KeyboardInterrupt/SystemExit, which must propagate.
            raise TypeError('values must be array like object')
    return source_array
def _prepare_default_dtype(src_array, dtype):
    """Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray
    or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise."""
    # An explicit dtype always wins.
    if dtype is not None:
        return dtype
    # Inherit the dtype from array-like sources that carry one.
    if isinstance(src_array, (NDArray, np.ndarray)):
        return src_array.dtype
    if spsp and isinstance(src_array, spsp.csr.csr_matrix):
        return src_array.dtype
    # Fall back to the library-wide default real type (float32).
    return mx_real_t
def _check_shape(s1, s2):
"""check s1 == s2 if both are not None"""
if s1 and s2 and s1 != s2:
raise ValueError("Shape mismatch detected. " + str(s1) + " v.s. " + str(s2))
def csr_matrix(arg1, shape=None, ctx=None, dtype=None):
    """Creates a `CSRNDArray`, an 2D array with compressed sparse row (CSR) format.
    The CSRNDArray can be instantiated in several ways:
    - csr_matrix(D):
        to construct a CSRNDArray with a dense 2D array ``D``
        - **D** (*array_like*) - An object exposing the array interface, an object whose \
        `__array__` method returns an array, or any (nested) sequence.
        - **ctx** (*Context, optional*) - Device context \
        (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
        The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
        float32 otherwise.
    - csr_matrix(S)
        to construct a CSRNDArray with a sparse 2D array ``S``
        - **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix.
        - **ctx** (*Context, optional*) - Device context \
        (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
        The default dtype is ``S.dtype``.
    - csr_matrix((M, N))
        to construct an empty CSRNDArray with shape ``(M, N)``
        - **M** (*int*) - Number of rows in the matrix
        - **N** (*int*) - Number of columns in the matrix
        - **ctx** (*Context, optional*) - Device context \
        (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
        The default dtype is float32.
    - csr_matrix((data, indices, indptr))
        to construct a CSRNDArray based on the definition of compressed sparse row format \
        using three separate arrays, \
        where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \
        and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \
        The column indices for a given row are expected to be **sorted in ascending order.** \
        Duplicate column entries for the same row are not allowed.
        - **data** (*array_like*) - An object exposing the array interface, which \
        holds all the non-zero entries of the matrix in row-major order.
        - **indices** (*array_like*) - An object exposing the array interface, which \
        stores the column index for each non-zero element in ``data``.
        - **indptr** (*array_like*) - An object exposing the array interface, which \
        stores the offset into ``data`` of the first non-zero element number of each \
        row of the matrix.
        - **shape** (*tuple of int, optional*) - The shape of the array. The default \
        shape is inferred from the indices and indptr arrays.
        - **ctx** (*Context, optional*) - Device context \
        (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
        The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \
        float32 otherwise.
    - csr_matrix((data, (row, col)))
        to construct a CSRNDArray based on the COOrdinate format \
        using three seperate arrays, \
        where ``row[i]`` is the row index of the element, \
        ``col[i]`` is the column index of the element \
        and ``data[i]`` is the data corresponding to the element. All the missing \
        elements in the input are taken to be zeroes.
        - **data** (*array_like*) - An object exposing the array interface, which \
        holds all the non-zero entries of the matrix in COO format.
        - **row** (*array_like*) - An object exposing the array interface, which \
        stores the row index for each non zero element in ``data``.
        - **col** (*array_like*) - An object exposing the array interface, which \
        stores the col index for each non zero element in ``data``.
        - **shape** (*tuple of int, optional*) - The shape of the array. The default \
        shape is inferred from the ``row`` and ``col`` arrays.
        - **ctx** (*Context, optional*) - Device context \
        (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
        The default dtype is float32.
    Parameters
    ----------
    arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, scipy.sparse.csr_matrix, \
    scipy.sparse.coo_matrix, tuple of int or tuple of array_like
        The argument to help instantiate the csr matrix. See above for further details.
    shape : tuple of int, optional
        The shape of the csr matrix.
    ctx: Context, optional
        Device context (default is the current default context).
    dtype: str or numpy.dtype, optional
        The data type of the output array.
    Returns
    -------
    CSRNDArray
        A `CSRNDArray` with the `csr` storage representation.
    Example
    -------
    >>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))
    >>> a.asnumpy()
    array([[ 0., 1., 0.],
           [ 2., 0., 0.],
           [ 0., 0., 0.],
           [ 0., 0., 3.]], dtype=float32)
    See Also
    --------
    CSRNDArray : MXNet NDArray in compressed sparse row format.
    """
    # construct a csr matrix from (M, N) or (data, indices, indptr)
    if isinstance(arg1, tuple):
        arg_len = len(arg1)
        if arg_len == 2:
            # construct a sparse csr matrix from
            # scipy coo matrix if input format is coo
            if isinstance(arg1[1], tuple) and len(arg1[1]) == 2:
                # BUGFIX: the COO path goes through scipy; guard against a missing
                # scipy install (spsp is None) like asscipy() does, instead of
                # crashing later with an AttributeError on NoneType.
                if not spsp:
                    raise ImportError("scipy is not available. "
                                      "Please check if the scipy python bindings are installed.")
                data, (row, col) = arg1
                # scipy only accepts numpy inputs, so unload any NDArray first
                if isinstance(data, NDArray):
                    data = data.asnumpy()
                if isinstance(row, NDArray):
                    row = row.asnumpy()
                if isinstance(col, NDArray):
                    col = col.asnumpy()
                coo = spsp.coo_matrix((data, (row, col)), shape=shape)
                _check_shape(coo.shape, shape)
                csr = coo.tocsr()
                return array(csr, ctx=ctx, dtype=dtype)
            else:
                # empty matrix with shape
                _check_shape(arg1, shape)
                return empty('csr', arg1, ctx=ctx, dtype=dtype)
        elif arg_len == 3:
            # data, indices, indptr
            return _csr_matrix_from_definition(arg1[0], arg1[1], arg1[2], shape=shape,
                                               ctx=ctx, dtype=dtype)
        else:
            raise ValueError("Unexpected length of input tuple: " + str(arg_len))
    else:
        # construct a csr matrix from a sparse / dense one
        if isinstance(arg1, CSRNDArray) or (spsp and isinstance(arg1, spsp.csr.csr_matrix)):
            # construct a csr matrix from scipy or CSRNDArray
            _check_shape(arg1.shape, shape)
            return array(arg1, ctx=ctx, dtype=dtype)
        elif isinstance(arg1, RowSparseNDArray):
            raise ValueError("Unexpected input type: RowSparseNDArray")
        else:
            # construct a csr matrix from a dense one
            # prepare default ctx and dtype since mx.nd.array doesn't use default values
            # based on source_array
            dtype = _prepare_default_dtype(arg1, dtype)
            # create dns array with provided dtype. ctx is not passed since copy across
            # ctx requires dtype to be the same
            dns = _array(arg1, dtype=dtype)
            if ctx is not None and dns.context != ctx:
                dns = dns.as_in_context(ctx)
            _check_shape(dns.shape, shape)
            return dns.tostype('csr')
def _csr_matrix_from_definition(data, indices, indptr, shape=None, ctx=None,
                                dtype=None, indices_type=None, indptr_type=None):
    """Create a `CSRNDArray` based on data, indices and indptr"""
    storage_type = 'csr'
    # context
    ctx = Context.default_ctx if ctx is None else ctx
    # types
    dtype = _prepare_default_dtype(data, dtype)
    # aux dtype defaults come from the per-storage-type table: slot 0 is indptr, slot 1 is indices
    indptr_type = _STORAGE_AUX_TYPES[storage_type][0] if indptr_type is None else indptr_type
    indices_type = _STORAGE_AUX_TYPES[storage_type][1] if indices_type is None else indices_type
    # prepare src array and types
    data = _prepare_src_array(data, dtype)
    indptr = _prepare_src_array(indptr, indptr_type)
    indices = _prepare_src_array(indices, indices_type)
    # TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
    # if they are not for now. In the future, we should provide a c-api
    # to accept np.ndarray types to copy from to result.data and aux_data
    if not isinstance(data, NDArray):
        data = _array(data, ctx, dtype)
    if not isinstance(indptr, NDArray):
        indptr = _array(indptr, ctx, indptr_type)
    if not isinstance(indices, NDArray):
        indices = _array(indices, ctx, indices_type)
    if shape is None:
        # infer shape: rows from indptr length, columns from the largest column index
        if indices.shape[0] == 0:
            # an all-zero matrix has no stored column index, so columns cannot be inferred
            raise ValueError('invalid shape')
        shape = (len(indptr) - 1, op.max(indices).asscalar() + 1)
    # verify shapes
    aux_shapes = [indptr.shape, indices.shape]
    if data.ndim != 1 or indptr.ndim != 1 or indices.ndim != 1 or \
       indptr.shape[0] == 0 or len(shape) != 2:
        raise ValueError('invalid shape')
    result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
                                          [indptr_type, indices_type], aux_shapes))
    # NOTE(review): the trailing int appears to select the destination buffer
    # (-1: data array, 0/1: aux arrays indptr/indices) -- confirm against the
    # MXNDArraySyncCopyFromNDArray C API documentation.
    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
    return result
def row_sparse_array(arg1, shape=None, ctx=None, dtype=None):
    """Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \
    tensor slices at given indices.
    The RowSparseNDArray can be instantiated in several ways:
    - row_sparse_array(D):
        to construct a RowSparseNDArray with a dense ndarray ``D``
        - **D** (*array_like*) - An object exposing the array interface, an object whose \
        `__array__` method returns an array, or any (nested) sequence.
        - **ctx** (*Context, optional*) - Device context \
        (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
        The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
        float32 otherwise.
    - row_sparse_array(S)
        to construct a RowSparseNDArray with a sparse ndarray ``S``
        - **S** (*RowSparseNDArray*) - A sparse ndarray.
        - **ctx** (*Context, optional*) - Device context \
        (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
        The default dtype is ``S.dtype``.
    - row_sparse_array((D0, D1 .. Dn))
        to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)``
        - **D0, D1 .. Dn** (*int*) - The shape of the ndarray
        - **ctx** (*Context, optional*) - Device context \
        (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
        The default dtype is float32.
    - row_sparse_array((data, indices))
        to construct a RowSparseNDArray based on the definition of row sparse format \
        using two separate arrays, \
        where the `indices` stores the indices of the row slices with non-zeros,
        while the values are stored in `data`. The corresponding NDArray ``dense``
        represented by RowSparseNDArray ``rsp`` has \
        ``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
        The row indices for are expected to be **sorted in ascending order.** \
        - **data** (*array_like*) - An object exposing the array interface, which \
        holds all the non-zero row slices of the array.
        - **indices** (*array_like*) - An object exposing the array interface, which \
        stores the row index for each row slice with non-zero elements.
        - **shape** (*tuple of int, optional*) - The shape of the array. The default \
        shape is inferred from the indices and indptr arrays.
        - **ctx** (*Context, optional*) - Device context \
        (default is the current default context).
        - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
        The default dtype is float32.
    Parameters
    ----------
    arg1: NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like
        The argument to help instantiate the row sparse ndarray. See above for further details.
    shape : tuple of int, optional
        The shape of the row sparse ndarray.
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        The data type of the output array.
    Returns
    -------
    RowSparseNDArray
        An `RowSparseNDArray` with the `row_sparse` storage representation.
    Example
    -------
    >>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))
    >>> a.asnumpy()
    array([[ 0., 0.],
           [ 1., 2.],
           [ 0., 0.],
           [ 0., 0.],
           [ 3., 4.],
           [ 0., 0.]], dtype=float32)
    See Also
    --------
    RowSparseNDArray : MXNet NDArray in row sparse format.
    """
    # construct a row sparse array from (D0, D1 ..) or (data, indices)
    if isinstance(arg1, tuple):
        arg_len = len(arg1)
        if arg_len < 2:
            raise ValueError("Unexpected length of input tuple: " + str(arg_len))
        elif arg_len > 2:
            # empty ndarray with shape
            _check_shape(arg1, shape)
            return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
        else:
            # len(arg1) = 2, is either shape or (data, indices)
            if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types):
                # empty ndarray with shape
                _check_shape(arg1, shape)
                return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
            else:
                # (data, indices) -- row_sparse has no indptr array
                return _row_sparse_ndarray_from_definition(arg1[0], arg1[1], shape=shape,
                                                           ctx=ctx, dtype=dtype)
    else:
        # construct a row sparse ndarray from a dense / sparse array
        if isinstance(arg1, RowSparseNDArray):
            # construct a row sparse ndarray from RowSparseNDArray
            _check_shape(arg1.shape, shape)
            return array(arg1, ctx=ctx, dtype=dtype)
        elif isinstance(arg1, CSRNDArray):
            raise ValueError("Unexpected input type: CSRNDArray")
        else:
            # construct a row sparse ndarray from a dense one
            # prepare default dtype since mx.nd.array doesn't use default values
            # based on source_array
            dtype = _prepare_default_dtype(arg1, dtype)
            # create dns array with provided dtype. ctx is not passed since copy across
            # ctx requires dtype to be the same
            dns = _array(arg1, dtype=dtype)
            if ctx is not None and dns.context != ctx:
                dns = dns.as_in_context(ctx)
            _check_shape(dns.shape, shape)
            return dns.tostype('row_sparse')
def _row_sparse_ndarray_from_definition(data, indices, shape=None, ctx=None,
                                        dtype=None, indices_type=None):
    """Create a `RowSparseNDArray` based on data and indices"""
    storage_type = 'row_sparse'
    # context
    ctx = Context.default_ctx if ctx is None else ctx
    # types
    dtype = _prepare_default_dtype(data, dtype)
    # row_sparse has a single aux array (indices); its default dtype is slot 0 of the table
    indices_type = _STORAGE_AUX_TYPES[storage_type][0] if indices_type is None else indices_type
    # prepare src array and types
    data = _prepare_src_array(data, dtype)
    indices = _prepare_src_array(indices, indices_type)
    # TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
    # if they are not for now. In the future, we should provide a c-api
    # to accept np.ndarray types to copy from to result.data and aux_data
    if not isinstance(data, NDArray):
        data = _array(data, ctx, dtype)
    if not isinstance(indices, NDArray):
        indices = _array(indices, ctx, indices_type)
    if shape is None:
        # infer shape: indices are sorted ascending, so the last one + 1 bounds dim 0;
        # remaining dims are taken from the data slices
        num_indices = indices.shape[0]
        if num_indices == 0:
            raise ValueError('invalid shape')
        dim0 = indices[num_indices - 1].asscalar() + 1
        shape = (dim0, ) + data.shape[1:]
    # verify shapes
    if data.ndim != len(shape) or indices.ndim != 1 or np.prod(shape[1:]) == 0:
        raise ValueError("invalid shape")
    result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
                                                [indices_type], [indices.shape]))
    # NOTE(review): the trailing int appears to select the destination buffer
    # (-1: data array, 0: the indices aux array) -- confirm against the C API docs.
    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
    return result
def _ndarray_cls(handle, writable=True, stype=_STORAGE_TYPE_UNDEFINED):
    """Wrap a raw NDArray handle in the Python class matching its storage type."""
    # Resolve the storage type from the handle when the caller did not supply one.
    if stype == _STORAGE_TYPE_UNDEFINED:
        stype = _storage_type(handle)
    # Dispatch table: storage type -> wrapper class.
    wrapper_by_stype = {
        _STORAGE_TYPE_DEFAULT: NDArray,
        _STORAGE_TYPE_CSR: CSRNDArray,
        _STORAGE_TYPE_ROW_SPARSE: RowSparseNDArray,
    }
    if stype not in wrapper_by_stype:
        raise Exception("unknown storage type: %s"%stype)
    return wrapper_by_stype[stype](handle, writable=writable)
_set_ndarray_class(_ndarray_cls)
def zeros(stype, shape, ctx=None, dtype=None, **kwargs):
    """Return a new array of given shape and type, filled with zeros.
    Parameters
    ----------
    stype: string
        The storage type of the empty array, such as 'row_sparse', 'csr', etc
    shape : int or tuple of int
        The shape of the empty array
    ctx : Context, optional
        An optional device context (default is the current default context)
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`)
    Returns
    -------
    RowSparseNDArray or CSRNDArray
        A created array
    Examples
    --------
    >>> mx.nd.sparse.zeros('csr', (1,2))
    <CSRNDArray 1x2 @cpu(0)>
    >>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()
    array([[ 0., 0.]], dtype=float16)
    """
    # 'default' storage delegates to the dense zeros implementation
    if stype == 'default':
        return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs)
    if ctx is None:
        ctx = Context.default_ctx
    dtype = mx_real_t if dtype is None else dtype
    if stype in ('row_sparse', 'csr'):
        aux_types = _STORAGE_AUX_TYPES[stype]
    else:
        # BUGFIX: the original message read "unknown storage typecsr" etc. --
        # the separator between message and stype was missing
        raise ValueError("unknown storage type: " + stype)
    # allocate storage for the requested stype, then fill it via the zeros operator
    out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types))
    return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs)
def empty(stype, shape, ctx=None, dtype=None):
    """Returns a new array of given shape and type, without initializing entries.
    Parameters
    ----------
    stype: string
        The storage type of the empty array, such as 'row_sparse', 'csr', etc
    shape : int or tuple of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    Returns
    -------
    CSRNDArray or RowSparseNDArray
        A created array.
    """
    # Normalize an int shape to a 1-tuple.
    if isinstance(shape, int):
        shape = (shape, )
    ctx = Context.default_ctx if ctx is None else ctx
    dtype = mx_real_t if dtype is None else dtype
    assert(stype is not None)
    # Sparse storage has no true "uninitialized" allocation; delegate to zeros.
    if stype in ('csr', 'row_sparse'):
        return zeros(stype, shape, ctx=ctx, dtype=dtype)
    raise Exception("unknown stype : " + str(stype))
def array(source_array, ctx=None, dtype=None):
    """Creates a sparse array from any object exposing the array interface.
    Parameters
    ----------
    source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
        The source sparse array
    ctx : Context, optional
        The default context is ``source_array.context`` if ``source_array`` is an NDArray. \
        The current default context otherwise.
    dtype : str or numpy.dtype, optional
        The data type of the output array. The default dtype is ``source_array.dtype``
        if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \
        `float32` otherwise.
    Returns
    -------
    RowSparseNDArray or CSRNDArray
        An array with the same contents as the `source_array`.
    Examples
    --------
    >>> import scipy.sparse as spsp
    >>> csr = spsp.csr_matrix((2, 100))
    >>> mx.nd.sparse.array(csr)
    <CSRNDArray 2x100 @cpu(0)>
    >>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))
    <CSRNDArray 3x2 @cpu(0)>
    >>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))
    <RowSparseNDArray 3x2 @cpu(0)>
    """
    ctx = Context.default_ctx if ctx is None else ctx
    if isinstance(source_array, NDArray):
        assert(source_array.stype != 'default'), \
            "Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray"
        # prepare dtype and ctx based on source_array, if not provided
        dtype = _prepare_default_dtype(source_array, dtype)
        # if both dtype and ctx are different from source_array, we cannot copy directly
        if source_array.dtype != dtype and source_array.context != ctx:
            # convert dtype first (on the source's context), then move to the target context
            arr = empty(source_array.stype, source_array.shape, dtype=dtype)
            arr[:] = source_array
            arr = arr.as_in_context(ctx)
        else:
            arr = empty(source_array.stype, source_array.shape, dtype=dtype, ctx=ctx)
            arr[:] = source_array
        return arr
    elif spsp and isinstance(source_array, spsp.csr.csr_matrix):
        # TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy
        # preprocess scipy csr to canonical form: sorted column indices, no duplicates
        csr = source_array.sorted_indices()
        csr.sum_duplicates()
        dtype = _prepare_default_dtype(source_array, dtype)
        return csr_matrix((csr.data, csr.indices, csr.indptr), shape=csr.shape, \
                          dtype=dtype, ctx=ctx)
    elif isinstance(source_array, (np.ndarray, np.generic)):
        raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type ",
                         type(source_array))
    else:
        raise ValueError("Unexpected source_array type: ", type(source_array))
| madjam/mxnet | python/mxnet/ndarray/sparse.py | Python | apache-2.0 | 50,947 |
from __future__ import absolute_import
from django.db import models
from django.test import TestCase
from django.utils import timezone
from analytics.lib.counts import CountStat, COUNT_STATS, process_count_stat, \
zerver_count_user_by_realm, zerver_count_message_by_user, \
zerver_count_message_by_stream, zerver_count_stream_by_realm, \
do_fill_count_stat_at_hour, ZerverCountQuery
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, FillState, installation_epoch
from zerver.models import Realm, UserProfile, Message, Stream, Recipient, \
Huddle, Client, get_user_profile_by_email, get_client
from datetime import datetime, timedelta
from six.moves import range
from typing import Any, Type, Optional, Text, Tuple, List, Union
class AnalyticsTestCase(TestCase):
    """Base class for analytics tests: fixed timestamps, lightweight object
    factories, and custom assertions over the *Count tables."""
    MINUTE = timedelta(seconds = 60)
    HOUR = MINUTE * 60
    DAY = HOUR * 24
    # All tests run against a fixed, timezone-aware epoch for reproducibility.
    TIME_ZERO = datetime(1988, 3, 14).replace(tzinfo=timezone.utc)
    TIME_LAST_HOUR = TIME_ZERO - HOUR
    def setUp(self):
        # type: () -> None
        self.default_realm = Realm.objects.create(
            string_id='realmtest', name='Realm Test',
            domain='test.analytics', date_created=self.TIME_ZERO - 2*self.DAY)
        # used to generate unique names in self.create_*
        self.name_counter = 100
        # used as defaults in self.assertCountEquals
        self.current_property = None # type: Optional[str]
    # Lightweight creation of users, streams, and messages
    def create_user(self, **kwargs):
        # type: (**Any) -> UserProfile
        # Create a UserProfile with sensible defaults; any default can be
        # overridden via kwargs.
        self.name_counter += 1
        defaults = {
            'email': 'user%s@domain.tld' % (self.name_counter,),
            'date_joined': self.TIME_LAST_HOUR,
            'full_name': 'full_name',
            'short_name': 'short_name',
            'pointer': -1,
            'last_pointer_updater': 'seems unused?',
            'realm': self.default_realm,
            'api_key': '42'}
        for key, value in defaults.items():
            kwargs[key] = kwargs.get(key, value)
        return UserProfile.objects.create(**kwargs)
    def create_stream_with_recipient(self, **kwargs):
        # type: (**Any) -> Tuple[Stream, Recipient]
        # Create a Stream plus its associated Recipient row.
        self.name_counter += 1
        defaults = {'name': 'stream name %s' % (self.name_counter,),
                    'realm': self.default_realm,
                    'date_created': self.TIME_LAST_HOUR}
        for key, value in defaults.items():
            kwargs[key] = kwargs.get(key, value)
        stream = Stream.objects.create(**kwargs)
        recipient = Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
        return stream, recipient
    def create_huddle_with_recipient(self, **kwargs):
        # type: (**Any) -> Tuple[Huddle, Recipient]
        # Create a Huddle plus its associated Recipient row.
        self.name_counter += 1
        defaults = {'huddle_hash': 'hash%s' % (self.name_counter,)}
        for key, value in defaults.items():
            kwargs[key] = kwargs.get(key, value)
        huddle = Huddle.objects.create(**kwargs)
        recipient = Recipient.objects.create(type_id=huddle.id, type=Recipient.HUDDLE)
        return huddle, recipient
    def create_message(self, sender, recipient, **kwargs):
        # type: (UserProfile, Recipient, **Any) -> Message
        defaults = {
            'sender': sender,
            'recipient': recipient,
            'subject': 'subject',
            'content': 'hi',
            'pub_date': self.TIME_LAST_HOUR,
            'sending_client': get_client("website")}
        for key, value in defaults.items():
            kwargs[key] = kwargs.get(key, value)
        return Message.objects.create(**kwargs)
    # kwargs should only ever be a UserProfile or Stream.
    def assertCountEquals(self, table, value, property=None, subgroup=None,
                          end_time=TIME_ZERO, realm=None, **kwargs):
        # type: (Type[BaseCount], int, Optional[Text], Optional[Text], datetime, Optional[Realm], **models.Model) -> None
        # Assert that the first matching row of `table` has the given value.
        if property is None:
            property = self.current_property
        queryset = table.objects.filter(property=property, end_time=end_time).filter(**kwargs)
        if table is not InstallationCount:
            # every *Count table except InstallationCount is scoped by realm
            if realm is None:
                realm = self.default_realm
            queryset = queryset.filter(realm=realm)
        if subgroup is not None:
            queryset = queryset.filter(subgroup=subgroup)
        self.assertEqual(queryset.values_list('value', flat=True)[0], value)
    def assertTableState(self, table, arg_keys, arg_values):
        # type: (Type[BaseCount], List[str], List[List[Union[int, str, Realm, UserProfile, Stream]]]) -> None
        """Assert that the state of a *Count table is what it should be.
        Example usage:
            self.assertTableState(RealmCount, ['property', 'subgroup', 'realm'],
                                  [['p1', 4], ['p2', 10, self.alt_realm]])
        table -- A *Count table.
        arg_keys -- List of columns of <table>.
        arg_values -- List of "rows" of <table>.
            Each entry of arg_values (e.g. ['p1', 4]) represents a row of <table>.
            The i'th value of the entry corresponds to the i'th arg_key, so e.g.
            the first arg_values entry here corresponds to a row of RealmCount
            with property='p1' and subgroup=4.
            Any columns not specified (in this case, every column of RealmCount
            other than property and subgroup) are either set to default values,
            or are ignored.
        The function checks that every entry of arg_values matches exactly one
        row of <table>, and that no additional rows exist. Note that this means
        checking a table with duplicate rows is not supported.
        """
        defaults = {
            'property': self.current_property,
            'subgroup': None,
            'end_time': self.TIME_ZERO}
        for values in arg_values:
            # NOTE(review): `Dict` is not imported from `typing` at the top of
            # this file, so mypy would flag this type comment.
            kwargs = {} # type: Dict[str, Any]
            for i in range(len(values)):
                kwargs[arg_keys[i]] = values[i]
            for key, value in defaults.items():
                kwargs[key] = kwargs.get(key, value)
            if table is not InstallationCount:
                # derive the realm from the user/stream when not given explicitly
                if 'realm' not in kwargs:
                    if 'user' in kwargs:
                        kwargs['realm'] = kwargs['user'].realm
                    elif 'stream' in kwargs:
                        kwargs['realm'] = kwargs['stream'].realm
                    else:
                        kwargs['realm'] = self.default_realm
            self.assertEqual(table.objects.filter(**kwargs).count(), 1)
        self.assertEqual(table.objects.count(), len(arg_values))
class TestProcessCountStat(AnalyticsTestCase):
    """Tests for process_count_stat's FillState handling: new stats, dirty
    (STARTED) stats, and clean stats with and without new data."""
    def make_dummy_count_stat(self, current_time):
        # type: (datetime) -> CountStat
        # Raw-SQL stat that always inserts a single fixed RealmCount row.
        # Plain %-interpolation into SQL is acceptable only because this is
        # test-controlled input.
        dummy_query = """INSERT INTO analytics_realmcount (realm_id, property, end_time, value)
                                VALUES (1, 'test stat', '%(end_time)s', 22)""" % {'end_time': current_time}
        count_stat = CountStat('test stat', ZerverCountQuery(Recipient, UserCount, dummy_query),
                               {}, None, CountStat.HOUR, False)
        return count_stat
    def assertFillStateEquals(self, end_time, state = FillState.DONE, property = None):
        # type: (datetime, int, Optional[Text]) -> None
        count_stat = self.make_dummy_count_stat(end_time)
        if property is None:
            property = count_stat.property
        # NOTE(review): .first() returns None when no FillState row exists, which
        # would make the next line raise AttributeError rather than a clean
        # assertion failure -- confirm whether that is acceptable here.
        fill_state = FillState.objects.filter(property=property).first()
        self.assertEqual(fill_state.end_time, end_time)
        self.assertEqual(fill_state.state, state)
    def test_process_stat(self):
        # type: () -> None
        # process new stat
        current_time = installation_epoch() + self.HOUR
        count_stat = self.make_dummy_count_stat(current_time)
        property = count_stat.property
        process_count_stat(count_stat, current_time)
        self.assertFillStateEquals(current_time)
        self.assertEqual(InstallationCount.objects.filter(property=property).count(), 1)
        # dirty stat: a STARTED fill should be redone, not duplicated
        FillState.objects.filter(property=property).update(state=FillState.STARTED)
        process_count_stat(count_stat, current_time)
        self.assertFillStateEquals(current_time)
        self.assertEqual(InstallationCount.objects.filter(property=property).count(), 1)
        # clean stat, no update
        process_count_stat(count_stat, current_time)
        self.assertFillStateEquals(current_time)
        self.assertEqual(InstallationCount.objects.filter(property=property).count(), 1)
        # clean stat, with update: advancing the clock adds one more row
        current_time = current_time + self.HOUR
        count_stat = self.make_dummy_count_stat(current_time)
        process_count_stat(count_stat, current_time)
        self.assertFillStateEquals(current_time)
        self.assertEqual(InstallationCount.objects.filter(property=property).count(), 2)
class TestCountStats(AnalyticsTestCase):
    """End-to-end tests for the COUNT_STATS queries: each test fills a stat
    at TIME_ZERO and asserts the exact rows produced in every *Count table."""
    def setUp(self):
        # type: () -> None
        super(TestCountStats, self).setUp()
        # This tests two things for each of the queries/CountStats: Handling
        # more than 1 realm, and the time bounds (time_start and time_end in
        # the queries).
        self.second_realm = Realm.objects.create(
            string_id='second-realm', name='Second Realm',
            domain='second.analytics', date_created=self.TIME_ZERO-2*self.DAY)
        # Objects at several ages relative to TIME_ZERO so that both hourly
        # and daily time windows are exercised.
        for minutes_ago in [0, 1, 61, 60*24+1]:
            creation_time = self.TIME_ZERO - minutes_ago*self.MINUTE
            user = self.create_user(email='user-%s@second.analytics' % (minutes_ago,),
                                    realm=self.second_realm, date_joined=creation_time)
            recipient = self.create_stream_with_recipient(
                name='stream %s' % (minutes_ago,), realm=self.second_realm,
                date_created=creation_time)[1]
            self.create_message(user, recipient, pub_date=creation_time)
        self.hourly_user = UserProfile.objects.get(email='user-1@second.analytics')
        self.daily_user = UserProfile.objects.get(email='user-61@second.analytics')
        # This realm should not show up in the *Count tables for any of the
        # messages_* CountStats
        self.no_message_realm = Realm.objects.create(
            string_id='no-message-realm', name='No Message Realm',
            domain='no.message', date_created=self.TIME_ZERO-2*self.DAY)
        self.create_user(realm=self.no_message_realm)
        self.create_stream_with_recipient(realm=self.no_message_realm)
        # This huddle should not show up anywhere
        self.create_huddle_with_recipient()
    def test_active_users_by_is_bot(self):
        # type: () -> None
        stat = COUNT_STATS['active_users:is_bot:day']
        self.current_property = stat.property
        # To be included
        self.create_user(is_bot=True)
        self.create_user(is_bot=True, date_joined=self.TIME_ZERO-25*self.HOUR)
        self.create_user(is_bot=False)
        # To be excluded
        self.create_user(is_active=False)
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
                              [[2, 'true'], [1, 'false'],
                               [3, 'false', self.second_realm],
                               [1, 'false', self.no_message_realm]])
        self.assertTableState(InstallationCount, ['value', 'subgroup'], [[2, 'true'], [5, 'false']])
        self.assertTableState(UserCount, [], [])
        self.assertTableState(StreamCount, [], [])
    def test_messages_sent_by_is_bot(self):
        # type: () -> None
        stat = COUNT_STATS['messages_sent:is_bot:hour']
        self.current_property = stat.property
        bot = self.create_user(is_bot=True)
        human1 = self.create_user()
        human2 = self.create_user()
        recipient_human1 = Recipient.objects.create(type_id=human1.id, type=Recipient.PERSONAL)
        recipient_stream = self.create_stream_with_recipient()[1]
        recipient_huddle = self.create_huddle_with_recipient()[1]
        self.create_message(bot, recipient_human1)
        self.create_message(bot, recipient_stream)
        self.create_message(bot, recipient_huddle)
        self.create_message(human1, recipient_human1)
        self.create_message(human2, recipient_human1)
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        self.assertTableState(UserCount, ['value', 'subgroup', 'user'],
                              [[1, 'false', human1], [1, 'false', human2], [3, 'true', bot],
                               [1, 'false', self.hourly_user]])
        self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
                              [[2, 'false'], [3, 'true'], [1, 'false', self.second_realm]])
        self.assertTableState(InstallationCount, ['value', 'subgroup'], [[3, 'false'], [3, 'true']])
        self.assertTableState(StreamCount, [], [])
    def test_messages_sent_by_message_type(self):
        # type: () -> None
        stat = COUNT_STATS['messages_sent:message_type:day']
        self.current_property = stat.property
        # Nothing currently in this stat that is bot related, but so many of
        # the rest of our stats make the human/bot distinction that one can
        # imagine a later refactoring that will intentionally or
        # unintentionally change this. So make one of our users a bot.
        user1 = self.create_user(is_bot=True)
        user2 = self.create_user()
        user3 = self.create_user()
        # private streams
        recipient_stream1 = self.create_stream_with_recipient(invite_only=True)[1]
        recipient_stream2 = self.create_stream_with_recipient(invite_only=True)[1]
        self.create_message(user1, recipient_stream1)
        self.create_message(user2, recipient_stream1)
        self.create_message(user2, recipient_stream2)
        # public streams
        recipient_stream3 = self.create_stream_with_recipient()[1]
        recipient_stream4 = self.create_stream_with_recipient()[1]
        self.create_message(user1, recipient_stream3)
        self.create_message(user1, recipient_stream4)
        self.create_message(user2, recipient_stream3)
        # huddles
        recipient_huddle1 = self.create_huddle_with_recipient()[1]
        recipient_huddle2 = self.create_huddle_with_recipient()[1]
        self.create_message(user1, recipient_huddle1)
        self.create_message(user2, recipient_huddle2)
        # private messages
        recipient_user1 = Recipient.objects.create(type_id=user1.id, type=Recipient.PERSONAL)
        recipient_user2 = Recipient.objects.create(type_id=user2.id, type=Recipient.PERSONAL)
        recipient_user3 = Recipient.objects.create(type_id=user3.id, type=Recipient.PERSONAL)
        self.create_message(user1, recipient_user2)
        self.create_message(user2, recipient_user1)
        self.create_message(user3, recipient_user3)
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        self.assertTableState(UserCount, ['value', 'subgroup', 'user'],
                              [[1, 'private_stream', user1],
                               [2, 'private_stream', user2],
                               [2, 'public_stream', user1],
                               [1, 'public_stream', user2],
                               [2, 'private_message', user1],
                               [2, 'private_message', user2],
                               [1, 'private_message', user3],
                               [1, 'public_stream', self.hourly_user],
                               [1, 'public_stream', self.daily_user]])
        self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
                              [[3, 'private_stream'], [3, 'public_stream'], [5, 'private_message'],
                               [2, 'public_stream', self.second_realm]])
        self.assertTableState(InstallationCount, ['value', 'subgroup'],
                              [[3, 'private_stream'], [5, 'public_stream'], [5, 'private_message']])
        self.assertTableState(StreamCount, [], [])
    def test_messages_sent_to_recipients_with_same_id(self):
        # type: () -> None
        # Recipients of different types may share a numeric id; the stat must
        # distinguish them by type, not id.
        stat = COUNT_STATS['messages_sent:message_type:day']
        self.current_property = stat.property
        user = self.create_user(id=1000)
        user_recipient = Recipient.objects.create(type_id=user.id, type=Recipient.PERSONAL)
        stream_recipient = self.create_stream_with_recipient(id=1000)[1]
        huddle_recipient = self.create_huddle_with_recipient(id=1000)[1]
        self.create_message(user, user_recipient)
        self.create_message(user, stream_recipient)
        self.create_message(user, huddle_recipient)
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        self.assertCountEquals(UserCount, 2, subgroup='private_message')
        self.assertCountEquals(UserCount, 1, subgroup='public_stream')
    def test_messages_sent_by_client(self):
        # type: () -> None
        stat = COUNT_STATS['messages_sent:client:day']
        self.current_property = stat.property
        user1 = self.create_user(is_bot=True)
        user2 = self.create_user()
        recipient_user2 = Recipient.objects.create(type_id=user2.id, type=Recipient.PERSONAL)
        recipient_stream = self.create_stream_with_recipient()[1]
        recipient_huddle = self.create_huddle_with_recipient()[1]
        client2 = Client.objects.create(name='client2')
        self.create_message(user1, recipient_user2, sending_client=client2)
        self.create_message(user1, recipient_stream)
        self.create_message(user1, recipient_huddle)
        self.create_message(user2, recipient_user2, sending_client=client2)
        self.create_message(user2, recipient_user2, sending_client=client2)
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        client2_id = str(client2.id)
        website_client_id = str(get_client('website').id)  # default for self.create_message
        self.assertTableState(UserCount, ['value', 'subgroup', 'user'],
                              [[2, website_client_id, user1],
                               [1, client2_id, user1], [2, client2_id, user2],
                               [1, website_client_id, self.hourly_user],
                               [1, website_client_id, self.daily_user]])
        self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
                              [[2, website_client_id], [3, client2_id],
                               [2, website_client_id, self.second_realm]])
        self.assertTableState(InstallationCount, ['value', 'subgroup'],
                              [[4, website_client_id], [3, client2_id]])
        self.assertTableState(StreamCount, [], [])
    def test_messages_sent_to_stream_by_is_bot(self):
        # type: () -> None
        stat = COUNT_STATS['messages_sent_to_stream:is_bot:hour']
        self.current_property = stat.property
        bot = self.create_user(is_bot=True)
        human1 = self.create_user()
        human2 = self.create_user()
        recipient_human1 = Recipient.objects.create(type_id=human1.id, type=Recipient.PERSONAL)
        stream1, recipient_stream1 = self.create_stream_with_recipient()
        stream2, recipient_stream2 = self.create_stream_with_recipient()
        # To be included
        self.create_message(human1, recipient_stream1)
        self.create_message(human2, recipient_stream1)
        self.create_message(human1, recipient_stream2)
        self.create_message(bot, recipient_stream2)
        self.create_message(bot, recipient_stream2)
        # To be excluded
        self.create_message(human2, recipient_human1)
        self.create_message(bot, recipient_human1)
        recipient_huddle = self.create_huddle_with_recipient()[1]
        self.create_message(human1, recipient_huddle)
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        self.assertTableState(StreamCount, ['value', 'subgroup', 'stream'],
                              [[2, 'false', stream1], [1, 'false', stream2], [2, 'true', stream2],
                               # "hourly" stream, from TestCountStats.setUp
                               [1, 'false', Stream.objects.get(name='stream 1')]])
        self.assertTableState(RealmCount, ['value', 'subgroup', 'realm'],
                              [[3, 'false'], [2, 'true'], [1, 'false', self.second_realm]])
        self.assertTableState(InstallationCount, ['value', 'subgroup'], [[4, 'false'], [2, 'true']])
        self.assertTableState(UserCount, [], [])
| sonali0901/zulip | analytics/tests/test_counts.py | Python | apache-2.0 | 20,711 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from mock import Mock
from mock import patch
from novaclient import exceptions as nova_exceptions
from trove.common import cfg
from trove.common import exception
import trove.common.remote
from trove.extensions.security_group import models as sec_mod
from trove.instance import models as inst_model
from trove.tests.fakes import nova
from trove.tests.unittests import trove_testtools
# Global oslo.config object; used below for region_name lookups.
CONF = cfg.CONF
"""
Unit tests for testing the exceptions raised by Security Groups
"""
class Security_Group_Exceptions_Test(trove_testtools.TestCase):
    """Verifies that nova client failures surface as the corresponding
    trove SecurityGroup* exceptions."""
    def setUp(self):
        super(Security_Group_Exceptions_Test, self).setUp()
        # Save the real factory so tearDown can restore it.
        self.createNovaClient = trove.common.remote.create_nova_client
        self.context = trove_testtools.TroveTestContext(self)
        self.FakeClient = nova.fake_create_nova_client(self.context)
        # Every security-group call on the fake client raises ClientException.
        fException = Mock(
            side_effect=lambda *args, **kwargs: self._raise(
                nova_exceptions.ClientException("Test")))
        self.FakeClient.security_groups.create = fException
        self.FakeClient.security_groups.delete = fException
        self.FakeClient.security_group_rules.create = fException
        self.FakeClient.security_group_rules.delete = fException
        trove.common.remote.create_nova_client = (
            lambda c, r: self._return_mocked_nova_client(c))
    def tearDown(self):
        super(Security_Group_Exceptions_Test, self).tearDown()
        # Undo the monkey-patching from setUp.
        trove.common.remote.create_nova_client = self.createNovaClient
    def _return_mocked_nova_client(self, context):
        return self.FakeClient
    def _raise(self, ex):
        # Helper so a lambda side_effect can raise (raise is a statement,
        # not an expression).
        raise ex
    @patch('trove.network.nova.LOG')
    def test_failed_to_create_security_group(self, mock_logging):
        self.assertRaises(exception.SecurityGroupCreationError,
                          sec_mod.RemoteSecurityGroup.create,
                          "TestName",
                          "TestDescription",
                          self.context,
                          region_name=CONF.os_region_name)
    @patch('trove.network.nova.LOG')
    def test_failed_to_delete_security_group(self, mock_logging):
        self.assertRaises(exception.SecurityGroupDeletionError,
                          sec_mod.RemoteSecurityGroup.delete,
                          1, self.context,
                          region_name=CONF.os_region_name)
    @patch('trove.network.nova.LOG')
    def test_failed_to_create_security_group_rule(self, mock_logging):
        self.assertRaises(exception.SecurityGroupRuleCreationError,
                          sec_mod.RemoteSecurityGroup.add_rule,
                          1, "tcp", 3306, 3306, "0.0.0.0/0", self.context,
                          region_name=CONF.os_region_name)
    @patch('trove.network.nova.LOG')
    def test_failed_to_delete_security_group_rule(self, mock_logging):
        self.assertRaises(exception.SecurityGroupRuleDeletionError,
                          sec_mod.RemoteSecurityGroup.delete_rule,
                          1, self.context,
                          region_name=CONF.os_region_name)
class fake_RemoteSecGr(object):
    """Minimal stand-in for a remote security group object."""
    def data(self):
        # Mint a fresh id each call, mimicking the remote API payload.
        self.id = uuid.uuid4()
        return {'id': self.id}
    def delete(self, context, region_name):
        # Deleting the fake remote group is a no-op.
        pass
class fake_SecGr_Association(object):
    """Stand-in association that always yields a deletable remote group."""
    def get_security_group(self):
        return fake_RemoteSecGr()
    def delete(self):
        # Deleting the fake association row is a no-op.
        pass
class SecurityGroupDeleteTest(trove_testtools.TestCase):
    """Covers SecurityGroup delete paths when the association lookup or the
    association deletion itself fails."""
    def setUp(self):
        super(SecurityGroupDeleteTest, self).setUp()
        self.inst_model_conf_patch = patch.object(inst_model, 'CONF')
        self.inst_model_conf_mock = self.inst_model_conf_patch.start()
        self.addCleanup(self.inst_model_conf_patch.stop)
        self.context = trove_testtools.TroveTestContext(self)
        # Save the originals so tearDown can undo the monkey-patching done
        # inside the individual tests.
        self.original_find_by = (
            sec_mod.SecurityGroupInstanceAssociation.find_by)
        self.original_delete = sec_mod.SecurityGroupInstanceAssociation.delete
        # Simulates the association row being missing from the DB.
        self.fException = Mock(
            side_effect=lambda *args, **kwargs: self._raise(
                exception.ModelNotFoundError()))
    def tearDown(self):
        super(SecurityGroupDeleteTest, self).tearDown()
        (sec_mod.SecurityGroupInstanceAssociation.
         find_by) = self.original_find_by
        (sec_mod.SecurityGroupInstanceAssociation.
         delete) = self.original_delete
    def _raise(self, ex):
        # Helper so a lambda side_effect can raise an exception.
        raise ex
    def test_failed_to_get_assoc_on_delete(self):
        # A missing association must not propagate; delete_for_instance
        # returns None quietly.
        sec_mod.SecurityGroupInstanceAssociation.find_by = self.fException
        self.assertIsNone(
            sec_mod.SecurityGroup.delete_for_instance(
                uuid.uuid4(), self.context, CONF.os_region_name))
    def test_get_security_group_from_assoc_with_db_exception(self):
        fException = Mock(
            side_effect=lambda *args, **kwargs: self._raise(
                nova_exceptions.ClientException('TEST')))
        i_id = uuid.uuid4()
        class new_fake_RemoteSecGrAssoc(object):
            # Association whose remote security group is already gone.
            def get_security_group(self):
                return None
            def delete(self):
                return fException
        sec_mod.SecurityGroupInstanceAssociation.find_by = Mock(
            return_value=new_fake_RemoteSecGrAssoc())
        self.assertIsNone(
            sec_mod.SecurityGroup.delete_for_instance(
                i_id, self.context, CONF.os_region_name))
    def test_delete_secgr_assoc_with_db_exception(self):
        # Association exists and has a deletable remote group, but deleting
        # the association row itself raises ModelNotFoundError.
        i_id = uuid.uuid4()
        sec_mod.SecurityGroupInstanceAssociation.find_by = Mock(
            return_value=fake_SecGr_Association())
        sec_mod.SecurityGroupInstanceAssociation.delete = self.fException
        self.assertIsNotNone(sec_mod.SecurityGroupInstanceAssociation.find_by(
            i_id, deleted=False).get_security_group())
        self.assertTrue(hasattr(sec_mod.SecurityGroupInstanceAssociation.
                                find_by(i_id, deleted=False).
                                get_security_group(), 'delete'))
        self.assertIsNone(
            sec_mod.SecurityGroup.delete_for_instance(
                i_id, self.context, CONF.os_region_name))
| zhangg/trove | trove/tests/unittests/secgroups/test_security_group.py | Python | apache-2.0 | 6,773 |
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script performs lighthouse checks and creates lighthouse reports.
Any callers must pass in a flag, either --accessibility or --performance.
"""
from __future__ import annotations
import argparse
import contextlib
import os
import subprocess
import sys
from core.constants import constants
from scripts import build
from scripts import common
from scripts import servers
LIGHTHOUSE_MODE_PERFORMANCE = 'performance'
LIGHTHOUSE_MODE_ACCESSIBILITY = 'accessibility'
SERVER_MODE_PROD = 'dev'
SERVER_MODE_DEV = 'prod'
GOOGLE_APP_ENGINE_PORT = 8181
LIGHTHOUSE_CONFIG_FILENAMES = {
LIGHTHOUSE_MODE_PERFORMANCE: {
'1': '.lighthouserc-1.js',
'2': '.lighthouserc-2.js'
},
LIGHTHOUSE_MODE_ACCESSIBILITY: {
'1': '.lighthouserc-accessibility-1.js',
'2': '.lighthouserc-accessibility-2.js'
}
}
APP_YAML_FILENAMES = {
SERVER_MODE_PROD: 'app.yaml',
SERVER_MODE_DEV: 'app_dev.yaml'
}
_PARSER = argparse.ArgumentParser(
description="""
Run the script from the oppia root folder:
python -m scripts.run_lighthouse_tests
Note that the root folder MUST be named 'oppia'.
""")
_PARSER.add_argument(
'--mode', help='Sets the mode for the lighthouse tests',
required=True, choices=['accessibility', 'performance'])
_PARSER.add_argument(
'--shard', help='Sets the shard for the lighthouse tests',
required=True, choices=['1', '2'])
def run_lighthouse_puppeteer_script():
    """Runs the puppeteer script that sets up test entities and emits their
    URLs, then exports the entity IDs found in its output.

    Exits the process with status 1 if the script fails.
    """
    puppeteer_path = (
        os.path.join('core', 'tests', 'puppeteer', 'lighthouse_setup.js'))
    bash_command = [common.NODE_BIN_PATH, puppeteer_path]
    process = subprocess.Popen(
        bash_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if process.returncode == 0:
        # Standard output is in bytes; decode it once so we print readable
        # text (previously `print(stdout)` printed the bytes repr) and can
        # parse the lines for entity URLs.
        stdout_text = stdout.decode('utf-8')
        print(stdout_text)
        for line in stdout_text.split('\n'):
            export_url(line)
        print('Puppeteer script completed successfully.')
    else:
        print('Return code: %s' % process.returncode)
        print('OUTPUT:')
        # Standard output is in bytes, we need to decode the line to
        # print it.
        print(stdout.decode('utf-8'))
        print('ERROR:')
        # Error output is in bytes, we need to decode the line to
        # print it.
        print(stderr.decode('utf-8'))
        print('Puppeteer script failed. More details can be found above.')
        sys.exit(1)
def run_webpack_compilation():
    """Runs webpack compilation, retrying a bounded number of times.

    Exits the process with status 1 if no bundle directory appears after
    the final attempt.
    """
    bundles_dir = 'webpack_bundles'
    for _attempt in range(5):
        try:
            with servers.managed_webpack_compiler() as compiler_proc:
                compiler_proc.wait()
        except subprocess.CalledProcessError as error:
            # A hard compiler failure is not retried; surface its output.
            print(error.output)
            sys.exit(error.returncode)
        if os.path.isdir(bundles_dir):
            # Compilation produced the bundles; nothing left to do.
            return
    print('Failed to complete webpack compilation, exiting...')
    sys.exit(1)
def export_url(line):
    """Exports the entity ID in the given line to an environment variable, if
    the line is a URL.

    Args:
        line: str. The line to parse and extract the entity ID from. If no
            recognizable URL is present, nothing is exported to the
            environment.
    """
    print('Parsing and exporting entity ID in line: %s' % line)
    segments = line.split('/')
    # Order matters: the first matching marker wins, mirroring the original
    # if/elif chain ('collection_editor' must be checked before 'create').
    url_markers = (
        ('collection_editor', 'collection_id', 5),
        ('create', 'exploration_id', 4),
        ('topic_editor', 'topic_id', 4),
        ('story_editor', 'story_id', 4),
        ('skill_editor', 'skill_id', 4),
    )
    for marker, env_var, segment_index in url_markers:
        if marker in line:
            os.environ[env_var] = segments[segment_index]
            break
def run_lighthouse_checks(lighthouse_mode, shard):
    """Runs the Lighthouse checks through the Lighthouse config.

    Args:
        lighthouse_mode: str. Whether the checks run in accessibility mode
            or performance mode.
        shard: str. Which shard of the tests to run.
    """
    lhci_path = os.path.join('node_modules', '@lhci', 'cli', 'src', 'cli.js')
    config_flag = (
        '--config=%s' % LIGHTHOUSE_CONFIG_FILENAMES[lighthouse_mode][shard])
    # The max-old-space-size is a quick fix for node running out of heap memory
    # when executing the performance tests: https://stackoverflow.com/a/59572966
    command = [
        common.NODE_BIN_PATH, lhci_path, 'autorun', config_flag,
        '--max-old-space-size=4096'
    ]
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode == 0:
        print('Lighthouse checks completed successfully.')
        return
    # Failure path: dump both decoded streams before bailing out.
    print('Return code: %s' % proc.returncode)
    print('OUTPUT:')
    print(stdout.decode('utf-8'))
    print('ERROR:')
    print(stderr.decode('utf-8'))
    print('Lighthouse checks failed. More details can be found above.')
    sys.exit(1)
def main(args=None):
    """Runs lighthouse checks and deletes reports.

    Args:
        args: list(str)|None. CLI arguments; defaults to sys.argv.

    Raises:
        Exception. The mode argument is unrecognized (unreachable in
            practice, since argparse restricts the choices).
    """
    parsed_args = _PARSER.parse_args(args=args)
    if parsed_args.mode == LIGHTHOUSE_MODE_ACCESSIBILITY:
        lighthouse_mode = LIGHTHOUSE_MODE_ACCESSIBILITY
        server_mode = SERVER_MODE_DEV
    elif parsed_args.mode == LIGHTHOUSE_MODE_PERFORMANCE:
        lighthouse_mode = LIGHTHOUSE_MODE_PERFORMANCE
        server_mode = SERVER_MODE_PROD
    else:
        # Bug fix: the two string fragments previously joined into
        # 'choosefrom'; the trailing space restores the intended message.
        raise Exception(
            'Invalid parameter passed in: \'%s\', please choose '
            'from \'accessibility\' or \'performance\'' % parsed_args.mode)
    if lighthouse_mode == LIGHTHOUSE_MODE_PERFORMANCE:
        # Performance numbers are only meaningful against the prod build.
        print('Building files in production mode.')
        build.main(args=['--prod_env'])
    elif lighthouse_mode == LIGHTHOUSE_MODE_ACCESSIBILITY:
        build.main(args=[])
        run_webpack_compilation()
    with contextlib.ExitStack() as stack:
        # Bring up all backing services before starting the dev appserver.
        stack.enter_context(servers.managed_redis_server())
        stack.enter_context(servers.managed_elasticsearch_dev_server())
        if constants.EMULATOR_MODE:
            stack.enter_context(servers.managed_firebase_auth_emulator())
            stack.enter_context(servers.managed_cloud_datastore_emulator())
        stack.enter_context(servers.managed_dev_appserver(
            APP_YAML_FILENAMES[server_mode],
            port=GOOGLE_APP_ENGINE_PORT,
            log_level='critical',
            skip_sdk_update_check=True))
        run_lighthouse_puppeteer_script()
        run_lighthouse_checks(lighthouse_mode, parsed_args.shard)
if __name__ == '__main__':
    main()
| brianrodri/oppia | scripts/run_lighthouse_tests.py | Python | apache-2.0 | 7,645 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expression for identity"""
from tvm import te
from .dma import read_compute, write_compute
def identity_compute(
    ifm: te.Tensor,
    lut: te.Tensor,
    ifm_scale: float,
    ifm_zero_point: int,
    ofm_scale: float,
    ofm_zero_point: int,
    activation: str,
) -> te.Tensor:
    """Tensor Expression for the NPU identity operator.

    Parameters
    ----------
    ifm : te.Tensor
        Input Feature Map tensor (IFM).
    lut : te.Tensor
        Look-up table values, used when activation is "LUT", "TANH" or
        "SIGMOID".
    ifm_scale : float
        Quantization scale of the IFM.
    ifm_zero_point : int
        Quantization zero point of the IFM.
    ofm_scale : float
        Quantization scale of the Output Feature Map tensor.
    ofm_zero_point : int
        Quantization zero point of the Output Feature Map tensor.
    activation : str
        Activation function to apply: "NONE", "TANH", "SIGMOID", or "LUT"
        (use a look-up table).

    Returns
    -------
    te.Tensor
        The Output Feature Map tensor.
    """
    uses_lut = activation in ("TANH", "LUT", "SIGMOID")
    id_attrs = {"op": "ethosu_identity", "activation": activation}
    if uses_lut:
        # Keep a handle on the LUT tensor in the attributes so later passes
        # can tell which input tensor is the LUT.
        id_attrs["lut"] = lut
    dmaed_ifm = read_compute(ifm, ifm_zero_point, ifm_scale)
    # Referencing two LUT elements is a trick that pulls the LUT tensor into
    # the TE graph when a LUT-based activation is selected; otherwise the
    # term is a plain zero and the compute is a pure copy.
    lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if uses_lut else 0
    identity = te.compute(
        ifm.shape,
        lambda *indices: (dmaed_ifm(*indices) + lut_expr).astype(ifm.dtype),
        name="ethosu_identity",
        attrs=id_attrs,
    )
    return write_compute(identity, ofm_zero_point, ofm_scale)
| Laurawly/tvm-1 | python/tvm/relay/backend/contrib/ethosu/te/identity.py | Python | apache-2.0 | 2,862 |
from mongoengine import *
class Class(Document):
    """A course/class record stored in MongoDB via mongoengine."""
    # Display name of the class; must be present on every document.
    name = StringField(required = True)
    meta = {
        # Disable inheritance so documents are stored without a _cls field.
        "allow_inheritance": False
    }
| ucrcsedept/galah | galah/db/models/classes.py | Python | apache-2.0 | 145 |
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Option group for the [mks] section of nova.conf.
mks_group = cfg.OptGroup('mks', title='MKS Options')
mks_opts = [
    cfg.StrOpt('mksproxy_base_url',
               default='http://127.0.0.1:6090/',
               help='Location of MKS web console proxy, in the form '
                    '"http://127.0.0.1:6090/"'),
    cfg.BoolOpt('enabled',
                default=False,
                help='Enable MKS related features'),
]
# Aggregate of all MKS options, kept for symmetry with other conf modules.
ALL_MKS_OPTS = mks_opts
def register_opts(conf):
    """Registers the MKS option group and its options on the given
    oslo.config ConfigOpts instance.

    Args:
        conf: The ConfigOpts object to register the options against.
    """
    conf.register_group(mks_group)
    # PEP 8: no spaces around '=' in a keyword argument.
    conf.register_opts(ALL_MKS_OPTS, group=mks_group)
def list_opts():
    # Maps the option group to its options, in the shape expected by
    # oslo.config's option-discovery (oslo.config.opts) entry points.
    return {mks_group: ALL_MKS_OPTS}
| bigswitch/nova | nova/conf/mks.py | Python | apache-2.0 | 1,253 |